blob: 1492cf9d71d3ad2cfc79058857abda27c530c11a [file] [log] [blame]
Jesse Barnes79e53942008-11-07 14:24:08 -08001/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
Daniel Vetter618563e2012-04-01 13:38:50 +020027#include <linux/dmi.h>
Jesse Barnesc1c7af62009-09-10 15:28:03 -070028#include <linux/module.h>
29#include <linux/input.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080030#include <linux/i2c.h>
Shaohua Li7662c8b2009-06-26 11:23:55 +080031#include <linux/kernel.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090032#include <linux/slab.h>
Jesse Barnes9cce37f2010-08-13 15:11:26 -070033#include <linux/vgaarb.h>
Wu Fengguange0dac652011-09-05 14:25:34 +080034#include <drm/drm_edid.h>
David Howells760285e2012-10-02 18:01:07 +010035#include <drm/drmP.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080036#include "intel_drv.h"
Chris Wilson5d723d72016-08-04 16:32:35 +010037#include "intel_frontbuffer.h"
David Howells760285e2012-10-02 18:01:07 +010038#include <drm/i915_drm.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080039#include "i915_drv.h"
Chris Wilson57822dc2017-02-22 11:40:48 +000040#include "i915_gem_clflush.h"
Imre Deakdb18b6a2016-03-24 12:41:40 +020041#include "intel_dsi.h"
Jesse Barnese5510fa2010-07-01 16:48:37 -070042#include "i915_trace.h"
Xi Ruoyao319c1d42015-03-12 20:16:32 +080043#include <drm/drm_atomic.h>
Matt Roperc196e1d2015-01-21 16:35:48 -080044#include <drm/drm_atomic_helper.h>
David Howells760285e2012-10-02 18:01:07 +010045#include <drm/drm_dp_helper.h>
46#include <drm/drm_crtc_helper.h>
Matt Roper465c1202014-05-29 08:06:54 -070047#include <drm/drm_plane_helper.h>
48#include <drm/drm_rect.h>
Keith Packardc0f372b32011-11-16 22:24:52 -080049#include <linux/dma_remapping.h>
Alex Goinsfd8e0582015-11-25 18:43:38 -080050#include <linux/reservation.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080051
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Modifier lists are terminated by DRM_FORMAT_MOD_INVALID. */
static const uint64_t i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Primary plane formats for SKL+ (adds alpha and packed YUV formats) */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* SKL+ modifiers when render compression (CCS) is not supported */
static const uint64_t skl_format_modifiers_noccs[] = {
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* SKL+ modifiers including the CCS (render compression) variants */
static const uint64_t skl_format_modifiers_ccs[] = {
	I915_FORMAT_MOD_Yf_TILED_CCS,
	I915_FORMAT_MOD_Y_TILED_CCS,
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const uint64_t cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
118
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
Damien Lespiaue7457a92013-08-08 22:28:59 +0100150
/*
 * Valid ranges for the DPLL divisors on a given platform/output combo.
 * The p2 post divider switches from p2_slow to p2_fast for dot clocks
 * above dot_limit (see i9xx_select_p2_div()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
Jesse Barnes79e53942008-11-07 14:24:08 -0800161
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	/* Table indexed by the 2-bit HPLL fuse field; values in MHz. */
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	/* MHz -> kHz */
	return vco_freq[hpll_freq] * 1000;
}
175
/*
 * Read a CCK clock-control register and derive the clock rate from
 * @ref_freq and the divider field.  @name is used only for the warning
 * printed if the divider is still changing.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	/* Status field should mirror the requested divider once stable. */
	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	/* rate = ref * 2 / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
194
/*
 * Like vlv_get_cck_clock(), but using the HPLL VCO as the reference;
 * the VCO rate is read (and cached) lazily on first use.
 */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}
204
/* Cache the CZ clock rate in dev_priv; only meaningful on VLV/CHV. */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
215
/*
 * Return the FDI link frequency for this pipe config.  On DDI platforms
 * the port clock (SPLL) is used directly; otherwise the cached FDI PLL
 * frequency is returned.
 */
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else
		return dev_priv->fdi_pll_freq;
}
225
/*
 * Per-platform, per-output-type DPLL divisor limit tables.  The values
 * are hardware constraints; do not tweak without the corresponding PRM.
 */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
490
/*
 * Toggle the FBC queue disable chicken bit in CHICKEN_PIPESL_1 for
 * @pipe (the name suggests display WA #0528 -- TODO confirm against the
 * workaround database).  Skipped entirely on SKL/BXT.
 *
 * NOTE(review): both branches overwrite the whole register rather than
 * doing a read-modify-write -- confirm no other CHICKEN_PIPESL_1 bits
 * need to be preserved here.
 */
static void
skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (enable)
		I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
	else
		I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
}
502
/*
 * Toggle DUPS1/DUPS2 clock-gating disable in CLKGATE_DIS_PSL for @pipe.
 * Skipped entirely on SKL/BXT.
 *
 * NOTE(review): the enable path overwrites the register wholesale while
 * the disable path does a read-modify-write that preserves other bits --
 * confirm the asymmetry is intentional (enable may clear unrelated bits).
 */
static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
517
/* Convenience wrapper: does this crtc state require a full modeset? */
static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
523
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;	/* register value is (actual - 2) */
	clock->p = clock->p1 * clock->p2;
	/* Guard against divide-by-zero from bogus divisors. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
544
Daniel Vetter7429e9d2013-04-20 17:19:46 +0200545static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
546{
547 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
548}
549
/* i9xx-style DPLL: N register value is (actual - 2), hence the n + 2. */
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* Guard against divide-by-zero from bogus divisors. */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
561
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300562static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
Imre Deak589eca62015-06-22 23:35:50 +0300563{
564 clock->m = clock->m1 * clock->m2;
565 clock->p = clock->p1 * clock->p2;
566 if (WARN_ON(clock->n == 0 || clock->p == 0))
Imre Deakdccbea32015-06-22 23:35:51 +0300567 return 0;
Imre Deak589eca62015-06-22 23:35:50 +0300568 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
569 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
Imre Deakdccbea32015-06-22 23:35:51 +0300570
571 return clock->dot / 5;
Imre Deak589eca62015-06-22 23:35:50 +0300572}
573
/*
 * CHV port PLL: m2 carries 22 fractional bits (see the << 22 in the
 * intel_limits_chv table), hence the 64-bit divide with n << 22.
 * Returns the pipe clock: the fast dot clock divided by 5.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	/* Guard against divide-by-zero from bogus divisors. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
586
/* Bail out of the *calling* function with false, noting the reason. */
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is only required on platforms with separate m1/m2 fields. */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/gen9lp limit tables do not define m and p ranges. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
629
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300630static int
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300631i9xx_select_p2_div(const struct intel_limit *limit,
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300632 const struct intel_crtc_state *crtc_state,
633 int target)
Jesse Barnes79e53942008-11-07 14:24:08 -0800634{
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300635 struct drm_device *dev = crtc_state->base.crtc->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -0800636
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +0300637 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Jesse Barnes79e53942008-11-07 14:24:08 -0800638 /*
Daniel Vettera210b022012-11-26 17:22:08 +0100639 * For LVDS just rely on its current settings for dual-channel.
640 * We haven't figured out how to reliably set up different
641 * single/dual channel state, if we even can.
Jesse Barnes79e53942008-11-07 14:24:08 -0800642 */
Daniel Vetter1974cad2012-11-26 17:22:09 +0100643 if (intel_is_dual_link_lvds(dev))
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300644 return limit->p2.p2_fast;
Jesse Barnes79e53942008-11-07 14:24:08 -0800645 else
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300646 return limit->p2.p2_slow;
Jesse Barnes79e53942008-11-07 14:24:08 -0800647 } else {
648 if (target < limit->p2.dot_limit)
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300649 return limit->p2.p2_slow;
Jesse Barnes79e53942008-11-07 14:24:08 -0800650 else
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300651 return limit->p2.p2_fast;
Jesse Barnes79e53942008-11-07 14:24:08 -0800652 }
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300653}
654
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +0200655/*
656 * Returns a set of divisors for the desired target clock with the given
657 * refclk, or FALSE. The returned values represent the clock equation:
658 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
659 *
660 * Target and reference clocks are specified in kHz.
661 *
662 * If match_clock is provided, then best_clock P divider must match the P
663 * divider from @match_clock used for LVDS downclocking.
664 */
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300665static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300666i9xx_find_best_dpll(const struct intel_limit *limit,
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300667 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300668 int target, int refclk, struct dpll *match_clock,
669 struct dpll *best_clock)
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300670{
671 struct drm_device *dev = crtc_state->base.crtc->dev;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300672 struct dpll clock;
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300673 int err = target;
Jesse Barnes79e53942008-11-07 14:24:08 -0800674
Akshay Joshi0206e352011-08-16 15:34:10 -0400675 memset(best_clock, 0, sizeof(*best_clock));
Jesse Barnes79e53942008-11-07 14:24:08 -0800676
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300677 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
678
Zhao Yakui42158662009-11-20 11:24:18 +0800679 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
680 clock.m1++) {
681 for (clock.m2 = limit->m2.min;
682 clock.m2 <= limit->m2.max; clock.m2++) {
Daniel Vetterc0efc382013-06-03 20:56:24 +0200683 if (clock.m2 >= clock.m1)
Zhao Yakui42158662009-11-20 11:24:18 +0800684 break;
685 for (clock.n = limit->n.min;
686 clock.n <= limit->n.max; clock.n++) {
687 for (clock.p1 = limit->p1.min;
688 clock.p1 <= limit->p1.max; clock.p1++) {
Jesse Barnes79e53942008-11-07 14:24:08 -0800689 int this_err;
690
Imre Deakdccbea32015-06-22 23:35:51 +0300691 i9xx_calc_dpll_params(refclk, &clock);
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100692 if (!intel_PLL_is_valid(to_i915(dev),
693 limit,
Chris Wilson1b894b52010-12-14 20:04:54 +0000694 &clock))
Jesse Barnes79e53942008-11-07 14:24:08 -0800695 continue;
Sean Paulcec2f352012-01-10 15:09:36 -0800696 if (match_clock &&
697 clock.p != match_clock->p)
698 continue;
Jesse Barnes79e53942008-11-07 14:24:08 -0800699
700 this_err = abs(clock.dot - target);
701 if (this_err < err) {
702 *best_clock = clock;
703 err = this_err;
704 }
705 }
706 }
707 }
708 }
709
710 return (err != target);
711}
712
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +0200713/*
714 * Returns a set of divisors for the desired target clock with the given
715 * refclk, or FALSE. The returned values represent the clock equation:
716 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
717 *
718 * Target and reference clocks are specified in kHz.
719 *
720 * If match_clock is provided, then best_clock P divider must match the P
721 * divider from @match_clock used for LVDS downclocking.
722 */
Ma Lingd4906092009-03-18 20:13:27 +0800723static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300724pnv_find_best_dpll(const struct intel_limit *limit,
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200725 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300726 int target, int refclk, struct dpll *match_clock,
727 struct dpll *best_clock)
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200728{
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300729 struct drm_device *dev = crtc_state->base.crtc->dev;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300730 struct dpll clock;
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200731 int err = target;
732
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200733 memset(best_clock, 0, sizeof(*best_clock));
734
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300735 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
736
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200737 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
738 clock.m1++) {
739 for (clock.m2 = limit->m2.min;
740 clock.m2 <= limit->m2.max; clock.m2++) {
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200741 for (clock.n = limit->n.min;
742 clock.n <= limit->n.max; clock.n++) {
743 for (clock.p1 = limit->p1.min;
744 clock.p1 <= limit->p1.max; clock.p1++) {
745 int this_err;
746
Imre Deakdccbea32015-06-22 23:35:51 +0300747 pnv_calc_dpll_params(refclk, &clock);
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100748 if (!intel_PLL_is_valid(to_i915(dev),
749 limit,
Jesse Barnes79e53942008-11-07 14:24:08 -0800750 &clock))
751 continue;
752 if (match_clock &&
753 clock.p != match_clock->p)
754 continue;
755
756 this_err = abs(clock.dot - target);
757 if (this_err < err) {
758 *best_clock = clock;
759 err = this_err;
760 }
761 }
762 }
763 }
764 }
765
766 return (err != target);
767}
768
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +0200769/*
770 * Returns a set of divisors for the desired target clock with the given
771 * refclk, or FALSE. The returned values represent the clock equation:
772 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +0200773 *
774 * Target and reference clocks are specified in kHz.
775 *
776 * If match_clock is provided, then best_clock P divider must match the P
777 * divider from @match_clock used for LVDS downclocking.
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +0200778 */
Ma Lingd4906092009-03-18 20:13:27 +0800779static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300780g4x_find_best_dpll(const struct intel_limit *limit,
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200781 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300782 int target, int refclk, struct dpll *match_clock,
783 struct dpll *best_clock)
Ma Lingd4906092009-03-18 20:13:27 +0800784{
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300785 struct drm_device *dev = crtc_state->base.crtc->dev;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300786 struct dpll clock;
Ma Lingd4906092009-03-18 20:13:27 +0800787 int max_n;
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300788 bool found = false;
Adam Jackson6ba770d2010-07-02 16:43:30 -0400789 /* approximately equals target * 0.00585 */
790 int err_most = (target >> 8) + (target >> 9);
Ma Lingd4906092009-03-18 20:13:27 +0800791
792 memset(best_clock, 0, sizeof(*best_clock));
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300793
794 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
795
Ma Lingd4906092009-03-18 20:13:27 +0800796 max_n = limit->n.max;
Gilles Espinassef77f13e2010-03-29 15:41:47 +0200797 /* based on hardware requirement, prefer smaller n to precision */
Ma Lingd4906092009-03-18 20:13:27 +0800798 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
Gilles Espinassef77f13e2010-03-29 15:41:47 +0200799 /* based on hardware requirement, prefere larger m1,m2 */
Ma Lingd4906092009-03-18 20:13:27 +0800800 for (clock.m1 = limit->m1.max;
801 clock.m1 >= limit->m1.min; clock.m1--) {
802 for (clock.m2 = limit->m2.max;
803 clock.m2 >= limit->m2.min; clock.m2--) {
804 for (clock.p1 = limit->p1.max;
805 clock.p1 >= limit->p1.min; clock.p1--) {
806 int this_err;
807
Imre Deakdccbea32015-06-22 23:35:51 +0300808 i9xx_calc_dpll_params(refclk, &clock);
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100809 if (!intel_PLL_is_valid(to_i915(dev),
810 limit,
Chris Wilson1b894b52010-12-14 20:04:54 +0000811 &clock))
Ma Lingd4906092009-03-18 20:13:27 +0800812 continue;
Chris Wilson1b894b52010-12-14 20:04:54 +0000813
814 this_err = abs(clock.dot - target);
Ma Lingd4906092009-03-18 20:13:27 +0800815 if (this_err < err_most) {
816 *best_clock = clock;
817 err_most = this_err;
818 max_n = clock.n;
819 found = true;
820 }
821 }
822 }
823 }
824 }
Zhenyu Wang2c072452009-06-05 15:38:42 +0800825 return found;
826}
Ma Lingd4906092009-03-18 20:13:27 +0800827
Imre Deakd5dd62b2015-03-17 11:40:03 +0200828/*
829 * Check if the calculated PLL configuration is more optimal compared to the
830 * best configuration and error found so far. Return the calculated error.
831 */
832static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300833 const struct dpll *calculated_clock,
834 const struct dpll *best_clock,
Imre Deakd5dd62b2015-03-17 11:40:03 +0200835 unsigned int best_error_ppm,
836 unsigned int *error_ppm)
837{
Imre Deak9ca3ba02015-03-17 11:40:05 +0200838 /*
839 * For CHV ignore the error and consider only the P value.
840 * Prefer a bigger P value based on HW requirements.
841 */
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +0100842 if (IS_CHERRYVIEW(to_i915(dev))) {
Imre Deak9ca3ba02015-03-17 11:40:05 +0200843 *error_ppm = 0;
844
845 return calculated_clock->p > best_clock->p;
846 }
847
Imre Deak24be4e42015-03-17 11:40:04 +0200848 if (WARN_ON_ONCE(!target_freq))
849 return false;
850
Imre Deakd5dd62b2015-03-17 11:40:03 +0200851 *error_ppm = div_u64(1000000ULL *
852 abs(target_freq - calculated_clock->dot),
853 target_freq);
854 /*
855 * Prefer a better P value over a better (smaller) error if the error
856 * is small. Ensure this preference for future configurations too by
857 * setting the error to 0.
858 */
859 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
860 *error_ppm = 0;
861
862 return true;
863 }
864
865 return *error_ppm + 10 < best_error_ppm;
866}
867
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200868/*
869 * Returns a set of divisors for the desired target clock with the given
870 * refclk, or FALSE. The returned values represent the clock equation:
871 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
872 */
Zhenyu Wang2c072452009-06-05 15:38:42 +0800873static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300874vlv_find_best_dpll(const struct intel_limit *limit,
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200875 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300876 int target, int refclk, struct dpll *match_clock,
877 struct dpll *best_clock)
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700878{
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200879 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ander Conselvan de Oliveiraa919ff12014-10-20 13:46:43 +0300880 struct drm_device *dev = crtc->base.dev;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300881 struct dpll clock;
Ville Syrjälä69e4f9002013-09-24 21:26:20 +0300882 unsigned int bestppm = 1000000;
Ville Syrjälä27e639b2013-09-24 21:26:24 +0300883 /* min update 19.2 MHz */
884 int max_n = min(limit->n.max, refclk / 19200);
Ville Syrjälä49e497e2013-09-24 21:26:31 +0300885 bool found = false;
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700886
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300887 target *= 5; /* fast clock */
888
889 memset(best_clock, 0, sizeof(*best_clock));
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700890
891 /* based on hardware requirement, prefer smaller n to precision */
Ville Syrjälä27e639b2013-09-24 21:26:24 +0300892 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
Ville Syrjälä811bbf02013-09-24 21:26:25 +0300893 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
Ville Syrjälä889059d2013-09-24 21:26:27 +0300894 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
Ville Syrjäläc1a9ae42013-09-24 21:26:23 +0300895 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300896 clock.p = clock.p1 * clock.p2;
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700897 /* based on hardware requirement, prefer bigger m1,m2 values */
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300898 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
Imre Deakd5dd62b2015-03-17 11:40:03 +0200899 unsigned int ppm;
Ville Syrjälä69e4f9002013-09-24 21:26:20 +0300900
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300901 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
902 refclk * clock.m1);
Ville Syrjälä43b0ac52013-09-24 21:26:18 +0300903
Imre Deakdccbea32015-06-22 23:35:51 +0300904 vlv_calc_dpll_params(refclk, &clock);
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300905
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100906 if (!intel_PLL_is_valid(to_i915(dev),
907 limit,
Ville Syrjäläf01b7962013-09-27 16:55:49 +0300908 &clock))
Ville Syrjälä43b0ac52013-09-24 21:26:18 +0300909 continue;
910
Imre Deakd5dd62b2015-03-17 11:40:03 +0200911 if (!vlv_PLL_is_optimal(dev, target,
912 &clock,
913 best_clock,
914 bestppm, &ppm))
915 continue;
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300916
Imre Deakd5dd62b2015-03-17 11:40:03 +0200917 *best_clock = clock;
918 bestppm = ppm;
919 found = true;
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700920 }
921 }
922 }
923 }
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700924
Ville Syrjälä49e497e2013-09-24 21:26:31 +0300925 return found;
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700926}
Keith Packarda4fc5ed2009-04-07 16:16:42 -0700927
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200928/*
929 * Returns a set of divisors for the desired target clock with the given
930 * refclk, or FALSE. The returned values represent the clock equation:
931 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
932 */
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300933static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300934chv_find_best_dpll(const struct intel_limit *limit,
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200935 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300936 int target, int refclk, struct dpll *match_clock,
937 struct dpll *best_clock)
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300938{
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200939 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ander Conselvan de Oliveiraa919ff12014-10-20 13:46:43 +0300940 struct drm_device *dev = crtc->base.dev;
Imre Deak9ca3ba02015-03-17 11:40:05 +0200941 unsigned int best_error_ppm;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300942 struct dpll clock;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300943 uint64_t m2;
944 int found = false;
945
946 memset(best_clock, 0, sizeof(*best_clock));
Imre Deak9ca3ba02015-03-17 11:40:05 +0200947 best_error_ppm = 1000000;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300948
949 /*
950 * Based on hardware doc, the n always set to 1, and m1 always
951 * set to 2. If requires to support 200Mhz refclk, we need to
952 * revisit this because n may not 1 anymore.
953 */
954 clock.n = 1, clock.m1 = 2;
955 target *= 5; /* fast clock */
956
957 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
958 for (clock.p2 = limit->p2.p2_fast;
959 clock.p2 >= limit->p2.p2_slow;
960 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
Imre Deak9ca3ba02015-03-17 11:40:05 +0200961 unsigned int error_ppm;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300962
963 clock.p = clock.p1 * clock.p2;
964
965 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
966 clock.n) << 22, refclk * clock.m1);
967
968 if (m2 > INT_MAX/clock.m1)
969 continue;
970
971 clock.m2 = m2;
972
Imre Deakdccbea32015-06-22 23:35:51 +0300973 chv_calc_dpll_params(refclk, &clock);
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300974
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100975 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300976 continue;
977
Imre Deak9ca3ba02015-03-17 11:40:05 +0200978 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
979 best_error_ppm, &error_ppm))
980 continue;
981
982 *best_clock = clock;
983 best_error_ppm = error_ppm;
984 found = true;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300985 }
986 }
987
988 return found;
989}
990
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200991bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300992 struct dpll *best_clock)
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200993{
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200994 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300995 const struct intel_limit *limit = &intel_limits_bxt;
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200996
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200997 return chv_find_best_dpll(limit, crtc_state,
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200998 target_clock, refclk, NULL, best_clock);
999}
1000
Ville Syrjälä525b9312016-10-31 22:37:02 +02001001bool intel_crtc_active(struct intel_crtc *crtc)
Ville Syrjälä20ddf662013-09-04 18:25:25 +03001002{
Ville Syrjälä20ddf662013-09-04 18:25:25 +03001003 /* Be paranoid as we can arrive here with only partial
1004 * state retrieved from the hardware during setup.
1005 *
Damien Lespiau241bfc32013-09-25 16:45:37 +01001006 * We can ditch the adjusted_mode.crtc_clock check as soon
Ville Syrjälä20ddf662013-09-04 18:25:25 +03001007 * as Haswell has gained clock readout/fastboot support.
1008 *
Dave Airlie66e514c2014-04-03 07:51:54 +10001009 * We can ditch the crtc->primary->fb check as soon as we can
Ville Syrjälä20ddf662013-09-04 18:25:25 +03001010 * properly reconstruct framebuffers.
Matt Roperc3d1f432015-03-09 10:19:23 -07001011 *
1012 * FIXME: The intel_crtc->active here should be switched to
1013 * crtc->state->active once we have proper CRTC states wired up
1014 * for atomic.
Ville Syrjälä20ddf662013-09-04 18:25:25 +03001015 */
Ville Syrjälä525b9312016-10-31 22:37:02 +02001016 return crtc->active && crtc->base.primary->state->fb &&
1017 crtc->config->base.adjusted_mode.crtc_clock;
Ville Syrjälä20ddf662013-09-04 18:25:25 +03001018}
1019
Paulo Zanonia5c961d2012-10-24 15:59:34 -02001020enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1021 enum pipe pipe)
1022{
Ville Syrjälä98187832016-10-31 22:37:10 +02001023 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Paulo Zanonia5c961d2012-10-24 15:59:34 -02001024
Ville Syrjäläe2af48c2016-10-31 22:37:05 +02001025 return crtc->config->cpu_transcoder;
Paulo Zanonia5c961d2012-10-24 15:59:34 -02001026}
1027
Ville Syrjälä8fedd642017-11-29 17:37:30 +02001028static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1029 enum pipe pipe)
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +03001030{
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001031 i915_reg_t reg = PIPEDSL(pipe);
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +03001032 u32 line1, line2;
1033 u32 line_mask;
1034
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001035 if (IS_GEN2(dev_priv))
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +03001036 line_mask = DSL_LINEMASK_GEN2;
1037 else
1038 line_mask = DSL_LINEMASK_GEN3;
1039
1040 line1 = I915_READ(reg) & line_mask;
Daniel Vetter6adfb1e2015-07-07 09:10:40 +02001041 msleep(5);
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +03001042 line2 = I915_READ(reg) & line_mask;
1043
Ville Syrjälä8fedd642017-11-29 17:37:30 +02001044 return line1 != line2;
1045}
1046
/* Poll (up to 100 ms) until the pipe's scanline counter is moving
 * (@state == true) or stopped (@state == false); log an error on timeout. */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1057
/* Block until the pipe's scanline counter stops advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1062
/* Block until the pipe's scanline counter starts advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1067
Ville Syrjälä4972f702017-11-29 17:37:32 +02001068static void
1069intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
Jesse Barnes9d0498a2010-08-18 13:20:54 -07001070{
Ville Syrjälä4972f702017-11-29 17:37:32 +02001071 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001072 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnes9d0498a2010-08-18 13:20:54 -07001073
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001074 if (INTEL_GEN(dev_priv) >= 4) {
Ville Syrjälä4972f702017-11-29 17:37:32 +02001075 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001076 i915_reg_t reg = PIPECONF(cpu_transcoder);
Jesse Barnes9d0498a2010-08-18 13:20:54 -07001077
Keith Packardab7ad7f2010-10-03 00:33:06 -07001078 /* Wait for the Pipe State to go off */
Chris Wilsonb8511f52016-06-30 15:32:53 +01001079 if (intel_wait_for_register(dev_priv,
1080 reg, I965_PIPECONF_ACTIVE, 0,
1081 100))
Daniel Vetter284637d2012-07-09 09:51:57 +02001082 WARN(1, "pipe_off wait timed out\n");
Keith Packardab7ad7f2010-10-03 00:33:06 -07001083 } else {
Ville Syrjälä8fedd642017-11-29 17:37:30 +02001084 intel_wait_for_pipe_scanline_stopped(crtc);
Keith Packardab7ad7f2010-10-03 00:33:06 -07001085 }
Jesse Barnes79e53942008-11-07 14:24:08 -08001086}
1087
Jesse Barnesb24e7172011-01-04 15:09:30 -08001088/* Only for pre-ILK configs */
Daniel Vetter55607e82013-06-16 21:42:39 +02001089void assert_pll(struct drm_i915_private *dev_priv,
1090 enum pipe pipe, bool state)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001091{
Jesse Barnesb24e7172011-01-04 15:09:30 -08001092 u32 val;
1093 bool cur_state;
1094
Ville Syrjälä649636e2015-09-22 19:50:01 +03001095 val = I915_READ(DPLL(pipe));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001096 cur_state = !!(val & DPLL_VCO_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001097 I915_STATE_WARN(cur_state != state,
Jesse Barnesb24e7172011-01-04 15:09:30 -08001098 "PLL state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001099 onoff(state), onoff(cur_state));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001100}
Jesse Barnesb24e7172011-01-04 15:09:30 -08001101
Jani Nikula23538ef2013-08-27 15:12:22 +03001102/* XXX: the dsi pll is shared between MIPI DSI ports */
Lionel Landwerlin8563b1e2016-03-16 10:57:14 +00001103void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
Jani Nikula23538ef2013-08-27 15:12:22 +03001104{
1105 u32 val;
1106 bool cur_state;
1107
Ville Syrjäläa5805162015-05-26 20:42:30 +03001108 mutex_lock(&dev_priv->sb_lock);
Jani Nikula23538ef2013-08-27 15:12:22 +03001109 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
Ville Syrjäläa5805162015-05-26 20:42:30 +03001110 mutex_unlock(&dev_priv->sb_lock);
Jani Nikula23538ef2013-08-27 15:12:22 +03001111
1112 cur_state = val & DSI_PLL_VCO_EN;
Rob Clarke2c719b2014-12-15 13:56:32 -05001113 I915_STATE_WARN(cur_state != state,
Jani Nikula23538ef2013-08-27 15:12:22 +03001114 "DSI PLL state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001115 onoff(state), onoff(cur_state));
Jani Nikula23538ef2013-08-27 15:12:22 +03001116}
Jani Nikula23538ef2013-08-27 15:12:22 +03001117
Jesse Barnes040484a2011-01-03 12:14:26 -08001118static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1119 enum pipe pipe, bool state)
1120{
Jesse Barnes040484a2011-01-03 12:14:26 -08001121 bool cur_state;
Paulo Zanoniad80a812012-10-24 16:06:19 -02001122 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1123 pipe);
Jesse Barnes040484a2011-01-03 12:14:26 -08001124
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001125 if (HAS_DDI(dev_priv)) {
Paulo Zanoniaffa9352012-11-23 15:30:39 -02001126 /* DDI does not have a specific FDI_TX register */
Ville Syrjälä649636e2015-09-22 19:50:01 +03001127 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
Paulo Zanoniad80a812012-10-24 16:06:19 -02001128 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001129 } else {
Ville Syrjälä649636e2015-09-22 19:50:01 +03001130 u32 val = I915_READ(FDI_TX_CTL(pipe));
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001131 cur_state = !!(val & FDI_TX_ENABLE);
1132 }
Rob Clarke2c719b2014-12-15 13:56:32 -05001133 I915_STATE_WARN(cur_state != state,
Jesse Barnes040484a2011-01-03 12:14:26 -08001134 "FDI TX state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001135 onoff(state), onoff(cur_state));
Jesse Barnes040484a2011-01-03 12:14:26 -08001136}
1137#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1138#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1139
1140static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1141 enum pipe pipe, bool state)
1142{
Jesse Barnes040484a2011-01-03 12:14:26 -08001143 u32 val;
1144 bool cur_state;
1145
Ville Syrjälä649636e2015-09-22 19:50:01 +03001146 val = I915_READ(FDI_RX_CTL(pipe));
Paulo Zanonid63fa0d2012-11-20 13:27:35 -02001147 cur_state = !!(val & FDI_RX_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001148 I915_STATE_WARN(cur_state != state,
Jesse Barnes040484a2011-01-03 12:14:26 -08001149 "FDI RX state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001150 onoff(state), onoff(cur_state));
Jesse Barnes040484a2011-01-03 12:14:26 -08001151}
1152#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1153#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1154
1155static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1156 enum pipe pipe)
1157{
Jesse Barnes040484a2011-01-03 12:14:26 -08001158 u32 val;
1159
1160 /* ILK FDI PLL is always enabled */
Tvrtko Ursulin7e22dbb2016-05-10 10:57:06 +01001161 if (IS_GEN5(dev_priv))
Jesse Barnes040484a2011-01-03 12:14:26 -08001162 return;
1163
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001164 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001165 if (HAS_DDI(dev_priv))
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001166 return;
1167
Ville Syrjälä649636e2015-09-22 19:50:01 +03001168 val = I915_READ(FDI_TX_CTL(pipe));
Rob Clarke2c719b2014-12-15 13:56:32 -05001169 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
Jesse Barnes040484a2011-01-03 12:14:26 -08001170}
1171
Daniel Vetter55607e82013-06-16 21:42:39 +02001172void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1173 enum pipe pipe, bool state)
Jesse Barnes040484a2011-01-03 12:14:26 -08001174{
Jesse Barnes040484a2011-01-03 12:14:26 -08001175 u32 val;
Daniel Vetter55607e82013-06-16 21:42:39 +02001176 bool cur_state;
Jesse Barnes040484a2011-01-03 12:14:26 -08001177
Ville Syrjälä649636e2015-09-22 19:50:01 +03001178 val = I915_READ(FDI_RX_CTL(pipe));
Daniel Vetter55607e82013-06-16 21:42:39 +02001179 cur_state = !!(val & FDI_RX_PLL_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001180 I915_STATE_WARN(cur_state != state,
Daniel Vetter55607e82013-06-16 21:42:39 +02001181 "FDI RX PLL assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001182 onoff(state), onoff(cur_state));
Jesse Barnes040484a2011-01-03 12:14:26 -08001183}
1184
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01001185void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
Jesse Barnesea0760c2011-01-04 15:09:32 -08001186{
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001187 i915_reg_t pp_reg;
Jesse Barnesea0760c2011-01-04 15:09:32 -08001188 u32 val;
1189 enum pipe panel_pipe = PIPE_A;
Thomas Jarosch0de3b482011-08-25 15:37:45 +02001190 bool locked = true;
Jesse Barnesea0760c2011-01-04 15:09:32 -08001191
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01001192 if (WARN_ON(HAS_DDI(dev_priv)))
Jani Nikulabedd4db2014-08-22 15:04:13 +03001193 return;
1194
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01001195 if (HAS_PCH_SPLIT(dev_priv)) {
Jani Nikulabedd4db2014-08-22 15:04:13 +03001196 u32 port_sel;
1197
Imre Deak44cb7342016-08-10 14:07:29 +03001198 pp_reg = PP_CONTROL(0);
1199 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
Jani Nikulabedd4db2014-08-22 15:04:13 +03001200
1201 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1202 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1203 panel_pipe = PIPE_B;
1204 /* XXX: else fix for eDP */
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01001205 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Jani Nikulabedd4db2014-08-22 15:04:13 +03001206 /* presumably write lock depends on pipe, not port select */
Imre Deak44cb7342016-08-10 14:07:29 +03001207 pp_reg = PP_CONTROL(pipe);
Jani Nikulabedd4db2014-08-22 15:04:13 +03001208 panel_pipe = pipe;
Jesse Barnesea0760c2011-01-04 15:09:32 -08001209 } else {
Imre Deak44cb7342016-08-10 14:07:29 +03001210 pp_reg = PP_CONTROL(0);
Jani Nikulabedd4db2014-08-22 15:04:13 +03001211 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1212 panel_pipe = PIPE_B;
Jesse Barnesea0760c2011-01-04 15:09:32 -08001213 }
1214
1215 val = I915_READ(pp_reg);
1216 if (!(val & PANEL_POWER_ON) ||
Jani Nikulaec49ba22014-08-21 15:06:25 +03001217 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
Jesse Barnesea0760c2011-01-04 15:09:32 -08001218 locked = false;
1219
Rob Clarke2c719b2014-12-15 13:56:32 -05001220 I915_STATE_WARN(panel_pipe == pipe && locked,
Jesse Barnesea0760c2011-01-04 15:09:32 -08001221 "panel assertion failure, pipe %c regs locked\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001222 pipe_name(pipe));
Jesse Barnesea0760c2011-01-04 15:09:32 -08001223}
1224
/*
 * Assert that the hardware enable state of @pipe matches @state.
 *
 * The pipe's enable bit lives in PIPECONF of the CPU transcoder attached
 * to it, so look that transcoder up first.  If the transcoder's power
 * domain cannot be grabbed (it is powered down), the register is not
 * readable and the pipe is treated as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		/* Drop the reference taken by _get_if_enabled() above. */
		intel_display_power_put(dev_priv, power_domain);
	} else {
		/* Power domain is off -> the pipe cannot be enabled. */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1251
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001252static void assert_plane(struct intel_plane *plane, bool state)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001253{
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001254 bool cur_state = plane->get_hw_state(plane);
Jesse Barnesb24e7172011-01-04 15:09:30 -08001255
Rob Clarke2c719b2014-12-15 13:56:32 -05001256 I915_STATE_WARN(cur_state != state,
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001257 "%s assertion failure (expected %s, current %s)\n",
1258 plane->base.name, onoff(state), onoff(cur_state));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001259}
1260
/* Convenience wrappers around assert_plane() for the two states. */
#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
Chris Wilson931872f2012-01-16 23:01:13 +00001263
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001264static void assert_planes_disabled(struct intel_crtc *crtc)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001265{
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001266 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1267 struct intel_plane *plane;
Jesse Barnesb24e7172011-01-04 15:09:30 -08001268
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001269 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1270 assert_plane_disabled(plane);
Jesse Barnes19332d72013-03-28 09:55:38 -07001271}
1272
/*
 * Assert that vblank interrupts are disabled on @crtc.
 *
 * drm_crtc_vblank_get() returning 0 means a vblank reference could be
 * taken, i.e. vblanks were not disabled -- that is the failure case.
 * The reference taken by the check must then be dropped again.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1278
Ander Conselvan de Oliveira7abd4b32016-03-08 17:46:15 +02001279void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1280 enum pipe pipe)
Jesse Barnes92f25842011-01-04 15:09:34 -08001281{
Jesse Barnes92f25842011-01-04 15:09:34 -08001282 u32 val;
1283 bool enabled;
1284
Ville Syrjälä649636e2015-09-22 19:50:01 +03001285 val = I915_READ(PCH_TRANSCONF(pipe));
Jesse Barnes92f25842011-01-04 15:09:34 -08001286 enabled = !!(val & TRANS_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001287 I915_STATE_WARN(enabled,
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001288 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1289 pipe_name(pipe));
Jesse Barnes92f25842011-01-04 15:09:34 -08001290}
1291
Keith Packard4e634382011-08-06 10:39:45 -07001292static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1293 enum pipe pipe, u32 port_sel, u32 val)
Keith Packardf0575e92011-07-25 22:12:43 -07001294{
1295 if ((val & DP_PORT_EN) == 0)
1296 return false;
1297
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001298 if (HAS_PCH_CPT(dev_priv)) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001299 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
Keith Packardf0575e92011-07-25 22:12:43 -07001300 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1301 return false;
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001302 } else if (IS_CHERRYVIEW(dev_priv)) {
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001303 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1304 return false;
Keith Packardf0575e92011-07-25 22:12:43 -07001305 } else {
1306 if ((val & DP_PIPE_MASK) != (pipe << 30))
1307 return false;
1308 }
1309 return true;
1310}
1311
Keith Packard1519b992011-08-06 10:35:34 -07001312static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1313 enum pipe pipe, u32 val)
1314{
Paulo Zanonidc0fa712013-02-19 16:21:46 -03001315 if ((val & SDVO_ENABLE) == 0)
Keith Packard1519b992011-08-06 10:35:34 -07001316 return false;
1317
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001318 if (HAS_PCH_CPT(dev_priv)) {
Paulo Zanonidc0fa712013-02-19 16:21:46 -03001319 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
Keith Packard1519b992011-08-06 10:35:34 -07001320 return false;
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001321 } else if (IS_CHERRYVIEW(dev_priv)) {
Chon Ming Lee44f37d12014-04-09 13:28:21 +03001322 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1323 return false;
Keith Packard1519b992011-08-06 10:35:34 -07001324 } else {
Paulo Zanonidc0fa712013-02-19 16:21:46 -03001325 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
Keith Packard1519b992011-08-06 10:35:34 -07001326 return false;
1327 }
1328 return true;
1329}
1330
1331static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1332 enum pipe pipe, u32 val)
1333{
1334 if ((val & LVDS_PORT_EN) == 0)
1335 return false;
1336
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001337 if (HAS_PCH_CPT(dev_priv)) {
Keith Packard1519b992011-08-06 10:35:34 -07001338 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1339 return false;
1340 } else {
1341 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1342 return false;
1343 }
1344 return true;
1345}
1346
1347static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1348 enum pipe pipe, u32 val)
1349{
1350 if ((val & ADPA_DAC_ENABLE) == 0)
1351 return false;
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001352 if (HAS_PCH_CPT(dev_priv)) {
Keith Packard1519b992011-08-06 10:35:34 -07001353 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1354 return false;
1355 } else {
1356 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1357 return false;
1358 }
1359 return true;
1360}
1361
Jesse Barnes291906f2011-02-02 12:28:03 -08001362static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001363 enum pipe pipe, i915_reg_t reg,
1364 u32 port_sel)
Jesse Barnes291906f2011-02-02 12:28:03 -08001365{
Jesse Barnes47a05ec2011-02-07 13:46:40 -08001366 u32 val = I915_READ(reg);
Rob Clarke2c719b2014-12-15 13:56:32 -05001367 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
Jesse Barnes291906f2011-02-02 12:28:03 -08001368 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001369 i915_mmio_reg_offset(reg), pipe_name(pipe));
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001370
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001371 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
Daniel Vetter75c5da22012-09-10 21:58:29 +02001372 && (val & DP_PIPEB_SELECT),
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001373 "IBX PCH dp port still using transcoder B\n");
Jesse Barnes291906f2011-02-02 12:28:03 -08001374}
1375
1376static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001377 enum pipe pipe, i915_reg_t reg)
Jesse Barnes291906f2011-02-02 12:28:03 -08001378{
Jesse Barnes47a05ec2011-02-07 13:46:40 -08001379 u32 val = I915_READ(reg);
Rob Clarke2c719b2014-12-15 13:56:32 -05001380 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
Adam Jackson23c99e72011-10-07 14:38:43 -04001381 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001382 i915_mmio_reg_offset(reg), pipe_name(pipe));
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001383
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001384 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
Daniel Vetter75c5da22012-09-10 21:58:29 +02001385 && (val & SDVO_PIPE_B_SELECT),
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001386 "IBX PCH hdmi port still using transcoder B\n");
Jesse Barnes291906f2011-02-02 12:28:03 -08001387}
1388
1389static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1390 enum pipe pipe)
1391{
Jesse Barnes291906f2011-02-02 12:28:03 -08001392 u32 val;
Jesse Barnes291906f2011-02-02 12:28:03 -08001393
Keith Packardf0575e92011-07-25 22:12:43 -07001394 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1395 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1396 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
Jesse Barnes291906f2011-02-02 12:28:03 -08001397
Ville Syrjälä649636e2015-09-22 19:50:01 +03001398 val = I915_READ(PCH_ADPA);
Rob Clarke2c719b2014-12-15 13:56:32 -05001399 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
Jesse Barnes291906f2011-02-02 12:28:03 -08001400 "PCH VGA enabled on transcoder %c, should be disabled\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001401 pipe_name(pipe));
Jesse Barnes291906f2011-02-02 12:28:03 -08001402
Ville Syrjälä649636e2015-09-22 19:50:01 +03001403 val = I915_READ(PCH_LVDS);
Rob Clarke2c719b2014-12-15 13:56:32 -05001404 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
Jesse Barnes291906f2011-02-02 12:28:03 -08001405 "PCH LVDS enabled on transcoder %c, should be disabled\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001406 pipe_name(pipe));
Jesse Barnes291906f2011-02-02 12:28:03 -08001407
Paulo Zanonie2debe92013-02-18 19:00:27 -03001408 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1409 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1410 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
Jesse Barnes291906f2011-02-02 12:28:03 -08001411}
1412
/*
 * Program and lock the DPLL for a VLV pipe.
 *
 * Writes the precomputed DPLL value, waits, then polls the lock bit.
 * Preconditions (pipe disabled, panel power registers unlocked) are
 * checked by the caller, vlv_enable_pll().
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150); /* settle time before polling for lock */

	/* Poll DPLL_LOCK_VLV for up to 1 ms. */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1430
/*
 * Enable the DPLL for a VLV pipe.
 *
 * The PLL registers are writable only while the panel power sequencer
 * registers are unlocked, hence the asserts.  The PLL proper is only
 * powered up when the computed state has DPLL_VCO_ENABLE set; DPLL_MD
 * is written unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1448
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03001449
/*
 * Program and lock the DPLL for a CHV pipe.
 *
 * Sequence: enable the 10 bit ("dclkp") clock to the display controller
 * over the sideband (DPIO) interface first, observe the required >100ns
 * gap, then write the DPLL register and poll for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	/* DPIO accesses are serialized by the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked (up to 1 ms) */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1481
/*
 * Enable the DPLL for a CHV pipe.
 *
 * As on VLV, the PLL registers are protected by the panel power
 * sequencer.  The PLL is only powered up when the computed state has
 * DPLL_VCO_ENABLE set.  DPLL_MD needs a chicken-bit workaround for
 * pipes other than A (see WaPixelRepeatModeFixForC0 below).
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* Cache the value since DPLL_MD(pipe) can't be read back. */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1518
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001519static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03001520{
1521 struct intel_crtc *crtc;
1522 int count = 0;
1523
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001524 for_each_intel_crtc(&dev_priv->drm, crtc) {
Maarten Lankhorst3538b9d2015-06-01 12:50:10 +02001525 count += crtc->base.state->active &&
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03001526 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1527 }
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03001528
1529 return count;
1530}
1531
/*
 * Enable the DPLL for @crtc on pre-ILK (i9xx-class) hardware.
 *
 * The write/delay ordering below follows hardware requirements and
 * must not be rearranged: VGA mode first, then the real DPLL value,
 * then (gen4+) DPLL_MD or (older) a second DPLL write for the pixel
 * multiplier, then the traditional triple rewrite.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1591
/*
 * Disable the DPLL for @crtc on pre-ILK hardware, leaving only VGA
 * mode disable set.  On 830 the PLL is left running ("we keep both
 * pipes enabled on 830"), but the DVO 2x clock is dropped from both
 * PLLs once no active DVO pipes remain.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1617
/*
 * Disable the DPLL for a VLV pipe.
 *
 * The PLL is not turned fully off: the reference clock stays enabled,
 * VGA mode stays disabled, and for pipes other than A the integrated
 * CRI clock bit is kept set.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
	      DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1633
/*
 * Disable the DPLL for a CHV pipe: write the mostly-off DPLL value,
 * then turn off the 10 bit ("dclkp") clock over the sideband -- the
 * reverse of the sequence in _chv_enable_pll().
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
	      DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* DPIO accesses are serialized by the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1659
/*
 * Wait (up to 1 s) for a VLV/CHV digital port to report ready.
 *
 * @expected_mask is given in port B's bit positions; port C's ready
 * bits sit four positions higher in the same register, hence the
 * shift below.  Ports B/C are reported in DPLL(0), port D in
 * DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		/* No other port is ever passed in here. */
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1692
/*
 * Enable the PCH transcoder for @pipe on IBX/CPT.
 *
 * The shared DPLL and the pipe's FDI TX/RX must already be running
 * since the transcoder is fed from them.  On IBX the transcoder's BPC
 * and interlace fields must be programmed to match PIPECONF before
 * setting the enable bit.
 */
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Mirror PIPECONF's interlace mode; IBX SDVO needs the legacy mode. */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	/* Poll for the enable to take effect (up to 100 ms). */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1750
/*
 * Enable the single LPT PCH transcoder, fed from @cpu_transcoder via
 * the pipe A FDI link.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Mirror the CPU transcoder's interlace mode on the PCH side. */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	/* Poll for the enable to take effect (up to 100 ms). */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1782
/*
 * Disable the PCH transcoder for @pipe on IBX/CPT.
 *
 * FDI and all PCH ports must already be off, since they depend on the
 * transcoder.  On CPT the timing-override chicken bit set at enable
 * time is cleared again afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1814
/*
 * Disable the single LPT PCH transcoder and clear the timing-override
 * chicken bit that lpt_enable_pch_transcoder() set.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1833
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001834enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
Ville Syrjälä65f21302016-10-14 20:02:53 +03001835{
1836 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1837
Ville Syrjälä65f21302016-10-14 20:02:53 +03001838 if (HAS_PCH_LPT(dev_priv))
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001839 return PIPE_A;
Ville Syrjälä65f21302016-10-14 20:02:53 +03001840 else
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001841 return crtc->pipe;
Ville Syrjälä65f21302016-10-14 20:02:53 +03001842}
1843
/*
 * Enable the CPU pipe described by @new_crtc_state.
 *
 * Preconditions (asserted below): all planes on the pipe are disabled,
 * and the pipe's clock source is running — the DPLL/DSI PLL on GMCH
 * platforms, or the FDI RX/TX PLLs when a PCH encoder is driven.
 * If the pipe is already enabled this is a no-op (expected only on 830,
 * where both pipes are kept enabled).
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (dev_priv->drm.max_vblank_count == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1899
/*
 * Disable the CPU pipe described by @old_crtc_state.
 *
 * Planes must already be off (asserted below). On 830 the pipe is left
 * enabled (only the double-wide bit is cleared when set), since that
 * platform keeps both pipes running; the pipe-off wait is therefore
 * skipped whenever PIPECONF_ENABLE remains set in the written value.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* already disabled — nothing to do */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1937
/* Size of one GTT tile in bytes: 2 KiB on gen2, 4 KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
1942
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001943static unsigned int
1944intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001945{
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001946 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1947 unsigned int cpp = fb->format->cpp[plane];
1948
1949 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07001950 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001951 return cpp;
1952 case I915_FORMAT_MOD_X_TILED:
1953 if (IS_GEN2(dev_priv))
1954 return 128;
1955 else
1956 return 512;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001957 case I915_FORMAT_MOD_Y_TILED_CCS:
1958 if (plane == 1)
1959 return 128;
1960 /* fall through */
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001961 case I915_FORMAT_MOD_Y_TILED:
1962 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
1963 return 128;
1964 else
1965 return 512;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001966 case I915_FORMAT_MOD_Yf_TILED_CCS:
1967 if (plane == 1)
1968 return 128;
1969 /* fall through */
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001970 case I915_FORMAT_MOD_Yf_TILED:
1971 switch (cpp) {
1972 case 1:
1973 return 64;
1974 case 2:
1975 case 4:
1976 return 128;
1977 case 8:
1978 case 16:
1979 return 256;
1980 default:
1981 MISSING_CASE(cpp);
1982 return cpp;
1983 }
1984 break;
1985 default:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001986 MISSING_CASE(fb->modifier);
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001987 return cpp;
1988 }
1989}
1990
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001991static unsigned int
1992intel_tile_height(const struct drm_framebuffer *fb, int plane)
Jesse Barnesa57ce0b2014-02-07 12:10:35 -08001993{
Ben Widawsky2f075562017-03-24 14:29:48 -07001994 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
Ville Syrjälä832be822016-01-12 21:08:33 +02001995 return 1;
1996 else
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001997 return intel_tile_size(to_i915(fb->dev)) /
1998 intel_tile_width_bytes(fb, plane);
Tvrtko Ursulin6761dd32015-03-23 11:10:32 +00001999}
2000
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002001/* Return the tile dimensions in pixel units */
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002002static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002003 unsigned int *tile_width,
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002004 unsigned int *tile_height)
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002005{
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002006 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
2007 unsigned int cpp = fb->format->cpp[plane];
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002008
2009 *tile_width = tile_width_bytes / cpp;
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002010 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002011}
2012
/* Round @height up to a whole number of tile rows for @plane of @fb. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, plane));
}
2021
Ville Syrjälä1663b9d2016-02-15 22:54:45 +02002022unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2023{
2024 unsigned int size = 0;
2025 int i;
2026
2027 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2028 size += rot_info->plane[i].width * rot_info->plane[i].height;
2029
2030 return size;
2031}
2032
Daniel Vetter75c82a52015-10-14 16:51:04 +02002033static void
Ville Syrjälä3465c582016-02-15 22:54:43 +02002034intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2035 const struct drm_framebuffer *fb,
2036 unsigned int rotation)
Tvrtko Ursulinf64b98c2015-03-23 11:10:35 +00002037{
Chris Wilson7b92c042017-01-14 00:28:26 +00002038 view->type = I915_GGTT_VIEW_NORMAL;
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002039 if (drm_rotation_90_or_270(rotation)) {
Chris Wilson7b92c042017-01-14 00:28:26 +00002040 view->type = I915_GGTT_VIEW_ROTATED;
Chris Wilson8bab11932017-01-14 00:28:25 +00002041 view->rotated = to_intel_framebuffer(fb)->rot_info;
Ville Syrjälä2d7a2152016-02-15 22:54:47 +02002042 }
2043}
2044
/* GGTT alignment required for cursor surfaces, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2056
/* GGTT alignment required for linear scanout surfaces, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2069
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002070static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2071 int plane)
Ville Syrjälä603525d2016-01-12 21:08:37 +02002072{
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002073 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2074
Ville Syrjäläb90c1ee2017-03-07 21:42:07 +02002075 /* AUX_DIST needs only 4K alignment */
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002076 if (plane == 1)
Ville Syrjäläb90c1ee2017-03-07 21:42:07 +02002077 return 4096;
2078
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002079 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07002080 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjälä603525d2016-01-12 21:08:37 +02002081 return intel_linear_alignment(dev_priv);
2082 case I915_FORMAT_MOD_X_TILED:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002083 if (INTEL_GEN(dev_priv) >= 9)
Ville Syrjälä603525d2016-01-12 21:08:37 +02002084 return 256 * 1024;
2085 return 0;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002086 case I915_FORMAT_MOD_Y_TILED_CCS:
2087 case I915_FORMAT_MOD_Yf_TILED_CCS:
Ville Syrjälä603525d2016-01-12 21:08:37 +02002088 case I915_FORMAT_MOD_Y_TILED:
2089 case I915_FORMAT_MOD_Yf_TILED:
2090 return 1 * 1024 * 1024;
2091 default:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002092 MISSING_CASE(fb->modifier);
Ville Syrjälä603525d2016-01-12 21:08:37 +02002093 return 0;
2094 }
2095}
2096
Ville Syrjäläf7a02ad2018-02-21 20:48:07 +02002097static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2098{
2099 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2100 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2101
Ville Syrjälä32febd92018-02-21 18:02:33 +02002102 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
Ville Syrjäläf7a02ad2018-02-21 20:48:07 +02002103}
2104
/*
 * Pin @fb's backing object into the GGTT for scanout and, when
 * @uses_fence allows and the vma is mappable+fenceable, install a GTT
 * fence (reported via PLANE_HAS_FENCE in @out_flags).
 *
 * Caller must hold struct_mutex (asserted). Returns a referenced vma
 * on success or an ERR_PTR; a fence failure is only fatal on pre-gen4
 * (which cannot scan out tiled buffers without one).
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Alignment of the main surface (plane 0) */
	alignment = intel_surf_alignment(fb, 0);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	/* Balanced by the atomic_dec at the err label below */
	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, &view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 must have the fence — undo the pin */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Hand a vma reference back to the caller */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv);
	return vma;
}
2198
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence if @flags says
 * one was installed (PLANE_HAS_FENCE), unpin the display-plane vma and
 * drop the reference taken at pin time. Caller must hold struct_mutex
 * (asserted via lockdep).
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
2208
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002209static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
2210 unsigned int rotation)
2211{
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002212 if (drm_rotation_90_or_270(rotation))
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002213 return to_intel_framebuffer(fb)->rotated[plane].pitch;
2214 else
2215 return fb->pitches[plane];
2216}
2217
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002218/*
Ville Syrjälä6687c902015-09-15 13:16:41 +03002219 * Convert the x/y offsets into a linear offset.
2220 * Only valid with 0/180 degree rotation, which is fine since linear
2221 * offset is only used with linear buffers on pre-hsw and tiled buffers
2222 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2223 */
2224u32 intel_fb_xy_to_linear(int x, int y,
Ville Syrjälä29490562016-01-20 18:02:50 +02002225 const struct intel_plane_state *state,
2226 int plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002227{
Ville Syrjälä29490562016-01-20 18:02:50 +02002228 const struct drm_framebuffer *fb = state->base.fb;
Ville Syrjälä353c8592016-12-14 23:30:57 +02002229 unsigned int cpp = fb->format->cpp[plane];
Ville Syrjälä6687c902015-09-15 13:16:41 +03002230 unsigned int pitch = fb->pitches[plane];
2231
2232 return y * pitch + x * cpp;
2233}
2234
2235/*
2236 * Add the x/y offsets derived from fb->offsets[] to the user
2237 * specified plane src x/y offsets. The resulting x/y offsets
2238 * specify the start of scanout from the beginning of the gtt mapping.
2239 */
2240void intel_add_fb_offsets(int *x, int *y,
Ville Syrjälä29490562016-01-20 18:02:50 +02002241 const struct intel_plane_state *state,
2242 int plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002243
2244{
Ville Syrjälä29490562016-01-20 18:02:50 +02002245 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2246 unsigned int rotation = state->base.rotation;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002247
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002248 if (drm_rotation_90_or_270(rotation)) {
Ville Syrjälä6687c902015-09-15 13:16:41 +03002249 *x += intel_fb->rotated[plane].x;
2250 *y += intel_fb->rotated[plane].y;
2251 } else {
2252 *x += intel_fb->normal[plane].x;
2253 *y += intel_fb->normal[plane].y;
2254 }
2255}
2256
Ville Syrjälä303ba692017-08-24 22:10:49 +03002257static u32 __intel_adjust_tile_offset(int *x, int *y,
2258 unsigned int tile_width,
2259 unsigned int tile_height,
2260 unsigned int tile_size,
2261 unsigned int pitch_tiles,
2262 u32 old_offset,
2263 u32 new_offset)
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002264{
Ville Syrjäläb9b24032016-02-08 18:28:00 +02002265 unsigned int pitch_pixels = pitch_tiles * tile_width;
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002266 unsigned int tiles;
2267
2268 WARN_ON(old_offset & (tile_size - 1));
2269 WARN_ON(new_offset & (tile_size - 1));
2270 WARN_ON(new_offset > old_offset);
2271
2272 tiles = (old_offset - new_offset) / tile_size;
2273
2274 *y += tiles / pitch_tiles * tile_height;
2275 *x += tiles % pitch_tiles * tile_width;
2276
Ville Syrjäläb9b24032016-02-08 18:28:00 +02002277 /* minimize x in case it got needlessly big */
2278 *y += *x / pitch_pixels * tile_height;
2279 *x %= pitch_pixels;
2280
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002281 return new_offset;
2282}
2283
/*
 * Rebase the x/y coordinates for @plane of @fb from base @old_offset
 * to base @new_offset (new_offset <= old_offset, WARNed otherwise).
 * Tiled fbs delegate to __intel_adjust_tile_offset() with tile
 * geometry derived for @rotation; linear fbs fold the byte delta
 * straight back into x/y via the pitch.
 */
static u32 _intel_adjust_tile_offset(int *x, int *y,
				     const struct drm_framebuffer *fb, int plane,
				     unsigned int rotation,
				     u32 old_offset, u32 new_offset)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[plane];
	/* pitch is already in the rotated orientation for 90/270 */
	unsigned int pitch = intel_fb_pitch(fb, plane, rotation);

	WARN_ON(new_offset > old_offset);

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: tile dims are swapped */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
					   tile_size, pitch_tiles,
					   old_offset, new_offset);
	} else {
		/* linear: convert to absolute bytes, then back to x/y */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2321
2322/*
Ville Syrjälä303ba692017-08-24 22:10:49 +03002323 * Adjust the tile offset by moving the difference into
2324 * the x/y offsets.
2325 */
2326static u32 intel_adjust_tile_offset(int *x, int *y,
2327 const struct intel_plane_state *state, int plane,
2328 u32 old_offset, u32 new_offset)
2329{
2330 return _intel_adjust_tile_offset(x, y, state->base.fb, plane,
2331 state->base.rotation,
2332 old_offset, new_offset);
2333}
2334
/*
 * Computes the linear offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * This function is used when computing the derived information
 * under intel_framebuffer, so using any of that information
 * here is not allowed. Anything under drm_framebuffer can be
 * used. This is why the user has to pass in the pitch since it
 * is specified in the rotated orientation.
 *
 * Returns the offset rounded down to @alignment; the remainder is
 * folded back into *x/*y so (offset, x, y) still addresses the same
 * pixel.
 */
static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
				      int *x, int *y,
				      const struct drm_framebuffer *fb, int plane,
				      unsigned int pitch,
				      unsigned int rotation,
				      u32 alignment)
{
	uint64_t fb_modifier = fb->modifier;
	unsigned int cpp = fb->format->cpp[plane];
	u32 offset, offset_aligned;

	/* turn the power-of-two alignment into a bitmask */
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: tile dims are swapped */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* split x/y into whole tiles and the remainder within a tile */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* fold the sub-alignment tile delta back into x/y */
		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
					   tile_size, pitch_tiles,
					   offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* fold the sub-alignment byte remainder back into x/y */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2399
Ville Syrjälä6687c902015-09-15 13:16:41 +03002400u32 intel_compute_tile_offset(int *x, int *y,
Ville Syrjälä29490562016-01-20 18:02:50 +02002401 const struct intel_plane_state *state,
2402 int plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002403{
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03002404 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2405 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
Ville Syrjälä29490562016-01-20 18:02:50 +02002406 const struct drm_framebuffer *fb = state->base.fb;
2407 unsigned int rotation = state->base.rotation;
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002408 int pitch = intel_fb_pitch(fb, plane, rotation);
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03002409 u32 alignment;
2410
2411 if (intel_plane->id == PLANE_CURSOR)
2412 alignment = intel_cursor_alignment(dev_priv);
2413 else
2414 alignment = intel_surf_alignment(fb, plane);
Ville Syrjälä6687c902015-09-15 13:16:41 +03002415
2416 return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
2417 rotation, alignment);
2418}
2419
/*
 * Convert the fb->offset[] into x/y offsets.
 *
 * Returns 0 on success, or -EINVAL when a tiled fb's byte offset is
 * not a multiple of the tile size.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
	    fb->offsets[plane] % intel_tile_size(dev_priv))
		return -EINVAL;

	*x = 0;
	*y = 0;

	/* fold the byte offset into x/y, rebasing the offset to 0 */
	_intel_adjust_tile_offset(x, y,
				  fb, plane, DRM_MODE_ROTATE_0,
				  fb->offsets[plane], 0);

	return 0;
}
2439
Ville Syrjälä72618eb2016-02-04 20:38:20 +02002440static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2441{
2442 switch (fb_modifier) {
2443 case I915_FORMAT_MOD_X_TILED:
2444 return I915_TILING_X;
2445 case I915_FORMAT_MOD_Y_TILED:
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002446 case I915_FORMAT_MOD_Y_TILED_CCS:
Ville Syrjälä72618eb2016-02-04 20:38:20 +02002447 return I915_TILING_Y;
2448 default:
2449 return I915_TILING_NONE;
2450 }
2451}
2452
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 * the cache-line pairs. The compression state of the cache-line pair
 * is specified by 2 bits in the CCS. Each CCS cache-line represents
 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 * cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
/*
 * Two-plane format descriptions for the CCS modifiers: plane 0 is the
 * 32bpp main surface, plane 1 is the CCS at one byte per 8x16 pixels
 * (hence hsub = 8, vsub = 16 with cpp = 1).
 */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2473
2474static const struct drm_format_info *
2475lookup_format_info(const struct drm_format_info formats[],
2476 int num_formats, u32 format)
2477{
2478 int i;
2479
2480 for (i = 0; i < num_formats; i++) {
2481 if (formats[i].format == format)
2482 return &formats[i];
2483 }
2484
2485 return NULL;
2486}
2487
2488static const struct drm_format_info *
2489intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2490{
2491 switch (cmd->modifier[0]) {
2492 case I915_FORMAT_MOD_Y_TILED_CCS:
2493 case I915_FORMAT_MOD_Yf_TILED_CCS:
2494 return lookup_format_info(ccs_formats,
2495 ARRAY_SIZE(ccs_formats),
2496 cmd->pixel_format);
2497 default:
2498 return NULL;
2499 }
2500}
2501
/*
 * Fill out the per-plane offset/rotation bookkeeping for @fb
 * (intel_fb->normal[], intel_fb->rotated[], rot_info) and verify the
 * fb layout actually fits inside the backing object.
 *
 * Returns 0 on success or a negative error code for a malformed fb
 * (bad plane offset, mismatched CCS coordinates, fb wrapping past the
 * fence region, or fb larger than the bo).
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	u32 gtt_offset_rotated = 0;	/* running tile count in the rotated view */
	unsigned int max_size = 0;	/* highest tile index used by any plane */
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* turn the byte offset fb->offsets[i] into x/y coordinates */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* extra validation for the CCS (AUX) plane of compressed fbs */
		if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			intel_tile_dims(fb, i, &tile_width, &tile_height);
			/* express the CCS tile size in main surface pixels */
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* tile-aligned start of this plane, in whole tiles */
		offset = _intel_compute_tile_offset(dev_priv, &x, &y,
						    fb, i, fb->pitches[i],
						    DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			__intel_adjust_tile_offset(&x, &y,
						   tile_width, tile_height,
						   tile_size, pitch_tiles,
						   gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* linear: size in tiles straight from the byte extent */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (max_size * tile_size > intel_fb->obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
			      max_size * tile_size, intel_fb->obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2665
Damien Lespiaub35d63f2015-01-20 12:51:50 +00002666static int i9xx_format_to_fourcc(int format)
Jesse Barnes46f297f2014-03-07 08:57:48 -08002667{
2668 switch (format) {
2669 case DISPPLANE_8BPP:
2670 return DRM_FORMAT_C8;
2671 case DISPPLANE_BGRX555:
2672 return DRM_FORMAT_XRGB1555;
2673 case DISPPLANE_BGRX565:
2674 return DRM_FORMAT_RGB565;
2675 default:
2676 case DISPPLANE_BGRX888:
2677 return DRM_FORMAT_XRGB8888;
2678 case DISPPLANE_RGBX888:
2679 return DRM_FORMAT_XBGR8888;
2680 case DISPPLANE_BGRX101010:
2681 return DRM_FORMAT_XRGB2101010;
2682 case DISPPLANE_RGBX101010:
2683 return DRM_FORMAT_XBGR2101010;
2684 }
2685}
2686
Mahesh Kumarddf34312018-04-09 09:11:03 +05302687int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00002688{
2689 switch (format) {
2690 case PLANE_CTL_FORMAT_RGB_565:
2691 return DRM_FORMAT_RGB565;
Mahesh Kumarf34a2912018-04-09 09:11:02 +05302692 case PLANE_CTL_FORMAT_NV12:
2693 return DRM_FORMAT_NV12;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00002694 default:
2695 case PLANE_CTL_FORMAT_XRGB_8888:
2696 if (rgb_order) {
2697 if (alpha)
2698 return DRM_FORMAT_ABGR8888;
2699 else
2700 return DRM_FORMAT_XBGR8888;
2701 } else {
2702 if (alpha)
2703 return DRM_FORMAT_ARGB8888;
2704 else
2705 return DRM_FORMAT_XRGB8888;
2706 }
2707 case PLANE_CTL_FORMAT_XRGB_2101010:
2708 if (rgb_order)
2709 return DRM_FORMAT_XBGR2101010;
2710 else
2711 return DRM_FORMAT_XRGB2101010;
2712 }
2713}
2714
/*
 * Try to wrap the BIOS-programmed framebuffer described by
 * @plane_config in a GEM object backed by the preallocated stolen
 * memory range, and initialize plane_config->fb around it.
 *
 * Returns true on success, false if the BIOS fb cannot be reused
 * (empty, too large for stolen, allocation or fb init failure).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* page-align the range [base, base + size) programmed by the BIOS */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	/* carry over the tiling mode the BIOS programmed */
	if (plane_config->tiling == I915_TILING_X)
		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	i915_gem_object_put(obj);
	return false;
}
2771
Damien Lespiau5724dbd2015-01-20 12:51:52 +00002772static void
Ville Syrjäläe9728bd2017-03-02 19:14:51 +02002773intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2774 struct intel_plane_state *plane_state,
2775 bool visible)
2776{
2777 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2778
2779 plane_state->base.visible = visible;
2780
2781 /* FIXME pre-g4x don't work like this */
2782 if (visible) {
2783 crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
2784 crtc_state->active_planes |= BIT(plane->id);
2785 } else {
2786 crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
2787 crtc_state->active_planes &= ~BIT(plane->id);
2788 }
2789
2790 DRM_DEBUG_KMS("%s active planes 0x%x\n",
2791 crtc_state->base.crtc->name,
2792 crtc_state->active_planes);
2793}
2794
/*
 * Disable @plane on @crtc outside of an atomic commit, updating the
 * current software state to record the plane as not visible.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	/* bookkeeping first: drop the plane from the visible/active masks */
	intel_set_plane_visible(crtc_state, plane_state, false);

	/* the primary plane gets extra pre-disable handling of its own */
	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	trace_intel_disable_plane(&plane->base, crtc);
	plane->disable_plane(plane, crtc);
}
2811
/*
 * Take over the BIOS-enabled primary plane of @intel_crtc: wrap the
 * BIOS fb in a GEM object (or share another CRTC's fb that scans out
 * from the same address), pin it, and populate the plane state. If no
 * usable fb can be reconstructed the plane is disabled instead.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* same GTT address as the BIOS fb? share that CRTC's fb */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   primary->state->rotation,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		/* drop the reference taken above; the plane keeps no fb */
		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	obj = intel_fb_obj(fb);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	/* the fb covers the whole plane (src coords are 16.16 fixed point) */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	/* extra reference for primary->fb / primary->state->fb */
	drm_framebuffer_get(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;

	intel_set_plane_visible(to_intel_crtc_state(crtc_state),
				to_intel_plane_state(plane_state),
				true);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
2920
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002921static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
2922 unsigned int rotation)
2923{
Ville Syrjälä353c8592016-12-14 23:30:57 +02002924 int cpp = fb->format->cpp[plane];
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002925
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002926 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07002927 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002928 case I915_FORMAT_MOD_X_TILED:
2929 switch (cpp) {
2930 case 8:
2931 return 4096;
2932 case 4:
2933 case 2:
2934 case 1:
2935 return 8192;
2936 default:
2937 MISSING_CASE(cpp);
2938 break;
2939 }
2940 break;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002941 case I915_FORMAT_MOD_Y_TILED_CCS:
2942 case I915_FORMAT_MOD_Yf_TILED_CCS:
2943 /* FIXME AUX plane? */
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002944 case I915_FORMAT_MOD_Y_TILED:
2945 case I915_FORMAT_MOD_Yf_TILED:
2946 switch (cpp) {
2947 case 8:
2948 return 2048;
2949 case 4:
2950 return 4096;
2951 case 2:
2952 case 1:
2953 return 8192;
2954 default:
2955 MISSING_CASE(cpp);
2956 break;
2957 }
2958 break;
2959 default:
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002960 MISSING_CASE(fb->modifier);
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002961 }
2962
2963 return 2048;
2964}
2965
/*
 * The CCS (AUX) surface has no x/y offset registers of its own, so its
 * intra-tile x/y must match the main surface's. Walk the AUX offset
 * backwards one alignment step at a time until its x/y coincides with
 * (@main_x, @main_y) at @main_offset, or no further steps are possible.
 *
 * Returns true if a matching position was found; the resulting AUX
 * offset/x/y are written back into plane_state->aux.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->aux.x;
	int aux_y = plane_state->aux.y;
	u32 aux_offset = plane_state->aux.offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		/* found a matching position */
		if (aux_x == main_x && aux_y == main_y)
			break;

		/* can't back up any further */
		if (aux_offset == 0)
			break;

		/* step the AUX offset down by one alignment unit,
		 * working in AUX (subsampled) coordinates and then
		 * restoring the sub-sample remainder */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1,
						      aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->aux.offset = aux_offset;
	plane_state->aux.x = aux_x;
	plane_state->aux.y = aux_y;

	return true;
}
3003
/*
 * Validate and finalize the main surface offset/x/y for a SKL+ plane:
 * enforces the maximum source size and the GLK/CNL edge workaround,
 * keeps the main offset at or below the AUX offset, works around the
 * X-tiled stride limitation, and aligns the CCS x/y with the main
 * surface. Results are stored in plane_state->main.
 *
 * Returns 0 on success, -EINVAL/-ERANGE on an unsupportable config.
 */
static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
				  struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	/* src coords are 16.16 fixed point */
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int dst_x = plane_state->base.dst.x1;
	int pipe_src_w = crtc_state->pipe_src_w;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->aux.offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	/*
	 * Display WA #1175: cnl,glk
	 * Planes other than the cursor may cause FIFO underflow and display
	 * corruption if starting less than 4 pixels from the right edge of
	 * the screen.
	 * Besides the above WA fix the similar problem, where planes other
	 * than the cursor ending less than 4 pixels from the left edge of the
	 * screen may cause FIFO underflow and display corruption.
	 */
	if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
	    (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
		DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
			      dst_x + w < 4 ? "end" : "start",
			      dst_x + w < 4 ? dst_x + w : dst_x,
			      4, pipe_src_w - 4);
		return -ERANGE;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
						  offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* back the offset up until x + w fits within the stride */
		while ((x + w) * cpp > fb->pitches[0]) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
							  offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
	    fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
							  offset, offset - alignment);
		}

		if (x != plane_state->aux.x || y != plane_state->aux.y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->main.offset = offset;
	plane_state->main.x = x;
	plane_state->main.y = y;

	return 0;
}
3104
Maarten Lankhorst5d794282018-05-12 03:03:14 +05303105static int
3106skl_check_nv12_surface(const struct intel_crtc_state *crtc_state,
3107 struct intel_plane_state *plane_state)
3108{
3109 /* Display WA #1106 */
3110 if (plane_state->base.rotation !=
3111 (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3112 plane_state->base.rotation != DRM_MODE_ROTATE_270)
3113 return 0;
3114
3115 /*
3116 * src coordinates are rotated here.
3117 * We check height but report it as width
3118 */
3119 if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3120 DRM_DEBUG_KMS("src width must be multiple "
3121 "of 4 for rotated NV12\n");
3122 return -EINVAL;
3123 }
3124
3125 return 0;
3126}
3127
/*
 * Compute the AUX (CbCr) surface offset/x/y for an NV12 fb and store
 * them in plane_state->aux. The chroma plane is subsampled 2x2, hence
 * the >> 17 below (>> 16 for the 16.16 fixed point src coords, then
 * a further / 2).
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_compute_tile_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->aux.offset = offset;
	plane_state->aux.x = x;
	plane_state->aux.y = y;

	return 0;
}
3156
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003157static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3158{
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003159 const struct drm_framebuffer *fb = plane_state->base.fb;
3160 int src_x = plane_state->base.src.x1 >> 16;
3161 int src_y = plane_state->base.src.y1 >> 16;
3162 int hsub = fb->format->hsub;
3163 int vsub = fb->format->vsub;
3164 int x = src_x / hsub;
3165 int y = src_y / vsub;
3166 u32 offset;
3167
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003168 if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
3169 DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
3170 plane_state->base.rotation);
3171 return -EINVAL;
3172 }
3173
3174 intel_add_fb_offsets(&x, &y, plane_state, 1);
3175 offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
3176
3177 plane_state->aux.offset = offset;
3178 plane_state->aux.x = x * hsub + src_x % hsub;
3179 plane_state->aux.y = y * vsub + src_y % vsub;
3180
3181 return 0;
3182}
3183
/*
 * Top-level SKL+ plane surface check: rejects unsupported flips,
 * rotates the src rect into the GTT view, computes the AUX (NV12/CCS)
 * surface parameters first (the main surface setup depends on them),
 * then finalizes the main surface.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
			    struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	if (rotation & DRM_MODE_REFLECT_X &&
	    fb->modifier == DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
		return -EINVAL;
	}

	/* nothing to compute for an invisible plane */
	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_MODE_ROTATE_270);

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (fb->format->format == DRM_FORMAT_NV12) {
		ret = skl_check_nv12_surface(crtc_state, plane_state);
		if (ret)
			return ret;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		   fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/* no AUX surface for this format/modifier */
		plane_state->aux.offset = ~0xfff;
		plane_state->aux.x = 0;
		plane_state->aux.y = 0;
	}

	ret = skl_check_main_surface(crtc_state, plane_state);
	if (ret)
		return ret;

	return 0;
}
3234
/*
 * Build the DSPCNTR register value for a pre-SKL primary plane from the
 * plane/crtc state: enable + gamma bits, platform workarounds, pixel
 * format, tiling and rotation/mirror flags.
 *
 * Returns the register value, or 0 when the fb format is unhandled
 * (flagged via MISSING_CASE).
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

	/* Trickle feed disable applies on G4X, gen5/6 and IVB. */
	if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
	    IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Pre-gen5 planes select their pipe in DSPCNTR. */
	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
Ville Syrjäläde1aa622013-06-07 10:47:01 +03003296
/*
 * Compute the surface offset and x/y for a pre-SKL plane and store them
 * in plane_state->main. On gen4+ the offset is tile-aligned (adjusting
 * x/y accordingly); older gens program the full linear offset instead.
 * Always returns 0.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	u32 offset;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_compute_tile_offset(&src_x, &src_y,
						   plane_state, 0);
	else
		offset = 0;

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->base.rotation;
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/*
		 * 180 degree rotation (which includes the X mirror) scans
		 * from the bottom-right corner; a pure X flip only moves
		 * the horizontal start.
		 */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->main.offset = offset;
	plane_state->main.x = src_x;
	plane_state->main.y = src_y;

	return 0;
}
3333
/*
 * Program a pre-SKL primary plane from precomputed state
 * (plane_state->ctl and plane_state->main). All register writes happen
 * under the uncore lock with the _FW accessors; DSPSURF/DSPADDR is
 * written last as it arms the double-buffered update.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(i9xx_plane);
	int x = plane_state->main.x;
	int y = plane_state->main.y;
	unsigned long irqflags;
	u32 dspaddr_offset;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Gen4+ uses the tile-aligned offset; older gens the linear one. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->main.offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own size/pos registers. */
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), fb->pitches[0]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
	} else {
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	}
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3397
/*
 * Disable a pre-SKL primary plane: clear DSPCNTR, then arm the update
 * by writing the surface/address register, all under the uncore lock.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
	/* Gen4+ latches via DSPSURF, older gens via DSPADDR. */
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
	POSTING_READ_FW(DSPCNTR(i9xx_plane));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3416
/*
 * Read back whether the plane is currently enabled in hardware
 * (DSPCNTR enable bit), holding the pipe power domain for the access.
 * Returns false if the power domain is off.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe = plane->pipe;
	bool ret;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;

	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
3440
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003441static u32
3442intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
Damien Lespiaub3218032015-02-27 11:15:18 +00003443{
Ben Widawsky2f075562017-03-24 14:29:48 -07003444 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
Ville Syrjälä7b49f942016-01-12 21:08:32 +02003445 return 64;
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003446 else
3447 return intel_tile_width_bytes(fb, plane);
Damien Lespiaub3218032015-02-27 11:15:18 +00003448}
3449
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003450static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3451{
3452 struct drm_device *dev = intel_crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01003453 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003454
3455 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3456 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3457 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003458}
3459
Chandra Kondurua1b22782015-04-07 15:28:45 -07003460/*
3461 * This function detaches (aka. unbinds) unused scalers in hardware
3462 */
Maarten Lankhorst05832362015-06-15 12:33:48 +02003463static void skl_detach_scalers(struct intel_crtc *intel_crtc)
Chandra Kondurua1b22782015-04-07 15:28:45 -07003464{
Chandra Kondurua1b22782015-04-07 15:28:45 -07003465 struct intel_crtc_scaler_state *scaler_state;
3466 int i;
3467
Chandra Kondurua1b22782015-04-07 15:28:45 -07003468 scaler_state = &intel_crtc->config->scaler_state;
3469
3470 /* loop through and disable scalers that aren't in use */
3471 for (i = 0; i < intel_crtc->num_scalers; i++) {
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003472 if (!scaler_state->scalers[i].in_use)
3473 skl_detach_scaler(intel_crtc, i);
Chandra Kondurua1b22782015-04-07 15:28:45 -07003474 }
3475}
3476
Ville Syrjäläd2196772016-01-28 18:33:11 +02003477u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
3478 unsigned int rotation)
3479{
Ville Syrjälä1b500532017-03-07 21:42:08 +02003480 u32 stride;
3481
3482 if (plane >= fb->format->num_planes)
3483 return 0;
3484
3485 stride = intel_fb_pitch(fb, plane, rotation);
Ville Syrjäläd2196772016-01-28 18:33:11 +02003486
3487 /*
3488 * The stride is either expressed as a multiple of 64 bytes chunks for
3489 * linear buffers or in number of tiles for tiled buffers.
3490 */
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003491 if (drm_rotation_90_or_270(rotation))
3492 stride /= intel_tile_height(fb, plane);
3493 else
3494 stride /= intel_fb_stride_alignment(fb, plane);
Ville Syrjäläd2196772016-01-28 18:33:11 +02003495
3496 return stride;
3497}
3498
/*
 * Map a DRM fourcc pixel format to its PLANE_CTL format (and channel
 * order) bits. Unhandled formats hit MISSING_CASE and return 0.
 */
static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3532
James Ausmus4036c782017-11-13 10:11:28 -08003533/*
3534 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3535 * to be already pre-multiplied. We need to add a knob (or a different
3536 * DRM_FORMAT) for user-space to configure that.
3537 */
3538static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
3539{
3540 switch (pixel_format) {
3541 case DRM_FORMAT_ABGR8888:
3542 case DRM_FORMAT_ARGB8888:
3543 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3544 default:
3545 return PLANE_CTL_ALPHA_DISABLE;
3546 }
3547}
3548
3549static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
3550{
3551 switch (pixel_format) {
3552 case DRM_FORMAT_ABGR8888:
3553 case DRM_FORMAT_ARGB8888:
3554 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3555 default:
3556 return PLANE_COLOR_ALPHA_DISABLE;
3557 }
3558}
3559
Ville Syrjälä2e881262017-03-17 23:17:56 +02003560static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
Chandra Konduru6156a452015-04-27 13:48:39 -07003561{
Chandra Konduru6156a452015-04-27 13:48:39 -07003562 switch (fb_modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07003563 case DRM_FORMAT_MOD_LINEAR:
Chandra Konduru6156a452015-04-27 13:48:39 -07003564 break;
3565 case I915_FORMAT_MOD_X_TILED:
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003566 return PLANE_CTL_TILED_X;
Chandra Konduru6156a452015-04-27 13:48:39 -07003567 case I915_FORMAT_MOD_Y_TILED:
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003568 return PLANE_CTL_TILED_Y;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003569 case I915_FORMAT_MOD_Y_TILED_CCS:
3570 return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
Chandra Konduru6156a452015-04-27 13:48:39 -07003571 case I915_FORMAT_MOD_Yf_TILED:
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003572 return PLANE_CTL_TILED_YF;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003573 case I915_FORMAT_MOD_Yf_TILED_CCS:
3574 return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
Chandra Konduru6156a452015-04-27 13:48:39 -07003575 default:
3576 MISSING_CASE(fb_modifier);
3577 }
Damien Lespiau8cfcba42015-05-12 16:13:14 +01003578
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003579 return 0;
Chandra Konduru6156a452015-04-27 13:48:39 -07003580}
3581
/*
 * Map a DRM rotation value (ROTATE_* portion only) to PLANE_CTL
 * rotation bits. Unknown values hit MISSING_CASE and return 0.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
3603
3604static u32 cnl_plane_ctl_flip(unsigned int reflect)
3605{
3606 switch (reflect) {
3607 case 0:
3608 break;
3609 case DRM_MODE_REFLECT_X:
3610 return PLANE_CTL_FLIP_HORIZONTAL;
3611 case DRM_MODE_REFLECT_Y:
3612 default:
3613 MISSING_CASE(reflect);
Chandra Konduru6156a452015-04-27 13:48:39 -07003614 }
3615
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003616 return 0;
Chandra Konduru6156a452015-04-27 13:48:39 -07003617}
3618
/*
 * Build the PLANE_CTL register value for a SKL+ universal plane from
 * the plane state: enable bit, pre-GLK gamma/CSC/alpha handling, pixel
 * format, tiling, rotation/flip and colorkey mode.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/*
	 * On GLK/CNL+ these bits moved to PLANE_COLOR_CTL
	 * (see glk_plane_color_ctl()).
	 */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip is a separate PLANE_CTL bit on CNL+. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
3660
James Ausmus4036c782017-11-13 10:11:28 -08003661u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3662 const struct intel_plane_state *plane_state)
3663{
James Ausmus077ef1f2018-03-28 14:57:56 -07003664 struct drm_i915_private *dev_priv =
3665 to_i915(plane_state->base.plane->dev);
James Ausmus4036c782017-11-13 10:11:28 -08003666 const struct drm_framebuffer *fb = plane_state->base.fb;
3667 u32 plane_color_ctl = 0;
3668
James Ausmus077ef1f2018-03-28 14:57:56 -07003669 if (INTEL_GEN(dev_priv) < 11) {
3670 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
3671 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3672 }
James Ausmus4036c782017-11-13 10:11:28 -08003673 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
3674 plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
3675
Ville Syrjäläb0f5c0b2018-02-14 21:23:25 +02003676 if (intel_format_is_yuv(fb->format->format)) {
Vidya Srinivas8ed30ab2018-04-09 09:11:10 +05303677 if (fb->format->format == DRM_FORMAT_NV12) {
3678 plane_color_ctl |=
3679 PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3680 goto out;
3681 }
Ville Syrjäläb0f5c0b2018-02-14 21:23:25 +02003682 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3683 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3684 else
3685 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
Ville Syrjäläc8624ed2018-02-14 21:23:27 +02003686
3687 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3688 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
Ville Syrjäläb0f5c0b2018-02-14 21:23:25 +02003689 }
Vidya Srinivas8ed30ab2018-04-09 09:11:10 +05303690out:
James Ausmus4036c782017-11-13 10:11:28 -08003691 return plane_color_ctl;
3692}
3693
/*
 * Re-take display hardware state and (optionally) recommit a previously
 * duplicated atomic state after a GPU/display reset or resume.
 *
 * @state may be NULL, in which case only the hw state readout and VGA
 * redisable are performed. Returns 0 or the commit's error code
 * (-EDEADLK is unexpected here and triggers a WARN).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
3732
Ville Syrjälä4ac2ba22016-08-05 23:28:29 +03003733static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3734{
Ville Syrjäläae981042016-08-05 23:28:30 +03003735 return intel_has_gpu_reset(dev_priv) &&
3736 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
Ville Syrjälä4ac2ba22016-08-05 23:28:29 +03003737}
3738
/*
 * Quiesce the display before a GPU reset that will clobber it: unbreak
 * any modeset-vs-reset deadlock, take all modeset locks, duplicate the
 * current atomic state for later restore, and disable all CRTCs.
 * The saved state and held locks are consumed by intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stashed state is restored by intel_finish_reset(). */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3794
Chris Wilsonc0336662016-05-06 15:40:21 +01003795void intel_finish_reset(struct drm_i915_private *dev_priv)
Ville Syrjälä75147472014-11-24 18:28:11 +02003796{
Maarten Lankhorst73974892016-08-05 23:28:27 +03003797 struct drm_device *dev = &dev_priv->drm;
3798 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
Chris Wilson40da1d32018-04-05 13:37:14 +01003799 struct drm_atomic_state *state;
Maarten Lankhorst73974892016-08-05 23:28:27 +03003800 int ret;
3801
Daniel Vetterce87ea12017-07-19 14:54:55 +02003802 /* reset doesn't touch the display */
Chris Wilson40da1d32018-04-05 13:37:14 +01003803 if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
Daniel Vetterce87ea12017-07-19 14:54:55 +02003804 return;
3805
Chris Wilson40da1d32018-04-05 13:37:14 +01003806 state = fetch_and_zero(&dev_priv->modeset_restore_state);
Daniel Vetterce87ea12017-07-19 14:54:55 +02003807 if (!state)
3808 goto unlock;
3809
Ville Syrjälä75147472014-11-24 18:28:11 +02003810 /* reset doesn't touch the display */
Ville Syrjälä4ac2ba22016-08-05 23:28:29 +03003811 if (!gpu_reset_clobbers_display(dev_priv)) {
Daniel Vetterce87ea12017-07-19 14:54:55 +02003812 /* for testing only restore the display */
3813 ret = __intel_display_resume(dev, state, ctx);
Chris Wilson942d5d02017-08-28 11:46:04 +01003814 if (ret)
3815 DRM_ERROR("Restoring old state failed with %i\n", ret);
Maarten Lankhorst73974892016-08-05 23:28:27 +03003816 } else {
3817 /*
3818 * The display has been reset as well,
3819 * so need a full re-initialization.
3820 */
3821 intel_runtime_pm_disable_interrupts(dev_priv);
3822 intel_runtime_pm_enable_interrupts(dev_priv);
3823
Imre Deak51f59202016-09-14 13:04:13 +03003824 intel_pps_unlock_regs_wa(dev_priv);
Maarten Lankhorst73974892016-08-05 23:28:27 +03003825 intel_modeset_init_hw(dev);
Ville Syrjäläf72b84c2017-11-08 15:35:55 +02003826 intel_init_clock_gating(dev_priv);
Maarten Lankhorst73974892016-08-05 23:28:27 +03003827
3828 spin_lock_irq(&dev_priv->irq_lock);
3829 if (dev_priv->display.hpd_irq_setup)
3830 dev_priv->display.hpd_irq_setup(dev_priv);
3831 spin_unlock_irq(&dev_priv->irq_lock);
3832
Maarten Lankhorst581e49f2017-01-16 10:37:38 +01003833 ret = __intel_display_resume(dev, state, ctx);
Maarten Lankhorst73974892016-08-05 23:28:27 +03003834 if (ret)
3835 DRM_ERROR("Restoring old state failed with %i\n", ret);
3836
3837 intel_hpd_init(dev_priv);
Ville Syrjälä75147472014-11-24 18:28:11 +02003838 }
3839
Daniel Vetterce87ea12017-07-19 14:54:55 +02003840 drm_atomic_state_put(state);
3841unlock:
Maarten Lankhorst73974892016-08-05 23:28:27 +03003842 drm_modeset_drop_locks(ctx);
3843 drm_modeset_acquire_fini(ctx);
3844 mutex_unlock(&dev->mode_config.mutex);
Daniel Vetter9db529a2017-08-08 10:08:28 +02003845
3846 clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
Ville Syrjälä75147472014-11-24 18:28:11 +02003847}
3848
Ville Syrjälä1a15b772017-08-23 18:22:25 +03003849static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
3850 const struct intel_crtc_state *new_crtc_state)
Gustavo Padovane30e8f72014-09-10 12:04:17 -03003851{
Ville Syrjälä1a15b772017-08-23 18:22:25 +03003852 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00003853 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Gustavo Padovane30e8f72014-09-10 12:04:17 -03003854
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02003855 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
Ville Syrjälä1a15b772017-08-23 18:22:25 +03003856 crtc->base.mode = new_crtc_state->base.mode;
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02003857
Gustavo Padovane30e8f72014-09-10 12:04:17 -03003858 /*
3859 * Update pipe size and adjust fitter if needed: the reason for this is
3860 * that in compute_mode_changes we check the native mode (not the pfit
3861 * mode) to see if we can flip rather than do a full mode set. In the
3862 * fastboot case, we'll flip, but if we don't update the pipesrc and
3863 * pfit state, we'll end up with a big fb scanned out into the wrong
3864 * sized surface.
Gustavo Padovane30e8f72014-09-10 12:04:17 -03003865 */
3866
Gustavo Padovane30e8f72014-09-10 12:04:17 -03003867 I915_WRITE(PIPESRC(crtc->pipe),
Ville Syrjälä1a15b772017-08-23 18:22:25 +03003868 ((new_crtc_state->pipe_src_w - 1) << 16) |
3869 (new_crtc_state->pipe_src_h - 1));
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02003870
3871 /* on skylake this is done by detaching scalers */
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00003872 if (INTEL_GEN(dev_priv) >= 9) {
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02003873 skl_detach_scalers(crtc);
3874
Ville Syrjälä1a15b772017-08-23 18:22:25 +03003875 if (new_crtc_state->pch_pfit.enabled)
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02003876 skylake_pfit_enable(crtc);
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003877 } else if (HAS_PCH_SPLIT(dev_priv)) {
Ville Syrjälä1a15b772017-08-23 18:22:25 +03003878 if (new_crtc_state->pch_pfit.enabled)
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02003879 ironlake_pfit_enable(crtc);
3880 else if (old_crtc_state->pch_pfit.enabled)
3881 ironlake_pfit_disable(crtc, true);
Gustavo Padovane30e8f72014-09-10 12:04:17 -03003882 }
Gustavo Padovane30e8f72014-09-10 12:04:17 -03003883}
3884
Ander Conselvan de Oliveira4cbe4b22017-03-02 14:58:51 +02003885static void intel_fdi_normal_train(struct intel_crtc *crtc)
Zhenyu Wang5e84e1a2010-10-28 16:38:08 +08003886{
Ander Conselvan de Oliveira4cbe4b22017-03-02 14:58:51 +02003887 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01003888 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira4cbe4b22017-03-02 14:58:51 +02003889 int pipe = crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02003890 i915_reg_t reg;
3891 u32 temp;
Zhenyu Wang5e84e1a2010-10-28 16:38:08 +08003892
3893 /* enable normal train */
3894 reg = FDI_TX_CTL(pipe);
3895 temp = I915_READ(reg);
Tvrtko Ursulinfd6b8f42016-10-14 10:13:06 +01003896 if (IS_IVYBRIDGE(dev_priv)) {
Jesse Barnes357555c2011-04-28 15:09:55 -07003897 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3898 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
Keith Packard61e499b2011-05-17 16:13:52 -07003899 } else {
3900 temp &= ~FDI_LINK_TRAIN_NONE;
3901 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
Jesse Barnes357555c2011-04-28 15:09:55 -07003902 }
Zhenyu Wang5e84e1a2010-10-28 16:38:08 +08003903 I915_WRITE(reg, temp);
3904
3905 reg = FDI_RX_CTL(pipe);
3906 temp = I915_READ(reg);
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003907 if (HAS_PCH_CPT(dev_priv)) {
Zhenyu Wang5e84e1a2010-10-28 16:38:08 +08003908 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3909 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3910 } else {
3911 temp &= ~FDI_LINK_TRAIN_NONE;
3912 temp |= FDI_LINK_TRAIN_NONE;
3913 }
3914 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3915
3916 /* wait one idle pattern time */
3917 POSTING_READ(reg);
3918 udelay(1000);
Jesse Barnes357555c2011-04-28 15:09:55 -07003919
3920 /* IVB wants error correction enabled */
Tvrtko Ursulinfd6b8f42016-10-14 10:13:06 +01003921 if (IS_IVYBRIDGE(dev_priv))
Jesse Barnes357555c2011-04-28 15:09:55 -07003922 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3923 FDI_FE_ERRC_ENABLE);
Zhenyu Wang5e84e1a2010-10-28 16:38:08 +08003924}
3925
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003926/* The FDI link training functions for ILK/Ibexpeak. */
Ander Conselvan de Oliveiradc4a1092017-03-02 14:58:54 +02003927static void ironlake_fdi_link_train(struct intel_crtc *crtc,
3928 const struct intel_crtc_state *crtc_state)
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003929{
Ander Conselvan de Oliveira4cbe4b22017-03-02 14:58:51 +02003930 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01003931 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira4cbe4b22017-03-02 14:58:51 +02003932 int pipe = crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02003933 i915_reg_t reg;
3934 u32 temp, tries;
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003935
Ville Syrjälä1c8562f2014-04-25 22:12:07 +03003936 /* FDI needs bits from pipe first */
Jesse Barnes0fc932b2011-01-04 15:09:37 -08003937 assert_pipe_enabled(dev_priv, pipe);
Jesse Barnes0fc932b2011-01-04 15:09:37 -08003938
Adam Jacksone1a44742010-06-25 15:32:14 -04003939 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3940 for train result */
Chris Wilson5eddb702010-09-11 13:48:45 +01003941 reg = FDI_RX_IMR(pipe);
3942 temp = I915_READ(reg);
Adam Jacksone1a44742010-06-25 15:32:14 -04003943 temp &= ~FDI_RX_SYMBOL_LOCK;
3944 temp &= ~FDI_RX_BIT_LOCK;
Chris Wilson5eddb702010-09-11 13:48:45 +01003945 I915_WRITE(reg, temp);
3946 I915_READ(reg);
Adam Jacksone1a44742010-06-25 15:32:14 -04003947 udelay(150);
3948
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003949 /* enable CPU FDI TX and PCH FDI RX */
Chris Wilson5eddb702010-09-11 13:48:45 +01003950 reg = FDI_TX_CTL(pipe);
3951 temp = I915_READ(reg);
Daniel Vetter627eb5a2013-04-29 19:33:42 +02003952 temp &= ~FDI_DP_PORT_WIDTH_MASK;
Ander Conselvan de Oliveiradc4a1092017-03-02 14:58:54 +02003953 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003954 temp &= ~FDI_LINK_TRAIN_NONE;
3955 temp |= FDI_LINK_TRAIN_PATTERN_1;
Chris Wilson5eddb702010-09-11 13:48:45 +01003956 I915_WRITE(reg, temp | FDI_TX_ENABLE);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003957
Chris Wilson5eddb702010-09-11 13:48:45 +01003958 reg = FDI_RX_CTL(pipe);
3959 temp = I915_READ(reg);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003960 temp &= ~FDI_LINK_TRAIN_NONE;
3961 temp |= FDI_LINK_TRAIN_PATTERN_1;
Chris Wilson5eddb702010-09-11 13:48:45 +01003962 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3963
3964 POSTING_READ(reg);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003965 udelay(150);
3966
Jesse Barnes5b2adf82010-10-07 16:01:15 -07003967 /* Ironlake workaround, enable clock pointer after FDI enable*/
Daniel Vetter8f5718a2012-10-31 22:52:28 +01003968 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3969 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3970 FDI_RX_PHASE_SYNC_POINTER_EN);
Jesse Barnes5b2adf82010-10-07 16:01:15 -07003971
Chris Wilson5eddb702010-09-11 13:48:45 +01003972 reg = FDI_RX_IIR(pipe);
Adam Jacksone1a44742010-06-25 15:32:14 -04003973 for (tries = 0; tries < 5; tries++) {
Chris Wilson5eddb702010-09-11 13:48:45 +01003974 temp = I915_READ(reg);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003975 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3976
3977 if ((temp & FDI_RX_BIT_LOCK)) {
3978 DRM_DEBUG_KMS("FDI train 1 done.\n");
Chris Wilson5eddb702010-09-11 13:48:45 +01003979 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003980 break;
3981 }
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003982 }
Adam Jacksone1a44742010-06-25 15:32:14 -04003983 if (tries == 5)
Chris Wilson5eddb702010-09-11 13:48:45 +01003984 DRM_ERROR("FDI train 1 fail!\n");
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003985
3986 /* Train 2 */
Chris Wilson5eddb702010-09-11 13:48:45 +01003987 reg = FDI_TX_CTL(pipe);
3988 temp = I915_READ(reg);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003989 temp &= ~FDI_LINK_TRAIN_NONE;
3990 temp |= FDI_LINK_TRAIN_PATTERN_2;
Chris Wilson5eddb702010-09-11 13:48:45 +01003991 I915_WRITE(reg, temp);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003992
Chris Wilson5eddb702010-09-11 13:48:45 +01003993 reg = FDI_RX_CTL(pipe);
3994 temp = I915_READ(reg);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08003995 temp &= ~FDI_LINK_TRAIN_NONE;
3996 temp |= FDI_LINK_TRAIN_PATTERN_2;
Chris Wilson5eddb702010-09-11 13:48:45 +01003997 I915_WRITE(reg, temp);
3998
3999 POSTING_READ(reg);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08004000 udelay(150);
4001
Chris Wilson5eddb702010-09-11 13:48:45 +01004002 reg = FDI_RX_IIR(pipe);
Adam Jacksone1a44742010-06-25 15:32:14 -04004003 for (tries = 0; tries < 5; tries++) {
Chris Wilson5eddb702010-09-11 13:48:45 +01004004 temp = I915_READ(reg);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08004005 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4006
4007 if (temp & FDI_RX_SYMBOL_LOCK) {
Chris Wilson5eddb702010-09-11 13:48:45 +01004008 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
Zhenyu Wang8db9d772010-04-07 16:15:54 +08004009 DRM_DEBUG_KMS("FDI train 2 done.\n");
4010 break;
4011 }
Zhenyu Wang8db9d772010-04-07 16:15:54 +08004012 }
Adam Jacksone1a44742010-06-25 15:32:14 -04004013 if (tries == 5)
Chris Wilson5eddb702010-09-11 13:48:45 +01004014 DRM_ERROR("FDI train 2 fail!\n");
Zhenyu Wang8db9d772010-04-07 16:15:54 +08004015
4016 DRM_DEBUG_KMS("FDI train done\n");
Jesse Barnes5c5313c2010-10-07 16:01:11 -07004017
Zhenyu Wang8db9d772010-04-07 16:15:54 +08004018}
4019
Akshay Joshi0206e352011-08-16 15:34:10 -04004020static const int snb_b_fdi_train_param[] = {
Zhenyu Wang8db9d772010-04-07 16:15:54 +08004021 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4022 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4023 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4024 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4025};
4026
/*
 * The FDI link training function for SNB/Cougarpoint.
 *
 * Like the ILK version, but steps through the snb_b_fdi_train_param
 * voltage-swing/pre-emphasis table for each training pattern, retrying
 * the lock poll up to 5 times per level. Failures are logged only.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses a dedicated pattern field. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Try each vswing/pre-emphasis level until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same level sweep for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4159
Jesse Barnes357555c2011-04-28 15:09:55 -07004160/* Manual link training for Ivy Bridge A0 parts */
Ander Conselvan de Oliveiradc4a1092017-03-02 14:58:54 +02004161static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4162 const struct intel_crtc_state *crtc_state)
Jesse Barnes357555c2011-04-28 15:09:55 -07004163{
Ander Conselvan de Oliveira4cbe4b22017-03-02 14:58:51 +02004164 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004165 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira4cbe4b22017-03-02 14:58:51 +02004166 int pipe = crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004167 i915_reg_t reg;
4168 u32 temp, i, j;
Jesse Barnes357555c2011-04-28 15:09:55 -07004169
4170 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4171 for train result */
4172 reg = FDI_RX_IMR(pipe);
4173 temp = I915_READ(reg);
4174 temp &= ~FDI_RX_SYMBOL_LOCK;
4175 temp &= ~FDI_RX_BIT_LOCK;
4176 I915_WRITE(reg, temp);
4177
4178 POSTING_READ(reg);
4179 udelay(150);
4180
Daniel Vetter01a415f2012-10-27 15:58:40 +02004181 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4182 I915_READ(FDI_RX_IIR(pipe)));
4183
Jesse Barnes139ccd32013-08-19 11:04:55 -07004184 /* Try each vswing and preemphasis setting twice before moving on */
4185 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4186 /* disable first in case we need to retry */
Jesse Barnes357555c2011-04-28 15:09:55 -07004187 reg = FDI_TX_CTL(pipe);
4188 temp = I915_READ(reg);
Jesse Barnes139ccd32013-08-19 11:04:55 -07004189 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4190 temp &= ~FDI_TX_ENABLE;
4191 I915_WRITE(reg, temp);
4192
4193 reg = FDI_RX_CTL(pipe);
4194 temp = I915_READ(reg);
4195 temp &= ~FDI_LINK_TRAIN_AUTO;
4196 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4197 temp &= ~FDI_RX_ENABLE;
4198 I915_WRITE(reg, temp);
4199
4200 /* enable CPU FDI TX and PCH FDI RX */
4201 reg = FDI_TX_CTL(pipe);
4202 temp = I915_READ(reg);
4203 temp &= ~FDI_DP_PORT_WIDTH_MASK;
Ander Conselvan de Oliveiradc4a1092017-03-02 14:58:54 +02004204 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
Jesse Barnes139ccd32013-08-19 11:04:55 -07004205 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
Jesse Barnes357555c2011-04-28 15:09:55 -07004206 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
Jesse Barnes139ccd32013-08-19 11:04:55 -07004207 temp |= snb_b_fdi_train_param[j/2];
4208 temp |= FDI_COMPOSITE_SYNC;
4209 I915_WRITE(reg, temp | FDI_TX_ENABLE);
4210
4211 I915_WRITE(FDI_RX_MISC(pipe),
4212 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4213
4214 reg = FDI_RX_CTL(pipe);
4215 temp = I915_READ(reg);
4216 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4217 temp |= FDI_COMPOSITE_SYNC;
4218 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4219
4220 POSTING_READ(reg);
4221 udelay(1); /* should be 0.5us */
4222
4223 for (i = 0; i < 4; i++) {
4224 reg = FDI_RX_IIR(pipe);
4225 temp = I915_READ(reg);
4226 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4227
4228 if (temp & FDI_RX_BIT_LOCK ||
4229 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4230 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4231 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4232 i);
4233 break;
4234 }
4235 udelay(1); /* should be 0.5us */
4236 }
4237 if (i == 4) {
4238 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4239 continue;
4240 }
4241
4242 /* Train 2 */
4243 reg = FDI_TX_CTL(pipe);
4244 temp = I915_READ(reg);
4245 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4246 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4247 I915_WRITE(reg, temp);
4248
4249 reg = FDI_RX_CTL(pipe);
4250 temp = I915_READ(reg);
4251 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4252 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
Jesse Barnes357555c2011-04-28 15:09:55 -07004253 I915_WRITE(reg, temp);
4254
4255 POSTING_READ(reg);
Jesse Barnes139ccd32013-08-19 11:04:55 -07004256 udelay(2); /* should be 1.5us */
Jesse Barnes357555c2011-04-28 15:09:55 -07004257
Jesse Barnes139ccd32013-08-19 11:04:55 -07004258 for (i = 0; i < 4; i++) {
4259 reg = FDI_RX_IIR(pipe);
4260 temp = I915_READ(reg);
4261 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
Jesse Barnes357555c2011-04-28 15:09:55 -07004262
Jesse Barnes139ccd32013-08-19 11:04:55 -07004263 if (temp & FDI_RX_SYMBOL_LOCK ||
4264 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4265 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4266 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4267 i);
4268 goto train_done;
4269 }
4270 udelay(2); /* should be 1.5us */
Jesse Barnes357555c2011-04-28 15:09:55 -07004271 }
Jesse Barnes139ccd32013-08-19 11:04:55 -07004272 if (i == 4)
4273 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
Jesse Barnes357555c2011-04-28 15:09:55 -07004274 }
Jesse Barnes357555c2011-04-28 15:09:55 -07004275
Jesse Barnes139ccd32013-08-19 11:04:55 -07004276train_done:
Jesse Barnes357555c2011-04-28 15:09:55 -07004277 DRM_DEBUG_KMS("FDI train done.\n");
4278}
4279
Daniel Vetter88cefb62012-08-12 19:27:14 +02004280static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
Jesse Barnes0e23b992010-09-10 11:10:00 -07004281{
Daniel Vetter88cefb62012-08-12 19:27:14 +02004282 struct drm_device *dev = intel_crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004283 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes0e23b992010-09-10 11:10:00 -07004284 int pipe = intel_crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004285 i915_reg_t reg;
4286 u32 temp;
Jesse Barnesc64e3112010-09-10 11:27:03 -07004287
Jesse Barnes0e23b992010-09-10 11:10:00 -07004288 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
Chris Wilson5eddb702010-09-11 13:48:45 +01004289 reg = FDI_RX_CTL(pipe);
4290 temp = I915_READ(reg);
Daniel Vetter627eb5a2013-04-29 19:33:42 +02004291 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02004292 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
Daniel Vetterdfd07d72012-12-17 11:21:38 +01004293 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
Chris Wilson5eddb702010-09-11 13:48:45 +01004294 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4295
4296 POSTING_READ(reg);
Jesse Barnes0e23b992010-09-10 11:10:00 -07004297 udelay(200);
4298
4299 /* Switch from Rawclk to PCDclk */
Chris Wilson5eddb702010-09-11 13:48:45 +01004300 temp = I915_READ(reg);
4301 I915_WRITE(reg, temp | FDI_PCDCLK);
4302
4303 POSTING_READ(reg);
Jesse Barnes0e23b992010-09-10 11:10:00 -07004304 udelay(200);
4305
Paulo Zanoni20749732012-11-23 15:30:38 -02004306 /* Enable CPU FDI TX PLL, always on for Ironlake */
4307 reg = FDI_TX_CTL(pipe);
4308 temp = I915_READ(reg);
4309 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4310 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
Chris Wilson5eddb702010-09-11 13:48:45 +01004311
Paulo Zanoni20749732012-11-23 15:30:38 -02004312 POSTING_READ(reg);
4313 udelay(100);
Jesse Barnes0e23b992010-09-10 11:10:00 -07004314 }
4315}
4316
Daniel Vetter88cefb62012-08-12 19:27:14 +02004317static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4318{
4319 struct drm_device *dev = intel_crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004320 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter88cefb62012-08-12 19:27:14 +02004321 int pipe = intel_crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004322 i915_reg_t reg;
4323 u32 temp;
Daniel Vetter88cefb62012-08-12 19:27:14 +02004324
4325 /* Switch from PCDclk to Rawclk */
4326 reg = FDI_RX_CTL(pipe);
4327 temp = I915_READ(reg);
4328 I915_WRITE(reg, temp & ~FDI_PCDCLK);
4329
4330 /* Disable CPU FDI TX PLL */
4331 reg = FDI_TX_CTL(pipe);
4332 temp = I915_READ(reg);
4333 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4334
4335 POSTING_READ(reg);
4336 udelay(100);
4337
4338 reg = FDI_RX_CTL(pipe);
4339 temp = I915_READ(reg);
4340 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4341
4342 /* Wait for the clocks to turn off. */
4343 POSTING_READ(reg);
4344 udelay(100);
4345}
4346
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004347static void ironlake_fdi_disable(struct drm_crtc *crtc)
4348{
4349 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004350 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004351 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4352 int pipe = intel_crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004353 i915_reg_t reg;
4354 u32 temp;
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004355
4356 /* disable CPU FDI tx and PCH FDI rx */
4357 reg = FDI_TX_CTL(pipe);
4358 temp = I915_READ(reg);
4359 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4360 POSTING_READ(reg);
4361
4362 reg = FDI_RX_CTL(pipe);
4363 temp = I915_READ(reg);
4364 temp &= ~(0x7 << 16);
Daniel Vetterdfd07d72012-12-17 11:21:38 +01004365 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004366 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4367
4368 POSTING_READ(reg);
4369 udelay(100);
4370
4371 /* Ironlake workaround, disable clock pointer after downing FDI */
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004372 if (HAS_PCH_IBX(dev_priv))
Jesse Barnes6f06ce12011-01-04 15:09:38 -08004373 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004374
4375 /* still set train pattern 1 */
4376 reg = FDI_TX_CTL(pipe);
4377 temp = I915_READ(reg);
4378 temp &= ~FDI_LINK_TRAIN_NONE;
4379 temp |= FDI_LINK_TRAIN_PATTERN_1;
4380 I915_WRITE(reg, temp);
4381
4382 reg = FDI_RX_CTL(pipe);
4383 temp = I915_READ(reg);
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004384 if (HAS_PCH_CPT(dev_priv)) {
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004385 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4386 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4387 } else {
4388 temp &= ~FDI_LINK_TRAIN_NONE;
4389 temp |= FDI_LINK_TRAIN_PATTERN_1;
4390 }
4391 /* BPC in FDI rx is consistent with that in PIPECONF */
4392 temp &= ~(0x07 << 16);
Daniel Vetterdfd07d72012-12-17 11:21:38 +01004393 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004394 I915_WRITE(reg, temp);
4395
4396 POSTING_READ(reg);
4397 udelay(100);
4398}
4399
Chris Wilson49d73912016-11-29 09:50:08 +00004400bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
Chris Wilson5dce5b932014-01-20 10:17:36 +00004401{
Daniel Vetterfa058872017-07-20 19:57:52 +02004402 struct drm_crtc *crtc;
4403 bool cleanup_done;
Chris Wilson5dce5b932014-01-20 10:17:36 +00004404
Daniel Vetterfa058872017-07-20 19:57:52 +02004405 drm_for_each_crtc(crtc, &dev_priv->drm) {
4406 struct drm_crtc_commit *commit;
4407 spin_lock(&crtc->commit_lock);
4408 commit = list_first_entry_or_null(&crtc->commit_list,
4409 struct drm_crtc_commit, commit_entry);
4410 cleanup_done = commit ?
4411 try_wait_for_completion(&commit->cleanup_done) : true;
4412 spin_unlock(&crtc->commit_lock);
4413
4414 if (cleanup_done)
Chris Wilson5dce5b932014-01-20 10:17:36 +00004415 continue;
4416
Daniel Vetterfa058872017-07-20 19:57:52 +02004417 drm_crtc_wait_one_vblank(crtc);
Chris Wilson5dce5b932014-01-20 10:17:36 +00004418
4419 return true;
4420 }
4421
4422 return false;
4423}
4424
Maarten Lankhorstb7076542016-08-23 16:18:08 +02004425void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
Ville Syrjälä060f02d2015-12-04 22:21:34 +02004426{
4427 u32 temp;
4428
4429 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4430
4431 mutex_lock(&dev_priv->sb_lock);
4432
4433 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4434 temp |= SBI_SSCCTL_DISABLE;
4435 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4436
4437 mutex_unlock(&dev_priv->sb_lock);
4438}
4439
/*
 * lpt_program_iclkip - program the iCLKIP clock to the crtc's pixel clock
 * @crtc: the crtc whose adjusted_mode.crtc_clock (in kHz) is the target
 *
 * Disables iCLKIP, computes auxdiv/divsel/phaseinc divisors from the
 * 172.8 MHz virtual root clock, writes them over the sideband interface
 * (SSCDIVINTPHASE6 / SSCAUXDIV6), re-enables the modulator (SSCCTL6),
 * waits 24us for initialization, and finally ungates the pixel clock.
 */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Must be disabled (and the pixel clock gated) before reprogramming. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		/* Split the divisor into integer (divsel) and phase parts. */
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Sideband accesses below are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Ungate the pixel clock only after the divisors are programmed. */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4517
/*
 * lpt_get_iclkip - read back the currently programmed iCLKIP frequency
 * @dev_priv: i915 device
 *
 * Inverse of lpt_program_iclkip(): reads the divsel/phaseinc/auxdiv fields
 * over the sideband interface and reconstructs the clock in kHz using the
 * same 172.8 MHz virtual root frequency.
 *
 * Returns 0 when the pixel clock is gated or the SSC modulator is disabled
 * (i.e. iCLKIP is not running); otherwise the computed frequency in kHz.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	/* Clock gated => not running. */
	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Undo the divsel/phaseinc split done when programming. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4554
/*
 * ironlake_pch_transcoder_set_timings - copy CPU transcoder timings to PCH
 * @crtc: crtc whose CPU transcoder provides the timings
 * @pch_transcoder: destination PCH transcoder (indexed by pipe)
 *
 * Mirrors the H/V TOTAL, BLANK, SYNC and VSYNCSHIFT registers from the
 * crtc's CPU transcoder into the corresponding PCH transcoder registers;
 * the two must agree for the PCH transcoder to work.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4578
/*
 * cpt_set_fdi_bc_bifurcation - toggle FDI B/C lane bifurcation
 * @dev: drm device
 * @enable: true to split FDI B/C lanes (bifurcation), false to merge
 *
 * Read-modify-writes FDI_BC_BIFURCATION_SELECT in SOUTH_CHICKEN1.
 * Returns early if the bit already matches @enable.  Warns if FDI RX
 * on pipe B or C is still enabled, since the bit must not be flipped
 * while those links are active.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* Nothing to do if the bifurcation bit already matches. */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	/* Posting read flushes the write before callers proceed. */
	POSTING_READ(SOUTH_CHICKEN1);
}
4599
4600static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4601{
4602 struct drm_device *dev = intel_crtc->base.dev;
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004603
4604 switch (intel_crtc->pipe) {
4605 case PIPE_A:
4606 break;
4607 case PIPE_B:
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02004608 if (intel_crtc->config->fdi_lanes > 2)
Ander Conselvan de Oliveira003632d2015-03-11 13:35:43 +02004609 cpt_set_fdi_bc_bifurcation(dev, false);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004610 else
Ander Conselvan de Oliveira003632d2015-03-11 13:35:43 +02004611 cpt_set_fdi_bc_bifurcation(dev, true);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004612
4613 break;
4614 case PIPE_C:
Ander Conselvan de Oliveira003632d2015-03-11 13:35:43 +02004615 cpt_set_fdi_bc_bifurcation(dev, true);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004616
4617 break;
4618 default:
4619 BUG();
4620 }
4621}
4622
/* Return which DP Port should be selected for Transcoder DP control */
/*
 * Walks the encoders attached to @crtc and returns the port of the first
 * DP or eDP encoder found.
 *
 * NOTE(review): returns -1 (not a named enum port value) when no DP/eDP
 * encoder is attached; the only caller visible here (ironlake_pch_enable)
 * BUG()s on anything other than PORT_B/C/D, so -1 is effectively
 * "must not happen" — confirm before relying on it elsewhere.
 */
static enum port
intel_trans_dp_port_sel(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return encoder->port;
	}

	return -1;
}
4638
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 *
 * The steps below are order-sensitive: TU size before FDI training,
 * clock selection before PLL enable, timings before normal train,
 * and the PCH transcoder is enabled last.
 */
static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB shares FDI lanes between pipes B and C; pick the split first. */
	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		/* Route this pipe's transcoder to DPLL A or B in PCH_DPLL_SEL. */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		/* PIPECONF BPC field (bits 7:5) is reused for TRANS_DP_CTL. */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Only ports B/C/D can drive a PCH DP transcoder. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4737
/*
 * lpt_pch_enable - enable PCH resources on LPT
 * @crtc_state: state of the crtc being enabled
 *
 * LPT variant of ironlake_pch_enable(): programs iCLKIP, copies the CPU
 * transcoder timings to the (single, pipe-A-indexed) PCH transcoder and
 * enables it.  No FDI training or DPLL selection is needed here.
 */
static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* LPT has a single PCH transcoder, always addressed as PIPE_A. */
	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4753
/*
 * cpt_verify_modeset - sanity-check that the pipe is running after modeset
 * @dev: drm device
 * @pipe: pipe to check
 *
 * Samples the pipe's display scanline register (PIPEDSL) and waits for it
 * to change, which it must do on a live pipe.  Retries once before
 * reporting the pipe as stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Second chance before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4767
/*
 * skl_update_scaler - stage a scaler request/release in crtc_state
 * @crtc_state: crtc state holding the staged scaler_state
 * @force_detach: release the scaler regardless of need
 * @scaler_user: bit index identifying the requester (plane or SKL_CRTC_INDEX)
 * @scaler_id: in/out; currently assigned scaler id, set to -1 on release
 * @src_w/@src_h: source rectangle size
 * @dst_w/@dst_h: destination rectangle size
 * @plane_scaler_check: true when called for a plane (enables format checks)
 * @pixel_format: fourcc of the plane's fb (only used if @plane_scaler_check)
 *
 * Only updates bookkeeping (scaler_users bitmask, in_use, scaler_id);
 * actual scaler registers are programmed later in plane/panel-fit code.
 *
 * Returns 0 on success, -EINVAL if the requested scaling is unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  bool plane_scaler_check,
		  uint32_t pixel_format)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int need_scaling;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	need_scaling = src_w != dst_w || src_h != dst_h;

	/* NV12 planes always need a scaler, even at 1:1. */
	if (plane_scaler_check)
		if (pixel_format == DRM_FORMAT_NV12)
			need_scaling = true;

	/* YCbCr 4:2:0 output needs the pipe scaler for chroma downsampling. */
	if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
		need_scaling = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
	    need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* NV12 sources below the minimum size cannot be scaled. */
	if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("NV12: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (IS_GEN11(dev_priv) &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (!IS_GEN11(dev_priv) &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4864
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 * @state: crtc state to check; the pipe scaler request is staged into
 *         @state->scaler_state
 *
 * Stages a pipe scaler (panel fitter) request by comparing the pipe source
 * size against the adjusted mode's active size; requests release of the
 * scaler when the crtc is not active.
 *
 * Return
 *  0 - scaler_usage updated successfully
 *  error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	/* Trailing (false, 0) disables the plane-only pixel-format checks. */
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id,
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay,
		adjusted_mode->crtc_vdisplay, false, 0);
}
4884
4885/**
4886 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
Chris Wilsonc38c1452018-02-14 13:49:22 +00004887 * @crtc_state: crtc's scaler state
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004888 * @plane_state: atomic plane state to update
4889 *
4890 * Return
4891 * 0 - scaler_usage updated successfully
4892 * error - requested scaling cannot be supported or other error condition
4893 */
Maarten Lankhorstda20eab2015-06-15 12:33:44 +02004894static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4895 struct intel_plane_state *plane_state)
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004896{
4897
Maarten Lankhorstda20eab2015-06-15 12:33:44 +02004898 struct intel_plane *intel_plane =
4899 to_intel_plane(plane_state->base.plane);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004900 struct drm_framebuffer *fb = plane_state->base.fb;
4901 int ret;
4902
Ville Syrjälä936e71e2016-07-26 19:06:59 +03004903 bool force_detach = !fb || !plane_state->base.visible;
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004904
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004905 ret = skl_update_scaler(crtc_state, force_detach,
4906 drm_plane_index(&intel_plane->base),
4907 &plane_state->scaler_id,
Ville Syrjälä936e71e2016-07-26 19:06:59 +03004908 drm_rect_width(&plane_state->base.src) >> 16,
4909 drm_rect_height(&plane_state->base.src) >> 16,
4910 drm_rect_width(&plane_state->base.dst),
Chandra Konduru77224cd2018-04-09 09:11:13 +05304911 drm_rect_height(&plane_state->base.dst),
4912 fb ? true : false, fb ? fb->format->format : 0);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004913
4914 if (ret || plane_state->scaler_id < 0)
4915 return ret;
4916
Chandra Kondurua1b22782015-04-07 15:28:45 -07004917 /* check colorkey */
Ville Syrjälä6ec5bd32018-02-02 22:42:31 +02004918 if (plane_state->ckey.flags) {
Ville Syrjälä72660ce2016-05-27 20:59:20 +03004919 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
4920 intel_plane->base.base.id,
4921 intel_plane->base.name);
Chandra Kondurua1b22782015-04-07 15:28:45 -07004922 return -EINVAL;
4923 }
4924
4925 /* Check src format */
Ville Syrjälä438b74a2016-12-14 23:32:55 +02004926 switch (fb->format->format) {
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004927 case DRM_FORMAT_RGB565:
4928 case DRM_FORMAT_XBGR8888:
4929 case DRM_FORMAT_XRGB8888:
4930 case DRM_FORMAT_ABGR8888:
4931 case DRM_FORMAT_ARGB8888:
4932 case DRM_FORMAT_XRGB2101010:
4933 case DRM_FORMAT_XBGR2101010:
4934 case DRM_FORMAT_YUYV:
4935 case DRM_FORMAT_YVYU:
4936 case DRM_FORMAT_UYVY:
4937 case DRM_FORMAT_VYUY:
Chandra Konduru77224cd2018-04-09 09:11:13 +05304938 case DRM_FORMAT_NV12:
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004939 break;
4940 default:
Ville Syrjälä72660ce2016-05-27 20:59:20 +03004941 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
4942 intel_plane->base.base.id, intel_plane->base.name,
Ville Syrjälä438b74a2016-12-14 23:32:55 +02004943 fb->base.id, fb->format->format);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004944 return -EINVAL;
Chandra Kondurua1b22782015-04-07 15:28:45 -07004945 }
4946
Chandra Kondurua1b22782015-04-07 15:28:45 -07004947 return 0;
4948}
4949
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02004950static void skylake_scaler_disable(struct intel_crtc *crtc)
4951{
4952 int i;
4953
4954 for (i = 0; i < crtc->num_scalers; i++)
4955 skl_detach_scaler(crtc, i);
4956}
4957
/*
 * skylake_pfit_enable - program the SKL pipe scaler for panel fitting
 * @crtc: crtc whose config->pch_pfit holds the staged position/size
 *
 * Writes the scaler control, window position and window size registers
 * for the scaler id previously assigned in crtc->config->scaler_state.
 * No-op when panel fitting is not enabled in the crtc config.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* A scaler must have been staged by skl_update_scaler_crtc(). */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
			return;

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
	}
}
4979
/*
 * ironlake_pfit_enable - program the ILK-style panel fitter
 * @crtc: crtc whose config->pch_pfit holds the staged position/size
 *
 * Enables the fixed-function panel fitter (PF_CTL/PF_WIN_*) for the pipe.
 * IVB/HSW additionally need the pipe selected in PF_CTL.  No-op when
 * panel fitting is not enabled in the crtc config.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
5000
/*
 * hsw_enable_ips - enable Intermediate Pixel Storage
 * @crtc_state: state of the crtc; no-op unless crtc_state->ips_enabled
 *
 * On Broadwell IPS is toggled through the pcode mailbox (under pcu_lock);
 * on Haswell it is toggled directly via the IPS_CTL register, in which
 * case we also wait for the enable bit to latch at the next vblank.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		mutex_unlock(&dev_priv->pcu_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
5040
/*
 * hsw_disable_ips - disable Intermediate Pixel Storage
 * @crtc_state: state of the crtc; no-op unless crtc_state->ips_enabled
 *
 * Counterpart of hsw_enable_ips(): pcode mailbox on Broadwell (waiting up
 * to 42ms for the hardware to report disable), direct IPS_CTL write on
 * Haswell.  Finishes with a vblank wait, after which planes may be
 * disabled safely.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->pcu_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5067
Maarten Lankhorst7cac9452015-04-21 17:12:55 +03005068static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005069{
Maarten Lankhorst7cac9452015-04-21 17:12:55 +03005070 if (intel_crtc->overlay) {
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005071 struct drm_device *dev = intel_crtc->base.dev;
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005072
5073 mutex_lock(&dev->struct_mutex);
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005074 (void) intel_overlay_switch_off(intel_crtc->overlay);
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005075 mutex_unlock(&dev->struct_mutex);
5076 }
5077
5078 /* Let userspace switch the overlay on again. In most cases userspace
5079 * has to recompute where to put it anyway.
5080 */
5081}
5082
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 * @new_crtc_state: the enabling state
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS. Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc,
			  const struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
5117
/* FIXME get rid of this and use pre_plane_update */
/*
 * Non-atomic variant of the pre-plane-disable work: quiesce underrun
 * reporting on gen2, turn off IPS, and disable self-refresh (cxsr) so
 * the plane disable can actually latch.  Used only from legacy paths.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* IPS must be off before the primary plane goes away. */
	hsw_disable_ips(to_intel_crtc_state(crtc->state));

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);
}
5149
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005150static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5151 const struct intel_crtc_state *new_crtc_state)
5152{
5153 if (!old_crtc_state->ips_enabled)
5154 return false;
5155
5156 if (needs_modeset(&new_crtc_state->base))
5157 return true;
5158
5159 return !new_crtc_state->ips_enabled;
5160}
5161
5162static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5163 const struct intel_crtc_state *new_crtc_state)
5164{
5165 if (!new_crtc_state->ips_enabled)
5166 return false;
5167
5168 if (needs_modeset(&new_crtc_state->base))
5169 return true;
5170
5171 /*
5172 * We can't read out IPS on broadwell, assume the worst and
5173 * forcibly enable IPS on the first fastset.
5174 */
5175 if (new_crtc_state->update_pipe &&
5176 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5177 return true;
5178
5179 return !old_crtc_state->ips_enabled;
5180}
5181
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305182static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5183 const struct intel_crtc_state *crtc_state)
5184{
5185 if (!crtc_state->nv12_planes)
5186 return false;
5187
5188 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
5189 return false;
5190
5191 if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
5192 IS_CANNONLAKE(dev_priv))
5193 return true;
5194
5195 return false;
5196}
5197
/*
 * Post-vblank tail of an atomic plane update: flush frontbuffer
 * tracking, apply the optimal ("post") watermarks, re-enable IPS if the
 * new state wants it, let FBC react, run the primary-enable hooks when
 * the primary plane just became visible, and lift Display WA 827 once
 * NV12 planes are gone.  Ordering here mirrors intel_pre_plane_update().
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	/* The new CRTC state lives in the same atomic state as the old one. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	/* Only present when the primary plane was part of this commit. */
	if (old_primary_state) {
		struct drm_plane_state *new_primary_state =
			drm_atomic_get_new_plane_state(old_state, primary);

		intel_fbc_post_update(crtc);

		/* Primary went from hidden/off to visible. */
		if (new_primary_state->visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}

	/* Display WA 827 */
	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
	    !needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, false);
		skl_wa_528(dev_priv, crtc->pipe, false);
	}
}
5238
/*
 * Pre-vblank head of an atomic plane update.  Runs everything that must
 * happen before the planes are reprogrammed: IPS off, FBC pre-update,
 * gen2 underrun quiescing, Display WA 827 on for incoming NV12 planes,
 * cxsr (self-refresh) disable, the IVB scaling watermark workaround,
 * and finally the "intermediate" watermarks for a non-modeset update.
 * The statement order is load-bearing; see the inline comments.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
		hsw_disable_ips(old_crtc_state);

	/* Only present when the primary plane is part of this commit. */
	if (old_primary_state) {
		struct intel_plane_state *new_primary_state =
			intel_atomic_get_new_plane_state(old_intel_state,
							 to_intel_plane(primary));

		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So disable underrun reporting before all the planes get disabled.
		 */
		if (IS_GEN2(dev_priv) && old_primary_state->visible &&
		    (modeset || !new_primary_state->base.visible))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	}

	/* Display WA 827 */
	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
	    needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, true);
		skl_wa_528(dev_priv, crtc->pipe, true);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done. No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks. For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
5328
Maarten Lankhorstd032ffa2015-06-15 12:33:51 +02005329static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005330{
5331 struct drm_device *dev = crtc->dev;
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005332 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Maarten Lankhorstd032ffa2015-06-15 12:33:51 +02005333 struct drm_plane *p;
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005334 int pipe = intel_crtc->pipe;
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005335
Maarten Lankhorst7cac9452015-04-21 17:12:55 +03005336 intel_crtc_dpms_overlay_disable(intel_crtc);
Maarten Lankhorst27321ae2015-04-21 17:12:52 +03005337
Maarten Lankhorstd032ffa2015-06-15 12:33:51 +02005338 drm_for_each_plane_mask(p, dev, plane_mask)
Ville Syrjälä282dbf92017-03-27 21:55:33 +03005339 to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
Ville Syrjäläf98551a2014-05-22 17:48:06 +03005340
Daniel Vetterf99d7062014-06-19 16:01:59 +02005341 /*
5342 * FIXME: Once we grow proper nuclear flip support out of this we need
5343 * to compute the mask of flip planes precisely. For the time being
5344 * consider this a flip to a NULL plane.
5345 */
Chris Wilson5748b6a2016-08-04 16:32:38 +01005346 intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005347}
5348
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005349static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005350 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005351 struct drm_atomic_state *old_state)
5352{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005353 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005354 struct drm_connector *conn;
5355 int i;
5356
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005357 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005358 struct intel_encoder *encoder =
5359 to_intel_encoder(conn_state->best_encoder);
5360
5361 if (conn_state->crtc != crtc)
5362 continue;
5363
5364 if (encoder->pre_pll_enable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005365 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005366 }
5367}
5368
5369static void intel_encoders_pre_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005370 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005371 struct drm_atomic_state *old_state)
5372{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005373 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005374 struct drm_connector *conn;
5375 int i;
5376
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005377 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005378 struct intel_encoder *encoder =
5379 to_intel_encoder(conn_state->best_encoder);
5380
5381 if (conn_state->crtc != crtc)
5382 continue;
5383
5384 if (encoder->pre_enable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005385 encoder->pre_enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005386 }
5387}
5388
5389static void intel_encoders_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005390 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005391 struct drm_atomic_state *old_state)
5392{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005393 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005394 struct drm_connector *conn;
5395 int i;
5396
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005397 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005398 struct intel_encoder *encoder =
5399 to_intel_encoder(conn_state->best_encoder);
5400
5401 if (conn_state->crtc != crtc)
5402 continue;
5403
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005404 encoder->enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005405 intel_opregion_notify_encoder(encoder, true);
5406 }
5407}
5408
5409static void intel_encoders_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005410 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005411 struct drm_atomic_state *old_state)
5412{
5413 struct drm_connector_state *old_conn_state;
5414 struct drm_connector *conn;
5415 int i;
5416
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005417 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005418 struct intel_encoder *encoder =
5419 to_intel_encoder(old_conn_state->best_encoder);
5420
5421 if (old_conn_state->crtc != crtc)
5422 continue;
5423
5424 intel_opregion_notify_encoder(encoder, false);
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005425 encoder->disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005426 }
5427}
5428
5429static void intel_encoders_post_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005430 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005431 struct drm_atomic_state *old_state)
5432{
5433 struct drm_connector_state *old_conn_state;
5434 struct drm_connector *conn;
5435 int i;
5436
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005437 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005438 struct intel_encoder *encoder =
5439 to_intel_encoder(old_conn_state->best_encoder);
5440
5441 if (old_conn_state->crtc != crtc)
5442 continue;
5443
5444 if (encoder->post_disable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005445 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005446 }
5447}
5448
5449static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005450 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005451 struct drm_atomic_state *old_state)
5452{
5453 struct drm_connector_state *old_conn_state;
5454 struct drm_connector *conn;
5455 int i;
5456
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005457 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005458 struct intel_encoder *encoder =
5459 to_intel_encoder(old_conn_state->best_encoder);
5460
5461 if (old_conn_state->crtc != crtc)
5462 continue;
5463
5464 if (encoder->post_pll_disable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005465 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005466 }
5467}
5468
/*
 * Full pipe enable sequence for ILK-class (PCH based) platforms.
 * Programs pipe timings, M/N values and PIPECONF first, runs the
 * encoders' ->pre_enable() hooks, brings up FDI/panel fitter/LUT, and
 * only then enables the pipe and (when present) the PCH transcoder.
 * CPU and PCH FIFO underrun reporting is suppressed around the sequence
 * because FDI training and PCH enabling generate spurious underruns.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* Double enable would corrupt the sequence below. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	/* FDI link M/N values are only needed when a PCH port is driven. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		/* Without a PCH encoder FDI must stay fully off. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev_priv, pipe);
	/* Re-arm the underrun reporting suppressed at the top. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5557
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005558/* IPS only exists on ULT machines and is tied to pipe A. */
5559static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5560{
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01005561 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005562}
5563
Imre Deaked69cd42017-10-02 10:55:57 +03005564static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5565 enum pipe pipe, bool apply)
5566{
5567 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5568 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5569
5570 if (apply)
5571 val |= mask;
5572 else
5573 val &= ~mask;
5574
5575 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5576}
5577
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005578static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5579{
5580 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5581 enum pipe pipe = crtc->pipe;
5582 uint32_t val;
5583
5584 val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
5585
5586 /* Program B credit equally to all pipes */
5587 val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
5588
5589 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5590}
5591
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005592static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5593 struct drm_atomic_state *old_state)
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005594{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005595 struct drm_crtc *crtc = pipe_config->base.crtc;
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00005596 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005597 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Maarten Lankhorst99d736a2015-06-01 12:50:09 +02005598 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
Jani Nikula4d1de972016-03-18 17:05:42 +02005599 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
Maarten Lankhorstccf010f2016-11-08 13:55:32 +01005600 struct intel_atomic_state *old_intel_state =
5601 to_intel_atomic_state(old_state);
Imre Deaked69cd42017-10-02 10:55:57 +03005602 bool psl_clkgate_wa;
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005603
Maarten Lankhorst53d9f4e2015-06-01 12:49:52 +02005604 if (WARN_ON(intel_crtc->active))
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005605 return;
5606
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005607 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
Imre Deak95a7a2a2016-06-13 16:44:35 +03005608
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02005609 if (intel_crtc->config->shared_dpll)
Daniel Vetterdf8ad702014-06-25 22:02:03 +03005610 intel_enable_shared_dpll(intel_crtc);
5611
Paulo Zanonic27e9172018-04-27 16:14:36 -07005612 if (INTEL_GEN(dev_priv) >= 11)
5613 icl_map_plls_to_ports(crtc, pipe_config, old_state);
5614
Ville Syrjälä37a56502016-06-22 21:57:04 +03005615 if (intel_crtc_has_dp_encoder(intel_crtc->config))
Ramalingam Cfe3cd482015-02-13 15:32:59 +05305616 intel_dp_set_m_n(intel_crtc, M1_N1);
Daniel Vetter229fca92014-04-24 23:55:09 +02005617
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005618 if (!transcoder_is_dsi(cpu_transcoder))
Jani Nikula4d1de972016-03-18 17:05:42 +02005619 intel_set_pipe_timings(intel_crtc);
5620
Jani Nikulabc58be62016-03-18 17:05:39 +02005621 intel_set_pipe_src_size(intel_crtc);
Daniel Vetter229fca92014-04-24 23:55:09 +02005622
Jani Nikula4d1de972016-03-18 17:05:42 +02005623 if (cpu_transcoder != TRANSCODER_EDP &&
5624 !transcoder_is_dsi(cpu_transcoder)) {
5625 I915_WRITE(PIPE_MULT(cpu_transcoder),
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005626 intel_crtc->config->pixel_multiplier - 1);
Clint Taylorebb69c92014-09-30 10:30:22 -07005627 }
5628
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005629 if (intel_crtc->config->has_pch_encoder) {
Daniel Vetter229fca92014-04-24 23:55:09 +02005630 intel_cpu_transcoder_set_m_n(intel_crtc,
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005631 &intel_crtc->config->fdi_m_n, NULL);
Daniel Vetter229fca92014-04-24 23:55:09 +02005632 }
5633
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005634 if (!transcoder_is_dsi(cpu_transcoder))
Jani Nikula4d1de972016-03-18 17:05:42 +02005635 haswell_set_pipeconf(crtc);
5636
Jani Nikula391bf042016-03-18 17:05:40 +02005637 haswell_set_pipemisc(crtc);
Daniel Vetter229fca92014-04-24 23:55:09 +02005638
Maarten Lankhorstb95c5322016-03-30 17:16:34 +02005639 intel_color_set_csc(&pipe_config->base);
Daniel Vetter229fca92014-04-24 23:55:09 +02005640
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005641 intel_crtc->active = true;
Paulo Zanoni86642812013-04-12 17:57:57 -03005642
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005643 intel_encoders_pre_enable(crtc, pipe_config, old_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005644
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005645 if (!transcoder_is_dsi(cpu_transcoder))
Ander Conselvan de Oliveira3dc38ee2017-03-02 14:58:56 +02005646 intel_ddi_enable_pipe_clock(pipe_config);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005647
Imre Deaked69cd42017-10-02 10:55:57 +03005648 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
5649 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
5650 intel_crtc->config->pch_pfit.enabled;
5651 if (psl_clkgate_wa)
5652 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
5653
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00005654 if (INTEL_GEN(dev_priv) >= 9)
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02005655 skylake_pfit_enable(intel_crtc);
Jesse Barnesff6d9f52015-01-21 17:19:54 -08005656 else
Rodrigo Vivi1c132b42015-09-02 15:19:26 -07005657 ironlake_pfit_enable(intel_crtc);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005658
5659 /*
5660 * On ILK+ LUT must be loaded before the pipe is running but with
5661 * clocks enabled
5662 */
Maarten Lankhorstb95c5322016-03-30 17:16:34 +02005663 intel_color_load_luts(&pipe_config->base);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005664
Ander Conselvan de Oliveira3dc38ee2017-03-02 14:58:56 +02005665 intel_ddi_set_pipe_settings(pipe_config);
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005666 if (!transcoder_is_dsi(cpu_transcoder))
Ander Conselvan de Oliveira3dc38ee2017-03-02 14:58:56 +02005667 intel_ddi_enable_transcoder_func(pipe_config);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005668
Imre Deak1d5bf5d2016-02-29 22:10:33 +02005669 if (dev_priv->display.initial_watermarks != NULL)
Ville Syrjälä3125d392016-11-28 19:37:03 +02005670 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
Jani Nikula4d1de972016-03-18 17:05:42 +02005671
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005672 if (INTEL_GEN(dev_priv) >= 11)
5673 icl_pipe_mbus_enable(intel_crtc);
5674
Jani Nikula4d1de972016-03-18 17:05:42 +02005675 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005676 if (!transcoder_is_dsi(cpu_transcoder))
Ville Syrjälä4972f702017-11-29 17:37:32 +02005677 intel_enable_pipe(pipe_config);
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005678
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005679 if (intel_crtc->config->has_pch_encoder)
Ander Conselvan de Oliveira2ce42272017-03-02 14:58:53 +02005680 lpt_pch_enable(pipe_config);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005681
Ville Syrjälä00370712016-11-14 19:44:06 +02005682 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
Ander Conselvan de Oliveira3dc38ee2017-03-02 14:58:56 +02005683 intel_ddi_set_vc_payload_alloc(pipe_config, true);
Dave Airlie0e32b392014-05-02 14:02:48 +10005684
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01005685 assert_vblank_disabled(crtc);
5686 drm_crtc_vblank_on(crtc);
5687
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005688 intel_encoders_enable(crtc, pipe_config, old_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005689
Imre Deaked69cd42017-10-02 10:55:57 +03005690 if (psl_clkgate_wa) {
5691 intel_wait_for_vblank(dev_priv, pipe);
5692 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
5693 }
5694
Paulo Zanonie4916942013-09-20 16:21:19 -03005695 /* If we change the relative order between pipe/planes enabling, we need
5696 * to change the workaround. */
Maarten Lankhorst99d736a2015-06-01 12:50:09 +02005697 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01005698 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02005699 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
5700 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
Maarten Lankhorst99d736a2015-06-01 12:50:09 +02005701 }
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005702}
5703
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02005704static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005705{
5706 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005707 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005708 int pipe = crtc->pipe;
5709
5710 /* To avoid upsetting the power well on haswell only disable the pfit if
5711 * it's in use. The hw state code will make sure we get this right. */
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02005712 if (force || crtc->config->pch_pfit.enabled) {
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005713 I915_WRITE(PF_CTL(pipe), 0);
5714 I915_WRITE(PF_WIN_POS(pipe), 0);
5715 I915_WRITE(PF_WIN_SZ(pipe), 0);
5716 }
5717}
5718
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005719static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
5720 struct drm_atomic_state *old_state)
Jesse Barnes6be4a602010-09-10 10:26:01 -07005721{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005722 struct drm_crtc *crtc = old_crtc_state->base.crtc;
Jesse Barnes6be4a602010-09-10 10:26:01 -07005723 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005724 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005725 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5726 int pipe = intel_crtc->pipe;
Jesse Barnes6be4a602010-09-10 10:26:01 -07005727
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005728 /*
5729 * Sometimes spurious CPU pipe underruns happen when the
5730 * pipe is already disabled, but FDI RX/TX is still enabled.
5731 * Happens at least with VGA+HDMI cloning. Suppress them.
5732 */
5733 if (intel_crtc->config->has_pch_encoder) {
5734 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
Ville Syrjälä37ca8d42015-10-30 19:20:27 +02005735 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005736 }
Ville Syrjälä37ca8d42015-10-30 19:20:27 +02005737
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005738 intel_encoders_disable(crtc, old_crtc_state, old_state);
Daniel Vetterea9d7582012-07-10 10:42:52 +02005739
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01005740 drm_crtc_vblank_off(crtc);
5741 assert_vblank_disabled(crtc);
5742
Ville Syrjälä4972f702017-11-29 17:37:32 +02005743 intel_disable_pipe(old_crtc_state);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005744
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02005745 ironlake_pfit_disable(intel_crtc, false);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005746
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005747 if (intel_crtc->config->has_pch_encoder)
Ville Syrjälä5a74f702015-05-05 17:17:38 +03005748 ironlake_fdi_disable(crtc);
5749
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005750 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005751
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005752 if (intel_crtc->config->has_pch_encoder) {
Daniel Vetterd925c592013-06-05 13:34:04 +02005753 ironlake_disable_pch_transcoder(dev_priv, pipe);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005754
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01005755 if (HAS_PCH_CPT(dev_priv)) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02005756 i915_reg_t reg;
5757 u32 temp;
5758
Daniel Vetterd925c592013-06-05 13:34:04 +02005759 /* disable TRANS_DP_CTL */
5760 reg = TRANS_DP_CTL(pipe);
5761 temp = I915_READ(reg);
5762 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5763 TRANS_DP_PORT_SEL_MASK);
5764 temp |= TRANS_DP_PORT_SEL_NONE;
5765 I915_WRITE(reg, temp);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005766
Daniel Vetterd925c592013-06-05 13:34:04 +02005767 /* disable DPLL_SEL */
5768 temp = I915_READ(PCH_DPLL_SEL);
Daniel Vetter11887392013-06-05 13:34:09 +02005769 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
Daniel Vetterd925c592013-06-05 13:34:04 +02005770 I915_WRITE(PCH_DPLL_SEL, temp);
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08005771 }
Daniel Vetterd925c592013-06-05 13:34:04 +02005772
Daniel Vetterd925c592013-06-05 13:34:04 +02005773 ironlake_fdi_pll_disable(intel_crtc);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005774 }
Ville Syrjälä81b088c2015-10-30 19:21:31 +02005775
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005776 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
Ville Syrjälä81b088c2015-10-30 19:21:31 +02005777 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005778}
5779
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005780static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
5781 struct drm_atomic_state *old_state)
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005782{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005783 struct drm_crtc *crtc = old_crtc_state->base.crtc;
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00005784 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005785 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005786 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005787
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005788 intel_encoders_disable(crtc, old_crtc_state, old_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005789
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01005790 drm_crtc_vblank_off(crtc);
5791 assert_vblank_disabled(crtc);
5792
Jani Nikula4d1de972016-03-18 17:05:42 +02005793 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005794 if (!transcoder_is_dsi(cpu_transcoder))
Ville Syrjälä4972f702017-11-29 17:37:32 +02005795 intel_disable_pipe(old_crtc_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005796
Ville Syrjälä00370712016-11-14 19:44:06 +02005797 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
Ander Conselvan de Oliveira3dc38ee2017-03-02 14:58:56 +02005798 intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
Ville Syrjäläa4bf2142014-08-18 21:27:34 +03005799
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005800 if (!transcoder_is_dsi(cpu_transcoder))
Shashank Sharma7d4aefd2015-10-01 22:23:49 +05305801 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005802
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00005803 if (INTEL_GEN(dev_priv) >= 9)
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02005804 skylake_scaler_disable(intel_crtc);
Jesse Barnesff6d9f52015-01-21 17:19:54 -08005805 else
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +02005806 ironlake_pfit_disable(intel_crtc, false);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005807
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005808 if (!transcoder_is_dsi(cpu_transcoder))
Ander Conselvan de Oliveira3dc38ee2017-03-02 14:58:56 +02005809 intel_ddi_disable_pipe_clock(intel_crtc->config);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005810
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005811 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
Paulo Zanonic27e9172018-04-27 16:14:36 -07005812
5813 if (INTEL_GEN(dev_priv) >= 11)
5814 icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005815}
5816
Jesse Barnes2dd24552013-04-25 12:55:01 -07005817static void i9xx_pfit_enable(struct intel_crtc *crtc)
5818{
5819 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005820 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005821 struct intel_crtc_state *pipe_config = crtc->config;
Jesse Barnes2dd24552013-04-25 12:55:01 -07005822
Ander Conselvan de Oliveira681a8502015-01-15 14:55:24 +02005823 if (!pipe_config->gmch_pfit.control)
Jesse Barnes2dd24552013-04-25 12:55:01 -07005824 return;
5825
Daniel Vetterc0b03412013-05-28 12:05:54 +02005826 /*
5827 * The panel fitter should only be adjusted whilst the pipe is disabled,
5828 * according to register description and PRM.
5829 */
Jesse Barnes2dd24552013-04-25 12:55:01 -07005830 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5831 assert_pipe_disabled(dev_priv, crtc->pipe);
5832
Jesse Barnesb074cec2013-04-25 12:55:02 -07005833 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5834 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
Daniel Vetter5a80c452013-04-25 22:52:18 +02005835
5836 /* Border color in case we don't scale up to the full screen. Black by
5837 * default, change to something else for debugging. */
5838 I915_WRITE(BCLRPAT(crtc->pipe), 0);
Jesse Barnes2dd24552013-04-25 12:55:01 -07005839}
5840
Ander Conselvan de Oliveira79f255a2017-02-22 08:34:27 +02005841enum intel_display_power_domain intel_port_to_power_domain(enum port port)
Dave Airlied05410f2014-06-05 13:22:59 +10005842{
5843 switch (port) {
5844 case PORT_A:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01005845 return POWER_DOMAIN_PORT_DDI_A_LANES;
Dave Airlied05410f2014-06-05 13:22:59 +10005846 case PORT_B:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01005847 return POWER_DOMAIN_PORT_DDI_B_LANES;
Dave Airlied05410f2014-06-05 13:22:59 +10005848 case PORT_C:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01005849 return POWER_DOMAIN_PORT_DDI_C_LANES;
Dave Airlied05410f2014-06-05 13:22:59 +10005850 case PORT_D:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01005851 return POWER_DOMAIN_PORT_DDI_D_LANES;
Xiong Zhangd8e19f92015-08-13 18:00:12 +08005852 case PORT_E:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01005853 return POWER_DOMAIN_PORT_DDI_E_LANES;
Rodrigo Vivi9787e832018-01-29 15:22:22 -08005854 case PORT_F:
5855 return POWER_DOMAIN_PORT_DDI_F_LANES;
Dave Airlied05410f2014-06-05 13:22:59 +10005856 default:
Imre Deakb9fec162015-11-18 15:57:25 +02005857 MISSING_CASE(port);
Dave Airlied05410f2014-06-05 13:22:59 +10005858 return POWER_DOMAIN_PORT_OTHER;
5859 }
5860}
5861
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02005862static u64 get_crtc_power_domains(struct drm_crtc *crtc,
5863 struct intel_crtc_state *crtc_state)
Imre Deak319be8a2014-03-04 19:22:57 +02005864{
5865 struct drm_device *dev = crtc->dev;
Maarten Lankhorst37255d82016-12-15 15:29:43 +01005866 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01005867 struct drm_encoder *encoder;
Imre Deak319be8a2014-03-04 19:22:57 +02005868 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5869 enum pipe pipe = intel_crtc->pipe;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02005870 u64 mask;
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01005871 enum transcoder transcoder = crtc_state->cpu_transcoder;
Imre Deak77d22dc2014-03-05 16:20:52 +02005872
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01005873 if (!crtc_state->base.active)
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005874 return 0;
5875
Imre Deak17bd6e62018-01-09 14:20:40 +02005876 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
5877 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01005878 if (crtc_state->pch_pfit.enabled ||
5879 crtc_state->pch_pfit.force_thru)
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02005880 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
Imre Deak77d22dc2014-03-05 16:20:52 +02005881
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01005882 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5883 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5884
Ander Conselvan de Oliveira79f255a2017-02-22 08:34:27 +02005885 mask |= BIT_ULL(intel_encoder->power_domain);
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01005886 }
Imre Deak319be8a2014-03-04 19:22:57 +02005887
Maarten Lankhorst37255d82016-12-15 15:29:43 +01005888 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
Imre Deak17bd6e62018-01-09 14:20:40 +02005889 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
Maarten Lankhorst37255d82016-12-15 15:29:43 +01005890
Maarten Lankhorst15e7ec22016-03-14 09:27:54 +01005891 if (crtc_state->shared_dpll)
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02005892 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
Maarten Lankhorst15e7ec22016-03-14 09:27:54 +01005893
Imre Deak77d22dc2014-03-05 16:20:52 +02005894 return mask;
5895}
5896
Ander Conselvan de Oliveirad2d15012017-02-13 16:57:33 +02005897static u64
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01005898modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5899 struct intel_crtc_state *crtc_state)
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005900{
Chris Wilsonfac5e232016-07-04 11:34:36 +01005901 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005902 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5903 enum intel_display_power_domain domain;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02005904 u64 domains, new_domains, old_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005905
5906 old_domains = intel_crtc->enabled_power_domains;
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01005907 intel_crtc->enabled_power_domains = new_domains =
5908 get_crtc_power_domains(crtc, crtc_state);
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005909
Daniel Vetter5a21b662016-05-24 17:13:53 +02005910 domains = new_domains & ~old_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005911
5912 for_each_power_domain(domain, domains)
5913 intel_display_power_get(dev_priv, domain);
5914
Daniel Vetter5a21b662016-05-24 17:13:53 +02005915 return old_domains & ~new_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005916}
5917
5918static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02005919 u64 domains)
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005920{
5921 enum intel_display_power_domain domain;
5922
5923 for_each_power_domain(domain, domains)
5924 intel_display_power_put(dev_priv, domain);
5925}
5926
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005927static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
5928 struct drm_atomic_state *old_state)
Jesse Barnes89b667f2013-04-18 14:51:36 -07005929{
Ville Syrjäläff32c542017-03-02 19:14:57 +02005930 struct intel_atomic_state *old_intel_state =
5931 to_intel_atomic_state(old_state);
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005932 struct drm_crtc *crtc = pipe_config->base.crtc;
Jesse Barnes89b667f2013-04-18 14:51:36 -07005933 struct drm_device *dev = crtc->dev;
Daniel Vettera72e4c92014-09-30 10:56:47 +02005934 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes89b667f2013-04-18 14:51:36 -07005935 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Jesse Barnes89b667f2013-04-18 14:51:36 -07005936 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07005937
Maarten Lankhorst53d9f4e2015-06-01 12:49:52 +02005938 if (WARN_ON(intel_crtc->active))
Jesse Barnes89b667f2013-04-18 14:51:36 -07005939 return;
5940
Ville Syrjälä37a56502016-06-22 21:57:04 +03005941 if (intel_crtc_has_dp_encoder(intel_crtc->config))
Ramalingam Cfe3cd482015-02-13 15:32:59 +05305942 intel_dp_set_m_n(intel_crtc, M1_N1);
Daniel Vetter5b18e572014-04-24 23:55:06 +02005943
5944 intel_set_pipe_timings(intel_crtc);
Jani Nikulabc58be62016-03-18 17:05:39 +02005945 intel_set_pipe_src_size(intel_crtc);
Daniel Vetter5b18e572014-04-24 23:55:06 +02005946
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01005947 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
Chris Wilsonfac5e232016-07-04 11:34:36 +01005948 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläc14b0482014-10-16 20:52:34 +03005949
5950 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
5951 I915_WRITE(CHV_CANVAS(pipe), 0);
5952 }
5953
Daniel Vetter5b18e572014-04-24 23:55:06 +02005954 i9xx_set_pipeconf(intel_crtc);
5955
Jesse Barnes89b667f2013-04-18 14:51:36 -07005956 intel_crtc->active = true;
Jesse Barnes89b667f2013-04-18 14:51:36 -07005957
Daniel Vettera72e4c92014-09-30 10:56:47 +02005958 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
Ville Syrjälä4a3436e2014-05-16 19:40:25 +03005959
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005960 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
Jesse Barnes89b667f2013-04-18 14:51:36 -07005961
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01005962 if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03005963 chv_prepare_pll(intel_crtc, intel_crtc->config);
5964 chv_enable_pll(intel_crtc, intel_crtc->config);
5965 } else {
5966 vlv_prepare_pll(intel_crtc, intel_crtc->config);
5967 vlv_enable_pll(intel_crtc, intel_crtc->config);
Chon Ming Lee9d556c92014-05-02 14:27:47 +03005968 }
Jesse Barnes89b667f2013-04-18 14:51:36 -07005969
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005970 intel_encoders_pre_enable(crtc, pipe_config, old_state);
Jesse Barnes89b667f2013-04-18 14:51:36 -07005971
Jesse Barnes2dd24552013-04-25 12:55:01 -07005972 i9xx_pfit_enable(intel_crtc);
5973
Maarten Lankhorstb95c5322016-03-30 17:16:34 +02005974 intel_color_load_luts(&pipe_config->base);
Ville Syrjälä63cbb072013-06-04 13:48:59 +03005975
Ville Syrjäläff32c542017-03-02 19:14:57 +02005976 dev_priv->display.initial_watermarks(old_intel_state,
5977 pipe_config);
Ville Syrjälä4972f702017-11-29 17:37:32 +02005978 intel_enable_pipe(pipe_config);
Daniel Vetterbe6a6f82014-04-15 18:41:22 +02005979
Ville Syrjälä4b3a9522014-08-14 22:04:37 +03005980 assert_vblank_disabled(crtc);
5981 drm_crtc_vblank_on(crtc);
5982
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005983 intel_encoders_enable(crtc, pipe_config, old_state);
Jesse Barnes89b667f2013-04-18 14:51:36 -07005984}
5985
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02005986static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
5987{
5988 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005989 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02005990
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02005991 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
5992 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02005993}
5994
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005995static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
5996 struct drm_atomic_state *old_state)
Zhenyu Wang2c072452009-06-05 15:38:42 +08005997{
Ville Syrjälä04548cb2017-04-21 21:14:29 +03005998 struct intel_atomic_state *old_intel_state =
5999 to_intel_atomic_state(old_state);
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006000 struct drm_crtc *crtc = pipe_config->base.crtc;
Zhenyu Wang2c072452009-06-05 15:38:42 +08006001 struct drm_device *dev = crtc->dev;
Daniel Vettera72e4c92014-09-30 10:56:47 +02006002 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes79e53942008-11-07 14:24:08 -08006003 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006004 enum pipe pipe = intel_crtc->pipe;
Jesse Barnes79e53942008-11-07 14:24:08 -08006005
Maarten Lankhorst53d9f4e2015-06-01 12:49:52 +02006006 if (WARN_ON(intel_crtc->active))
Chris Wilsonf7abfe82010-09-13 14:19:16 +01006007 return;
6008
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006009 i9xx_set_pll_dividers(intel_crtc);
6010
Ville Syrjälä37a56502016-06-22 21:57:04 +03006011 if (intel_crtc_has_dp_encoder(intel_crtc->config))
Ramalingam Cfe3cd482015-02-13 15:32:59 +05306012 intel_dp_set_m_n(intel_crtc, M1_N1);
Daniel Vetter5b18e572014-04-24 23:55:06 +02006013
6014 intel_set_pipe_timings(intel_crtc);
Jani Nikulabc58be62016-03-18 17:05:39 +02006015 intel_set_pipe_src_size(intel_crtc);
Daniel Vetter5b18e572014-04-24 23:55:06 +02006016
Daniel Vetter5b18e572014-04-24 23:55:06 +02006017 i9xx_set_pipeconf(intel_crtc);
6018
Chris Wilsonf7abfe82010-09-13 14:19:16 +01006019 intel_crtc->active = true;
Chris Wilson6b383a72010-09-13 13:54:26 +01006020
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01006021 if (!IS_GEN2(dev_priv))
Daniel Vettera72e4c92014-09-30 10:56:47 +02006022 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
Ville Syrjälä4a3436e2014-05-16 19:40:25 +03006023
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006024 intel_encoders_pre_enable(crtc, pipe_config, old_state);
Mika Kuoppala9d6d9f12013-02-08 16:35:38 +02006025
Ville Syrjälä939994d2017-09-13 17:08:56 +03006026 i9xx_enable_pll(intel_crtc, pipe_config);
Daniel Vetterf6736a12013-06-05 13:34:30 +02006027
Jesse Barnes2dd24552013-04-25 12:55:01 -07006028 i9xx_pfit_enable(intel_crtc);
6029
Maarten Lankhorstb95c5322016-03-30 17:16:34 +02006030 intel_color_load_luts(&pipe_config->base);
Ville Syrjälä63cbb072013-06-04 13:48:59 +03006031
Ville Syrjälä04548cb2017-04-21 21:14:29 +03006032 if (dev_priv->display.initial_watermarks != NULL)
6033 dev_priv->display.initial_watermarks(old_intel_state,
6034 intel_crtc->config);
6035 else
6036 intel_update_watermarks(intel_crtc);
Ville Syrjälä4972f702017-11-29 17:37:32 +02006037 intel_enable_pipe(pipe_config);
Daniel Vetterbe6a6f82014-04-15 18:41:22 +02006038
Ville Syrjälä4b3a9522014-08-14 22:04:37 +03006039 assert_vblank_disabled(crtc);
6040 drm_crtc_vblank_on(crtc);
6041
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006042 intel_encoders_enable(crtc, pipe_config, old_state);
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006043}
6044
Daniel Vetter87476d62013-04-11 16:29:06 +02006045static void i9xx_pfit_disable(struct intel_crtc *crtc)
6046{
6047 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01006048 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter328d8e82013-05-08 10:36:31 +02006049
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02006050 if (!crtc->config->gmch_pfit.control)
Daniel Vetter328d8e82013-05-08 10:36:31 +02006051 return;
Daniel Vetter87476d62013-04-11 16:29:06 +02006052
6053 assert_pipe_disabled(dev_priv, crtc->pipe);
6054
Daniel Vetter328d8e82013-05-08 10:36:31 +02006055 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6056 I915_READ(PFIT_CONTROL));
6057 I915_WRITE(PFIT_CONTROL, 0);
Daniel Vetter87476d62013-04-11 16:29:06 +02006058}
6059
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006060static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6061 struct drm_atomic_state *old_state)
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006062{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006063 struct drm_crtc *crtc = old_crtc_state->base.crtc;
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006064 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01006065 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006066 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6067 int pipe = intel_crtc->pipe;
Daniel Vetteref9c3ae2012-06-29 22:40:09 +02006068
Ville Syrjälä6304cd92014-04-25 13:30:12 +03006069 /*
6070 * On gen2 planes are double buffered but the pipe isn't, so we must
6071 * wait for planes to fully turn off before disabling the pipe.
6072 */
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01006073 if (IS_GEN2(dev_priv))
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02006074 intel_wait_for_vblank(dev_priv, pipe);
Ville Syrjälä6304cd92014-04-25 13:30:12 +03006075
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006076 intel_encoders_disable(crtc, old_crtc_state, old_state);
Ville Syrjälä4b3a9522014-08-14 22:04:37 +03006077
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01006078 drm_crtc_vblank_off(crtc);
6079 assert_vblank_disabled(crtc);
6080
Ville Syrjälä4972f702017-11-29 17:37:32 +02006081 intel_disable_pipe(old_crtc_state);
Mika Kuoppala24a1f162013-02-08 16:35:37 +02006082
Daniel Vetter87476d62013-04-11 16:29:06 +02006083 i9xx_pfit_disable(intel_crtc);
Mika Kuoppala24a1f162013-02-08 16:35:37 +02006084
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006085 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006086
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03006087 if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01006088 if (IS_CHERRYVIEW(dev_priv))
Chon Ming Lee076ed3b2014-04-09 13:28:17 +03006089 chv_disable_pll(dev_priv, pipe);
Tvrtko Ursulin11a914c2016-10-13 11:03:08 +01006090 else if (IS_VALLEYVIEW(dev_priv))
Chon Ming Lee076ed3b2014-04-09 13:28:17 +03006091 vlv_disable_pll(dev_priv, pipe);
6092 else
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03006093 i9xx_disable_pll(intel_crtc);
Chon Ming Lee076ed3b2014-04-09 13:28:17 +03006094 }
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006095
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006096 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
Ville Syrjäläd6db9952015-07-08 23:45:49 +03006097
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01006098 if (!IS_GEN2(dev_priv))
Daniel Vettera72e4c92014-09-30 10:56:47 +02006099 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
Ville Syrjäläff32c542017-03-02 19:14:57 +02006100
6101 if (!dev_priv->display.initial_watermarks)
6102 intel_update_watermarks(intel_crtc);
Ville Syrjälä2ee0da12017-06-01 17:36:16 +03006103
6104 /* clock the pipe down to 640x480@60 to potentially save power */
6105 if (IS_I830(dev_priv))
6106 i830_enable_pipe(dev_priv, pipe);
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006107}
6108
Ville Syrjäläda1d0e22017-06-01 17:36:14 +03006109static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6110 struct drm_modeset_acquire_ctx *ctx)
Jesse Barnesee7b9f92012-04-20 17:11:53 +01006111{
Maarten Lankhorst842e0302016-03-02 15:48:01 +01006112 struct intel_encoder *encoder;
Daniel Vetter0e572fe2014-04-24 23:55:42 +02006113 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Maarten Lankhorstb17d48e2015-06-12 11:15:39 +02006114 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Daniel Vetter0e572fe2014-04-24 23:55:42 +02006115 enum intel_display_power_domain domain;
Ville Syrjäläb1e01592017-11-17 21:19:09 +02006116 struct intel_plane *plane;
Ander Conselvan de Oliveirad2d15012017-02-13 16:57:33 +02006117 u64 domains;
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006118 struct drm_atomic_state *state;
6119 struct intel_crtc_state *crtc_state;
6120 int ret;
Daniel Vetter976f8a22012-07-08 22:34:21 +02006121
Maarten Lankhorstb17d48e2015-06-12 11:15:39 +02006122 if (!intel_crtc->active)
6123 return;
Daniel Vetter0e572fe2014-04-24 23:55:42 +02006124
Ville Syrjäläb1e01592017-11-17 21:19:09 +02006125 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6126 const struct intel_plane_state *plane_state =
6127 to_intel_plane_state(plane->base.state);
Maarten Lankhorst54a419612015-11-23 10:25:28 +01006128
Ville Syrjäläb1e01592017-11-17 21:19:09 +02006129 if (plane_state->base.visible)
6130 intel_plane_disable_noatomic(intel_crtc, plane);
Maarten Lankhorsta5392052015-06-15 12:33:52 +02006131 }
6132
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006133 state = drm_atomic_state_alloc(crtc->dev);
Ander Conselvan de Oliveira31bb2ef2017-01-20 16:28:45 +02006134 if (!state) {
6135 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6136 crtc->base.id, crtc->name);
6137 return;
6138 }
6139
Ville Syrjäläda1d0e22017-06-01 17:36:14 +03006140 state->acquire_ctx = ctx;
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006141
6142 /* Everything's already locked, -EDEADLK can't happen. */
6143 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6144 ret = drm_atomic_add_affected_connectors(state, crtc);
6145
6146 WARN_ON(IS_ERR(crtc_state) || ret);
6147
6148 dev_priv->display.crtc_disable(crtc_state, state);
6149
Chris Wilson08536952016-10-14 13:18:18 +01006150 drm_atomic_state_put(state);
Maarten Lankhorst842e0302016-03-02 15:48:01 +01006151
Ville Syrjälä78108b72016-05-27 20:59:19 +03006152 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6153 crtc->base.id, crtc->name);
Maarten Lankhorst842e0302016-03-02 15:48:01 +01006154
6155 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6156 crtc->state->active = false;
Matt Roper37d90782015-09-24 15:53:06 -07006157 intel_crtc->active = false;
Maarten Lankhorst842e0302016-03-02 15:48:01 +01006158 crtc->enabled = false;
6159 crtc->state->connector_mask = 0;
6160 crtc->state->encoder_mask = 0;
6161
6162 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6163 encoder->base.crtc = NULL;
6164
Paulo Zanoni58f9c0b2016-01-19 11:35:51 -02006165 intel_fbc_disable(intel_crtc);
Ville Syrjälä432081b2016-10-31 22:37:03 +02006166 intel_update_watermarks(intel_crtc);
Maarten Lankhorst1f7457b2015-07-13 11:55:05 +02006167 intel_disable_shared_dpll(intel_crtc);
Daniel Vetter0e572fe2014-04-24 23:55:42 +02006168
Maarten Lankhorstb17d48e2015-06-12 11:15:39 +02006169 domains = intel_crtc->enabled_power_domains;
6170 for_each_power_domain(domain, domains)
6171 intel_display_power_put(dev_priv, domain);
6172 intel_crtc->enabled_power_domains = 0;
Maarten Lankhorst565602d2015-12-10 12:33:57 +01006173
6174 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
Ville Syrjäläd305e062017-08-30 21:57:03 +03006175 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
Ville Syrjälä53e9bf52017-10-24 12:52:14 +03006176 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
Maarten Lankhorstb17d48e2015-06-12 11:15:39 +02006177}
6178
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006179/*
6180 * turn all crtc's off, but do not adjust state
6181 * This has to be paired with a call to intel_modeset_setup_hw_state.
6182 */
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006183int intel_display_suspend(struct drm_device *dev)
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006184{
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006185 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006186 struct drm_atomic_state *state;
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006187 int ret;
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006188
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006189 state = drm_atomic_helper_suspend(dev);
6190 ret = PTR_ERR_OR_ZERO(state);
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006191 if (ret)
6192 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006193 else
6194 dev_priv->modeset_restore_state = state;
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006195 return ret;
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006196}
6197
/*
 * Common drm_encoder .destroy callback: tear down the core encoder state
 * and free the embedding intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
6205
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). Pure diagnostics: only emits I915_STATE_WARNs, never
 * changes any state. */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST connectors don't have a 1:1 encoder mapping; skip the
		 * encoder cross checks for them. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6244
/*
 * Second-stage init for intel connectors: allocate and reset the atomic
 * connector state embedded in an intel_digital_connector_state.
 * Returns 0 on success or -ENOMEM.
 */
int intel_connector_init(struct intel_connector *connector)
{
	struct intel_digital_connector_state *conn_state;

	/*
	 * Allocate enough memory to hold intel_digital_connector_state,
	 * This might be a few bytes too many, but for connectors that don't
	 * need it we'll free the state and allocate a smaller one on the first
	 * successful commit anyway.
	 */
	conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
	if (!conn_state)
		return -ENOMEM;

	__drm_atomic_helper_connector_reset(&connector->base,
					    &conn_state->base);

	return 0;
}
6264
6265struct intel_connector *intel_connector_alloc(void)
6266{
6267 struct intel_connector *connector;
6268
6269 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6270 if (!connector)
6271 return NULL;
6272
6273 if (intel_connector_init(connector) < 0) {
6274 kfree(connector);
6275 return NULL;
6276 }
6277
6278 return connector;
6279}
6280
/*
 * Free the bits allocated by intel_connector_alloc.
 * This should only be used after intel_connector_alloc has returned
 * successfully, and before drm_connector_init returns successfully.
 * Otherwise the destroy callbacks for the connector and the state should
 * take care of proper cleanup/free
 */
void intel_connector_free(struct intel_connector *connector)
{
	/* Frees the intel_digital_connector_state set up by
	 * intel_connector_init(), then the connector itself. */
	kfree(to_intel_digital_connector_state(connector->base.state));
	kfree(connector);
}
6293
/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. The pipe reported by the encoder hook is discarded here. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}
6304
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006305static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
Ville Syrjäläd272ddf2015-03-11 18:52:31 +02006306{
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006307 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6308 return crtc_state->fdi_lanes;
Ville Syrjäläd272ddf2015-03-11 18:52:31 +02006309
6310 return 0;
6311}
6312
/*
 * Validate the FDI lane count of @pipe_config against global constraints:
 * at most 4 lanes (only 2 on HSW/BDW), and on 3-pipe parts pipes B and C
 * share FDI lanes, so each pipe's requirement is cross-checked against the
 * other's via the atomic state.  Returns 0 if the configuration fits,
 * -EINVAL if not, or an error from fetching the other crtc's state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* Pipe B wants more than 2 lanes: pipe C must be idle. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C can only use 2 lanes, and only if pipe B uses <= 2. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6384
/*
 * Compute the FDI link configuration (lane count and M/N values) for the
 * given mode.  When the lane check fails with -EINVAL, the pipe bpp is
 * reduced in steps of 6 (down to 18bpp) and the computation retried;
 * returns RETRY when the caller must recompute with the reduced bpp,
 * 0 on success, or a negative error code.
 */
#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	/* Last argument false: no M/N reduction for FDI. */
	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6430
/*
 * Whether IPS can possibly be used with @crtc_state: requires hardware
 * support (hsw_crtc_supports_ips), the enable_ips module parameter,
 * pipe_bpp <= 24, and on BDW a pixel rate below 95% of the maximum cdclk.
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!i915_modparams.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
6459
/*
 * Decide whether IPS should actually be enabled for this crtc state.
 * On top of the basic capability check this honours the force-disable
 * flag, requires at least one non-cursor plane, and on BDW checks the
 * pixel rate against the logical cdclk of the atomic state rather than
 * the hardware maximum.
 */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return false;

	if (crtc_state->ips_force_disable)
		return false;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return false;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
		return false;

	return true;
}
6484
Ville Syrjälä39acb4a2015-10-30 23:39:38 +02006485static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6486{
6487 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6488
6489 /* GDG double wide on either pipe, otherwise pipe A only */
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +00006490 return INTEL_GEN(dev_priv) < 4 &&
Ville Syrjälä39acb4a2015-10-30 23:39:38 +02006491 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6492}
6493
/*
 * Effective pipe pixel rate for ILK-style pipes, based on the adjusted
 * mode clock.  When the PCH panel fitter is enabled and the pipe source
 * is larger than the pfit window, the rate is scaled up by the area
 * ratio pipe/pfit.
 */
static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		/* pfit window: width in the high 16 bits, height in the low 16 */
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		/* Only scale up (downscaling case); never below 1:1. */
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		/* Guard against division by zero from a bogus pfit window. */
		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
6528
Ville Syrjäläa7d1b3f2017-01-26 21:50:31 +02006529static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6530{
6531 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6532
6533 if (HAS_GMCH_DISPLAY(dev_priv))
6534 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6535 crtc_state->pixel_rate =
6536 crtc_state->base.adjusted_mode.crtc_clock;
6537 else
6538 crtc_state->pixel_rate =
6539 ilk_pipe_pixel_rate(crtc_state);
6540}
6541
/*
 * Validate and fix up pipe-level constraints for @crtc: dotclock limits
 * (with double wide handling on gen < 4), YCbCr 4:2:0 vs CTM exclusivity,
 * even pipe source width requirements, the zero-hsync-front-porch
 * workaround, pixel rate computation, and FDI configuration for
 * PCH-attached encoders.  Returns 0, RETRY (via the FDI path) or a
 * negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6614
Zhenyu Wang2c072452009-06-05 15:38:42 +08006615static void
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006616intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
Zhenyu Wang2c072452009-06-05 15:38:42 +08006617{
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006618 while (*num > DATA_LINK_M_N_MASK ||
6619 *den > DATA_LINK_M_N_MASK) {
Zhenyu Wang2c072452009-06-05 15:38:42 +08006620 *num >>= 1;
6621 *den >>= 1;
6622 }
6623}
6624
/*
 * Compute M/N register values for the ratio m/n: N is rounded up to a
 * power of two (capped at DATA_LINK_N_MAX), M is scaled to keep the
 * ratio, and both are finally shrunk to fit the register fields.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n,
			bool reduce_m_n)
{
	/*
	 * Reduce M/N as much as possible without loss in precision. Several DP
	 * dongles in particular seem to be fussy about too large *link* M/N
	 * values. The passed in values are more likely to have the least
	 * significant bits zero than M after rounding below, so do this first.
	 */
	if (reduce_m_n) {
		while ((m & 1) == 0 && (n & 1) == 0) {
			m >>= 1;
			n >>= 1;
		}
	}

	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
6646
/*
 * Fill in @m_n with a fixed TU size of 64 plus the data M/N (pixel data
 * rate vs total link byte rate across all lanes) and link M/N (pixel
 * clock vs link clock) ratios.  @reduce_m_n is forwarded to compute_m_n().
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool reduce_m_n)
{
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    reduce_m_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    reduce_m_n);
}
6664
/*
 * Whether panels should use spread spectrum clocking: the module
 * parameter (when >= 0) overrides the VBT value, and the
 * QUIRK_LVDS_SSC_DISABLE quirk force-disables the VBT default.
 */
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915_modparams.panel_use_ssc >= 0)
		return i915_modparams.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
6672
/* Pack the FP divisor register value for Pineview: N encoded as (1 << n)
 * in the high word, M2 in the low word (no M1 field). */
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}
Daniel Vetterf47709a2013-03-28 10:42:02 +01006677
/* Pack the FP divisor register value for i9xx: N in bits 16+, M1 in
 * bits 8-15, M2 in the low byte. */
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
6682
/*
 * Compute the FP0/FP1 divisor register values for @crtc_state, using the
 * Pineview or i9xx encoding as appropriate.  FP1 gets the reduced clock
 * divisors only for LVDS when a reduced clock is provided; otherwise it
 * mirrors FP0.
 */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev_priv)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}
6709
/*
 * Work around PLLB opamp always calibrating to the max value of 0x3f by
 * forcing a reasonable value instead, via a fixed read-modify-write
 * sequence on the DPIO sideband registers.  The magic constants come from
 * the hardware programming sequence; their bit-level meaning is not
 * documented here, and the write order must not be changed.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6738
/*
 * Program the PCH transcoder data/link M/N registers for @crtc's pipe.
 * The TU size shares the DATA_M1 register with gmch_m.
 */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
6751
/*
 * Program the CPU transcoder (or, pre-gen5, per-pipe G4X) data/link
 * M/N values for @crtc.
 *
 * @m_n:   primary M1/N1 divider values (always programmed)
 * @m2_n2: optional secondary M2/N2 values; may be NULL.  Only written
 *         when the platform exposes M2_N2 registers (CHV or gen < 8)
 *         and the current config has DRRS enabled — see comment below.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		/* Gen5+: registers are indexed by transcoder */
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* Pre-gen5 (G4X-style): registers are indexed by pipe */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
6784
/*
 * Select and program the DP M/N divider set for @crtc.
 *
 * @m_n: which divider set to program — M1_N1 programs the primary
 *       values (and M2_N2 alongside, where supported); M2_N2 routes
 *       the m2_n2 values into the M1_N1 registers on platforms that
 *       lack dedicated M2_N2 registers (see comment below).
 *
 * NOTE(review): the PCH-encoder branch always programs dp_m_n,
 * ignoring the M2_N2 selection made above — presumably fine because
 * the M2_N2 path (DRRS) is only taken for CPU eDP, but confirm
 * against the callers before relying on it.
 */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
6809
Daniel Vetter251ac862015-06-18 10:30:24 +02006810static void vlv_compute_dpll(struct intel_crtc *crtc,
6811 struct intel_crtc_state *pipe_config)
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006812{
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006813 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006814 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006815 if (crtc->pipe != PIPE_A)
6816 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006817
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006818 /* DPLL not used with DSI, but still need the rest set up */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03006819 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006820 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
6821 DPLL_EXT_BUFFER_ENABLE_VLV;
6822
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006823 pipe_config->dpll_hw_state.dpll_md =
6824 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6825}
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006826
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006827static void chv_compute_dpll(struct intel_crtc *crtc,
6828 struct intel_crtc_state *pipe_config)
6829{
6830 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006831 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006832 if (crtc->pipe != PIPE_A)
6833 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6834
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006835 /* DPLL not used with DSI, but still need the rest set up */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03006836 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006837 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
6838
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006839 pipe_config->dpll_hw_state.dpll_md =
6840 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006841}
6842
Ville Syrjäläd288f652014-10-28 13:20:22 +02006843static void vlv_prepare_pll(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02006844 const struct intel_crtc_state *pipe_config)
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006845{
Daniel Vetterf47709a2013-03-28 10:42:02 +01006846 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01006847 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006848 enum pipe pipe = crtc->pipe;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006849 u32 mdiv;
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006850 u32 bestn, bestm1, bestm2, bestp1, bestp2;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006851 u32 coreclk, reg_val;
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006852
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006853 /* Enable Refclk */
6854 I915_WRITE(DPLL(pipe),
6855 pipe_config->dpll_hw_state.dpll &
6856 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
6857
6858 /* No need to actually set up the DPLL with DSI */
6859 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6860 return;
6861
Ville Syrjäläa5805162015-05-26 20:42:30 +03006862 mutex_lock(&dev_priv->sb_lock);
Daniel Vetter09153002012-12-12 14:06:44 +01006863
Ville Syrjäläd288f652014-10-28 13:20:22 +02006864 bestn = pipe_config->dpll.n;
6865 bestm1 = pipe_config->dpll.m1;
6866 bestm2 = pipe_config->dpll.m2;
6867 bestp1 = pipe_config->dpll.p1;
6868 bestp2 = pipe_config->dpll.p2;
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006869
Jesse Barnes89b667f2013-04-18 14:51:36 -07006870 /* See eDP HDMI DPIO driver vbios notes doc */
6871
6872 /* PLL B needs special handling */
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006873 if (pipe == PIPE_B)
Chon Ming Lee5e69f972013-09-05 20:41:49 +08006874 vlv_pllb_recal_opamp(dev_priv, pipe);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006875
6876 /* Set up Tx target for periodic Rcomp update */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006877 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006878
6879 /* Disable target IRef on PLL */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006880 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
Jesse Barnes89b667f2013-04-18 14:51:36 -07006881 reg_val &= 0x00ffffff;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006882 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006883
6884 /* Disable fast lock */
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006885 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006886
6887 /* Set idtafcrecal before PLL is enabled */
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006888 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
6889 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
6890 mdiv |= ((bestn << DPIO_N_SHIFT));
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006891 mdiv |= (1 << DPIO_K_SHIFT);
Jesse Barnes7df50802013-05-02 10:48:09 -07006892
6893 /*
6894 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
6895 * but we don't support that).
6896 * Note: don't use the DAC post divider as it seems unstable.
6897 */
6898 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006899 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006900
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006901 mdiv |= DPIO_ENABLE_CALIBRATION;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006902 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006903
Jesse Barnes89b667f2013-04-18 14:51:36 -07006904 /* Set HBR and RBR LPF coefficients */
Ville Syrjäläd288f652014-10-28 13:20:22 +02006905 if (pipe_config->port_clock == 162000 ||
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03006906 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
6907 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006908 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
Ville Syrjälä885b01202013-07-05 19:21:38 +03006909 0x009f0003);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006910 else
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006911 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
Jesse Barnes89b667f2013-04-18 14:51:36 -07006912 0x00d0000f);
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006913
Ville Syrjälä37a56502016-06-22 21:57:04 +03006914 if (intel_crtc_has_dp_encoder(pipe_config)) {
Jesse Barnes89b667f2013-04-18 14:51:36 -07006915 /* Use SSC source */
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006916 if (pipe == PIPE_A)
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006917 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
Jesse Barnes89b667f2013-04-18 14:51:36 -07006918 0x0df40000);
6919 else
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006920 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
Jesse Barnes89b667f2013-04-18 14:51:36 -07006921 0x0df70000);
6922 } else { /* HDMI or VGA */
6923 /* Use bend source */
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006924 if (pipe == PIPE_A)
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006925 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
Jesse Barnes89b667f2013-04-18 14:51:36 -07006926 0x0df70000);
6927 else
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006928 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
Jesse Barnes89b667f2013-04-18 14:51:36 -07006929 0x0df40000);
6930 }
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006931
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006932 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
Jesse Barnes89b667f2013-04-18 14:51:36 -07006933 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
Ville Syrjälä2210ce72016-06-22 21:57:05 +03006934 if (intel_crtc_has_dp_encoder(crtc->config))
Jesse Barnes89b667f2013-04-18 14:51:36 -07006935 coreclk |= 0x01000000;
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006936 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006937
Chon Ming Leeab3c7592013-11-07 10:43:30 +08006938 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
Ville Syrjäläa5805162015-05-26 20:42:30 +03006939 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006940}
6941
/*
 * Program the CHV DPIO PHY registers for the PLL described by
 * @pipe_config, leaving the PLL ready to be enabled.
 *
 * The m2 field of struct dpll is packed on CHV: the low 22 bits are
 * the fractional part and the remaining high bits the integer part —
 * see the bestm2_frac/bestm2 split below.  Takes and releases
 * dev_priv->sb_lock around the DPIO accesses.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Unpack dividers; m2 carries a 22-bit fraction in its low bits */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter — coefficients selected by VCO frequency band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7046
Ville Syrjäläd288f652014-10-28 13:20:22 +02007047/**
7048 * vlv_force_pll_on - forcibly enable just the PLL
7049 * @dev_priv: i915 private structure
7050 * @pipe: pipe PLL to enable
7051 * @dpll: PLL configuration
7052 *
7053 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7054 * in cases where we need the PLL enabled even when @pipe is not going to
7055 * be enabled.
7056 */
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007057int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007058 const struct dpll *dpll)
Ville Syrjäläd288f652014-10-28 13:20:22 +02007059{
Ville Syrjäläb91eb5c2016-10-31 22:37:09 +02007060 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007061 struct intel_crtc_state *pipe_config;
7062
7063 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7064 if (!pipe_config)
7065 return -ENOMEM;
7066
7067 pipe_config->base.crtc = &crtc->base;
7068 pipe_config->pixel_multiplier = 1;
7069 pipe_config->dpll = *dpll;
Ville Syrjäläd288f652014-10-28 13:20:22 +02007070
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007071 if (IS_CHERRYVIEW(dev_priv)) {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007072 chv_compute_dpll(crtc, pipe_config);
7073 chv_prepare_pll(crtc, pipe_config);
7074 chv_enable_pll(crtc, pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007075 } else {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007076 vlv_compute_dpll(crtc, pipe_config);
7077 vlv_prepare_pll(crtc, pipe_config);
7078 vlv_enable_pll(crtc, pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007079 }
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007080
7081 kfree(pipe_config);
7082
7083 return 0;
Ville Syrjäläd288f652014-10-28 13:20:22 +02007084}
7085
7086/**
7087 * vlv_force_pll_off - forcibly disable just the PLL
7088 * @dev_priv: i915 private structure
7089 * @pipe: pipe PLL to disable
7090 *
7091 * Disable the PLL for @pipe. To be used in cases where we need
7092 * the PLL enabled even when @pipe is not going to be enabled.
7093 */
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007094void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
Ville Syrjäläd288f652014-10-28 13:20:22 +02007095{
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007096 if (IS_CHERRYVIEW(dev_priv))
7097 chv_disable_pll(dev_priv, pipe);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007098 else
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007099 vlv_disable_pll(dev_priv, pipe);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007100}
7101
/*
 * Compute the gen3/4-era DPLL (and, on gen4+, DPLL_MD) register values
 * into crtc_state->dpll_hw_state.  Also refreshes the FP dividers via
 * i9xx_update_pll_dividers().
 *
 * @reduced_clock: optional lower-power clock; on G4X its p1 is encoded
 *                 into the FPA1 post-divider field.  May be NULL.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These platforms encode the pixel multiplier in the DPLL itself */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also requires the high-speed clock bit */
	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	/* p2 values come from the clock limits tables, hence no default case */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV, spread-spectrum LVDS, or default */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7174
Daniel Vetter251ac862015-06-18 10:30:24 +02007175static void i8xx_compute_dpll(struct intel_crtc *crtc,
7176 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007177 struct dpll *reduced_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007178{
Daniel Vetterf47709a2013-03-28 10:42:02 +01007179 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007180 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007181 u32 dpll;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007182 struct dpll *clock = &crtc_state->dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007183
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007184 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
Vijay Purushothaman2a8f64c2012-09-27 19:13:06 +05307185
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007186 dpll = DPLL_VGA_MODE_DIS;
7187
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007188 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007189 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7190 } else {
7191 if (clock->p1 == 2)
7192 dpll |= PLL_P1_DIVIDE_BY_TWO;
7193 else
7194 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7195 if (clock->p2 == 4)
7196 dpll |= PLL_P2_DIVIDE_BY_4;
7197 }
7198
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01007199 if (!IS_I830(dev_priv) &&
7200 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
Daniel Vetter4a33e482013-07-06 12:52:05 +02007201 dpll |= DPLL_DVO_2X_MODE;
7202
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007203 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
Ander Conselvan de Oliveiraceb41002016-03-21 18:00:02 +02007204 intel_panel_use_ssc(dev_priv))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007205 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7206 else
7207 dpll |= PLL_REF_INPUT_DREFCLK;
7208
7209 dpll |= DPLL_VCO_ENABLE;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007210 crtc_state->dpll_hw_state.dpll = dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007211}
7212
/*
 * Program the pipe/transcoder timing registers (H/V total, blank,
 * sync, and VSYNCSHIFT) from the adjusted mode in intel_crtc->config.
 *
 * For interlaced modes the vtotal/vblank_end values are adjusted by
 * one because the hardware adds the two halflines itself, and a
 * vsyncshift is computed — see comments below.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a different vsyncshift than native outputs */
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* All timing registers pack (start - 1) | ((end - 1) << 16) */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7273
7274static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7275{
7276 struct drm_device *dev = intel_crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007277 struct drm_i915_private *dev_priv = to_i915(dev);
Jani Nikulabc58be62016-03-18 17:05:39 +02007278 enum pipe pipe = intel_crtc->pipe;
7279
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007280 /* pipesrc controls the size that is scaled from, which should
7281 * always be the user's requested size.
7282 */
7283 I915_WRITE(PIPESRC(pipe),
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02007284 ((intel_crtc->config->pipe_src_w - 1) << 16) |
7285 (intel_crtc->config->pipe_src_h - 1));
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007286}
7287
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007288static void intel_get_pipe_timings(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007289 struct intel_crtc_state *pipe_config)
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007290{
7291 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007292 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007293 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7294 uint32_t tmp;
7295
7296 tmp = I915_READ(HTOTAL(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007297 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7298 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007299 tmp = I915_READ(HBLANK(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007300 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7301 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007302 tmp = I915_READ(HSYNC(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007303 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7304 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007305
7306 tmp = I915_READ(VTOTAL(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007307 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7308 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007309 tmp = I915_READ(VBLANK(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007310 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7311 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007312 tmp = I915_READ(VSYNC(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007313 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7314 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007315
7316 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007317 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7318 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7319 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007320 }
Jani Nikulabc58be62016-03-18 17:05:39 +02007321}
7322
7323static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7324 struct intel_crtc_state *pipe_config)
7325{
7326 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007327 struct drm_i915_private *dev_priv = to_i915(dev);
Jani Nikulabc58be62016-03-18 17:05:39 +02007328 u32 tmp;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007329
7330 tmp = I915_READ(PIPESRC(crtc->pipe));
Ville Syrjälä37327ab2013-09-04 18:25:28 +03007331 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7332 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7333
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007334 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7335 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007336}
7337
Daniel Vetterf6a83282014-02-11 15:28:57 -08007338void intel_mode_from_pipe_config(struct drm_display_mode *mode,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007339 struct intel_crtc_state *pipe_config)
Jesse Barnesbabea612013-06-26 18:57:38 +03007340{
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007341 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7342 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7343 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7344 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
Jesse Barnesbabea612013-06-26 18:57:38 +03007345
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007346 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7347 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7348 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7349 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
Jesse Barnesbabea612013-06-26 18:57:38 +03007350
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007351 mode->flags = pipe_config->base.adjusted_mode.flags;
Maarten Lankhorstcd13f5a2015-07-14 14:12:02 +02007352 mode->type = DRM_MODE_TYPE_DRIVER;
Jesse Barnesbabea612013-06-26 18:57:38 +03007353
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007354 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
Maarten Lankhorstcd13f5a2015-07-14 14:12:02 +02007355
7356 mode->hsync = drm_mode_hsync(mode);
7357 mode->vrefresh = drm_mode_vrefresh(mode);
7358 drm_mode_set_name(mode);
Jesse Barnesbabea612013-06-26 18:57:38 +03007359}
7360
/*
 * Build up and program the PIPECONF register for the crtc's pipe from
 * intel_crtc->config: enable-bit preservation (830), double wide mode,
 * bpc/dithering (G4X/VLV/CHV only), interlace mode and limited color
 * range (VLV/CHV). The trailing POSTING_READ flushes the write.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Pre-gen4 pipes and SDVO outputs get the field-indication
		 * interlace flavour; everything else uses sync-shift. */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7415
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007416static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7417 struct intel_crtc_state *crtc_state)
7418{
7419 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007420 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007421 const struct intel_limit *limit;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007422 int refclk = 48000;
7423
7424 memset(&crtc_state->dpll_hw_state, 0,
7425 sizeof(crtc_state->dpll_hw_state));
7426
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007427 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007428 if (intel_panel_use_ssc(dev_priv)) {
7429 refclk = dev_priv->vbt.lvds_ssc_freq;
7430 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7431 }
7432
7433 limit = &intel_limits_i8xx_lvds;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007434 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007435 limit = &intel_limits_i8xx_dvo;
7436 } else {
7437 limit = &intel_limits_i8xx_dac;
7438 }
7439
7440 if (!crtc_state->clock_set &&
7441 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7442 refclk, NULL, &crtc_state->dpll)) {
7443 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7444 return -EINVAL;
7445 }
7446
7447 i8xx_compute_dpll(crtc, crtc_state, NULL);
7448
7449 return 0;
7450}
7451
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007452static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7453 struct intel_crtc_state *crtc_state)
7454{
7455 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007456 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007457 const struct intel_limit *limit;
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007458 int refclk = 96000;
7459
7460 memset(&crtc_state->dpll_hw_state, 0,
7461 sizeof(crtc_state->dpll_hw_state));
7462
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007463 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007464 if (intel_panel_use_ssc(dev_priv)) {
7465 refclk = dev_priv->vbt.lvds_ssc_freq;
7466 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7467 }
7468
7469 if (intel_is_dual_link_lvds(dev))
7470 limit = &intel_limits_g4x_dual_channel_lvds;
7471 else
7472 limit = &intel_limits_g4x_single_channel_lvds;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007473 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7474 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007475 limit = &intel_limits_g4x_hdmi;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007476 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007477 limit = &intel_limits_g4x_sdvo;
7478 } else {
7479 /* The option is for other outputs */
7480 limit = &intel_limits_i9xx_sdvo;
7481 }
7482
7483 if (!crtc_state->clock_set &&
7484 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7485 refclk, NULL, &crtc_state->dpll)) {
7486 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7487 return -EINVAL;
7488 }
7489
7490 i9xx_compute_dpll(crtc, crtc_state, NULL);
7491
7492 return 0;
7493}
7494
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007495static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7496 struct intel_crtc_state *crtc_state)
Jesse Barnes79e53942008-11-07 14:24:08 -08007497{
Ander Conselvan de Oliveirac7653192014-10-20 13:46:44 +03007498 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007499 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007500 const struct intel_limit *limit;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007501 int refclk = 96000;
Jesse Barnes79e53942008-11-07 14:24:08 -08007502
Ander Conselvan de Oliveiradd3cd742015-05-15 13:34:29 +03007503 memset(&crtc_state->dpll_hw_state, 0,
7504 sizeof(crtc_state->dpll_hw_state));
7505
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007506 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007507 if (intel_panel_use_ssc(dev_priv)) {
7508 refclk = dev_priv->vbt.lvds_ssc_freq;
7509 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7510 }
Jesse Barnes79e53942008-11-07 14:24:08 -08007511
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007512 limit = &intel_limits_pineview_lvds;
7513 } else {
7514 limit = &intel_limits_pineview_sdvo;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007515 }
Jani Nikulaf2335332013-09-13 11:03:09 +03007516
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007517 if (!crtc_state->clock_set &&
7518 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7519 refclk, NULL, &crtc_state->dpll)) {
7520 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7521 return -EINVAL;
7522 }
7523
7524 i9xx_compute_dpll(crtc, crtc_state, NULL);
7525
7526 return 0;
7527}
7528
7529static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7530 struct intel_crtc_state *crtc_state)
7531{
7532 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007533 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007534 const struct intel_limit *limit;
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007535 int refclk = 96000;
7536
7537 memset(&crtc_state->dpll_hw_state, 0,
7538 sizeof(crtc_state->dpll_hw_state));
7539
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007540 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007541 if (intel_panel_use_ssc(dev_priv)) {
7542 refclk = dev_priv->vbt.lvds_ssc_freq;
7543 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
Jani Nikulae9fd1c02013-08-27 15:12:23 +03007544 }
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007545
7546 limit = &intel_limits_i9xx_lvds;
7547 } else {
7548 limit = &intel_limits_i9xx_sdvo;
7549 }
7550
7551 if (!crtc_state->clock_set &&
7552 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7553 refclk, NULL, &crtc_state->dpll)) {
7554 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7555 return -EINVAL;
Daniel Vetterf47709a2013-03-28 10:42:02 +01007556 }
Eric Anholtf564048e2011-03-30 13:01:02 -07007557
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007558 i9xx_compute_dpll(crtc, crtc_state, NULL);
Eric Anholtf564048e2011-03-30 13:01:02 -07007559
Daniel Vetterc8f7a0d2014-04-24 23:55:04 +02007560 return 0;
Eric Anholtf564048e2011-03-30 13:01:02 -07007561}
7562
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007563static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7564 struct intel_crtc_state *crtc_state)
7565{
7566 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007567 const struct intel_limit *limit = &intel_limits_chv;
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007568
7569 memset(&crtc_state->dpll_hw_state, 0,
7570 sizeof(crtc_state->dpll_hw_state));
7571
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007572 if (!crtc_state->clock_set &&
7573 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7574 refclk, NULL, &crtc_state->dpll)) {
7575 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7576 return -EINVAL;
7577 }
7578
7579 chv_compute_dpll(crtc, crtc_state);
7580
7581 return 0;
7582}
7583
7584static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7585 struct intel_crtc_state *crtc_state)
7586{
7587 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007588 const struct intel_limit *limit = &intel_limits_vlv;
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007589
7590 memset(&crtc_state->dpll_hw_state, 0,
7591 sizeof(crtc_state->dpll_hw_state));
7592
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007593 if (!crtc_state->clock_set &&
7594 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7595 refclk, NULL, &crtc_state->dpll)) {
7596 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7597 return -EINVAL;
7598 }
7599
7600 vlv_compute_dpll(crtc, crtc_state);
7601
7602 return 0;
7603}
7604
/*
 * Read out the GMCH panel fitter state into pipe_config->gmch_pfit.
 * Leaves the config untouched if the platform check fails, the fitter
 * is disabled, or the fitter is attached to a different pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t tmp;

	/* Skip readout on gen <= 3 parts that are 830 or non-mobile
	 * (NOTE(review): presumably those lack a usable pfit — confirm
	 * against Bspec). */
	if (INTEL_GEN(dev_priv) <= 3 &&
	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4: only pipe B readout is handled here. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		/* Gen4+: the control register encodes the attached pipe. */
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
7631
/*
 * Read the VLV DPLL dividers back over the sideband (DPIO) interface
 * and convert them into pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband reads must be serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the divider fields from PLL_DW3. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
7658
/*
 * Read out the pre-existing (e.g. BIOS-programmed) primary plane state
 * into *plane_config, allocating an intel_framebuffer to describe it.
 * Returns silently (leaving plane_config->fb NULL) if the plane is off
 * or the framebuffer allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe = crtc->pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane isn't enabled in hardware. */
	if (!plane->get_hw_state(plane))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Tiling is only read out on gen4+. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	/* Translate the hardware pixel format into a drm fourcc. */
	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* The surface base/offset registers differ per platform/gen. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* Plane size follows the pipe source size (values stored minus 1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	/* Ownership of intel_fb passes to plane_config. */
	plane_config->fb = intel_fb;
}
7732
/*
 * Read the CHV DPLL dividers back over the sideband (DPIO) interface
 * and convert them into pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband reads must be serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/* m2 has an integer part in DW0 plus an optional fraction in DW2. */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
7766
/*
 * Read the full hardware state of a gen2-4/VLV/CHV pipe into
 * *pipe_config. Returns true if the pipe is enabled and the state was
 * read out, false otherwise. Takes (and releases before returning)
 * the pipe's display power domain; if the domain is not already
 * enabled the function bails out immediately with false.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On these platforms the transcoder always matches the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only G4X/VLV/CHV encode the pipe bpc in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Read back the pixel multiplier; its location varies by gen. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Compute port_clock from the platform-specific DPLL readout. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
7880
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02007881static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
Jesse Barnes13d83a62011-08-03 12:59:20 -07007882{
Jesse Barnes13d83a62011-08-03 12:59:20 -07007883 struct intel_encoder *encoder;
Lyude1c1a24d2016-06-14 11:04:09 -04007884 int i;
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007885 u32 val, final;
Jesse Barnes13d83a62011-08-03 12:59:20 -07007886 bool has_lvds = false;
Keith Packard199e5d72011-09-22 12:01:57 -07007887 bool has_cpu_edp = false;
Keith Packard199e5d72011-09-22 12:01:57 -07007888 bool has_panel = false;
Keith Packard99eb6a02011-09-26 14:29:12 -07007889 bool has_ck505 = false;
7890 bool can_ssc = false;
Lyude1c1a24d2016-06-14 11:04:09 -04007891 bool using_ssc_source = false;
Jesse Barnes13d83a62011-08-03 12:59:20 -07007892
7893 /* We need to take the global config into account */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02007894 for_each_intel_encoder(&dev_priv->drm, encoder) {
Keith Packard199e5d72011-09-22 12:01:57 -07007895 switch (encoder->type) {
7896 case INTEL_OUTPUT_LVDS:
7897 has_panel = true;
7898 has_lvds = true;
7899 break;
7900 case INTEL_OUTPUT_EDP:
7901 has_panel = true;
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02007902 if (encoder->port == PORT_A)
Keith Packard199e5d72011-09-22 12:01:57 -07007903 has_cpu_edp = true;
7904 break;
Paulo Zanoni6847d71b2014-10-27 17:47:52 -02007905 default:
7906 break;
Jesse Barnes13d83a62011-08-03 12:59:20 -07007907 }
7908 }
7909
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01007910 if (HAS_PCH_IBX(dev_priv)) {
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03007911 has_ck505 = dev_priv->vbt.display_clock_mode;
Keith Packard99eb6a02011-09-26 14:29:12 -07007912 can_ssc = has_ck505;
7913 } else {
7914 has_ck505 = false;
7915 can_ssc = true;
7916 }
7917
Lyude1c1a24d2016-06-14 11:04:09 -04007918 /* Check if any DPLLs are using the SSC source */
7919 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
7920 u32 temp = I915_READ(PCH_DPLL(i));
7921
7922 if (!(temp & DPLL_VCO_ENABLE))
7923 continue;
7924
7925 if ((temp & PLL_REF_INPUT_MASK) ==
7926 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
7927 using_ssc_source = true;
7928 break;
7929 }
7930 }
7931
7932 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
7933 has_panel, has_lvds, has_ck505, using_ssc_source);
Jesse Barnes13d83a62011-08-03 12:59:20 -07007934
7935 /* Ironlake: try to setup display ref clock before DPLL
7936 * enabling. This is only under driver's control after
7937 * PCH B stepping, previous chipset stepping should be
7938 * ignoring this setting.
7939 */
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007940 val = I915_READ(PCH_DREF_CONTROL);
Jesse Barnes13d83a62011-08-03 12:59:20 -07007941
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007942 /* As we must carefully and slowly disable/enable each source in turn,
7943 * compute the final state we want first and check if we need to
7944 * make any changes at all.
7945 */
7946 final = val;
7947 final &= ~DREF_NONSPREAD_SOURCE_MASK;
Keith Packard99eb6a02011-09-26 14:29:12 -07007948 if (has_ck505)
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007949 final |= DREF_NONSPREAD_CK505_ENABLE;
Keith Packard99eb6a02011-09-26 14:29:12 -07007950 else
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007951 final |= DREF_NONSPREAD_SOURCE_ENABLE;
7952
Daniel Vetter8c07eb62016-06-09 18:39:07 +02007953 final &= ~DREF_SSC_SOURCE_MASK;
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007954 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
Daniel Vetter8c07eb62016-06-09 18:39:07 +02007955 final &= ~DREF_SSC1_ENABLE;
Jesse Barnes13d83a62011-08-03 12:59:20 -07007956
Keith Packard199e5d72011-09-22 12:01:57 -07007957 if (has_panel) {
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007958 final |= DREF_SSC_SOURCE_ENABLE;
7959
7960 if (intel_panel_use_ssc(dev_priv) && can_ssc)
7961 final |= DREF_SSC1_ENABLE;
7962
7963 if (has_cpu_edp) {
7964 if (intel_panel_use_ssc(dev_priv) && can_ssc)
7965 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
7966 else
7967 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
7968 } else
7969 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
Lyude1c1a24d2016-06-14 11:04:09 -04007970 } else if (using_ssc_source) {
7971 final |= DREF_SSC_SOURCE_ENABLE;
7972 final |= DREF_SSC1_ENABLE;
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007973 }
7974
7975 if (final == val)
7976 return;
7977
7978 /* Always enable nonspread source */
7979 val &= ~DREF_NONSPREAD_SOURCE_MASK;
7980
7981 if (has_ck505)
7982 val |= DREF_NONSPREAD_CK505_ENABLE;
7983 else
7984 val |= DREF_NONSPREAD_SOURCE_ENABLE;
7985
7986 if (has_panel) {
7987 val &= ~DREF_SSC_SOURCE_MASK;
7988 val |= DREF_SSC_SOURCE_ENABLE;
Jesse Barnes13d83a62011-08-03 12:59:20 -07007989
Keith Packard199e5d72011-09-22 12:01:57 -07007990 /* SSC must be turned on before enabling the CPU output */
Keith Packard99eb6a02011-09-26 14:29:12 -07007991 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
Keith Packard199e5d72011-09-22 12:01:57 -07007992 DRM_DEBUG_KMS("Using SSC on panel\n");
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007993 val |= DREF_SSC1_ENABLE;
Daniel Vettere77166b2012-03-30 22:14:05 +02007994 } else
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007995 val &= ~DREF_SSC1_ENABLE;
Keith Packard199e5d72011-09-22 12:01:57 -07007996
7997 /* Get SSC going before enabling the outputs */
Chris Wilson74cfd7a2013-03-26 16:33:04 -07007998 I915_WRITE(PCH_DREF_CONTROL, val);
Keith Packard199e5d72011-09-22 12:01:57 -07007999 POSTING_READ(PCH_DREF_CONTROL);
8000 udelay(200);
8001
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008002 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
Jesse Barnes13d83a62011-08-03 12:59:20 -07008003
8004 /* Enable CPU source on CPU attached eDP */
Keith Packard199e5d72011-09-22 12:01:57 -07008005 if (has_cpu_edp) {
Keith Packard99eb6a02011-09-26 14:29:12 -07008006 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
Keith Packard199e5d72011-09-22 12:01:57 -07008007 DRM_DEBUG_KMS("Using SSC on eDP\n");
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008008 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
Robin Schroereba905b2014-05-18 02:24:50 +02008009 } else
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008010 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
Keith Packard199e5d72011-09-22 12:01:57 -07008011 } else
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008012 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
Keith Packard199e5d72011-09-22 12:01:57 -07008013
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008014 I915_WRITE(PCH_DREF_CONTROL, val);
Keith Packard199e5d72011-09-22 12:01:57 -07008015 POSTING_READ(PCH_DREF_CONTROL);
8016 udelay(200);
8017 } else {
Lyude1c1a24d2016-06-14 11:04:09 -04008018 DRM_DEBUG_KMS("Disabling CPU source output\n");
Keith Packard199e5d72011-09-22 12:01:57 -07008019
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008020 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
Keith Packard199e5d72011-09-22 12:01:57 -07008021
8022 /* Turn off CPU output */
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008023 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
Keith Packard199e5d72011-09-22 12:01:57 -07008024
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008025 I915_WRITE(PCH_DREF_CONTROL, val);
Keith Packard199e5d72011-09-22 12:01:57 -07008026 POSTING_READ(PCH_DREF_CONTROL);
8027 udelay(200);
8028
Lyude1c1a24d2016-06-14 11:04:09 -04008029 if (!using_ssc_source) {
8030 DRM_DEBUG_KMS("Disabling SSC source\n");
Keith Packard199e5d72011-09-22 12:01:57 -07008031
Lyude1c1a24d2016-06-14 11:04:09 -04008032 /* Turn off the SSC source */
8033 val &= ~DREF_SSC_SOURCE_MASK;
8034 val |= DREF_SSC_SOURCE_DISABLE;
Keith Packard199e5d72011-09-22 12:01:57 -07008035
Lyude1c1a24d2016-06-14 11:04:09 -04008036 /* Turn off SSC1 */
8037 val &= ~DREF_SSC1_ENABLE;
8038
8039 I915_WRITE(PCH_DREF_CONTROL, val);
8040 POSTING_READ(PCH_DREF_CONTROL);
8041 udelay(200);
8042 }
Jesse Barnes13d83a62011-08-03 12:59:20 -07008043 }
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008044
8045 BUG_ON(val != final);
Jesse Barnes13d83a62011-08-03 12:59:20 -07008046}
8047
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008048static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
Paulo Zanonidde86e22012-12-01 12:04:25 -02008049{
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008050 uint32_t tmp;
Paulo Zanonidde86e22012-12-01 12:04:25 -02008051
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008052 tmp = I915_READ(SOUTH_CHICKEN2);
8053 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8054 I915_WRITE(SOUTH_CHICKEN2, tmp);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008055
Imre Deakcf3598c2016-06-28 13:37:31 +03008056 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8057 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008058 DRM_ERROR("FDI mPHY reset assert timeout\n");
Paulo Zanonidde86e22012-12-01 12:04:25 -02008059
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008060 tmp = I915_READ(SOUTH_CHICKEN2);
8061 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8062 I915_WRITE(SOUTH_CHICKEN2, tmp);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008063
Imre Deakcf3598c2016-06-28 13:37:31 +03008064 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8065 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008066 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008067}
8068
8069/* WaMPhyProgramming:hsw */
8070static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8071{
8072 uint32_t tmp;
Paulo Zanonidde86e22012-12-01 12:04:25 -02008073
8074 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8075 tmp &= ~(0xFF << 24);
8076 tmp |= (0x12 << 24);
8077 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8078
Paulo Zanonidde86e22012-12-01 12:04:25 -02008079 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8080 tmp |= (1 << 11);
8081 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8082
8083 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8084 tmp |= (1 << 11);
8085 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8086
Paulo Zanonidde86e22012-12-01 12:04:25 -02008087 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8088 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8089 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8090
8091 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8092 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8093 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8094
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008095 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8096 tmp &= ~(7 << 13);
8097 tmp |= (5 << 13);
8098 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008099
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008100 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8101 tmp &= ~(7 << 13);
8102 tmp |= (5 << 13);
8103 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008104
8105 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8106 tmp &= ~0xFF;
8107 tmp |= 0x1C;
8108 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8109
8110 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8111 tmp &= ~0xFF;
8112 tmp |= 0x1C;
8113 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8114
8115 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8116 tmp &= ~(0xFF << 16);
8117 tmp |= (0x1C << 16);
8118 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8119
8120 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8121 tmp &= ~(0xFF << 16);
8122 tmp |= (0x1C << 16);
8123 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8124
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008125 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8126 tmp |= (1 << 27);
8127 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008128
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008129 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8130 tmp |= (1 << 27);
8131 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008132
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008133 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8134 tmp &= ~(0xF << 28);
8135 tmp |= (4 << 28);
8136 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008137
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008138 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8139 tmp &= ~(0xF << 28);
8140 tmp |= (4 << 28);
8141 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008142}
8143
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008144/* Implements 3 different sequences from BSpec chapter "Display iCLK
8145 * Programming" based on the parameters passed:
8146 * - Sequence to enable CLKOUT_DP
8147 * - Sequence to enable CLKOUT_DP without spread
8148 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8149 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008150static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
8151 bool with_spread, bool with_fdi)
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008152{
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008153 uint32_t reg, tmp;
8154
8155 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8156 with_spread = true;
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01008157 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
8158 with_fdi, "LP PCH doesn't have FDI\n"))
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008159 with_fdi = false;
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008160
Ville Syrjäläa5805162015-05-26 20:42:30 +03008161 mutex_lock(&dev_priv->sb_lock);
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008162
8163 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8164 tmp &= ~SBI_SSCCTL_DISABLE;
8165 tmp |= SBI_SSCCTL_PATHALT;
8166 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8167
8168 udelay(24);
8169
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008170 if (with_spread) {
8171 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8172 tmp &= ~SBI_SSCCTL_PATHALT;
8173 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008174
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008175 if (with_fdi) {
8176 lpt_reset_fdi_mphy(dev_priv);
8177 lpt_program_fdi_mphy(dev_priv);
8178 }
8179 }
Paulo Zanonidde86e22012-12-01 12:04:25 -02008180
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01008181 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008182 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8183 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8184 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
Daniel Vetterc00db242013-01-22 15:33:27 +01008185
Ville Syrjäläa5805162015-05-26 20:42:30 +03008186 mutex_unlock(&dev_priv->sb_lock);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008187}
8188
Paulo Zanoni47701c32013-07-23 11:19:25 -03008189/* Sequence to disable CLKOUT_DP */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008190static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
Paulo Zanoni47701c32013-07-23 11:19:25 -03008191{
Paulo Zanoni47701c32013-07-23 11:19:25 -03008192 uint32_t reg, tmp;
8193
Ville Syrjäläa5805162015-05-26 20:42:30 +03008194 mutex_lock(&dev_priv->sb_lock);
Paulo Zanoni47701c32013-07-23 11:19:25 -03008195
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01008196 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
Paulo Zanoni47701c32013-07-23 11:19:25 -03008197 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8198 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8199 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8200
8201 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8202 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8203 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8204 tmp |= SBI_SSCCTL_PATHALT;
8205 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8206 udelay(32);
8207 }
8208 tmp |= SBI_SSCCTL_DISABLE;
8209 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8210 }
8211
Ville Syrjäläa5805162015-05-26 20:42:30 +03008212 mutex_unlock(&dev_priv->sb_lock);
Paulo Zanoni47701c32013-07-23 11:19:25 -03008213}
8214
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008215#define BEND_IDX(steps) ((50 + (steps)) / 5)
8216
8217static const uint16_t sscdivintphase[] = {
8218 [BEND_IDX( 50)] = 0x3B23,
8219 [BEND_IDX( 45)] = 0x3B23,
8220 [BEND_IDX( 40)] = 0x3C23,
8221 [BEND_IDX( 35)] = 0x3C23,
8222 [BEND_IDX( 30)] = 0x3D23,
8223 [BEND_IDX( 25)] = 0x3D23,
8224 [BEND_IDX( 20)] = 0x3E23,
8225 [BEND_IDX( 15)] = 0x3E23,
8226 [BEND_IDX( 10)] = 0x3F23,
8227 [BEND_IDX( 5)] = 0x3F23,
8228 [BEND_IDX( 0)] = 0x0025,
8229 [BEND_IDX( -5)] = 0x0025,
8230 [BEND_IDX(-10)] = 0x0125,
8231 [BEND_IDX(-15)] = 0x0125,
8232 [BEND_IDX(-20)] = 0x0225,
8233 [BEND_IDX(-25)] = 0x0225,
8234 [BEND_IDX(-30)] = 0x0325,
8235 [BEND_IDX(-35)] = 0x0325,
8236 [BEND_IDX(-40)] = 0x0425,
8237 [BEND_IDX(-45)] = 0x0425,
8238 [BEND_IDX(-50)] = 0x0525,
8239};
8240
8241/*
8242 * Bend CLKOUT_DP
8243 * steps -50 to 50 inclusive, in steps of 5
8244 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8245 * change in clock period = -(steps / 10) * 5.787 ps
8246 */
8247static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8248{
8249 uint32_t tmp;
8250 int idx = BEND_IDX(steps);
8251
8252 if (WARN_ON(steps % 5 != 0))
8253 return;
8254
8255 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8256 return;
8257
8258 mutex_lock(&dev_priv->sb_lock);
8259
8260 if (steps % 10 != 0)
8261 tmp = 0xAAAAAAAB;
8262 else
8263 tmp = 0x00000000;
8264 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8265
8266 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8267 tmp &= 0xffff0000;
8268 tmp |= sscdivintphase[idx];
8269 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8270
8271 mutex_unlock(&dev_priv->sb_lock);
8272}
8273
8274#undef BEND_IDX
8275
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008276static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008277{
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008278 struct intel_encoder *encoder;
8279 bool has_vga = false;
8280
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008281 for_each_intel_encoder(&dev_priv->drm, encoder) {
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008282 switch (encoder->type) {
8283 case INTEL_OUTPUT_ANALOG:
8284 has_vga = true;
8285 break;
Paulo Zanoni6847d71b2014-10-27 17:47:52 -02008286 default:
8287 break;
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008288 }
8289 }
8290
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008291 if (has_vga) {
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008292 lpt_bend_clkout_dp(dev_priv, 0);
8293 lpt_enable_clkout_dp(dev_priv, true, true);
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008294 } else {
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008295 lpt_disable_clkout_dp(dev_priv);
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008296 }
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008297}
8298
Paulo Zanonidde86e22012-12-01 12:04:25 -02008299/*
8300 * Initialize reference clocks when the driver loads
8301 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008302void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
Paulo Zanonidde86e22012-12-01 12:04:25 -02008303{
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01008304 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008305 ironlake_init_pch_refclk(dev_priv);
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01008306 else if (HAS_PCH_LPT(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008307 lpt_init_pch_refclk(dev_priv);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008308}
8309
Daniel Vetter6ff93602013-04-19 11:24:36 +02008310static void ironlake_set_pipeconf(struct drm_crtc *crtc)
Paulo Zanonic8203562012-09-12 10:06:29 -03008311{
Chris Wilsonfac5e232016-07-04 11:34:36 +01008312 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Paulo Zanonic8203562012-09-12 10:06:29 -03008313 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8314 int pipe = intel_crtc->pipe;
8315 uint32_t val;
8316
Daniel Vetter78114072013-06-13 00:54:57 +02008317 val = 0;
Paulo Zanonic8203562012-09-12 10:06:29 -03008318
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02008319 switch (intel_crtc->config->pipe_bpp) {
Paulo Zanonic8203562012-09-12 10:06:29 -03008320 case 18:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008321 val |= PIPECONF_6BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008322 break;
8323 case 24:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008324 val |= PIPECONF_8BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008325 break;
8326 case 30:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008327 val |= PIPECONF_10BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008328 break;
8329 case 36:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008330 val |= PIPECONF_12BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008331 break;
8332 default:
Paulo Zanonicc769b62012-09-20 18:36:03 -03008333 /* Case prevented by intel_choose_pipe_bpp_dither. */
8334 BUG();
Paulo Zanonic8203562012-09-12 10:06:29 -03008335 }
8336
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02008337 if (intel_crtc->config->dither)
Paulo Zanonic8203562012-09-12 10:06:29 -03008338 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8339
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02008340 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
Paulo Zanonic8203562012-09-12 10:06:29 -03008341 val |= PIPECONF_INTERLACED_ILK;
8342 else
8343 val |= PIPECONF_PROGRESSIVE;
8344
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02008345 if (intel_crtc->config->limited_color_range)
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02008346 val |= PIPECONF_COLOR_RANGE_SELECT;
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02008347
Paulo Zanonic8203562012-09-12 10:06:29 -03008348 I915_WRITE(PIPECONF(pipe), val);
8349 POSTING_READ(PIPECONF(pipe));
8350}
8351
Daniel Vetter6ff93602013-04-19 11:24:36 +02008352static void haswell_set_pipeconf(struct drm_crtc *crtc)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008353{
Chris Wilsonfac5e232016-07-04 11:34:36 +01008354 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008355 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02008356 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
Jani Nikula391bf042016-03-18 17:05:40 +02008357 u32 val = 0;
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008358
Jani Nikula391bf042016-03-18 17:05:40 +02008359 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008360 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8361
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02008362 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008363 val |= PIPECONF_INTERLACED_ILK;
8364 else
8365 val |= PIPECONF_PROGRESSIVE;
8366
Paulo Zanoni702e7a52012-10-23 18:29:59 -02008367 I915_WRITE(PIPECONF(cpu_transcoder), val);
8368 POSTING_READ(PIPECONF(cpu_transcoder));
Jani Nikula391bf042016-03-18 17:05:40 +02008369}
8370
Jani Nikula391bf042016-03-18 17:05:40 +02008371static void haswell_set_pipemisc(struct drm_crtc *crtc)
8372{
Chris Wilsonfac5e232016-07-04 11:34:36 +01008373 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Jani Nikula391bf042016-03-18 17:05:40 +02008374 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Shashank Sharmab22ca992017-07-24 19:19:32 +05308375 struct intel_crtc_state *config = intel_crtc->config;
Jani Nikula391bf042016-03-18 17:05:40 +02008376
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +00008377 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
Jani Nikula391bf042016-03-18 17:05:40 +02008378 u32 val = 0;
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008379
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02008380 switch (intel_crtc->config->pipe_bpp) {
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008381 case 18:
8382 val |= PIPEMISC_DITHER_6_BPC;
8383 break;
8384 case 24:
8385 val |= PIPEMISC_DITHER_8_BPC;
8386 break;
8387 case 30:
8388 val |= PIPEMISC_DITHER_10_BPC;
8389 break;
8390 case 36:
8391 val |= PIPEMISC_DITHER_12_BPC;
8392 break;
8393 default:
8394 /* Case prevented by pipe_config_set_bpp. */
8395 BUG();
8396 }
8397
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +02008398 if (intel_crtc->config->dither)
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008399 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8400
Shashank Sharmab22ca992017-07-24 19:19:32 +05308401 if (config->ycbcr420) {
8402 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
8403 PIPEMISC_YUV420_ENABLE |
8404 PIPEMISC_YUV420_MODE_FULL_BLEND;
8405 }
8406
Jani Nikula391bf042016-03-18 17:05:40 +02008407 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008408 }
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008409}
8410
Paulo Zanonid4b19312012-11-29 11:29:32 -02008411int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8412{
8413 /*
8414 * Account for spread spectrum to avoid
8415 * oversubscribing the link. Max center spread
8416 * is 2.5%; use 5% for safety's sake.
8417 */
8418 u32 bps = target_clock * bpp * 21 / 20;
Ville Syrjälä619d4d02014-02-27 14:23:14 +02008419 return DIV_ROUND_UP(bps, link_bw * 8);
Paulo Zanonid4b19312012-11-29 11:29:32 -02008420}
8421
Daniel Vetter7429e9d2013-04-20 17:19:46 +02008422static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
Daniel Vetter6cf86a52013-04-02 23:38:10 +02008423{
Daniel Vetter7429e9d2013-04-20 17:19:46 +02008424 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
Paulo Zanonif48d8f22012-09-20 18:36:04 -03008425}
8426
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008427static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8428 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03008429 struct dpll *reduced_clock)
Paulo Zanonide13a2e2012-09-20 18:36:05 -03008430{
8431 struct drm_crtc *crtc = &intel_crtc->base;
8432 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008433 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008434 u32 dpll, fp, fp2;
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008435 int factor;
Jesse Barnes79e53942008-11-07 14:24:08 -08008436
Chris Wilsonc1858122010-12-03 21:35:48 +00008437 /* Enable autotuning of the PLL clock (if permissible) */
Eric Anholt8febb292011-03-30 13:01:07 -07008438 factor = 21;
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008439 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Eric Anholt8febb292011-03-30 13:01:07 -07008440 if ((intel_panel_use_ssc(dev_priv) &&
Ville Syrjäläe91e9412013-12-09 18:54:16 +02008441 dev_priv->vbt.lvds_ssc_freq == 100000) ||
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01008442 (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
Eric Anholt8febb292011-03-30 13:01:07 -07008443 factor = 25;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008444 } else if (crtc_state->sdvo_tv_clock)
Eric Anholt8febb292011-03-30 13:01:07 -07008445 factor = 20;
Chris Wilsonc1858122010-12-03 21:35:48 +00008446
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008447 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
Chris Wilsonc1858122010-12-03 21:35:48 +00008448
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008449 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8450 fp |= FP_CB_TUNE;
8451
8452 if (reduced_clock) {
8453 fp2 = i9xx_dpll_compute_fp(reduced_clock);
8454
8455 if (reduced_clock->m < factor * reduced_clock->n)
8456 fp2 |= FP_CB_TUNE;
8457 } else {
8458 fp2 = fp;
8459 }
Daniel Vetter9a7c7892013-04-04 22:20:34 +02008460
Chris Wilson5eddb702010-09-11 13:48:45 +01008461 dpll = 0;
Zhenyu Wang2c072452009-06-05 15:38:42 +08008462
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008463 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
Eric Anholta07d6782011-03-30 13:01:08 -07008464 dpll |= DPLLB_MODE_LVDS;
8465 else
8466 dpll |= DPLLB_MODE_DAC_SERIAL;
Daniel Vetter198a037f2013-04-19 11:14:37 +02008467
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008468 dpll |= (crtc_state->pixel_multiplier - 1)
Daniel Vetteref1b4602013-06-01 17:17:04 +02008469 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
Daniel Vetter198a037f2013-04-19 11:14:37 +02008470
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008471 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8472 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
Daniel Vetter4a33e482013-07-06 12:52:05 +02008473 dpll |= DPLL_SDVO_HIGH_SPEED;
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008474
Ville Syrjälä37a56502016-06-22 21:57:04 +03008475 if (intel_crtc_has_dp_encoder(crtc_state))
Daniel Vetter4a33e482013-07-06 12:52:05 +02008476 dpll |= DPLL_SDVO_HIGH_SPEED;
Jesse Barnes79e53942008-11-07 14:24:08 -08008477
Ville Syrjälä7d7f8632016-09-26 11:30:46 +03008478 /*
8479 * The high speed IO clock is only really required for
8480 * SDVO/HDMI/DP, but we also enable it for CRT to make it
8481 * possible to share the DPLL between CRT and HDMI. Enabling
8482 * the clock needlessly does no real harm, except use up a
8483 * bit of power potentially.
8484 *
8485 * We'll limit this to IVB with 3 pipes, since it has only two
8486 * DPLLs and so DPLL sharing is the only way to get three pipes
8487 * driving PCH ports at the same time. On SNB we could do this,
8488 * and potentially avoid enabling the second DPLL, but it's not
8489 * clear if it''s a win or loss power wise. No point in doing
8490 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
8491 */
8492 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
8493 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
8494 dpll |= DPLL_SDVO_HIGH_SPEED;
8495
Eric Anholta07d6782011-03-30 13:01:08 -07008496 /* compute bitmask from p1 value */
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008497 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
Eric Anholta07d6782011-03-30 13:01:08 -07008498 /* also FPA1 */
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008499 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
Eric Anholta07d6782011-03-30 13:01:08 -07008500
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008501 switch (crtc_state->dpll.p2) {
Eric Anholta07d6782011-03-30 13:01:08 -07008502 case 5:
8503 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8504 break;
8505 case 7:
8506 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8507 break;
8508 case 10:
8509 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8510 break;
8511 case 14:
8512 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8513 break;
Jesse Barnes79e53942008-11-07 14:24:08 -08008514 }
8515
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008516 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8517 intel_panel_use_ssc(dev_priv))
Kristian Høgsberg43565a02009-02-13 20:56:52 -05008518 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
Jesse Barnes79e53942008-11-07 14:24:08 -08008519 else
8520 dpll |= PLL_REF_INPUT_DREFCLK;
8521
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008522 dpll |= DPLL_VCO_ENABLE;
8523
8524 crtc_state->dpll_hw_state.dpll = dpll;
8525 crtc_state->dpll_hw_state.fp0 = fp;
8526 crtc_state->dpll_hw_state.fp1 = fp2;
Paulo Zanonide13a2e2012-09-20 18:36:05 -03008527}
8528
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008529static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8530 struct intel_crtc_state *crtc_state)
Jesse Barnes79e53942008-11-07 14:24:08 -08008531{
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008532 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008533 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03008534 const struct intel_limit *limit;
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008535 int refclk = 120000;
Jesse Barnes79e53942008-11-07 14:24:08 -08008536
Ander Conselvan de Oliveiradd3cd742015-05-15 13:34:29 +03008537 memset(&crtc_state->dpll_hw_state, 0,
8538 sizeof(crtc_state->dpll_hw_state));
8539
Ander Conselvan de Oliveiraded220e2016-03-21 18:00:09 +02008540 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8541 if (!crtc_state->has_pch_encoder)
8542 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08008543
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03008544 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008545 if (intel_panel_use_ssc(dev_priv)) {
8546 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8547 dev_priv->vbt.lvds_ssc_freq);
8548 refclk = dev_priv->vbt.lvds_ssc_freq;
8549 }
8550
8551 if (intel_is_dual_link_lvds(dev)) {
8552 if (refclk == 100000)
8553 limit = &intel_limits_ironlake_dual_lvds_100m;
8554 else
8555 limit = &intel_limits_ironlake_dual_lvds;
8556 } else {
8557 if (refclk == 100000)
8558 limit = &intel_limits_ironlake_single_lvds_100m;
8559 else
8560 limit = &intel_limits_ironlake_single_lvds;
8561 }
8562 } else {
8563 limit = &intel_limits_ironlake_dac;
8564 }
8565
Ander Conselvan de Oliveira364ee292016-03-21 18:00:10 +02008566 if (!crtc_state->clock_set &&
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008567 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8568 refclk, NULL, &crtc_state->dpll)) {
Ander Conselvan de Oliveira364ee292016-03-21 18:00:10 +02008569 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8570 return -EINVAL;
Daniel Vetterf47709a2013-03-28 10:42:02 +01008571 }
Jesse Barnes79e53942008-11-07 14:24:08 -08008572
Gustavo A. R. Silvacbaa3312017-05-15 16:56:05 -05008573 ironlake_compute_dpll(crtc, crtc_state, NULL);
Daniel Vetter66e985c2013-06-05 13:34:20 +02008574
Gustavo A. R. Silvaefd38b62017-05-15 17:00:28 -05008575 if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
Ander Conselvan de Oliveiraded220e2016-03-21 18:00:09 +02008576 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8577 pipe_name(crtc->pipe));
8578 return -EINVAL;
Ander Conselvan de Oliveira3fb37702014-10-29 11:32:35 +02008579 }
Jesse Barnes79e53942008-11-07 14:24:08 -08008580
Daniel Vetterc8f7a0d2014-04-24 23:55:04 +02008581 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08008582}
8583
/*
 * Read back the link/data M/N ratios programmed into the PCH
 * transcoder feeding @crtc's pipe (the pipe->PCH transcoder mapping
 * is 1:1, so the registers are indexed by pipe).
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* TU size is stored minus one in the high bits of DATA_M1. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
8599
/*
 * Read back the link/data M/N ratios for a CPU transcoder.  On gen5+
 * the registers are indexed by transcoder; on older platforms the g4x
 * register layout (indexed by pipe) is used.  @m2_n2, when non-NULL,
 * additionally receives the second M/N set used by DRRS on gen5-7.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size is stored minus one in the high bits of DATA_M1. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		/* NOTE(review): this tests crtc->config (the current state),
		 * not the state being read back - confirm that is intended. */
		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
8640
8641void intel_dp_get_m_n(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008642 struct intel_crtc_state *pipe_config)
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008643{
Ander Conselvan de Oliveira681a8502015-01-15 14:55:24 +02008644 if (pipe_config->has_pch_encoder)
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008645 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8646 else
8647 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
Vandana Kannanb95af8b2014-08-05 07:51:23 -07008648 &pipe_config->dp_m_n,
8649 &pipe_config->dp_m2_n2);
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008650}
8651
Daniel Vetter72419202013-04-04 13:28:53 +02008652static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008653 struct intel_crtc_state *pipe_config)
Daniel Vetter72419202013-04-04 13:28:53 +02008654{
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008655 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
Vandana Kannanb95af8b2014-08-05 07:51:23 -07008656 &pipe_config->fdi_m_n, NULL);
Daniel Vetter72419202013-04-04 13:28:53 +02008657}
8658
/*
 * Read back the SKL+ panel fitter state: scan the pipe's scalers for
 * one that is enabled and bound to the pipe output (not to a plane),
 * record its window position/size, and update the scaler bookkeeping.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	uint32_t ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	/* id stays -1 when no scaler serves the pipe: drop the CRTC's claim. */
	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}
8688
/*
 * Reconstruct a framebuffer description (format, modifier, stride,
 * dimensions) from the primary plane's hardware state, so the
 * firmware/BIOS frontbuffer can be inherited at driver load.  If the
 * plane is off, or the state cannot be decoded, plane_config->fb is
 * left unset and any partial allocation is freed.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe = crtc->pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane is disabled. */
	if (!plane->get_hw_state(plane))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* ICL widened the pixel format field in PLANE_CTL. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* GLK/gen10+ moved per-plane alpha mode into PLANE_COLOR_CTL. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Map the hardware tiling field onto a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Low 12 bits of PLANE_SURF are flags, not address bits. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores width/height minus one. */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register counts tiles/chunks; convert to bytes. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
8789
/*
 * Read back the ILK-IVB/HSW panel fitter (PF) state for @crtc's pipe.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev_priv)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
8813
/*
 * Read out the full hardware state of an ILK-IVB pipe into
 * @pipe_config: pipe enable/bpp/color range, PCH encoder + FDI state,
 * the shared DPLL feeding the PCH transcoder, timings and panel
 * fitter.  Returns false if the pipe's power domain is off or the
 * pipe is disabled.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	/* With the power well down, register reads would be garbage. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		/* FDI lane count is stored minus one. */
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* On CPT/PPT the PLL selection is programmable. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id= DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));

		/* Pixel multiplier is stored minus one in the DPLL word. */
		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
8910
/*
 * Sanity-check that everything that must be off before LCPLL can be
 * disabled really is off: all CRTCs, the display power well,
 * SPLL/WRPLLs, panel power, all backlight PWMs, the utility pin, GTC
 * and interrupts.  Emits I915_STATE_WARNs; does not abort.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	/* Only Haswell has the second CPU backlight PWM. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
8945
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03008946static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
8947{
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01008948 if (IS_HASWELL(dev_priv))
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03008949 return I915_READ(D_COMP_HSW);
8950 else
8951 return I915_READ(D_COMP_BDW);
8952}
8953
/*
 * Write the D_COMP register.  On HSW the write must go through the
 * pcode mailbox (serialized by pcu_lock); on BDW it is plain MMIO,
 * posted with a read-back.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	if (IS_HASWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->pcu_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
8967
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move CDCLK onto FCLK before stopping the PLL. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait for the PLL to report unlocked. */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP only after the PLL itself has stopped. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	/* Finally let the hardware fully power the PLL down. */
	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9019
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully enabled. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP before spinning the PLL back up. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move CDCLK back from FCLK onto the now-running LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9075
Paulo Zanoni765dab672014-03-07 20:08:18 -03009076/*
9077 * Package states C8 and deeper are really deep PC states that can only be
9078 * reached when all the devices on the system allow it, so even if the graphics
9079 * device allows PC8+, it doesn't mean the system will actually get to these
9080 * states. Our driver only allows PC8+ when going into runtime PM.
9081 *
9082 * The requirements for PC8+ are that all the outputs are disabled, the power
9083 * well is disabled and most interrupts are disabled, and these are also
9084 * requirements for runtime PM. When these conditions are met, we manually do
9085 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9086 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9087 * hang the machine.
9088 *
9089 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9090 * the state of some registers, so when we come back from PC8+ we need to
9091 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9092 * need to take care of the registers kept by RC6. Notice that this happens even
9093 * if we don't put the device in PCI D3 state (which is what currently happens
9094 * because of the runtime PM support).
9095 *
9096 * For more, read "Display Sequences for Package C8" on the hardware
9097 * documentation.
9098 */
/*
 * Allow package C8+: drop the LPT-LP partition level, disable the DP
 * clkout path and take LCPLL down (switching CDCLK to FCLK first).
 * Called on the runtime-suspend path; see the "Display Sequences for
 * Package C8" discussion above.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}
9114
/*
 * Disallow package C8+: restore LCPLL and the PCH reference clock,
 * then restore the LPT-LP partition level.  Inverse of
 * hsw_enable_pc8(); called on the runtime-resume path.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
9130
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02009131static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9132 struct intel_crtc_state *crtc_state)
Paulo Zanoni09b4ddf2012-10-05 12:05:55 -03009133{
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03009134 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
Paulo Zanoni44a126b2017-03-22 15:58:45 -03009135 struct intel_encoder *encoder =
9136 intel_ddi_get_crtc_new_encoder(crtc_state);
9137
9138 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
9139 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
9140 pipe_name(crtc->pipe));
Mika Kaholaaf3997b2016-02-05 13:29:28 +02009141 return -EINVAL;
Paulo Zanoni44a126b2017-03-22 15:58:45 -03009142 }
Mika Kaholaaf3997b2016-02-05 13:29:28 +02009143 }
Daniel Vetter716c2e52014-06-25 22:02:02 +03009144
Daniel Vetterc8f7a0d2014-04-24 23:55:04 +02009145 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08009146}
9147
/*
 * Read back which shared DPLL feeds @port on Cannonlake from the
 * DPCLKA_CFGCR0 clock-select field and record it in @pipe_config.
 * Leaves @pipe_config->shared_dpll untouched if the id read back is
 * out of the expected SKL_DPLL0..SKL_DPLL2 range.
 */
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
				   enum port port,
				   struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* Isolate this port's clock-select field, then shift it down. */
	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9163
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309164static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9165 enum port port,
9166 struct intel_crtc_state *pipe_config)
9167{
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009168 enum intel_dpll_id id;
9169
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309170 switch (port) {
9171 case PORT_A:
Imre Deak08250c42016-03-14 19:55:34 +02009172 id = DPLL_ID_SKL_DPLL0;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309173 break;
9174 case PORT_B:
Imre Deak08250c42016-03-14 19:55:34 +02009175 id = DPLL_ID_SKL_DPLL1;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309176 break;
9177 case PORT_C:
Imre Deak08250c42016-03-14 19:55:34 +02009178 id = DPLL_ID_SKL_DPLL2;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309179 break;
9180 default:
9181 DRM_ERROR("Incorrect port type\n");
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009182 return;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309183 }
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009184
9185 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309186}
9187
/*
 * Read back which shared DPLL DPLL_CTRL2 routes to @port on gen9
 * big-core parts and record it in @pipe_config. Leaves
 * @pipe_config->shared_dpll untouched if the id is out of the
 * SKL_DPLL0..SKL_DPLL3 range.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/*
	 * Mask out this port's clock-select field; the shift below
	 * matches the per-port field layout (3 bits per port, starting
	 * at bit 1).
	 */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	id = temp >> (port * 3 + 1);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9203
/*
 * Read back which PLL feeds @port on HSW/BDW from PORT_CLK_SEL and
 * record it in @pipe_config. Leaves @pipe_config->shared_dpll untouched
 * when the port has no clock selected (or an unknown selection, which
 * additionally warns).
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9239
/*
 * Determine which CPU transcoder drives @crtc and whether it is enabled.
 *
 * On success the transcoder's power domain reference is taken and its bit
 * is added to @power_domain_mask so the caller can release it later.
 * Returns false (without taking the reference) if the transcoder's power
 * well is off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			/* Unknown input: warn, then treat it as pipe A. */
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		/* If the eDP transcoder feeds this pipe, use it instead. */
		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9290
/*
 * Check whether one of the BXT DSI transcoders drives @crtc.
 *
 * Each DSI transcoder whose power well is enabled gets its power domain
 * reference taken and recorded in @power_domain_mask for the caller to
 * release. Returns true if @pipe_config->cpu_transcoder ends up set to a
 * DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	/* DSI is only available on ports A and C on BXT. */
	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports routed to a different pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9338
/*
 * Read back the DDI port state for @crtc: which port the transcoder is
 * wired to, which shared DPLL feeds that port (dispatched per platform),
 * and - on pre-gen9 - whether the PCH/FDI transcoder is involved.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* Per-platform readout of which DPLL drives this port. */
	if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* Cross-check: a PLL in use must report valid hardware state. */
	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is connected to DDI E.
	 * So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9382
/*
 * Read back the full hardware state of @crtc into @pipe_config (HSW+).
 *
 * Every register read happens under a power domain reference obtained via
 * intel_display_power_get_if_enabled(); all references accumulated in
 * power_domain_mask are dropped before returning. Returns true if the
 * pipe is active.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	/* Bail out early if the pipe's own power well is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* On BXT/GLK a DSI transcoder may drive the pipe instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	/* DSI transcoders have no DDI port and own their timings. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	/* Read back the YCbCr 4:2:0 output configuration from PIPEMISC. */
	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
		bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;

		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
			bool blend_mode_420 = tmp &
					      PIPEMISC_YUV420_MODE_FULL_BLEND;

			/* Enable, colorspace and blend bits should agree. */
			pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
			if (pipe_config->ycbcr420 != clrspace_yuv ||
			    pipe_config->ycbcr420 != blend_mode_420)
				DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
		} else if (clrspace_yuv) {
			DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
		}
	}

	/* The panel fitter sits in its own power well. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	/* PIPE_MULT only exists for non-eDP, non-DSI transcoders. */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power domain reference taken during readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
9474
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03009475static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
Ville Syrjälä1cecc832017-03-27 21:55:34 +03009476{
9477 struct drm_i915_private *dev_priv =
9478 to_i915(plane_state->base.plane->dev);
9479 const struct drm_framebuffer *fb = plane_state->base.fb;
9480 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9481 u32 base;
9482
9483 if (INTEL_INFO(dev_priv)->cursor_needs_physical)
9484 base = obj->phys_handle->busaddr;
9485 else
9486 base = intel_plane_ggtt_offset(plane_state);
9487
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03009488 base += plane_state->main.offset;
9489
Ville Syrjälä1cecc832017-03-27 21:55:34 +03009490 /* ILK+ do this automagically */
9491 if (HAS_GMCH_DISPLAY(dev_priv) &&
Dave Airliea82256b2017-05-30 15:25:28 +10009492 plane_state->base.rotation & DRM_MODE_ROTATE_180)
Ville Syrjälä1cecc832017-03-27 21:55:34 +03009493 base += (plane_state->base.crtc_h *
9494 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
9495
9496 return base;
9497}
9498
Ville Syrjäläed270222017-03-27 21:55:36 +03009499static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9500{
9501 int x = plane_state->base.crtc_x;
9502 int y = plane_state->base.crtc_y;
9503 u32 pos = 0;
9504
9505 if (x < 0) {
9506 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9507 x = -x;
9508 }
9509 pos |= x << CURSOR_X_SHIFT;
9510
9511 if (y < 0) {
9512 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9513 y = -y;
9514 }
9515 pos |= y << CURSOR_Y_SHIFT;
9516
9517 return pos;
9518}
9519
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009520static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9521{
9522 const struct drm_mode_config *config =
9523 &plane_state->base.plane->dev->mode_config;
9524 int width = plane_state->base.crtc_w;
9525 int height = plane_state->base.crtc_h;
9526
9527 return width > 0 && width <= config->cursor_width &&
9528 height > 0 && height <= config->cursor_height;
9529}
9530
/*
 * Platform-independent cursor plane checks shared by the i845 and i9xx
 * paths: clip the plane state, require a linear framebuffer, and reject
 * any source panning (only the fb-internal tile offset is allowed).
 *
 * On success stores the computed surface offset in
 * plane_state->main.offset. Returns 0 or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x, src_y;
	u32 offset;
	int ret;

	/* Cursors cannot be scaled; clip against the crtc. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* No fb means the cursor is being disabled - nothing to check. */
	if (!fb)
		return 0;

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/* src coordinates are 16.16 fixed point; take the integer part. */
	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0);

	/* Anything left over after the tile offset would be panning. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->main.offset = offset;

	return 0;
}
9570
Ville Syrjälä292889e2017-03-17 23:18:01 +02009571static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9572 const struct intel_plane_state *plane_state)
9573{
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009574 const struct drm_framebuffer *fb = plane_state->base.fb;
Ville Syrjälä292889e2017-03-17 23:18:01 +02009575
Ville Syrjälä292889e2017-03-17 23:18:01 +02009576 return CURSOR_ENABLE |
9577 CURSOR_GAMMA_ENABLE |
9578 CURSOR_FORMAT_ARGB |
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009579 CURSOR_STRIDE(fb->pitches[0]);
Ville Syrjälä292889e2017-03-17 23:18:01 +02009580}
9581
Ville Syrjälä659056f2017-03-27 21:55:39 +03009582static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9583{
Ville Syrjälä659056f2017-03-27 21:55:39 +03009584 int width = plane_state->base.crtc_w;
Ville Syrjälä659056f2017-03-27 21:55:39 +03009585
9586 /*
9587 * 845g/865g are only limited by the width of their cursors,
9588 * the height is arbitrary up to the precision of the register.
9589 */
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009590 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
Ville Syrjälä659056f2017-03-27 21:55:39 +03009591}
9592
9593static int i845_check_cursor(struct intel_plane *plane,
9594 struct intel_crtc_state *crtc_state,
9595 struct intel_plane_state *plane_state)
9596{
9597 const struct drm_framebuffer *fb = plane_state->base.fb;
Ville Syrjälä659056f2017-03-27 21:55:39 +03009598 int ret;
9599
9600 ret = intel_check_cursor(crtc_state, plane_state);
9601 if (ret)
9602 return ret;
9603
9604 /* if we want to turn off the cursor ignore width and height */
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009605 if (!fb)
Ville Syrjälä659056f2017-03-27 21:55:39 +03009606 return 0;
9607
9608 /* Check for which cursor types we support */
9609 if (!i845_cursor_size_ok(plane_state)) {
9610 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
9611 plane_state->base.crtc_w,
9612 plane_state->base.crtc_h);
9613 return -EINVAL;
9614 }
9615
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009616 switch (fb->pitches[0]) {
Chris Wilson560b85b2010-08-07 11:01:38 +01009617 case 256:
9618 case 512:
9619 case 1024:
9620 case 2048:
Ville Syrjälädc41c152014-08-13 11:57:05 +03009621 break;
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009622 default:
9623 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
9624 fb->pitches[0]);
9625 return -EINVAL;
Chris Wilson560b85b2010-08-07 11:01:38 +01009626 }
Maarten Lankhorst55a08b3f2016-01-07 11:54:10 +01009627
Ville Syrjälä659056f2017-03-27 21:55:39 +03009628 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
9629
9630 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08009631}
9632
/*
 * Program the 845g/865g cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL/not visible (all values zero).
 * Register writes happen under the uncore lock with FW (non-locking)
 * accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		/* ctl was precomputed in i845_check_cursor(). */
		cntl = plane_state->ctl;
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, reprogram, then re-enable. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache the programmed values to skip redundant updates. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; a single write suffices. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
9677
/* Disable the 845g/865g cursor by programming it with empty state. */
static void i845_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i845_update_cursor(plane, NULL, NULL);
}
9683
/*
 * Read back whether the 845g/865g cursor is enabled in hardware.
 * Returns false if the pipe's power well is off (register unreadable);
 * otherwise the CURSOR_ENABLE bit of CURCNTR, read under a transient
 * power domain reference.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9700
Ville Syrjälä292889e2017-03-17 23:18:01 +02009701static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9702 const struct intel_plane_state *plane_state)
9703{
9704 struct drm_i915_private *dev_priv =
9705 to_i915(plane_state->base.plane->dev);
9706 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ville Syrjälä292889e2017-03-17 23:18:01 +02009707 u32 cntl;
9708
9709 cntl = MCURSOR_GAMMA_ENABLE;
9710
9711 if (HAS_DDI(dev_priv))
9712 cntl |= CURSOR_PIPE_CSC_ENABLE;
9713
Ville Syrjälä32ea06b2018-01-30 22:38:01 +02009714 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
9715 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
Ville Syrjälä292889e2017-03-17 23:18:01 +02009716
9717 switch (plane_state->base.crtc_w) {
9718 case 64:
9719 cntl |= CURSOR_MODE_64_ARGB_AX;
9720 break;
9721 case 128:
9722 cntl |= CURSOR_MODE_128_ARGB_AX;
9723 break;
9724 case 256:
9725 cntl |= CURSOR_MODE_256_ARGB_AX;
9726 break;
9727 default:
9728 MISSING_CASE(plane_state->base.crtc_w);
9729 return 0;
9730 }
9731
Robert Fossc2c446a2017-05-19 16:50:17 -04009732 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
Ville Syrjälä292889e2017-03-17 23:18:01 +02009733 cntl |= CURSOR_ROTATE_180;
9734
9735 return cntl;
9736}
9737
Ville Syrjälä659056f2017-03-27 21:55:39 +03009738static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
Chris Wilson560b85b2010-08-07 11:01:38 +01009739{
Ville Syrjälä024faac2017-03-27 21:55:42 +03009740 struct drm_i915_private *dev_priv =
9741 to_i915(plane_state->base.plane->dev);
Ville Syrjälä659056f2017-03-27 21:55:39 +03009742 int width = plane_state->base.crtc_w;
9743 int height = plane_state->base.crtc_h;
Chris Wilson560b85b2010-08-07 11:01:38 +01009744
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009745 if (!intel_cursor_size_ok(plane_state))
Ville Syrjälädc41c152014-08-13 11:57:05 +03009746 return false;
9747
Ville Syrjälä024faac2017-03-27 21:55:42 +03009748 /* Cursor width is limited to a few power-of-two sizes */
9749 switch (width) {
Ville Syrjälä659056f2017-03-27 21:55:39 +03009750 case 256:
9751 case 128:
Ville Syrjälä659056f2017-03-27 21:55:39 +03009752 case 64:
9753 break;
9754 default:
9755 return false;
9756 }
9757
Ville Syrjälädc41c152014-08-13 11:57:05 +03009758 /*
Ville Syrjälä024faac2017-03-27 21:55:42 +03009759 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
9760 * height from 8 lines up to the cursor width, when the
9761 * cursor is not rotated. Everything else requires square
9762 * cursors.
Ville Syrjälädc41c152014-08-13 11:57:05 +03009763 */
Ville Syrjälä024faac2017-03-27 21:55:42 +03009764 if (HAS_CUR_FBC(dev_priv) &&
Dave Airliea82256b2017-05-30 15:25:28 +10009765 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
Ville Syrjälä024faac2017-03-27 21:55:42 +03009766 if (height < 8 || height > width)
Ville Syrjälädc41c152014-08-13 11:57:05 +03009767 return false;
9768 } else {
Ville Syrjälä024faac2017-03-27 21:55:42 +03009769 if (height != width)
Ville Syrjälädc41c152014-08-13 11:57:05 +03009770 return false;
Ville Syrjälädc41c152014-08-13 11:57:05 +03009771 }
9772
9773 return true;
9774}
9775
/*
 * Validate a cursor plane update for i9xx-style hardware. Runs the
 * common checks, enforces the size and stride restrictions, refuses the
 * known-broken CHV pipe C edge-straddling position, and precomputes the
 * CURCNTR value into plane_state->ctl.
 *
 * Returns 0 or a negative error code.
 */
static int i9xx_check_cursor(struct intel_plane *plane,
			     struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* The stride must match the visible cursor width exactly. */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9827
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009828static void i9xx_update_cursor(struct intel_plane *plane,
9829 const struct intel_crtc_state *crtc_state,
Sagar Kamble4726e0b2014-03-10 17:06:23 +05309830 const struct intel_plane_state *plane_state)
9831{
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03009832 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9833 enum pipe pipe = plane->pipe;
Ville Syrjälä024faac2017-03-27 21:55:42 +03009834 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009835 unsigned long irqflags;
Sagar Kamble4726e0b2014-03-10 17:06:23 +05309836
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009837 if (plane_state && plane_state->base.visible) {
Ville Syrjäläa0864d52017-03-23 21:27:09 +02009838 cntl = plane_state->ctl;
Chris Wilson4b0e3332014-05-30 16:35:26 +03009839
Ville Syrjälä024faac2017-03-27 21:55:42 +03009840 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
9841 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
9842
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009843 base = intel_cursor_base(plane_state);
9844 pos = intel_cursor_position(plane_state);
9845 }
9846
9847 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
9848
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +03009849 /*
9850 * On some platforms writing CURCNTR first will also
9851 * cause CURPOS to be armed by the CURBASE write.
9852 * Without the CURCNTR write the CURPOS write would
Ville Syrjälä8753d2b2017-07-14 18:52:27 +03009853 * arm itself. Thus we always start the full update
9854 * with a CURCNTR write.
9855 *
9856 * On other platforms CURPOS always requires the
9857 * CURBASE write to arm the update. Additonally
9858 * a write to any of the cursor register will cancel
9859 * an already armed cursor update. Thus leaving out
9860 * the CURBASE write after CURPOS could lead to a
9861 * cursor that doesn't appear to move, or even change
9862 * shape. Thus we always write CURBASE.
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +03009863 *
9864 * CURCNTR and CUR_FBC_CTL are always
9865 * armed by the CURBASE write only.
9866 */
9867 if (plane->cursor.base != base ||
Ville Syrjälä024faac2017-03-27 21:55:42 +03009868 plane->cursor.size != fbc_ctl ||
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +03009869 plane->cursor.cntl != cntl) {
9870 I915_WRITE_FW(CURCNTR(pipe), cntl);
9871 if (HAS_CUR_FBC(dev_priv))
9872 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
9873 I915_WRITE_FW(CURPOS(pipe), pos);
Ville Syrjälä75343a42017-03-27 21:55:38 +03009874 I915_WRITE_FW(CURBASE(pipe), base);
9875
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +03009876 plane->cursor.base = base;
9877 plane->cursor.size = fbc_ctl;
9878 plane->cursor.cntl = cntl;
9879 } else {
9880 I915_WRITE_FW(CURPOS(pipe), pos);
Ville Syrjälä8753d2b2017-07-14 18:52:27 +03009881 I915_WRITE_FW(CURBASE(pipe), base);
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +03009882 }
9883
Sagar Kamble4726e0b2014-03-10 17:06:23 +05309884 POSTING_READ_FW(CURBASE(pipe));
9885
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009886 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
Jesse Barnes65a21cd2011-10-12 11:10:21 -07009887}
Ville Syrjälä5efb3e22014-04-09 13:28:53 +03009888
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009889static void i9xx_disable_cursor(struct intel_plane *plane,
9890 struct intel_crtc *crtc)
Chris Wilsoncda4b7d2010-07-09 08:45:04 +01009891{
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009892 i9xx_update_cursor(plane, NULL, NULL);
Chris Wilsoncda4b7d2010-07-09 08:45:04 +01009893}
Ville Syrjäläd6e4db12013-09-04 18:25:31 +03009894
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02009895static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
9896{
9897 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9898 enum intel_display_power_domain power_domain;
9899 enum pipe pipe = plane->pipe;
9900 bool ret;
9901
9902 /*
9903 * Not 100% correct for planes that can move between pipes,
9904 * but that's only the case for gen2-3 which don't have any
9905 * display power wells.
9906 */
9907 power_domain = POWER_DOMAIN_PIPE(pipe);
9908 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9909 return false;
9910
9911 ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
9912
9913 intel_display_power_put(dev_priv, power_domain);
9914
9915 return ret;
9916}
Chris Wilsoncda4b7d2010-07-09 08:45:04 +01009917
Jesse Barnes79e53942008-11-07 14:24:08 -08009918/* VESA 640x480x72Hz mode to set on the pipe */
Ville Syrjäläbacdcd52017-05-18 22:38:37 +03009919static const struct drm_display_mode load_detect_mode = {
Jesse Barnes79e53942008-11-07 14:24:08 -08009920 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
9921 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
9922};
9923
Daniel Vettera8bb6812014-02-10 18:00:39 +01009924struct drm_framebuffer *
Chris Wilson24dbf512017-02-15 10:59:18 +00009925intel_framebuffer_create(struct drm_i915_gem_object *obj,
9926 struct drm_mode_fb_cmd2 *mode_cmd)
Chris Wilsond2dff872011-04-19 08:36:26 +01009927{
9928 struct intel_framebuffer *intel_fb;
9929 int ret;
9930
9931 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
Lukas Wunnerdcb13942015-07-04 11:50:58 +02009932 if (!intel_fb)
Chris Wilsond2dff872011-04-19 08:36:26 +01009933 return ERR_PTR(-ENOMEM);
Chris Wilsond2dff872011-04-19 08:36:26 +01009934
Chris Wilson24dbf512017-02-15 10:59:18 +00009935 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
Daniel Vetterdd4916c2013-10-09 21:23:51 +02009936 if (ret)
9937 goto err;
Chris Wilsond2dff872011-04-19 08:36:26 +01009938
9939 return &intel_fb->base;
Daniel Vetterdd4916c2013-10-09 21:23:51 +02009940
Lukas Wunnerdcb13942015-07-04 11:50:58 +02009941err:
9942 kfree(intel_fb);
Daniel Vetterdd4916c2013-10-09 21:23:51 +02009943 return ERR_PTR(ret);
Chris Wilsond2dff872011-04-19 08:36:26 +01009944}
9945
Ville Syrjälä20bdc112017-12-20 10:35:45 +01009946static int intel_modeset_disable_planes(struct drm_atomic_state *state,
9947 struct drm_crtc *crtc)
Chris Wilsond2dff872011-04-19 08:36:26 +01009948{
Ville Syrjälä20bdc112017-12-20 10:35:45 +01009949 struct drm_plane *plane;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +03009950 struct drm_plane_state *plane_state;
Ville Syrjälä20bdc112017-12-20 10:35:45 +01009951 int ret, i;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +03009952
Ville Syrjälä20bdc112017-12-20 10:35:45 +01009953 ret = drm_atomic_add_affected_planes(state, crtc);
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +03009954 if (ret)
9955 return ret;
Ville Syrjälä20bdc112017-12-20 10:35:45 +01009956
9957 for_each_new_plane_in_state(state, plane, plane_state, i) {
9958 if (plane_state->crtc != crtc)
9959 continue;
9960
9961 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
9962 if (ret)
9963 return ret;
9964
9965 drm_atomic_set_fb_for_plane(plane_state, NULL);
9966 }
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +03009967
9968 return 0;
9969}
9970
Maarten Lankhorst6c5ed5a2017-04-06 20:55:20 +02009971int intel_get_load_detect_pipe(struct drm_connector *connector,
Ville Syrjäläbacdcd52017-05-18 22:38:37 +03009972 const struct drm_display_mode *mode,
Maarten Lankhorst6c5ed5a2017-04-06 20:55:20 +02009973 struct intel_load_detect_pipe *old,
9974 struct drm_modeset_acquire_ctx *ctx)
Jesse Barnes79e53942008-11-07 14:24:08 -08009975{
9976 struct intel_crtc *intel_crtc;
Daniel Vetterd2434ab2012-08-12 21:20:10 +02009977 struct intel_encoder *intel_encoder =
9978 intel_attached_encoder(connector);
Jesse Barnes79e53942008-11-07 14:24:08 -08009979 struct drm_crtc *possible_crtc;
Chris Wilson4ef69c72010-09-09 15:14:28 +01009980 struct drm_encoder *encoder = &intel_encoder->base;
Jesse Barnes79e53942008-11-07 14:24:08 -08009981 struct drm_crtc *crtc = NULL;
9982 struct drm_device *dev = encoder->dev;
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02009983 struct drm_i915_private *dev_priv = to_i915(dev);
Rob Clark51fd3712013-11-19 12:10:12 -05009984 struct drm_mode_config *config = &dev->mode_config;
Maarten Lankhorstedde3612016-02-17 09:18:35 +01009985 struct drm_atomic_state *state = NULL, *restore_state = NULL;
Ander Conselvan de Oliveira944b0c72015-03-20 16:18:07 +02009986 struct drm_connector_state *connector_state;
Ander Conselvan de Oliveira4be07312015-04-21 17:13:01 +03009987 struct intel_crtc_state *crtc_state;
Rob Clark51fd3712013-11-19 12:10:12 -05009988 int ret, i = -1;
Jesse Barnes79e53942008-11-07 14:24:08 -08009989
Chris Wilsond2dff872011-04-19 08:36:26 +01009990 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +03009991 connector->base.id, connector->name,
Jani Nikula8e329a032014-06-03 14:56:21 +03009992 encoder->base.id, encoder->name);
Chris Wilsond2dff872011-04-19 08:36:26 +01009993
Maarten Lankhorstedde3612016-02-17 09:18:35 +01009994 old->restore_state = NULL;
9995
Maarten Lankhorst6c5ed5a2017-04-06 20:55:20 +02009996 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
Daniel Vetter6e9f7982014-05-29 23:54:47 +02009997
Jesse Barnes79e53942008-11-07 14:24:08 -08009998 /*
9999 * Algorithm gets a little messy:
Chris Wilson7a5e4802011-04-19 23:21:12 +010010000 *
Jesse Barnes79e53942008-11-07 14:24:08 -080010001 * - if the connector already has an assigned crtc, use it (but make
10002 * sure it's on first)
Chris Wilson7a5e4802011-04-19 23:21:12 +010010003 *
Jesse Barnes79e53942008-11-07 14:24:08 -080010004 * - try to find the first unused crtc that can drive this connector,
10005 * and use that if we find one
Jesse Barnes79e53942008-11-07 14:24:08 -080010006 */
10007
10008 /* See if we already have a CRTC for this connector */
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010009 if (connector->state->crtc) {
10010 crtc = connector->state->crtc;
Chris Wilson8261b192011-04-19 23:18:09 +010010011
Rob Clark51fd3712013-11-19 12:10:12 -050010012 ret = drm_modeset_lock(&crtc->mutex, ctx);
10013 if (ret)
Maarten Lankhorstad3c5582015-07-13 16:30:26 +020010014 goto fail;
Chris Wilson8261b192011-04-19 23:18:09 +010010015
10016 /* Make sure the crtc and connector are running */
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010017 goto found;
Jesse Barnes79e53942008-11-07 14:24:08 -080010018 }
10019
10020 /* Find an unused one (if possible) */
Damien Lespiau70e1e0e2014-05-13 23:32:24 +010010021 for_each_crtc(dev, possible_crtc) {
Jesse Barnes79e53942008-11-07 14:24:08 -080010022 i++;
10023 if (!(encoder->possible_crtcs & (1 << i)))
10024 continue;
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010025
10026 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10027 if (ret)
10028 goto fail;
10029
10030 if (possible_crtc->state->enable) {
10031 drm_modeset_unlock(&possible_crtc->mutex);
Ville Syrjäläa4592492014-08-11 13:15:36 +030010032 continue;
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010033 }
Ville Syrjäläa4592492014-08-11 13:15:36 +030010034
10035 crtc = possible_crtc;
10036 break;
Jesse Barnes79e53942008-11-07 14:24:08 -080010037 }
10038
10039 /*
10040 * If we didn't find an unused CRTC, don't use any.
10041 */
10042 if (!crtc) {
Chris Wilson71731882011-04-19 23:10:58 +010010043 DRM_DEBUG_KMS("no pipe available for load-detect\n");
Dan Carpenterf4bf77b2017-04-14 22:54:25 +030010044 ret = -ENODEV;
Maarten Lankhorstad3c5582015-07-13 16:30:26 +020010045 goto fail;
Jesse Barnes79e53942008-11-07 14:24:08 -080010046 }
10047
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010048found:
10049 intel_crtc = to_intel_crtc(crtc);
10050
Ander Conselvan de Oliveira83a57152015-03-20 16:18:03 +020010051 state = drm_atomic_state_alloc(dev);
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010052 restore_state = drm_atomic_state_alloc(dev);
10053 if (!state || !restore_state) {
10054 ret = -ENOMEM;
10055 goto fail;
10056 }
Ander Conselvan de Oliveira83a57152015-03-20 16:18:03 +020010057
10058 state->acquire_ctx = ctx;
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010059 restore_state->acquire_ctx = ctx;
Ander Conselvan de Oliveira83a57152015-03-20 16:18:03 +020010060
Ander Conselvan de Oliveira944b0c72015-03-20 16:18:07 +020010061 connector_state = drm_atomic_get_connector_state(state, connector);
10062 if (IS_ERR(connector_state)) {
10063 ret = PTR_ERR(connector_state);
10064 goto fail;
10065 }
10066
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010067 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10068 if (ret)
10069 goto fail;
Ander Conselvan de Oliveira944b0c72015-03-20 16:18:07 +020010070
Ander Conselvan de Oliveira4be07312015-04-21 17:13:01 +030010071 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10072 if (IS_ERR(crtc_state)) {
10073 ret = PTR_ERR(crtc_state);
10074 goto fail;
10075 }
10076
Maarten Lankhorst49d6fa22015-05-11 10:45:15 +020010077 crtc_state->base.active = crtc_state->base.enable = true;
Ander Conselvan de Oliveira4be07312015-04-21 17:13:01 +030010078
Chris Wilson64927112011-04-20 07:25:26 +010010079 if (!mode)
10080 mode = &load_detect_mode;
Jesse Barnes79e53942008-11-07 14:24:08 -080010081
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010082 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010083 if (ret)
10084 goto fail;
10085
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010086 ret = intel_modeset_disable_planes(state, crtc);
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010087 if (ret)
10088 goto fail;
10089
10090 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10091 if (!ret)
10092 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
Ville Syrjäläbe90cc32018-03-22 17:23:12 +020010093 if (!ret)
10094 ret = drm_atomic_add_affected_planes(restore_state, crtc);
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010095 if (ret) {
10096 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10097 goto fail;
10098 }
Ander Conselvan de Oliveira8c7b5cc2015-04-21 17:13:19 +030010099
Maarten Lankhorst3ba86072016-02-29 09:18:57 +010010100 ret = drm_atomic_commit(state);
10101 if (ret) {
Chris Wilson64927112011-04-20 07:25:26 +010010102 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
Ville Syrjälä412b61d2014-01-17 15:59:39 +020010103 goto fail;
Jesse Barnes79e53942008-11-07 14:24:08 -080010104 }
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010105
10106 old->restore_state = restore_state;
Chris Wilson7abbd112017-01-19 11:37:49 +000010107 drm_atomic_state_put(state);
Chris Wilson71731882011-04-19 23:10:58 +010010108
Jesse Barnes79e53942008-11-07 14:24:08 -080010109 /* let the connector get through one full cycle before testing */
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +020010110 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
Chris Wilson71731882011-04-19 23:10:58 +010010111 return true;
Ville Syrjälä412b61d2014-01-17 15:59:39 +020010112
Maarten Lankhorstad3c5582015-07-13 16:30:26 +020010113fail:
Chris Wilson7fb71c82016-10-19 12:37:43 +010010114 if (state) {
10115 drm_atomic_state_put(state);
10116 state = NULL;
10117 }
10118 if (restore_state) {
10119 drm_atomic_state_put(restore_state);
10120 restore_state = NULL;
10121 }
Ander Conselvan de Oliveira83a57152015-03-20 16:18:03 +020010122
Maarten Lankhorst6c5ed5a2017-04-06 20:55:20 +020010123 if (ret == -EDEADLK)
10124 return ret;
Rob Clark51fd3712013-11-19 12:10:12 -050010125
Ville Syrjälä412b61d2014-01-17 15:59:39 +020010126 return false;
Jesse Barnes79e53942008-11-07 14:24:08 -080010127}
10128
Daniel Vetterd2434ab2012-08-12 21:20:10 +020010129void intel_release_load_detect_pipe(struct drm_connector *connector,
Ander Conselvan de Oliveira49172fe2015-03-20 16:18:02 +020010130 struct intel_load_detect_pipe *old,
10131 struct drm_modeset_acquire_ctx *ctx)
Jesse Barnes79e53942008-11-07 14:24:08 -080010132{
Daniel Vetterd2434ab2012-08-12 21:20:10 +020010133 struct intel_encoder *intel_encoder =
10134 intel_attached_encoder(connector);
Chris Wilson4ef69c72010-09-09 15:14:28 +010010135 struct drm_encoder *encoder = &intel_encoder->base;
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010136 struct drm_atomic_state *state = old->restore_state;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010137 int ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080010138
Chris Wilsond2dff872011-04-19 08:36:26 +010010139 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +030010140 connector->base.id, connector->name,
Jani Nikula8e329a032014-06-03 14:56:21 +030010141 encoder->base.id, encoder->name);
Chris Wilsond2dff872011-04-19 08:36:26 +010010142
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010143 if (!state)
Chris Wilson0622a532011-04-21 09:32:11 +010010144 return;
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010145
Maarten Lankhorst581e49f2017-01-16 10:37:38 +010010146 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
Chris Wilson08536952016-10-14 13:18:18 +010010147 if (ret)
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010148 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
Chris Wilson08536952016-10-14 13:18:18 +010010149 drm_atomic_state_put(state);
Jesse Barnes79e53942008-11-07 14:24:08 -080010150}
10151
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010152static int i9xx_pll_refclk(struct drm_device *dev,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010153 const struct intel_crtc_state *pipe_config)
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010154{
Chris Wilsonfac5e232016-07-04 11:34:36 +010010155 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010156 u32 dpll = pipe_config->dpll_hw_state.dpll;
10157
10158 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
Ville Syrjäläe91e9412013-12-09 18:54:16 +020010159 return dev_priv->vbt.lvds_ssc_freq;
Tvrtko Ursulin6e266952016-10-13 11:02:53 +010010160 else if (HAS_PCH_SPLIT(dev_priv))
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010161 return 120000;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010010162 else if (!IS_GEN2(dev_priv))
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010163 return 96000;
10164 else
10165 return 48000;
10166}
10167
Jesse Barnes79e53942008-11-07 14:24:08 -080010168/* Returns the clock of the currently programmed mode of the given pipe. */
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010169static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010170 struct intel_crtc_state *pipe_config)
Jesse Barnes79e53942008-11-07 14:24:08 -080010171{
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010172 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +010010173 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010174 int pipe = pipe_config->cpu_transcoder;
Ville Syrjälä293623f2013-09-13 16:18:46 +030010175 u32 dpll = pipe_config->dpll_hw_state.dpll;
Jesse Barnes79e53942008-11-07 14:24:08 -080010176 u32 fp;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +030010177 struct dpll clock;
Imre Deakdccbea32015-06-22 23:35:51 +030010178 int port_clock;
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010179 int refclk = i9xx_pll_refclk(dev, pipe_config);
Jesse Barnes79e53942008-11-07 14:24:08 -080010180
10181 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
Ville Syrjälä293623f2013-09-13 16:18:46 +030010182 fp = pipe_config->dpll_hw_state.fp0;
Jesse Barnes79e53942008-11-07 14:24:08 -080010183 else
Ville Syrjälä293623f2013-09-13 16:18:46 +030010184 fp = pipe_config->dpll_hw_state.fp1;
Jesse Barnes79e53942008-11-07 14:24:08 -080010185
10186 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +020010187 if (IS_PINEVIEW(dev_priv)) {
Adam Jacksonf2b115e2009-12-03 17:14:42 -050010188 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10189 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
Shaohua Li21778322009-02-23 15:19:16 +080010190 } else {
10191 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10192 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10193 }
10194
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010010195 if (!IS_GEN2(dev_priv)) {
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +020010196 if (IS_PINEVIEW(dev_priv))
Adam Jacksonf2b115e2009-12-03 17:14:42 -050010197 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10198 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
Shaohua Li21778322009-02-23 15:19:16 +080010199 else
10200 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
Jesse Barnes79e53942008-11-07 14:24:08 -080010201 DPLL_FPA01_P1_POST_DIV_SHIFT);
10202
10203 switch (dpll & DPLL_MODE_MASK) {
10204 case DPLLB_MODE_DAC_SERIAL:
10205 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10206 5 : 10;
10207 break;
10208 case DPLLB_MODE_LVDS:
10209 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10210 7 : 14;
10211 break;
10212 default:
Zhao Yakui28c97732009-10-09 11:39:41 +080010213 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
Jesse Barnes79e53942008-11-07 14:24:08 -080010214 "mode\n", (int)(dpll & DPLL_MODE_MASK));
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010215 return;
Jesse Barnes79e53942008-11-07 14:24:08 -080010216 }
10217
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +020010218 if (IS_PINEVIEW(dev_priv))
Imre Deakdccbea32015-06-22 23:35:51 +030010219 port_clock = pnv_calc_dpll_params(refclk, &clock);
Daniel Vetterac58c3f2013-06-01 17:16:17 +020010220 else
Imre Deakdccbea32015-06-22 23:35:51 +030010221 port_clock = i9xx_calc_dpll_params(refclk, &clock);
Jesse Barnes79e53942008-11-07 14:24:08 -080010222 } else {
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +010010223 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
Ville Syrjäläb1c560d2013-12-09 18:54:13 +020010224 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
Jesse Barnes79e53942008-11-07 14:24:08 -080010225
10226 if (is_lvds) {
10227 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10228 DPLL_FPA01_P1_POST_DIV_SHIFT);
Ville Syrjäläb1c560d2013-12-09 18:54:13 +020010229
10230 if (lvds & LVDS_CLKB_POWER_UP)
10231 clock.p2 = 7;
10232 else
10233 clock.p2 = 14;
Jesse Barnes79e53942008-11-07 14:24:08 -080010234 } else {
10235 if (dpll & PLL_P1_DIVIDE_BY_TWO)
10236 clock.p1 = 2;
10237 else {
10238 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10239 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10240 }
10241 if (dpll & PLL_P2_DIVIDE_BY_4)
10242 clock.p2 = 4;
10243 else
10244 clock.p2 = 2;
Jesse Barnes79e53942008-11-07 14:24:08 -080010245 }
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010246
Imre Deakdccbea32015-06-22 23:35:51 +030010247 port_clock = i9xx_calc_dpll_params(refclk, &clock);
Jesse Barnes79e53942008-11-07 14:24:08 -080010248 }
10249
Ville Syrjälä18442d02013-09-13 16:00:08 +030010250 /*
10251 * This value includes pixel_multiplier. We will use
Damien Lespiau241bfc32013-09-25 16:45:37 +010010252 * port_clock to compute adjusted_mode.crtc_clock in the
Ville Syrjälä18442d02013-09-13 16:00:08 +030010253 * encoder's get_config() function.
10254 */
Imre Deakdccbea32015-06-22 23:35:51 +030010255 pipe_config->port_clock = port_clock;
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010256}
10257
Ville Syrjälä6878da02013-09-13 15:59:11 +030010258int intel_dotclock_calculate(int link_freq,
10259 const struct intel_link_m_n *m_n)
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010260{
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010261 /*
10262 * The calculation for the data clock is:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010263 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010264 * But we want to avoid losing precison if possible, so:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010265 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010266 *
10267 * and the link clock is simpler:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010268 * link_clock = (m * link_clock) / n
Jesse Barnes79e53942008-11-07 14:24:08 -080010269 */
10270
Ville Syrjälä6878da02013-09-13 15:59:11 +030010271 if (!m_n->link_n)
10272 return 0;
10273
Chris Wilson31236982017-09-13 11:51:53 +010010274 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
Ville Syrjälä6878da02013-09-13 15:59:11 +030010275}
10276
Ville Syrjälä18442d02013-09-13 16:00:08 +030010277static void ironlake_pch_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010278 struct intel_crtc_state *pipe_config)
Ville Syrjälä6878da02013-09-13 15:59:11 +030010279{
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020010280 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ville Syrjälä18442d02013-09-13 16:00:08 +030010281
10282 /* read out port_clock from the DPLL */
10283 i9xx_crtc_clock_get(crtc, pipe_config);
Ville Syrjälä6878da02013-09-13 15:59:11 +030010284
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010285 /*
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020010286 * In case there is an active pipe without active ports,
10287 * we may need some idea for the dotclock anyway.
10288 * Calculate one based on the FDI configuration.
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010289 */
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020010290 pipe_config->base.adjusted_mode.crtc_clock =
Ville Syrjälä21a727b2016-02-17 21:41:10 +020010291 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
Ville Syrjälä18442d02013-09-13 16:00:08 +030010292 &pipe_config->fdi_m_n);
Jesse Barnes79e53942008-11-07 14:24:08 -080010293}
10294
Ville Syrjäläde330812017-10-09 19:19:50 +030010295/* Returns the currently programmed mode of the given encoder. */
10296struct drm_display_mode *
10297intel_encoder_current_mode(struct intel_encoder *encoder)
Jesse Barnes79e53942008-11-07 14:24:08 -080010298{
Ville Syrjäläde330812017-10-09 19:19:50 +030010299 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10300 struct intel_crtc_state *crtc_state;
Jesse Barnes79e53942008-11-07 14:24:08 -080010301 struct drm_display_mode *mode;
Ville Syrjäläde330812017-10-09 19:19:50 +030010302 struct intel_crtc *crtc;
10303 enum pipe pipe;
10304
10305 if (!encoder->get_hw_state(encoder, &pipe))
10306 return NULL;
10307
10308 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Jesse Barnes79e53942008-11-07 14:24:08 -080010309
10310 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10311 if (!mode)
10312 return NULL;
10313
Ville Syrjäläde330812017-10-09 19:19:50 +030010314 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10315 if (!crtc_state) {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +000010316 kfree(mode);
10317 return NULL;
10318 }
10319
Ville Syrjäläde330812017-10-09 19:19:50 +030010320 crtc_state->base.crtc = &crtc->base;
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010321
Ville Syrjäläde330812017-10-09 19:19:50 +030010322 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10323 kfree(crtc_state);
10324 kfree(mode);
10325 return NULL;
10326 }
Ville Syrjäläe30a1542016-04-01 18:37:25 +030010327
Ville Syrjäläde330812017-10-09 19:19:50 +030010328 encoder->get_config(encoder, crtc_state);
Ville Syrjäläe30a1542016-04-01 18:37:25 +030010329
Ville Syrjäläde330812017-10-09 19:19:50 +030010330 intel_mode_from_pipe_config(mode, crtc_state);
Jesse Barnes79e53942008-11-07 14:24:08 -080010331
Ville Syrjäläde330812017-10-09 19:19:50 +030010332 kfree(crtc_state);
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +000010333
Jesse Barnes79e53942008-11-07 14:24:08 -080010334 return mode;
10335}
10336
/*
 * Tear down the base CRTC and free the wrapping intel_crtc.
 * NOTE(review): presumably wired up as the drm_crtc_funcs .destroy
 * callback — confirm at the funcs table, which is outside this chunk.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
10344
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010345/**
10346 * intel_wm_need_update - Check whether watermarks need updating
10347 * @plane: drm plane
10348 * @state: new plane state
10349 *
10350 * Check current plane state versus the new one to determine whether
10351 * watermarks need to be recalculated.
10352 *
10353 * Returns true or false.
10354 */
10355static bool intel_wm_need_update(struct drm_plane *plane,
10356 struct drm_plane_state *state)
10357{
Matt Roperd21fbe82015-09-24 15:53:12 -070010358 struct intel_plane_state *new = to_intel_plane_state(state);
10359 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10360
10361 /* Update watermarks on tiling or size changes. */
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010362 if (new->base.visible != cur->base.visible)
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010363 return true;
10364
10365 if (!cur->base.fb || !new->base.fb)
10366 return false;
10367
Ville Syrjäläbae781b2016-11-16 13:33:16 +020010368 if (cur->base.fb->modifier != new->base.fb->modifier ||
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010369 cur->base.rotation != new->base.rotation ||
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010370 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10371 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10372 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10373 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010374 return true;
10375
10376 return false;
10377}
10378
Ville Syrjäläb2b55502017-08-23 18:22:23 +030010379static bool needs_scaling(const struct intel_plane_state *state)
Matt Roperd21fbe82015-09-24 15:53:12 -070010380{
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010381 int src_w = drm_rect_width(&state->base.src) >> 16;
10382 int src_h = drm_rect_height(&state->base.src) >> 16;
10383 int dst_w = drm_rect_width(&state->base.dst);
10384 int dst_h = drm_rect_height(&state->base.dst);
Matt Roperd21fbe82015-09-24 15:53:12 -070010385
10386 return (src_w != dst_w || src_h != dst_h);
10387}
10388
/**
 * intel_plane_atomic_calc_changes - derive crtc-level flags from a plane update
 * @old_crtc_state: crtc state before the update
 * @crtc_state: new crtc state (extended fields are updated in place)
 * @old_plane_state: plane state before the update
 * @plane_state: new plane state
 *
 * Compares old vs. new plane state and sets the derived flags on the new
 * crtc state (update_wm_pre/update_wm_post, disable_cxsr, fb_changed,
 * fb_bits, active_planes, disable_lp_wm) that the commit phase consumes.
 *
 * Returns 0 on success or a negative error code from the SKL scaler check.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct drm_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->base.active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* Gen9+ non-cursor planes may need a pipe scaler allocated/released. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	/* A plane can't have been visible on a crtc that wasn't active. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	/* Invisible before and after: nothing to do for this plane. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	/* A full modeset forces both an "off" and an "on" transition. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		/* pre-gen5 (except G4X) reprograms watermarks before the flip */
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		/* ... and after the flip when turning a plane off */
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(&plane->base, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	/* Track which frontbuffers are touched by this update. */
	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
10493
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010494static bool encoders_cloneable(const struct intel_encoder *a,
10495 const struct intel_encoder *b)
10496{
10497 /* masks could be asymmetric, so check both ways */
10498 return a == b || (a->cloneable & (1 << b->type) &&
10499 b->cloneable & (1 << a->type));
10500}
10501
10502static bool check_single_encoder_cloning(struct drm_atomic_state *state,
10503 struct intel_crtc *crtc,
10504 struct intel_encoder *encoder)
10505{
10506 struct intel_encoder *source_encoder;
10507 struct drm_connector *connector;
10508 struct drm_connector_state *connector_state;
10509 int i;
10510
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010010511 for_each_new_connector_in_state(state, connector, connector_state, i) {
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010512 if (connector_state->crtc != &crtc->base)
10513 continue;
10514
10515 source_encoder =
10516 to_intel_encoder(connector_state->best_encoder);
10517 if (!encoders_cloneable(encoder, source_encoder))
10518 return false;
10519 }
10520
10521 return true;
10522}
10523
/*
 * intel_crtc_atomic_check - i915 .atomic_check hook for crtcs
 *
 * Validates and derives the new crtc state: clock/DPLL computation on a
 * modeset, color management, target and intermediate watermarks, gen9+
 * scaler setup and pixel-rate limits, and HSW IPS enablement.
 *
 * Returns 0 on success or a negative error code from any of the checks.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Disabling a pipe needs the watermarks reprogrammed afterwards. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/* Compute new clocks on a modeset; a dpll must not be set up yet. */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		/* Intermediate wm needs the target wm computed above. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state. We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* Skipping intermediate wm: reuse the optimal ILK values. */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
10608
/* CRTC helper vtable hooking i915 into the DRM atomic commit machinery. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
10614
/*
 * Sync each connector's atomic state (best_encoder/crtc) with its legacy
 * encoder pointer, fixing up the connector reference counts: drop the ref
 * held for a previously bound crtc, take one when (re)binding.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference that backed the old crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound to a crtc again: take a new reference. */
			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
10639
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010640static void
Robin Schroereba905b2014-05-18 02:24:50 +020010641connected_sink_compute_bpp(struct intel_connector *connector,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010642 struct intel_crtc_state *pipe_config)
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010010643{
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010644 const struct drm_display_info *info = &connector->base.display_info;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010645 int bpp = pipe_config->pipe_bpp;
10646
10647 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010648 connector->base.base.id,
10649 connector->base.name);
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010650
10651 /* Don't use an invalid EDID bpc value */
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010652 if (info->bpc != 0 && info->bpc * 3 < bpp) {
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010653 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010654 bpp, info->bpc * 3);
10655 pipe_config->pipe_bpp = info->bpc * 3;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010656 }
10657
Mario Kleiner196f9542016-07-06 12:05:45 +020010658 /* Clamp bpp to 8 on screens without EDID 1.4 */
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010659 if (info->bpc == 0 && bpp > 24) {
Mario Kleiner196f9542016-07-06 12:05:45 +020010660 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
10661 bpp);
10662 pipe_config->pipe_bpp = 24;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010663 }
10664}
10665
10666static int
10667compute_baseline_pipe_bpp(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010668 struct intel_crtc_state *pipe_config)
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010669{
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010010670 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ander Conselvan de Oliveira14860172015-03-20 16:18:09 +020010671 struct drm_atomic_state *state;
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030010672 struct drm_connector *connector;
10673 struct drm_connector_state *connector_state;
Ander Conselvan de Oliveira14860172015-03-20 16:18:09 +020010674 int bpp, i;
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010010675
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010010676 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
10677 IS_CHERRYVIEW(dev_priv)))
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010010678 bpp = 10*3;
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010010679 else if (INTEL_GEN(dev_priv) >= 5)
Daniel Vetterd328c9d2015-04-10 16:22:37 +020010680 bpp = 12*3;
10681 else
10682 bpp = 8*3;
10683
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010010684
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010010685 pipe_config->pipe_bpp = bpp;
10686
Ander Conselvan de Oliveira14860172015-03-20 16:18:09 +020010687 state = pipe_config->base.state;
10688
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010010689 /* Clamp display bpp to EDID value */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010010690 for_each_new_connector_in_state(state, connector, connector_state, i) {
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030010691 if (connector_state->crtc != &crtc->base)
Ander Conselvan de Oliveira14860172015-03-20 16:18:09 +020010692 continue;
10693
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030010694 connected_sink_compute_bpp(to_intel_connector(connector),
10695 pipe_config);
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010010696 }
10697
10698 return bpp;
10699}
10700
/* Dump the hardware (crtc_*) timing fields of @mode to the debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
10711
/*
 * Log one intel_link_m_n configuration (GMCH and link M/N dividers plus
 * TU size) together with the lane count, tagged with @id (e.g. "fdi",
 * "dp m_n", "dp m2_n2").
 */
static inline void
intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
		      unsigned int lane_count, struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
10721
/*
 * Human-readable names for the INTEL_OUTPUT_* enum values, indexed by
 * enum value via designated initializers. Used by snprintf_output_types().
 */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
10740
10741static void snprintf_output_types(char *buf, size_t len,
10742 unsigned int output_types)
10743{
10744 char *str = buf;
10745 int i;
10746
10747 str[0] = '\0';
10748
10749 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
10750 int r;
10751
10752 if ((output_types & BIT(i)) == 0)
10753 continue;
10754
10755 r = snprintf(str, len, "%s%s",
10756 str != buf ? "," : "", output_type_str[i]);
10757 if (r >= len)
10758 break;
10759 str += r;
10760 len -= r;
10761
10762 output_types &= ~BIT(i);
10763 }
10764
10765 WARN_ON_ONCE(output_types != 0);
10766}
10767
/*
 * Dump the full contents of a crtc's pipe configuration to the debug
 * log: output types, transcoder/bpp, link M/N values, modes/timings,
 * scaler and panel-fitter state, DPLL hw state, and the state of every
 * plane on this crtc. @context is appended to the leading crtc line.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (pipe_config->ycbcr420)
		DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the second link rate used for DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		/* Skip planes that belong to other pipes. */
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		/* src coordinates are 16.16 fixed point, hence the >> 16. */
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
10874
/*
 * Verify the new atomic state doesn't drive the same digital port from
 * more than one encoder, and doesn't mix MST with SST/HDMI on one port.
 * Returns true if the configuration is conflict-free.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current one. */
		connector_state = drm_atomic_get_new_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
		unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* fallthrough */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
10937
/*
 * Reset the i915-specific extension of a crtc state to zero while
 * preserving the drm_crtc_state base and a handful of fields (scaler
 * state, shared DPLL + hw state, pfit force_thru, ips_force_disable,
 * and on G4X/VLV/CHV the watermark state) by saving and restoring them
 * around the memset.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru, ips_force_disable;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	ips_force_disable = crtc_state->ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	crtc_state->ips_force_disable = ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}
10977
/*
 * Compute the full pipe configuration for @crtc from the atomic state
 * attached to @pipe_config: reset the derived state, seed default timings
 * from the requested mode, then give every connected encoder's
 * ->compute_config() hook and the crtc itself a chance to adjust or
 * reject it. On a bandwidth-constrained RETRY from the crtc, the encoder
 * pass is re-run exactly once with reset clock defaults.
 *
 * Returns 0 on success or a negative error code (-EINVAL on rejection).
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true; /* allow exactly one RETRY round trip */

	clear_intel_crtc_state(pipe_config);

	/* Default to the transcoder matching the pipe index; the cast
	 * assumes the enum values line up — encoders may override this. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* Starting bpp, before the encoders get a chance to lower it. */
	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	/* First connector pass: validate cloning and collect output_types. */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Only one retry is permitted; a second RETRY means the
		 * encoder/crtc negotiation is not converging. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
11108
/*
 * Compare two clock values (in kHz) with a little slack: identical clocks
 * always match, a zero clock never matches a non-zero one, and otherwise
 * the clocks match when they differ by less than ~5% of their sum.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	/* Same integer arithmetic as a "within ~5%" tolerance check. */
	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
11126
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011127static bool
11128intel_compare_m_n(unsigned int m, unsigned int n,
11129 unsigned int m2, unsigned int n2,
11130 bool exact)
11131{
11132 if (m == m2 && n == n2)
11133 return true;
11134
11135 if (exact || !m || !n || !m2 || !n2)
11136 return false;
11137
11138 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11139
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011140 if (n > n2) {
11141 while (n > n2) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011142 m2 <<= 1;
11143 n2 <<= 1;
11144 }
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011145 } else if (n < n2) {
11146 while (n < n2) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011147 m <<= 1;
11148 n <<= 1;
11149 }
11150 }
11151
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011152 if (n != n2)
11153 return false;
11154
11155 return intel_fuzzy_clock_check(m, m2);
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011156}
11157
11158static bool
11159intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11160 struct intel_link_m_n *m2_n2,
11161 bool adjust)
11162{
11163 if (m_n->tu == m2_n2->tu &&
11164 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11165 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11166 intel_compare_m_n(m_n->link_m, m_n->link_n,
11167 m2_n2->link_m, m2_n2->link_n, !adjust)) {
11168 if (adjust)
11169 *m2_n2 = *m_n;
11170
11171 return true;
11172 }
11173
11174 return false;
11175}
11176
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011177static void __printf(3, 4)
11178pipe_config_err(bool adjust, const char *name, const char *format, ...)
11179{
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011180 struct va_format vaf;
11181 va_list args;
11182
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011183 va_start(args, format);
11184 vaf.fmt = format;
11185 vaf.va = &args;
11186
Joe Perches99a95482018-03-13 15:02:15 -070011187 if (adjust)
11188 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
11189 else
11190 drm_err("mismatch in %s %pV", name, &vaf);
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011191
11192 va_end(args);
11193}
11194
/*
 * Compare a software-computed crtc state (@current_config) against one
 * read back from the hardware (@pipe_config). Returns true when they
 * match. With @adjust set the comparison is relaxed (fuzzy clocks/m_n,
 * debug-level logging via pipe_config_err()), and a fuzzy m_n match
 * rewrites pipe_config's m_n values with the canonical ones (see
 * intel_compare_link_m_n()). The PIPE_CONF_CHECK_* macros below all set
 * the local 'ret' to false on mismatch rather than returning early, so
 * every mismatching field gets logged.
 */
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;
	/* True when fixing up state inherited from the BIOS on an
	 * adjust pass; some booleans cannot be fully verified then. */
	bool fixup_inherited = adjust &&
		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);

/* Compare a field, printing it as hex on mismatch. */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a field, printing it as a signed integer on mismatch. */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a boolean field, printing yes/no on mismatch. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %s, found %s)\n", \
			  yesno(current_config->name), \
			  yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_err(adjust, __stringify(name), \
			  "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
			  yesno(current_config->name), \
			  yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* Compare a pointer field (e.g. the shared DPLL) by identity. */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %p, found %p)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a link m/n field; fuzzy and self-adjusting when 'adjust'. */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "or tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  current_config->alt_name.tu, \
			  current_config->alt_name.gmch_m, \
			  current_config->alt_name.gmch_n, \
			  current_config->alt_name.link_m, \
			  current_config->alt_name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* Compare only the bits of a field selected by 'mask'. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(%x) (expected %i, found %i)\n", \
			  (mask), \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Compare a clock with intel_fuzzy_clock_check() tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* True when either state carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	/* BDW+ has a single m_n register set shared between high and low
	 * refresh rate, so either sw variable may match the hw state. */
	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
	PIPE_CONF_CHECK_BOOL(ycbcr420);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/* These are only compared on full state checks, not on the
	 * relaxed fastset-adjust pass. */
	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

/* NOTE(review): PIPE_CONF_CHECK_M_N and PIPE_CONF_CHECK_M_N_ALT are not
 * undefined here and leak past this function -- harmless today, but
 * consider adding them to this list. */
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return ret;
}
11478
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020011479static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11480 const struct intel_crtc_state *pipe_config)
11481{
11482 if (pipe_config->has_pch_encoder) {
Ville Syrjälä21a727b2016-02-17 21:41:10 +020011483 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020011484 &pipe_config->fdi_m_n);
11485 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11486
11487 /*
11488 * FDI already provided one idea for the dotclock.
11489 * Yell if the encoder disagrees.
11490 */
11491 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11492 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11493 fdi_dotclock, dotclock);
11494 }
11495}
11496
/*
 * Cross-check the software watermark and DDB allocation state for @crtc
 * against what the hardware has actually programmed (gen9+ only).
 * Mismatches are reported with DRM_ERROR but nothing is corrected here.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	/* SKL-style watermarks exist only on gen9+, and an inactive pipe
	 * has nothing meaningful to verify. */
	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	/* Read back the hw watermarks and locate the matching sw state. */
	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* gen11+ splits the DBUF into slices; verify the enable mask. */
	if (INTEL_GEN(dev_priv) >= 11)
		if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
			DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
				  sw_ddb->enabled_slices,
				  hw_ddb.enabled_slices);
	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
		sw_ddb_entry = &sw_ddb->plane[pipe][plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * The cursor plane is not covered by for_each_universal_plane() and
	 * uses the fixed PLANE_CURSOR index, so it is verified separately.
	 * NOTE(review): the original skip-if-inactive condition was reduced
	 * to "if (1)" -- the cursor is now always checked and the block
	 * appears to be kept only to avoid re-indenting. TODO: drop the
	 * dead conditional.
	 */
	if (1) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}
11618
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020011619static void
Maarten Lankhorst677100c2016-11-08 13:55:41 +010011620verify_connector_state(struct drm_device *dev,
11621 struct drm_atomic_state *state,
11622 struct drm_crtc *crtc)
Daniel Vetter8af6cf82012-07-10 09:50:11 +020011623{
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +020011624 struct drm_connector *connector;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011625 struct drm_connector_state *new_conn_state;
Maarten Lankhorst677100c2016-11-08 13:55:41 +010011626 int i;
Daniel Vetter8af6cf82012-07-10 09:50:11 +020011627
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011628 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +020011629 struct drm_encoder *encoder = connector->encoder;
Maarten Lankhorst749d98b2017-05-11 10:28:43 +020011630 struct drm_crtc_state *crtc_state = NULL;
Maarten Lankhorstad3c5582015-07-13 16:30:26 +020011631
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011632 if (new_conn_state->crtc != crtc)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011633 continue;
11634
Maarten Lankhorst749d98b2017-05-11 10:28:43 +020011635 if (crtc)
11636 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
11637
11638 intel_connector_verify_state(crtc_state, new_conn_state);
Daniel Vetter8af6cf82012-07-10 09:50:11 +020011639
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011640 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +020011641 "connector's atomic encoder doesn't match legacy encoder\n");
Daniel Vetter8af6cf82012-07-10 09:50:11 +020011642 }
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020011643}
11644
/*
 * Cross-check each encoder's sw state against the connector states in
 * @state: the encoder's crtc pointer must agree with the connectors using
 * it, and an encoder left without a crtc must also be disabled in hw.
 * Only connectors present in @state are examined.
 */
static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(dev, encoder) {
		/*
		 * found: some connector referenced this encoder in either the
		 * old or the new state, so the checks below are meaningful.
		 * enabled: a connector uses this encoder in the new state.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			/* Connector and its encoder must point at the same crtc. */
			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
				 "connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoder untouched by this commit - nothing to verify. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* Detached encoder: the hw must be off as well. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
11693
/*
 * Read the pipe configuration back from the hardware and compare it against
 * the sw state in @new_crtc_state, warning on any mismatch.
 *
 * NOTE: @old_crtc_state is destroyed and its memory reused as scratch space
 * to hold the hw readout, so it must not be used by the caller afterwards.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/*
	 * Free the old state's internals, then recycle its allocation as a
	 * zeroed intel_crtc_state for the hw readout below.
	 */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	/* Every encoder on this crtc must agree with the crtc's active state. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's hw config into the readout. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Inactive pipes have nothing further to compare. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
11763
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020011764static void
Ville Syrjäläcff109f2017-11-17 21:19:17 +020011765intel_verify_planes(struct intel_atomic_state *state)
11766{
11767 struct intel_plane *plane;
11768 const struct intel_plane_state *plane_state;
11769 int i;
11770
11771 for_each_new_intel_plane_in_state(state, plane,
11772 plane_state, i)
11773 assert_plane(plane, plane_state->base.visible);
11774}
11775
11776static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020011777verify_single_dpll_state(struct drm_i915_private *dev_priv,
11778 struct intel_shared_dpll *pll,
11779 struct drm_crtc *crtc,
11780 struct drm_crtc_state *new_state)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011781{
11782 struct intel_dpll_hw_state dpll_hw_state;
11783 unsigned crtc_mask;
11784 bool active;
11785
11786 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
11787
Lucas De Marchi72f775f2018-03-20 15:06:34 -070011788 DRM_DEBUG_KMS("%s\n", pll->info->name);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011789
Lucas De Marchiee1398b2018-03-20 15:06:33 -070011790 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011791
Lucas De Marchi5cd281f2018-03-20 15:06:36 -070011792 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011793 I915_STATE_WARN(!pll->on && pll->active_mask,
11794 "pll in active use but not on in sw tracking\n");
11795 I915_STATE_WARN(pll->on && !pll->active_mask,
11796 "pll is on but not used by any active crtc\n");
11797 I915_STATE_WARN(pll->on != active,
11798 "pll on state mismatch (expected %i, found %i)\n",
11799 pll->on, active);
11800 }
11801
11802 if (!crtc) {
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020011803 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011804 "more active pll users than references: %x vs %x\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020011805 pll->active_mask, pll->state.crtc_mask);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011806
11807 return;
11808 }
11809
11810 crtc_mask = 1 << drm_crtc_index(crtc);
11811
11812 if (new_state->active)
11813 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
11814 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
11815 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
11816 else
11817 I915_STATE_WARN(pll->active_mask & crtc_mask,
11818 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
11819 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
11820
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020011821 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011822 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020011823 crtc_mask, pll->state.crtc_mask);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011824
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020011825 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011826 &dpll_hw_state,
11827 sizeof(dpll_hw_state)),
11828 "pll hw state mismatch\n");
11829}
11830
11831static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020011832verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
11833 struct drm_crtc_state *old_crtc_state,
11834 struct drm_crtc_state *new_crtc_state)
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020011835{
Chris Wilsonfac5e232016-07-04 11:34:36 +010011836 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011837 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
11838 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
11839
11840 if (new_state->shared_dpll)
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020011841 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011842
11843 if (old_state->shared_dpll &&
11844 old_state->shared_dpll != new_state->shared_dpll) {
11845 unsigned crtc_mask = 1 << drm_crtc_index(crtc);
11846 struct intel_shared_dpll *pll = old_state->shared_dpll;
11847
11848 I915_STATE_WARN(pll->active_mask & crtc_mask,
11849 "pll active mismatch (didn't expect pipe %c in active mask)\n",
11850 pipe_name(drm_crtc_index(crtc)));
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020011851 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011852 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
11853 pipe_name(drm_crtc_index(crtc)));
11854 }
11855}
11856
11857static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020011858intel_modeset_verify_crtc(struct drm_crtc *crtc,
Maarten Lankhorst677100c2016-11-08 13:55:41 +010011859 struct drm_atomic_state *state,
11860 struct drm_crtc_state *old_state,
11861 struct drm_crtc_state *new_state)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011862{
Daniel Vetter5a21b662016-05-24 17:13:53 +020011863 if (!needs_modeset(new_state) &&
11864 !to_intel_crtc_state(new_state)->update_pipe)
11865 return;
11866
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020011867 verify_wm_state(crtc, new_state);
Maarten Lankhorst677100c2016-11-08 13:55:41 +010011868 verify_connector_state(crtc->dev, state, crtc);
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020011869 verify_crtc_state(crtc, old_state, new_state);
11870 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011871}
11872
11873static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020011874verify_disabled_dpll_state(struct drm_device *dev)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011875{
Chris Wilsonfac5e232016-07-04 11:34:36 +010011876 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020011877 int i;
Daniel Vetter53589012013-06-05 13:34:16 +020011878
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011879 for (i = 0; i < dev_priv->num_shared_dpll; i++)
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020011880 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010011881}
Daniel Vetter53589012013-06-05 13:34:16 +020011882
/*
 * Post-commit sanity checks for state not tied to any single crtc:
 * encoder<->connector linkage, connectors left without a crtc, and the
 * shared DPLL reference counts.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}
11891
Ville Syrjälä80715b22014-05-15 20:23:23 +030011892static void update_scanline_offset(struct intel_crtc *crtc)
11893{
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010011894 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ville Syrjälä80715b22014-05-15 20:23:23 +030011895
11896 /*
11897 * The scanline counter increments at the leading edge of hsync.
11898 *
11899 * On most platforms it starts counting from vtotal-1 on the
11900 * first active line. That means the scanline counter value is
11901 * always one less than what we would expect. Ie. just after
11902 * start of vblank, which also occurs at start of hsync (on the
11903 * last active line), the scanline counter will read vblank_start-1.
11904 *
11905 * On gen2 the scanline counter starts counting from 1 instead
11906 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
11907 * to keep the value positive), instead of adding one.
11908 *
11909 * On HSW+ the behaviour of the scanline counter depends on the output
11910 * type. For DP ports it behaves like most other platforms, but on HDMI
11911 * there's an extra 1 line difference. So we need to add two instead of
11912 * one to the value.
Ville Syrjäläec1b4ee2016-12-15 19:47:34 +020011913 *
11914 * On VLV/CHV DSI the scanline counter would appear to increment
11915 * approx. 1/3 of a scanline before start of vblank. Unfortunately
11916 * that means we can't tell whether we're in vblank or not while
11917 * we're on that particular line. We must still set scanline_offset
11918 * to 1 so that the vblank timestamps come out correct when we query
11919 * the scanline counter from within the vblank interrupt handler.
11920 * However if queried just before the start of vblank we'll get an
11921 * answer that's slightly in the future.
Ville Syrjälä80715b22014-05-15 20:23:23 +030011922 */
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010011923 if (IS_GEN2(dev_priv)) {
Ville Syrjälä124abe02015-09-08 13:40:45 +030011924 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
Ville Syrjälä80715b22014-05-15 20:23:23 +030011925 int vtotal;
11926
Ville Syrjälä124abe02015-09-08 13:40:45 +030011927 vtotal = adjusted_mode->crtc_vtotal;
11928 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
Ville Syrjälä80715b22014-05-15 20:23:23 +030011929 vtotal /= 2;
11930
11931 crtc->scanline_offset = vtotal - 1;
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010011932 } else if (HAS_DDI(dev_priv) &&
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +030011933 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
Ville Syrjälä80715b22014-05-15 20:23:23 +030011934 crtc->scanline_offset = 2;
11935 } else
11936 crtc->scanline_offset = 1;
11937}
11938
Maarten Lankhorstad421372015-06-15 12:33:42 +020011939static void intel_modeset_clear_plls(struct drm_atomic_state *state)
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020011940{
Ander Conselvan de Oliveira225da592015-04-02 14:47:57 +030011941 struct drm_device *dev = state->dev;
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020011942 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira0a9ab302015-04-21 17:13:04 +030011943 struct drm_crtc *crtc;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011944 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Ander Conselvan de Oliveira0a9ab302015-04-21 17:13:04 +030011945 int i;
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020011946
11947 if (!dev_priv->display.crtc_compute_clock)
Maarten Lankhorstad421372015-06-15 12:33:42 +020011948 return;
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020011949
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011950 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
Maarten Lankhorstfb1a38a2016-02-09 13:02:17 +010011951 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020011952 struct intel_shared_dpll *old_dpll =
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011953 to_intel_crtc_state(old_crtc_state)->shared_dpll;
Maarten Lankhorstad421372015-06-15 12:33:42 +020011954
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011955 if (!needs_modeset(new_crtc_state))
Ander Conselvan de Oliveira225da592015-04-02 14:47:57 +030011956 continue;
11957
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011958 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
Maarten Lankhorstfb1a38a2016-02-09 13:02:17 +010011959
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020011960 if (!old_dpll)
Maarten Lankhorstfb1a38a2016-02-09 13:02:17 +010011961 continue;
Ander Conselvan de Oliveira0a9ab302015-04-21 17:13:04 +030011962
Ander Conselvan de Oliveiraa1c414e2016-12-29 17:22:07 +020011963 intel_release_shared_dpll(old_dpll, intel_crtc, state);
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020011964 }
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020011965}
11966
Maarten Lankhorst99d736a2015-06-01 12:50:09 +020011967/*
11968 * This implements the workaround described in the "notes" section of the mode
11969 * set sequence documentation. When going from no pipes or single pipe to
11970 * multiple pipes, and planes are enabled after the pipe, we need to wait at
11971 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
11972 */
11973static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
11974{
11975 struct drm_crtc_state *crtc_state;
11976 struct intel_crtc *intel_crtc;
11977 struct drm_crtc *crtc;
11978 struct intel_crtc_state *first_crtc_state = NULL;
11979 struct intel_crtc_state *other_crtc_state = NULL;
11980 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
11981 int i;
11982
11983 /* look at all crtc's that are going to be enabled in during modeset */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011984 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
Maarten Lankhorst99d736a2015-06-01 12:50:09 +020011985 intel_crtc = to_intel_crtc(crtc);
11986
11987 if (!crtc_state->active || !needs_modeset(crtc_state))
11988 continue;
11989
11990 if (first_crtc_state) {
11991 other_crtc_state = to_intel_crtc_state(crtc_state);
11992 break;
11993 } else {
11994 first_crtc_state = to_intel_crtc_state(crtc_state);
11995 first_pipe = intel_crtc->pipe;
11996 }
11997 }
11998
11999 /* No workaround needed? */
12000 if (!first_crtc_state)
12001 return 0;
12002
12003 /* w/a possibly needed, check how many crtc's are already enabled. */
12004 for_each_intel_crtc(state->dev, intel_crtc) {
12005 struct intel_crtc_state *pipe_config;
12006
12007 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
12008 if (IS_ERR(pipe_config))
12009 return PTR_ERR(pipe_config);
12010
12011 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
12012
12013 if (!pipe_config->base.active ||
12014 needs_modeset(&pipe_config->base))
12015 continue;
12016
12017 /* 2 or more enabled crtcs means no need for w/a */
12018 if (enabled_pipe != INVALID_PIPE)
12019 return 0;
12020
12021 enabled_pipe = intel_crtc->pipe;
12022 }
12023
12024 if (enabled_pipe != INVALID_PIPE)
12025 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
12026 else if (other_crtc_state)
12027 other_crtc_state->hsw_workaround_pipe = first_pipe;
12028
12029 return 0;
12030}
12031
Ville Syrjälä8d965612016-11-14 18:35:10 +020012032static int intel_lock_all_pipes(struct drm_atomic_state *state)
12033{
12034 struct drm_crtc *crtc;
12035
12036 /* Add all pipes to the state */
12037 for_each_crtc(state->dev, crtc) {
12038 struct drm_crtc_state *crtc_state;
12039
12040 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12041 if (IS_ERR(crtc_state))
12042 return PTR_ERR(crtc_state);
12043 }
12044
12045 return 0;
12046}
12047
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012048static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12049{
12050 struct drm_crtc *crtc;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012051
Ville Syrjälä8d965612016-11-14 18:35:10 +020012052 /*
12053 * Add all pipes to the state, and force
12054 * a modeset on all the active ones.
12055 */
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012056 for_each_crtc(state->dev, crtc) {
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012057 struct drm_crtc_state *crtc_state;
12058 int ret;
12059
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012060 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12061 if (IS_ERR(crtc_state))
12062 return PTR_ERR(crtc_state);
12063
12064 if (!crtc_state->active || needs_modeset(crtc_state))
12065 continue;
12066
12067 crtc_state->mode_changed = true;
12068
12069 ret = drm_atomic_add_affected_connectors(state, crtc);
12070 if (ret)
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012071 return ret;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012072
12073 ret = drm_atomic_add_affected_planes(state, crtc);
12074 if (ret)
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012075 return ret;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012076 }
12077
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012078 return 0;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012079}
12080
/*
 * Check-phase work that is only needed when some crtc requires a full
 * modeset: reject conflicting digital ports, recompute the active crtc
 * mask and the cdclk state, drop stale shared DPLL references and apply
 * the HSW plane workaround.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Start from the current hw bookkeeping and fold in this commit. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
					      &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	} else {
		/* No cdclk recomputation: carry the current state over. */
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
12158
Matt Roperaa363132015-09-24 15:53:18 -070012159/*
12160 * Handle calculation of various watermark data at the end of the atomic check
12161 * phase. The code here should be run after the per-crtc and per-plane 'check'
12162 * handlers to ensure that all derived state has been updated.
12163 */
Matt Roper55994c22016-05-12 07:06:08 -070012164static int calc_watermark_data(struct drm_atomic_state *state)
Matt Roperaa363132015-09-24 15:53:18 -070012165{
12166 struct drm_device *dev = state->dev;
Matt Roper98d39492016-05-12 07:06:03 -070012167 struct drm_i915_private *dev_priv = to_i915(dev);
Matt Roper98d39492016-05-12 07:06:03 -070012168
12169 /* Is there platform-specific watermark information to calculate? */
12170 if (dev_priv->display.compute_global_watermarks)
Matt Roper55994c22016-05-12 07:06:08 -070012171 return dev_priv->display.compute_global_watermarks(state);
12172
12173 return 0;
Matt Roperaa363132015-09-24 15:53:18 -070012174}
12175
Maarten Lankhorst74c090b2015-07-13 16:30:30 +020012176/**
12177 * intel_atomic_check - validate state object
12178 * @dev: drm device
12179 * @state: state to validate
12180 */
12181static int intel_atomic_check(struct drm_device *dev,
12182 struct drm_atomic_state *state)
Daniel Vettera6778b32012-07-02 09:56:42 +020012183{
Paulo Zanonidd8b3bd2016-01-19 11:35:49 -020012184 struct drm_i915_private *dev_priv = to_i915(dev);
Matt Roperaa363132015-09-24 15:53:18 -070012185 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
Ander Conselvan de Oliveirac347a672015-06-01 12:50:02 +020012186 struct drm_crtc *crtc;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012187 struct drm_crtc_state *old_crtc_state, *crtc_state;
Ander Conselvan de Oliveirac347a672015-06-01 12:50:02 +020012188 int ret, i;
Maarten Lankhorst61333b62015-06-15 12:33:50 +020012189 bool any_ms = false;
Ander Conselvan de Oliveirac347a672015-06-01 12:50:02 +020012190
Maarten Lankhorst8c58f732018-02-21 10:28:08 +010012191 /* Catch I915_MODE_FLAG_INHERITED */
12192 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
12193 crtc_state, i) {
12194 if (crtc_state->mode.private_flags !=
12195 old_crtc_state->mode.private_flags)
12196 crtc_state->mode_changed = true;
12197 }
12198
Maarten Lankhorst74c090b2015-07-13 16:30:30 +020012199 ret = drm_atomic_helper_check_modeset(dev, state);
Daniel Vettera6778b32012-07-02 09:56:42 +020012200 if (ret)
12201 return ret;
12202
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012203 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020012204 struct intel_crtc_state *pipe_config =
12205 to_intel_crtc_state(crtc_state);
Daniel Vetter1ed51de2015-07-15 14:15:51 +020012206
Daniel Vetter26495482015-07-15 14:15:52 +020012207 if (!needs_modeset(crtc_state))
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020012208 continue;
12209
Daniel Vetteraf4a8792016-05-09 09:31:25 +020012210 if (!crtc_state->enable) {
12211 any_ms = true;
12212 continue;
12213 }
12214
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020012215 ret = intel_modeset_pipe_config(crtc, pipe_config);
Maarten Lankhorst25aa1c32016-05-03 10:30:38 +020012216 if (ret) {
12217 intel_dump_pipe_config(to_intel_crtc(crtc),
12218 pipe_config, "[failed]");
Ander Conselvan de Oliveirac347a672015-06-01 12:50:02 +020012219 return ret;
Maarten Lankhorst25aa1c32016-05-03 10:30:38 +020012220 }
Ander Conselvan de Oliveirac347a672015-06-01 12:50:02 +020012221
Michal Wajdeczko4f044a82017-09-19 19:38:44 +000012222 if (i915_modparams.fastboot &&
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000012223 intel_pipe_config_compare(dev_priv,
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012224 to_intel_crtc_state(old_crtc_state),
Daniel Vetter1ed51de2015-07-15 14:15:51 +020012225 pipe_config, true)) {
Daniel Vetter26495482015-07-15 14:15:52 +020012226 crtc_state->mode_changed = false;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012227 pipe_config->update_pipe = true;
Daniel Vetter26495482015-07-15 14:15:52 +020012228 }
12229
Daniel Vetteraf4a8792016-05-09 09:31:25 +020012230 if (needs_modeset(crtc_state))
Daniel Vetter26495482015-07-15 14:15:52 +020012231 any_ms = true;
Maarten Lankhorst61333b62015-06-15 12:33:50 +020012232
Daniel Vetter26495482015-07-15 14:15:52 +020012233 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
12234 needs_modeset(crtc_state) ?
12235 "[modeset]" : "[fastset]");
Ander Conselvan de Oliveirac347a672015-06-01 12:50:02 +020012236 }
12237
Maarten Lankhorst61333b62015-06-15 12:33:50 +020012238 if (any_ms) {
12239 ret = intel_modeset_checks(state);
12240
12241 if (ret)
12242 return ret;
Ville Syrjäläe0ca7a62016-11-14 18:35:09 +020012243 } else {
Ville Syrjäläbb0f4aa2017-01-20 20:21:59 +020012244 intel_state->cdclk.logical = dev_priv->cdclk.logical;
Ville Syrjäläe0ca7a62016-11-14 18:35:09 +020012245 }
Ander Conselvan de Oliveirac347a672015-06-01 12:50:02 +020012246
Paulo Zanonidd8b3bd2016-01-19 11:35:49 -020012247 ret = drm_atomic_helper_check_planes(dev, state);
Matt Roperaa363132015-09-24 15:53:18 -070012248 if (ret)
12249 return ret;
12250
Ville Syrjälädd576022017-11-17 21:19:14 +020012251 intel_fbc_choose_crtc(dev_priv, intel_state);
Matt Roper55994c22016-05-12 07:06:08 -070012252 return calc_watermark_data(state);
Daniel Vettera6778b32012-07-02 09:56:42 +020012253}
12254
Maarten Lankhorst5008e872015-08-18 13:40:05 +020012255static int intel_atomic_prepare_commit(struct drm_device *dev,
Chris Wilsond07f0e52016-10-28 13:58:44 +010012256 struct drm_atomic_state *state)
Maarten Lankhorst5008e872015-08-18 13:40:05 +020012257{
Chris Wilsonfd700752017-07-26 17:00:36 +010012258 return drm_atomic_helper_prepare_planes(dev, state);
Maarten Lankhorst5008e872015-08-18 13:40:05 +020012259}
12260
Maarten Lankhorsta2991412016-05-17 15:07:48 +020012261u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12262{
12263 struct drm_device *dev = crtc->base.dev;
12264
12265 if (!dev->max_vblank_count)
Dhinakaran Pandiyan734cbbf2018-02-02 21:12:54 -080012266 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
Maarten Lankhorsta2991412016-05-17 15:07:48 +020012267
12268 return dev->driver->get_vblank_counter(dev, crtc->pipe);
12269}
12270
Lyude896e5bb2016-08-24 07:48:09 +020012271static void intel_update_crtc(struct drm_crtc *crtc,
12272 struct drm_atomic_state *state,
12273 struct drm_crtc_state *old_crtc_state,
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012274 struct drm_crtc_state *new_crtc_state)
Lyude896e5bb2016-08-24 07:48:09 +020012275{
12276 struct drm_device *dev = crtc->dev;
12277 struct drm_i915_private *dev_priv = to_i915(dev);
12278 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012279 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
12280 bool modeset = needs_modeset(new_crtc_state);
Maarten Lankhorst8b694492018-04-09 14:46:55 +020012281 struct intel_plane_state *new_plane_state =
12282 intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
12283 to_intel_plane(crtc->primary));
Lyude896e5bb2016-08-24 07:48:09 +020012284
12285 if (modeset) {
12286 update_scanline_offset(intel_crtc);
12287 dev_priv->display.crtc_enable(pipe_config, state);
Maarten Lankhorst033b7a22018-03-08 13:02:02 +010012288
12289 /* vblanks work again, re-enable pipe CRC. */
12290 intel_crtc_enable_pipe_crc(intel_crtc);
Lyude896e5bb2016-08-24 07:48:09 +020012291 } else {
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012292 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
12293 pipe_config);
Lyude896e5bb2016-08-24 07:48:09 +020012294 }
12295
Maarten Lankhorst8b694492018-04-09 14:46:55 +020012296 if (new_plane_state)
12297 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
Lyude896e5bb2016-08-24 07:48:09 +020012298
12299 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
Lyude896e5bb2016-08-24 07:48:09 +020012300}
12301
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012302static void intel_update_crtcs(struct drm_atomic_state *state)
Lyude896e5bb2016-08-24 07:48:09 +020012303{
12304 struct drm_crtc *crtc;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012305 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Lyude896e5bb2016-08-24 07:48:09 +020012306 int i;
12307
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012308 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12309 if (!new_crtc_state->active)
Lyude896e5bb2016-08-24 07:48:09 +020012310 continue;
12311
12312 intel_update_crtc(crtc, state, old_crtc_state,
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012313 new_crtc_state);
Lyude896e5bb2016-08-24 07:48:09 +020012314 }
12315}
12316
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012317static void skl_update_crtcs(struct drm_atomic_state *state)
Lyude27082492016-08-24 07:48:10 +020012318{
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +020012319 struct drm_i915_private *dev_priv = to_i915(state->dev);
Lyude27082492016-08-24 07:48:10 +020012320 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12321 struct drm_crtc *crtc;
Lyudece0ba282016-09-15 10:46:35 -040012322 struct intel_crtc *intel_crtc;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012323 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Lyudece0ba282016-09-15 10:46:35 -040012324 struct intel_crtc_state *cstate;
Lyude27082492016-08-24 07:48:10 +020012325 unsigned int updated = 0;
12326 bool progress;
12327 enum pipe pipe;
Maarten Lankhorst5eff5032016-11-08 13:55:35 +010012328 int i;
Mahesh Kumaraa9664f2018-04-26 19:55:16 +053012329 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
12330 u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
Maarten Lankhorst5eff5032016-11-08 13:55:35 +010012331
12332 const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
12333
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012334 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
Maarten Lankhorst5eff5032016-11-08 13:55:35 +010012335 /* ignore allocations for crtc's that have been turned off. */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012336 if (new_crtc_state->active)
Maarten Lankhorst5eff5032016-11-08 13:55:35 +010012337 entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
Lyude27082492016-08-24 07:48:10 +020012338
Mahesh Kumaraa9664f2018-04-26 19:55:16 +053012339 /* If 2nd DBuf slice required, enable it here */
12340 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
12341 icl_dbuf_slices_update(dev_priv, required_slices);
12342
Lyude27082492016-08-24 07:48:10 +020012343 /*
12344 * Whenever the number of active pipes changes, we need to make sure we
12345 * update the pipes in the right order so that their ddb allocations
12346 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
12347 * cause pipe underruns and other bad stuff.
12348 */
12349 do {
Lyude27082492016-08-24 07:48:10 +020012350 progress = false;
12351
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012352 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
Lyude27082492016-08-24 07:48:10 +020012353 bool vbl_wait = false;
12354 unsigned int cmask = drm_crtc_mask(crtc);
Lyudece0ba282016-09-15 10:46:35 -040012355
12356 intel_crtc = to_intel_crtc(crtc);
Ville Syrjälä21794812017-08-23 18:22:26 +030012357 cstate = to_intel_crtc_state(new_crtc_state);
Lyudece0ba282016-09-15 10:46:35 -040012358 pipe = intel_crtc->pipe;
Lyude27082492016-08-24 07:48:10 +020012359
Maarten Lankhorst5eff5032016-11-08 13:55:35 +010012360 if (updated & cmask || !cstate->base.active)
Lyude27082492016-08-24 07:48:10 +020012361 continue;
Maarten Lankhorst5eff5032016-11-08 13:55:35 +010012362
Mika Kahola2b685042017-10-10 13:17:03 +030012363 if (skl_ddb_allocation_overlaps(dev_priv,
12364 entries,
12365 &cstate->wm.skl.ddb,
12366 i))
Lyude27082492016-08-24 07:48:10 +020012367 continue;
12368
12369 updated |= cmask;
Maarten Lankhorst5eff5032016-11-08 13:55:35 +010012370 entries[i] = &cstate->wm.skl.ddb;
Lyude27082492016-08-24 07:48:10 +020012371
12372 /*
12373 * If this is an already active pipe, it's DDB changed,
12374 * and this isn't the last pipe that needs updating
12375 * then we need to wait for a vblank to pass for the
12376 * new ddb allocation to take effect.
12377 */
Lyudece0ba282016-09-15 10:46:35 -040012378 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
Maarten Lankhorst512b5522016-11-08 13:55:34 +010012379 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012380 !new_crtc_state->active_changed &&
Lyude27082492016-08-24 07:48:10 +020012381 intel_state->wm_results.dirty_pipes != updated)
12382 vbl_wait = true;
12383
12384 intel_update_crtc(crtc, state, old_crtc_state,
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012385 new_crtc_state);
Lyude27082492016-08-24 07:48:10 +020012386
12387 if (vbl_wait)
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +020012388 intel_wait_for_vblank(dev_priv, pipe);
Lyude27082492016-08-24 07:48:10 +020012389
12390 progress = true;
12391 }
12392 } while (progress);
Mahesh Kumaraa9664f2018-04-26 19:55:16 +053012393
12394 /* If 2nd DBuf slice is no more required disable it */
12395 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
12396 icl_dbuf_slices_update(dev_priv, required_slices);
Lyude27082492016-08-24 07:48:10 +020012397}
12398
Chris Wilsonba318c62017-02-02 20:47:41 +000012399static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12400{
12401 struct intel_atomic_state *state, *next;
12402 struct llist_node *freed;
12403
12404 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12405 llist_for_each_entry_safe(state, next, freed, freed)
12406 drm_atomic_state_put(&state->base);
12407}
12408
12409static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12410{
12411 struct drm_i915_private *dev_priv =
12412 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12413
12414 intel_atomic_helper_free_state(dev_priv);
12415}
12416
Daniel Vetter9db529a2017-08-08 10:08:28 +020012417static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
12418{
12419 struct wait_queue_entry wait_fence, wait_reset;
12420 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
12421
12422 init_wait_entry(&wait_fence, 0);
12423 init_wait_entry(&wait_reset, 0);
12424 for (;;) {
12425 prepare_to_wait(&intel_state->commit_ready.wait,
12426 &wait_fence, TASK_UNINTERRUPTIBLE);
12427 prepare_to_wait(&dev_priv->gpu_error.wait_queue,
12428 &wait_reset, TASK_UNINTERRUPTIBLE);
12429
12430
12431 if (i915_sw_fence_done(&intel_state->commit_ready)
12432 || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
12433 break;
12434
12435 schedule();
12436 }
12437 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
12438 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
12439}
12440
Daniel Vetter94f05022016-06-14 18:01:00 +020012441static void intel_atomic_commit_tail(struct drm_atomic_state *state)
Daniel Vettera6778b32012-07-02 09:56:42 +020012442{
Daniel Vetter94f05022016-06-14 18:01:00 +020012443 struct drm_device *dev = state->dev;
Maarten Lankhorst565602d2015-12-10 12:33:57 +010012444 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
Chris Wilsonfac5e232016-07-04 11:34:36 +010012445 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012446 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Maarten Lankhorst7580d772015-08-18 13:40:06 +020012447 struct drm_crtc *crtc;
Daniel Vetter5a21b662016-05-24 17:13:53 +020012448 struct intel_crtc_state *intel_cstate;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +020012449 u64 put_domains[I915_MAX_PIPES] = {};
Chris Wilsone95433c2016-10-28 13:58:27 +010012450 int i;
Daniel Vettera6778b32012-07-02 09:56:42 +020012451
Daniel Vetter9db529a2017-08-08 10:08:28 +020012452 intel_atomic_commit_fence_wait(intel_state);
Daniel Vetter42b062b2017-08-08 10:08:27 +020012453
Daniel Vetterea0000f2016-06-13 16:13:46 +020012454 drm_atomic_helper_wait_for_dependencies(state);
12455
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010012456 if (intel_state->modeset)
Daniel Vetter5a21b662016-05-24 17:13:53 +020012457 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
Maarten Lankhorst565602d2015-12-10 12:33:57 +010012458
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012459 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
Maarten Lankhorsta5392052015-06-15 12:33:52 +020012460 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12461
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012462 if (needs_modeset(new_crtc_state) ||
12463 to_intel_crtc_state(new_crtc_state)->update_pipe) {
Daniel Vetter5a21b662016-05-24 17:13:53 +020012464
12465 put_domains[to_intel_crtc(crtc)->pipe] =
12466 modeset_get_crtc_power_domains(crtc,
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012467 to_intel_crtc_state(new_crtc_state));
Daniel Vetter5a21b662016-05-24 17:13:53 +020012468 }
12469
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012470 if (!needs_modeset(new_crtc_state))
Maarten Lankhorst61333b62015-06-15 12:33:50 +020012471 continue;
12472
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012473 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
12474 to_intel_crtc_state(new_crtc_state));
Daniel Vetter460da9162013-03-27 00:44:51 +010012475
Ville Syrjälä29ceb0e2016-03-09 19:07:27 +020012476 if (old_crtc_state->active) {
12477 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
Maarten Lankhorst033b7a22018-03-08 13:02:02 +010012478
12479 /*
12480 * We need to disable pipe CRC before disabling the pipe,
12481 * or we race against vblank off.
12482 */
12483 intel_crtc_disable_pipe_crc(intel_crtc);
12484
Maarten Lankhorst4a806552016-08-09 17:04:01 +020012485 dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
Maarten Lankhorsteddfcbc2015-06-15 12:33:53 +020012486 intel_crtc->active = false;
Paulo Zanoni58f9c0b2016-01-19 11:35:51 -020012487 intel_fbc_disable(intel_crtc);
Maarten Lankhorsteddfcbc2015-06-15 12:33:53 +020012488 intel_disable_shared_dpll(intel_crtc);
Ville Syrjälä9bbc8258a2015-11-20 22:09:20 +020012489
12490 /*
12491 * Underruns don't always raise
12492 * interrupts, so check manually.
12493 */
12494 intel_check_cpu_fifo_underruns(dev_priv);
12495 intel_check_pch_fifo_underruns(dev_priv);
Maarten Lankhorstb9001112015-11-19 16:07:16 +010012496
Ville Syrjälä21794812017-08-23 18:22:26 +030012497 if (!new_crtc_state->active) {
Maarten Lankhorste62929b2016-11-08 13:55:33 +010012498 /*
12499 * Make sure we don't call initial_watermarks
12500 * for ILK-style watermark updates.
Ville Syrjäläff32c542017-03-02 19:14:57 +020012501 *
12502 * No clue what this is supposed to achieve.
Maarten Lankhorste62929b2016-11-08 13:55:33 +010012503 */
Ville Syrjäläff32c542017-03-02 19:14:57 +020012504 if (INTEL_GEN(dev_priv) >= 9)
Maarten Lankhorste62929b2016-11-08 13:55:33 +010012505 dev_priv->display.initial_watermarks(intel_state,
Ville Syrjälä21794812017-08-23 18:22:26 +030012506 to_intel_crtc_state(new_crtc_state));
Maarten Lankhorste62929b2016-11-08 13:55:33 +010012507 }
Maarten Lankhorsta5392052015-06-15 12:33:52 +020012508 }
Daniel Vetterb8cecdf2013-03-27 00:44:50 +010012509 }
Daniel Vetter7758a112012-07-08 19:40:39 +020012510
Daniel Vetter7a1530d72017-12-07 15:32:02 +010012511 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
12512 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
12513 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
Daniel Vetterea9d7582012-07-10 10:42:52 +020012514
Maarten Lankhorst565602d2015-12-10 12:33:57 +010012515 if (intel_state->modeset) {
Maarten Lankhorst4740b0f2015-08-05 12:37:10 +020012516 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
Maarten Lankhorst33c8df892016-02-10 13:49:37 +010012517
Ville Syrjäläb0587e42017-01-26 21:52:01 +020012518 intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
Maarten Lankhorstf6d19732016-03-23 14:58:07 +010012519
Lyude656d1b82016-08-17 15:55:54 -040012520 /*
12521 * SKL workaround: bspec recommends we disable the SAGV when we
12522 * have more then one pipe enabled
12523 */
Paulo Zanoni56feca92016-09-22 18:00:28 -030012524 if (!intel_can_enable_sagv(state))
Paulo Zanoni16dcdc42016-09-22 18:00:27 -030012525 intel_disable_sagv(dev_priv);
Lyude656d1b82016-08-17 15:55:54 -040012526
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012527 intel_modeset_verify_disabled(dev, state);
Maarten Lankhorst4740b0f2015-08-05 12:37:10 +020012528 }
Daniel Vetter47fab732012-10-26 10:58:18 +020012529
Lyude896e5bb2016-08-24 07:48:09 +020012530 /* Complete the events for pipes that have now been disabled */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012531 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
12532 bool modeset = needs_modeset(new_crtc_state);
Maarten Lankhorsta5392052015-06-15 12:33:52 +020012533
Daniel Vetter1f7528c2016-06-13 16:13:45 +020012534 /* Complete events for now disable pipes here. */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012535 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
Daniel Vetter1f7528c2016-06-13 16:13:45 +020012536 spin_lock_irq(&dev->event_lock);
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012537 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
Daniel Vetter1f7528c2016-06-13 16:13:45 +020012538 spin_unlock_irq(&dev->event_lock);
12539
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012540 new_crtc_state->event = NULL;
Daniel Vetter1f7528c2016-06-13 16:13:45 +020012541 }
Matt Ropered4a6a72016-02-23 17:20:13 -080012542 }
12543
Lyude896e5bb2016-08-24 07:48:09 +020012544 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012545 dev_priv->display.update_crtcs(state);
Lyude896e5bb2016-08-24 07:48:09 +020012546
Daniel Vetter94f05022016-06-14 18:01:00 +020012547 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
12548 * already, but still need the state for the delayed optimization. To
12549 * fix this:
12550 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
12551 * - schedule that vblank worker _before_ calling hw_done
12552 * - at the start of commit_tail, cancel it _synchrously
12553 * - switch over to the vblank wait helper in the core after that since
12554 * we don't need out special handling any more.
12555 */
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012556 drm_atomic_helper_wait_for_flip_done(dev, state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012557
12558 /*
12559 * Now that the vblank has passed, we can go ahead and program the
12560 * optimal watermarks on platforms that need two-step watermark
12561 * programming.
12562 *
12563 * TODO: Move this (and other cleanup) to an async worker eventually.
12564 */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012565 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
12566 intel_cstate = to_intel_crtc_state(new_crtc_state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012567
12568 if (dev_priv->display.optimize_watermarks)
Maarten Lankhorstccf010f2016-11-08 13:55:32 +010012569 dev_priv->display.optimize_watermarks(intel_state,
12570 intel_cstate);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012571 }
12572
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012573 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
Daniel Vetter5a21b662016-05-24 17:13:53 +020012574 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
12575
12576 if (put_domains[i])
12577 modeset_put_power_domains(dev_priv, put_domains[i]);
12578
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012579 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012580 }
12581
Ville Syrjäläcff109f2017-11-17 21:19:17 +020012582 if (intel_state->modeset)
12583 intel_verify_planes(intel_state);
12584
Paulo Zanoni56feca92016-09-22 18:00:28 -030012585 if (intel_state->modeset && intel_can_enable_sagv(state))
Paulo Zanoni16dcdc42016-09-22 18:00:27 -030012586 intel_enable_sagv(dev_priv);
Lyude656d1b82016-08-17 15:55:54 -040012587
Daniel Vetter94f05022016-06-14 18:01:00 +020012588 drm_atomic_helper_commit_hw_done(state);
12589
Chris Wilsond5553c02017-05-04 12:55:08 +010012590 if (intel_state->modeset) {
12591 /* As one of the primary mmio accessors, KMS has a high
12592 * likelihood of triggering bugs in unclaimed access. After we
12593 * finish modesetting, see if an error has been flagged, and if
12594 * so enable debugging for the next modeset - and hope we catch
12595 * the culprit.
12596 */
12597 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012598 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
Chris Wilsond5553c02017-05-04 12:55:08 +010012599 }
Daniel Vetter5a21b662016-05-24 17:13:53 +020012600
Daniel Vetter5a21b662016-05-24 17:13:53 +020012601 drm_atomic_helper_cleanup_planes(dev, state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012602
Daniel Vetterea0000f2016-06-13 16:13:46 +020012603 drm_atomic_helper_commit_cleanup_done(state);
12604
Chris Wilson08536952016-10-14 13:18:18 +010012605 drm_atomic_state_put(state);
Jesse Barnes7f27126e2014-11-05 14:26:06 -080012606
Chris Wilsonba318c62017-02-02 20:47:41 +000012607 intel_atomic_helper_free_state(dev_priv);
Daniel Vetter94f05022016-06-14 18:01:00 +020012608}
12609
12610static void intel_atomic_commit_work(struct work_struct *work)
12611{
Chris Wilsonc004a902016-10-28 13:58:45 +010012612 struct drm_atomic_state *state =
12613 container_of(work, struct drm_atomic_state, commit_work);
12614
Daniel Vetter94f05022016-06-14 18:01:00 +020012615 intel_atomic_commit_tail(state);
12616}
12617
Chris Wilsonc004a902016-10-28 13:58:45 +010012618static int __i915_sw_fence_call
12619intel_atomic_commit_ready(struct i915_sw_fence *fence,
12620 enum i915_sw_fence_notify notify)
12621{
12622 struct intel_atomic_state *state =
12623 container_of(fence, struct intel_atomic_state, commit_ready);
12624
12625 switch (notify) {
12626 case FENCE_COMPLETE:
Daniel Vetter42b062b2017-08-08 10:08:27 +020012627 /* we do blocking waits in the worker, nothing to do here */
Chris Wilsonc004a902016-10-28 13:58:45 +010012628 break;
Chris Wilsonc004a902016-10-28 13:58:45 +010012629 case FENCE_FREE:
Chris Wilsoneb955ee2017-01-23 21:29:39 +000012630 {
12631 struct intel_atomic_helper *helper =
12632 &to_i915(state->base.dev)->atomic_helper;
12633
12634 if (llist_add(&state->freed, &helper->free_list))
12635 schedule_work(&helper->free_work);
12636 break;
12637 }
Chris Wilsonc004a902016-10-28 13:58:45 +010012638 }
12639
12640 return NOTIFY_DONE;
12641}
12642
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012643static void intel_atomic_track_fbs(struct drm_atomic_state *state)
12644{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012645 struct drm_plane_state *old_plane_state, *new_plane_state;
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012646 struct drm_plane *plane;
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012647 int i;
12648
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012649 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
Chris Wilsonfaf5bf02016-08-04 16:32:37 +010012650 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012651 intel_fb_obj(new_plane_state->fb),
Chris Wilsonfaf5bf02016-08-04 16:32:37 +010012652 to_intel_plane(plane)->frontbuffer_bit);
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012653}
12654
Daniel Vetter94f05022016-06-14 18:01:00 +020012655/**
12656 * intel_atomic_commit - commit validated state object
12657 * @dev: DRM device
12658 * @state: the top-level driver state object
12659 * @nonblock: nonblocking commit
12660 *
12661 * This function commits a top-level state object that has been validated
12662 * with drm_atomic_helper_check().
12663 *
Daniel Vetter94f05022016-06-14 18:01:00 +020012664 * RETURNS
12665 * Zero for success or -errno.
12666 */
12667static int intel_atomic_commit(struct drm_device *dev,
12668 struct drm_atomic_state *state,
12669 bool nonblock)
12670{
12671 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
Chris Wilsonfac5e232016-07-04 11:34:36 +010012672 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter94f05022016-06-14 18:01:00 +020012673 int ret = 0;
12674
Chris Wilsonc004a902016-10-28 13:58:45 +010012675 drm_atomic_state_get(state);
12676 i915_sw_fence_init(&intel_state->commit_ready,
12677 intel_atomic_commit_ready);
Daniel Vetter94f05022016-06-14 18:01:00 +020012678
Ville Syrjälä440df932017-03-29 17:21:23 +030012679 /*
12680 * The intel_legacy_cursor_update() fast path takes care
12681 * of avoiding the vblank waits for simple cursor
12682 * movement and flips. For cursor on/off and size changes,
12683 * we want to perform the vblank waits so that watermark
12684 * updates happen during the correct frames. Gen9+ have
12685 * double buffered watermarks and so shouldn't need this.
12686 *
Maarten Lankhorst3cf50c62017-09-19 14:14:18 +020012687 * Unset state->legacy_cursor_update before the call to
12688 * drm_atomic_helper_setup_commit() because otherwise
12689 * drm_atomic_helper_wait_for_flip_done() is a noop and
12690 * we get FIFO underruns because we didn't wait
12691 * for vblank.
Ville Syrjälä440df932017-03-29 17:21:23 +030012692 *
12693 * FIXME doing watermarks and fb cleanup from a vblank worker
12694 * (assuming we had any) would solve these problems.
12695 */
Maarten Lankhorst213f1bd2017-09-19 14:14:19 +020012696 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
12697 struct intel_crtc_state *new_crtc_state;
12698 struct intel_crtc *crtc;
12699 int i;
12700
12701 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
12702 if (new_crtc_state->wm.need_postvbl_update ||
12703 new_crtc_state->update_wm_post)
12704 state->legacy_cursor_update = false;
12705 }
Ville Syrjälä440df932017-03-29 17:21:23 +030012706
Maarten Lankhorst3cf50c62017-09-19 14:14:18 +020012707 ret = intel_atomic_prepare_commit(dev, state);
12708 if (ret) {
12709 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
12710 i915_sw_fence_commit(&intel_state->commit_ready);
12711 return ret;
12712 }
12713
12714 ret = drm_atomic_helper_setup_commit(state, nonblock);
12715 if (!ret)
12716 ret = drm_atomic_helper_swap_state(state, true);
12717
Maarten Lankhorst0806f4e2017-07-11 16:33:07 +020012718 if (ret) {
12719 i915_sw_fence_commit(&intel_state->commit_ready);
12720
Maarten Lankhorst0806f4e2017-07-11 16:33:07 +020012721 drm_atomic_helper_cleanup_planes(dev, state);
Maarten Lankhorst0806f4e2017-07-11 16:33:07 +020012722 return ret;
12723 }
Daniel Vetter94f05022016-06-14 18:01:00 +020012724 dev_priv->wm.distrust_bios_wm = false;
Ander Conselvan de Oliveira3c0fb582016-12-29 17:22:08 +020012725 intel_shared_dpll_swap_state(state);
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012726 intel_atomic_track_fbs(state);
Daniel Vetter94f05022016-06-14 18:01:00 +020012727
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010012728 if (intel_state->modeset) {
Ville Syrjäläd305e062017-08-30 21:57:03 +030012729 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
12730 sizeof(intel_state->min_cdclk));
Ville Syrjälä53e9bf52017-10-24 12:52:14 +030012731 memcpy(dev_priv->min_voltage_level,
12732 intel_state->min_voltage_level,
12733 sizeof(intel_state->min_voltage_level));
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010012734 dev_priv->active_crtcs = intel_state->active_crtcs;
Ville Syrjäläbb0f4aa2017-01-20 20:21:59 +020012735 dev_priv->cdclk.logical = intel_state->cdclk.logical;
12736 dev_priv->cdclk.actual = intel_state->cdclk.actual;
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010012737 }
12738
Chris Wilson08536952016-10-14 13:18:18 +010012739 drm_atomic_state_get(state);
Daniel Vetter42b062b2017-08-08 10:08:27 +020012740 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
Chris Wilsonc004a902016-10-28 13:58:45 +010012741
12742 i915_sw_fence_commit(&intel_state->commit_ready);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020012743 if (nonblock && intel_state->modeset) {
12744 queue_work(dev_priv->modeset_wq, &state->commit_work);
12745 } else if (nonblock) {
Daniel Vetter42b062b2017-08-08 10:08:27 +020012746 queue_work(system_unbound_wq, &state->commit_work);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020012747 } else {
12748 if (intel_state->modeset)
12749 flush_workqueue(dev_priv->modeset_wq);
Daniel Vetter94f05022016-06-14 18:01:00 +020012750 intel_atomic_commit_tail(state);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020012751 }
Mika Kuoppala75714942015-12-16 09:26:48 +020012752
Maarten Lankhorst74c090b2015-07-13 16:30:30 +020012753 return 0;
Daniel Vetterf30da182013-04-11 20:22:50 +020012754}
12755
/* CRTC vfuncs: legacy entry points are routed through the atomic helpers. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
};
12765
/*
 * Bookkeeping for boosting the GPU frequency (RPS) when a flip misses its
 * vblank: queued on the crtc's vblank waitqueue by
 * add_rps_boost_after_vblank() and consumed/freed by do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the crtc vblank waitqueue */

	struct drm_crtc *crtc;		/* crtc whose vblank we wait for (holds a vblank ref) */
	struct i915_request *request;	/* request to boost (holds a reference) */
};
12772
12773static int do_rps_boost(struct wait_queue_entry *_wait,
12774 unsigned mode, int sync, void *key)
12775{
12776 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
Chris Wilsone61e0f52018-02-21 09:56:36 +000012777 struct i915_request *rq = wait->request;
Chris Wilson74d290f2017-08-17 13:37:06 +010012778
Chris Wilsone9af4ea2018-01-18 13:16:09 +000012779 /*
12780 * If we missed the vblank, but the request is already running it
12781 * is reasonable to assume that it will complete before the next
12782 * vblank without our intervention, so leave RPS alone.
12783 */
Chris Wilsone61e0f52018-02-21 09:56:36 +000012784 if (!i915_request_started(rq))
Chris Wilsone9af4ea2018-01-18 13:16:09 +000012785 gen6_rps_boost(rq, NULL);
Chris Wilsone61e0f52018-02-21 09:56:36 +000012786 i915_request_put(rq);
Chris Wilson74d290f2017-08-17 13:37:06 +010012787
12788 drm_crtc_vblank_put(wait->crtc);
12789
12790 list_del(&wait->wait.entry);
12791 kfree(wait);
12792 return 1;
12793}
12794
12795static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
12796 struct dma_fence *fence)
12797{
12798 struct wait_rps_boost *wait;
12799
12800 if (!dma_fence_is_i915(fence))
12801 return;
12802
12803 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
12804 return;
12805
12806 if (drm_crtc_vblank_get(crtc))
12807 return;
12808
12809 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
12810 if (!wait) {
12811 drm_crtc_vblank_put(crtc);
12812 return;
12813 }
12814
12815 wait->request = to_request(dma_fence_get(fence));
12816 wait->crtc = crtc;
12817
12818 wait->wait.func = do_rps_boost;
12819 wait->wait.flags = 0;
12820
12821 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
12822}
12823
Ville Syrjäläef1a1912018-02-21 18:02:34 +020012824static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
12825{
12826 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
12827 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
12828 struct drm_framebuffer *fb = plane_state->base.fb;
12829 struct i915_vma *vma;
12830
12831 if (plane->id == PLANE_CURSOR &&
12832 INTEL_INFO(dev_priv)->cursor_needs_physical) {
12833 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12834 const int align = intel_cursor_alignment(dev_priv);
12835
12836 return i915_gem_object_attach_phys(obj, align);
12837 }
12838
12839 vma = intel_pin_and_fence_fb_obj(fb,
12840 plane_state->base.rotation,
12841 intel_plane_uses_fence(plane_state),
12842 &plane_state->flags);
12843 if (IS_ERR(vma))
12844 return PTR_ERR(vma);
12845
12846 plane_state->vma = vma;
12847
12848 return 0;
12849}
12850
12851static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
12852{
12853 struct i915_vma *vma;
12854
12855 vma = fetch_and_zero(&old_plane_state->vma);
12856 if (vma)
12857 intel_unpin_fb_vma(vma, old_plane_state->flags);
12858}
12859
Chris Wilsonb7268c52018-04-18 19:40:52 +010012860static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
12861{
12862 struct i915_sched_attr attr = {
12863 .priority = I915_PRIORITY_DISPLAY,
12864 };
12865
12866 i915_gem_object_wait_priority(obj, 0, &attr);
12867}
12868
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Acquires dev_priv->drm.struct_mutex internally around the pin, so the
 * caller must NOT already hold struct_mutex.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	/* Object currently scanned out by this plane (may be NULL). */
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(new_state->state,
						      plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	/* Defer the commit until the user-supplied fence has signaled. */
	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	/* No fb means the plane is being disabled; nothing to pin. */
	if (!obj)
		return 0;

	/* Pin the backing pages before taking struct_mutex to shorten the
	 * time the lock is held.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret) {
		i915_gem_object_unpin_pages(obj);
		return ret;
	}

	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));

	fb_obj_bump_render_priority(obj);

	mutex_unlock(&dev_priv->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	if (!new_state->fence) { /* implicit fencing */
		struct dma_fence *fence;

		/* Wait for outstanding rendering tracked in the object's
		 * reservation before the commit may proceed.
		 */
		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		fence = reservation_object_get_excl_rcu(obj->resv);
		if (fence) {
			add_rps_boost_after_vblank(new_state->crtc, fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
	}

	return 0;
}
12975
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Acquires dev_priv->drm.struct_mutex internally around the unpin, so the
 * caller must NOT already hold struct_mutex.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_plane_unpin_fb(to_intel_plane_state(old_state));
	mutex_unlock(&dev_priv->drm.struct_mutex);
}
12996
Chandra Konduru6156a452015-04-27 13:48:39 -070012997int
Chandra Konduru77224cd2018-04-09 09:11:13 +053012998skl_max_scale(struct intel_crtc *intel_crtc,
12999 struct intel_crtc_state *crtc_state,
13000 uint32_t pixel_format)
Chandra Konduru6156a452015-04-27 13:48:39 -070013001{
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013002 struct drm_i915_private *dev_priv;
Chandra Konduru77224cd2018-04-09 09:11:13 +053013003 int max_scale, mult;
13004 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
Chandra Konduru6156a452015-04-27 13:48:39 -070013005
Maarten Lankhorstbf8a0af2015-11-24 11:29:02 +010013006 if (!intel_crtc || !crtc_state->base.enable)
Chandra Konduru6156a452015-04-27 13:48:39 -070013007 return DRM_PLANE_HELPER_NO_SCALING;
13008
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013009 dev_priv = to_i915(intel_crtc->base.dev);
Chandra Konduru6156a452015-04-27 13:48:39 -070013010
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013011 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13012 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13013
Rodrigo Vivi43037c82017-10-03 15:31:42 -070013014 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013015 max_dotclk *= 2;
13016
13017 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
Chandra Konduru6156a452015-04-27 13:48:39 -070013018 return DRM_PLANE_HELPER_NO_SCALING;
13019
13020 /*
13021 * skl max scale is lower of:
13022 * close to 3 but not 3, -1 is for that purpose
13023 * or
13024 * cdclk/crtc_clock
13025 */
Chandra Konduru77224cd2018-04-09 09:11:13 +053013026 mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13027 tmpclk1 = (1 << 16) * mult - 1;
13028 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13029 max_scale = min(tmpclk1, tmpclk2);
Chandra Konduru6156a452015-04-27 13:48:39 -070013030
13031 return max_scale;
13032}
13033
Matt Roper465c1202014-05-29 08:06:54 -070013034static int
Ville Syrjälä282dbf92017-03-27 21:55:33 +030013035intel_check_primary_plane(struct intel_plane *plane,
Maarten Lankhorst061e4b82015-06-15 12:33:46 +020013036 struct intel_crtc_state *crtc_state,
Gustavo Padovan3c692a42014-09-05 17:04:49 -030013037 struct intel_plane_state *state)
Matt Roper465c1202014-05-29 08:06:54 -070013038{
Ville Syrjälä282dbf92017-03-27 21:55:33 +030013039 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
Matt Roper2b875c22014-12-01 15:40:13 -080013040 struct drm_crtc *crtc = state->base.crtc;
Chandra Konduru6156a452015-04-27 13:48:39 -070013041 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
Maarten Lankhorst061e4b82015-06-15 12:33:46 +020013042 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13043 bool can_position = false;
Ville Syrjäläb63a16f2016-01-28 16:53:54 +020013044 int ret;
Chandra Konduru77224cd2018-04-09 09:11:13 +053013045 uint32_t pixel_format = 0;
Gustavo Padovan3c692a42014-09-05 17:04:49 -030013046
Ville Syrjäläb63a16f2016-01-28 16:53:54 +020013047 if (INTEL_GEN(dev_priv) >= 9) {
Ville Syrjälä693bdc22016-01-15 20:46:53 +020013048 /* use scaler when colorkey is not required */
Ville Syrjälä6ec5bd32018-02-02 22:42:31 +020013049 if (!state->ckey.flags) {
Ville Syrjälä693bdc22016-01-15 20:46:53 +020013050 min_scale = 1;
Chandra Konduru77224cd2018-04-09 09:11:13 +053013051 if (state->base.fb)
13052 pixel_format = state->base.fb->format->format;
13053 max_scale = skl_max_scale(to_intel_crtc(crtc),
13054 crtc_state, pixel_format);
Ville Syrjälä693bdc22016-01-15 20:46:53 +020013055 }
Sonika Jindald8106362015-04-10 14:37:28 +053013056 can_position = true;
Chandra Konduru6156a452015-04-27 13:48:39 -070013057 }
Sonika Jindald8106362015-04-10 14:37:28 +053013058
Ville Syrjäläa01cb8b2017-11-01 22:16:19 +020013059 ret = drm_atomic_helper_check_plane_state(&state->base,
13060 &crtc_state->base,
Ville Syrjäläa01cb8b2017-11-01 22:16:19 +020013061 min_scale, max_scale,
13062 can_position, true);
Ville Syrjäläb63a16f2016-01-28 16:53:54 +020013063 if (ret)
13064 return ret;
13065
Daniel Vettercc926382016-08-15 10:41:47 +020013066 if (!state->base.fb)
Ville Syrjäläb63a16f2016-01-28 16:53:54 +020013067 return 0;
13068
13069 if (INTEL_GEN(dev_priv) >= 9) {
Imre Deakc322c642018-01-16 13:24:14 +020013070 ret = skl_check_plane_surface(crtc_state, state);
Ville Syrjäläb63a16f2016-01-28 16:53:54 +020013071 if (ret)
13072 return ret;
Ville Syrjäläa0864d52017-03-23 21:27:09 +020013073
13074 state->ctl = skl_plane_ctl(crtc_state, state);
13075 } else {
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +020013076 ret = i9xx_check_plane_surface(state);
13077 if (ret)
13078 return ret;
13079
Ville Syrjäläa0864d52017-03-23 21:27:09 +020013080 state->ctl = i9xx_plane_ctl(crtc_state, state);
Ville Syrjäläb63a16f2016-01-28 16:53:54 +020013081 }
13082
James Ausmus4036c782017-11-13 10:11:28 -080013083 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
13084 state->color_ctl = glk_plane_color_ctl(crtc_state, state);
13085
Ville Syrjäläb63a16f2016-01-28 16:53:54 +020013086 return 0;
Matt Roper465c1202014-05-29 08:06:54 -070013087}
13088
/*
 * Per-crtc "begin" hook of an atomic commit: reload color management for
 * fastsets, then open the vblank-evasion critical section
 * (intel_pipe_update_start(); intel_finish_crtc_commit() closes it) and do
 * the fastset pipe/scaler updates.  Watermarks are updated through the
 * platform hook in both the modeset and fastset cases.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/* On a fastset, reprogram CSC/LUTs if color management changed. */
	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(&intel_cstate->base);
		intel_color_load_luts(&intel_cstate->base);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	/* For a full modeset only the watermark update below applies here. */
	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13126
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +020013127void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13128 struct intel_crtc_state *crtc_state)
13129{
13130 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13131
13132 if (!IS_GEN2(dev_priv))
13133 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13134
13135 if (crtc_state->has_pch_encoder) {
13136 enum pipe pch_transcoder =
13137 intel_crtc_pch_transcoder(crtc);
13138
13139 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13140 }
13141}
13142
Daniel Vetter5a21b662016-05-24 17:13:53 +020013143static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13144 struct drm_crtc_state *old_crtc_state)
13145{
13146 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ville Syrjäläd3a8fb32017-08-23 18:22:21 +030013147 struct intel_atomic_state *old_intel_state =
13148 to_intel_atomic_state(old_crtc_state->state);
13149 struct intel_crtc_state *new_crtc_state =
13150 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013151
Ville Syrjäläd3a8fb32017-08-23 18:22:21 +030013152 intel_pipe_update_end(new_crtc_state);
Maarten Lankhorst33a49862017-11-13 15:40:43 +010013153
13154 if (new_crtc_state->update_pipe &&
13155 !needs_modeset(&new_crtc_state->base) &&
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +020013156 old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
13157 intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013158}
13159
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite): unregisters the plane from DRM and frees the embedding
 * intel_plane allocation.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
13172
Ben Widawsky714244e2017-08-01 09:58:16 -070013173static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
13174{
13175 switch (format) {
13176 case DRM_FORMAT_C8:
13177 case DRM_FORMAT_RGB565:
13178 case DRM_FORMAT_XRGB1555:
13179 case DRM_FORMAT_XRGB8888:
13180 return modifier == DRM_FORMAT_MOD_LINEAR ||
13181 modifier == I915_FORMAT_MOD_X_TILED;
13182 default:
13183 return false;
13184 }
13185}
13186
13187static bool i965_mod_supported(uint32_t format, uint64_t modifier)
13188{
13189 switch (format) {
13190 case DRM_FORMAT_C8:
13191 case DRM_FORMAT_RGB565:
13192 case DRM_FORMAT_XRGB8888:
13193 case DRM_FORMAT_XBGR8888:
13194 case DRM_FORMAT_XRGB2101010:
13195 case DRM_FORMAT_XBGR2101010:
13196 return modifier == DRM_FORMAT_MOD_LINEAR ||
13197 modifier == I915_FORMAT_MOD_X_TILED;
13198 default:
13199 return false;
13200 }
13201}
13202
13203static bool skl_mod_supported(uint32_t format, uint64_t modifier)
13204{
13205 switch (format) {
13206 case DRM_FORMAT_XRGB8888:
13207 case DRM_FORMAT_XBGR8888:
13208 case DRM_FORMAT_ARGB8888:
13209 case DRM_FORMAT_ABGR8888:
13210 if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
13211 modifier == I915_FORMAT_MOD_Y_TILED_CCS)
13212 return true;
13213 /* fall through */
13214 case DRM_FORMAT_RGB565:
13215 case DRM_FORMAT_XRGB2101010:
13216 case DRM_FORMAT_XBGR2101010:
13217 case DRM_FORMAT_YUYV:
13218 case DRM_FORMAT_YVYU:
13219 case DRM_FORMAT_UYVY:
13220 case DRM_FORMAT_VYUY:
13221 if (modifier == I915_FORMAT_MOD_Yf_TILED)
13222 return true;
13223 /* fall through */
13224 case DRM_FORMAT_C8:
13225 if (modifier == DRM_FORMAT_MOD_LINEAR ||
13226 modifier == I915_FORMAT_MOD_X_TILED ||
13227 modifier == I915_FORMAT_MOD_Y_TILED)
13228 return true;
13229 /* fall through */
13230 default:
13231 return false;
13232 }
13233}
13234
13235static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
13236 uint32_t format,
13237 uint64_t modifier)
13238{
13239 struct drm_i915_private *dev_priv = to_i915(plane->dev);
13240
13241 if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
13242 return false;
13243
13244 if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
13245 modifier != DRM_FORMAT_MOD_LINEAR)
13246 return false;
13247
13248 if (INTEL_GEN(dev_priv) >= 9)
13249 return skl_mod_supported(format, modifier);
13250 else if (INTEL_GEN(dev_priv) >= 4)
13251 return i965_mod_supported(format, modifier);
13252 else
13253 return i8xx_mod_supported(format, modifier);
Ben Widawsky714244e2017-08-01 09:58:16 -070013254}
13255
13256static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
13257 uint32_t format,
13258 uint64_t modifier)
13259{
13260 if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
13261 return false;
13262
13263 return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
13264}
13265
13266static struct drm_plane_funcs intel_plane_funcs = {
Matt Roper70a101f2015-04-08 18:56:53 -070013267 .update_plane = drm_atomic_helper_update_plane,
13268 .disable_plane = drm_atomic_helper_disable_plane,
Matt Roper3d7d6512014-06-10 08:28:13 -070013269 .destroy = intel_plane_destroy,
Matt Ropera98b3432015-01-21 16:35:43 -080013270 .atomic_get_property = intel_plane_atomic_get_property,
13271 .atomic_set_property = intel_plane_atomic_set_property,
Matt Roperea2c67b2014-12-23 10:41:52 -080013272 .atomic_duplicate_state = intel_plane_duplicate_state,
13273 .atomic_destroy_state = intel_plane_destroy_state,
Ben Widawsky714244e2017-08-01 09:58:16 -070013274 .format_mod_supported = intel_primary_plane_format_mod_supported,
Matt Roper465c1202014-05-29 08:06:54 -070013275};
13276
/*
 * Fastpath for the legacy cursor ioctls: when only the fb or position
 * changes on an active, quiescent crtc, update the plane directly under
 * struct_mutex instead of running a full atomic commit.  Anything that may
 * affect watermarks or requires a modeset falls back to
 * drm_atomic_helper_update_plane() via the "slow" label.
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   uint32_t src_x, uint32_t src_y,
			   uint32_t src_w, uint32_t src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct drm_crtc_state *crtc_state = crtc->state;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->active || needs_modeset(crtc_state) ||
	    to_intel_crtc_state(crtc_state)->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
						  to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
						  to_intel_plane_state(plane->state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

	/* Move the frontbuffer tracking bit from the old fb to the new. */
	old_fb = old_plane_state->fb;
	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(intel_plane,
					  to_intel_crtc_state(crtc->state),
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
	}

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/* On failure the duplicated state is discarded; on success the
	 * replaced (old) state is freed instead.
	 */
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
13390
/*
 * Cursor plane vfuncs: .update_plane points at the custom fastpath above,
 * which falls back to the atomic helper when a full commit is needed.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_plane_format_mod_supported,
};
13401
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013402static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13403 enum i9xx_plane_id i9xx_plane)
13404{
13405 if (!HAS_FBC(dev_priv))
13406 return false;
13407
13408 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13409 return i9xx_plane == PLANE_A; /* tied to pipe A */
13410 else if (IS_IVYBRIDGE(dev_priv))
13411 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13412 i9xx_plane == PLANE_C;
13413 else if (INTEL_GEN(dev_priv) >= 4)
13414 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13415 else
13416 return i9xx_plane == PLANE_A;
13417}
13418
13419static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
13420 enum pipe pipe, enum plane_id plane_id)
13421{
13422 if (!HAS_FBC(dev_priv))
13423 return false;
13424
13425 return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
13426}
13427
/*
 * Create and register the primary (framebuffer) plane for @pipe.
 *
 * Allocates the intel_plane and its initial atomic state, picks the
 * generation-specific format/modifier lists and plane vfuncs, registers
 * the plane with the DRM core, and attaches the rotation (gen4+) and
 * color encoding/range (gen9+) properties.
 *
 * Returns the new plane, or an ERR_PTR() on failure with any partial
 * allocations freed.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	const uint64_t *modifiers;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	primary->base.state = &state->base;

	/* Only gen9+ primary planes can scale; scaler is unassigned so far. */
	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		primary->i9xx_plane = (enum i9xx_plane_id) pipe;
	primary->id = PLANE_PRIMARY;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);

	if (INTEL_GEN(dev_priv) >= 9)
		primary->has_fbc = skl_plane_has_fbc(dev_priv,
						     primary->pipe,
						     primary->id);
	else
		primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
						      primary->i9xx_plane);

	/* Advertise this plane's frontbuffer bit to the FBC code. */
	if (primary->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
	}

	primary->check_plane = intel_check_primary_plane;

	/* Generation-specific formats, modifiers and plane vfuncs. */
	if (INTEL_GEN(dev_priv) >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
			modifiers = skl_format_modifiers_ccs;
		else
			modifiers = skl_format_modifiers_noccs;

		primary->update_plane = skl_update_plane;
		primary->disable_plane = skl_disable_plane;
		primary->get_hw_state = skl_plane_get_hw_state;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;
	}

	/* The same init call, but a generation-appropriate plane name. */
	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(primary->i9xx_plane));
	if (ret)
		goto fail;

	/* Supported rotations/reflections depend on the generation. */
	if (INTEL_GEN(dev_priv) >= 10) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* gen9+: expose YCbCr encoding/range properties. */
	if (INTEL_GEN(dev_priv) >= 9)
		drm_plane_create_color_properties(&primary->base,
						  BIT(DRM_COLOR_YCBCR_BT601) |
						  BIT(DRM_COLOR_YCBCR_BT709),
						  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
						  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
						  DRM_COLOR_YCBCR_BT709,
						  DRM_COLOR_YCBCR_LIMITED_RANGE);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}
13586
/*
 * Create and register the cursor plane for @pipe.
 *
 * Allocates the intel_plane and its initial atomic state, selects the
 * i845/i865 or i9xx+ cursor vfuncs, registers the plane with the DRM
 * core, and attaches the rotation property on gen4+.
 *
 * Returns the new plane, or an ERR_PTR() on failure with any partial
 * allocations freed.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	cursor->base.state = &state->base;

	/* Cursors never scale. */
	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	/* i845/i865 have their own cursor implementation. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* ~0 marks the cached register values as unknown. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* gen4+: cursors support 0/180 degree rotation. */
	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}
13663
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053013664static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13665 struct intel_crtc_state *crtc_state)
Chandra Konduru549e2bf2015-04-07 15:28:38 -070013666{
Ville Syrjälä65edccc2016-10-31 22:37:01 +020013667 struct intel_crtc_scaler_state *scaler_state =
13668 &crtc_state->scaler_state;
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053013669 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Chandra Konduru549e2bf2015-04-07 15:28:38 -070013670 int i;
Chandra Konduru549e2bf2015-04-07 15:28:38 -070013671
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053013672 crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
13673 if (!crtc->num_scalers)
13674 return;
13675
Ville Syrjälä65edccc2016-10-31 22:37:01 +020013676 for (i = 0; i < crtc->num_scalers; i++) {
13677 struct intel_scaler *scaler = &scaler_state->scalers[i];
13678
13679 scaler->in_use = 0;
13680 scaler->mode = PS_SCALER_MODE_DYN;
Chandra Konduru549e2bf2015-04-07 15:28:38 -070013681 }
13682
13683 scaler_state->scaler_id = -1;
13684}
13685
/*
 * Allocate and register the CRTC for @pipe together with its primary,
 * sprite and cursor planes, then record it in the pipe/plane-to-crtc
 * lookup tables.
 *
 * Returns 0 on success or a negative error code. On failure only the
 * locally-allocated crtc/state are freed here; planes that were already
 * registered are released by drm_mode_config_cleanup() (see the comment
 * at the fail label).
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	/* Create all sprite planes belonging to this pipe. */
	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe must map to exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	/* Pre-gen9 also tracks which crtc owns each i9xx plane. */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
13774
Jesse Barnes752aa882013-10-31 18:55:49 +020013775enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
13776{
Daniel Vetter6e9f7982014-05-29 23:54:47 +020013777 struct drm_device *dev = connector->base.dev;
Jesse Barnes752aa882013-10-31 18:55:49 +020013778
Rob Clark51fd3712013-11-19 12:10:12 -050013779 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
Jesse Barnes752aa882013-10-31 18:55:49 +020013780
Daniel Vetter51ec53d2017-03-01 10:52:24 +010013781 if (!connector->base.state->crtc)
Jesse Barnes752aa882013-10-31 18:55:49 +020013782 return INVALID_PIPE;
13783
Daniel Vetter51ec53d2017-03-01 10:52:24 +010013784 return to_intel_crtc(connector->base.state->crtc)->pipe;
Jesse Barnes752aa882013-10-31 18:55:49 +020013785}
13786
Ville Syrjälä6a20fe72018-02-07 18:48:41 +020013787int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
13788 struct drm_file *file)
Carl Worth08d7b3d2009-04-29 14:43:54 -070013789{
Carl Worth08d7b3d2009-04-29 14:43:54 -070013790 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
Rob Clark7707e652014-07-17 23:30:04 -040013791 struct drm_crtc *drmmode_crtc;
Daniel Vetterc05422d2009-08-11 16:05:30 +020013792 struct intel_crtc *crtc;
Carl Worth08d7b3d2009-04-29 14:43:54 -070013793
Keith Packard418da172017-03-14 23:25:07 -070013794 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
Chris Wilson71240ed2016-06-24 14:00:24 +010013795 if (!drmmode_crtc)
Ville Syrjälä3f2c2052013-10-17 13:35:03 +030013796 return -ENOENT;
Carl Worth08d7b3d2009-04-29 14:43:54 -070013797
Rob Clark7707e652014-07-17 23:30:04 -040013798 crtc = to_intel_crtc(drmmode_crtc);
Daniel Vetterc05422d2009-08-11 16:05:30 +020013799 pipe_from_crtc_id->pipe = crtc->pipe;
Carl Worth08d7b3d2009-04-29 14:43:54 -070013800
Daniel Vetterc05422d2009-08-11 16:05:30 +020013801 return 0;
Carl Worth08d7b3d2009-04-29 14:43:54 -070013802}
13803
Daniel Vetter66a92782012-07-12 20:08:18 +020013804static int intel_encoder_clones(struct intel_encoder *encoder)
Jesse Barnes79e53942008-11-07 14:24:08 -080013805{
Daniel Vetter66a92782012-07-12 20:08:18 +020013806 struct drm_device *dev = encoder->base.dev;
13807 struct intel_encoder *source_encoder;
Jesse Barnes79e53942008-11-07 14:24:08 -080013808 int index_mask = 0;
Jesse Barnes79e53942008-11-07 14:24:08 -080013809 int entry = 0;
13810
Damien Lespiaub2784e12014-08-05 11:29:37 +010013811 for_each_intel_encoder(dev, source_encoder) {
Ville Syrjäläbc079e82014-03-03 16:15:28 +020013812 if (encoders_cloneable(encoder, source_encoder))
Daniel Vetter66a92782012-07-12 20:08:18 +020013813 index_mask |= (1 << entry);
13814
Jesse Barnes79e53942008-11-07 14:24:08 -080013815 entry++;
13816 }
Chris Wilson4ef69c72010-09-09 15:14:28 +010013817
Jesse Barnes79e53942008-11-07 14:24:08 -080013818 return index_mask;
13819}
13820
Ville Syrjälä646d5772016-10-31 22:37:14 +020013821static bool has_edp_a(struct drm_i915_private *dev_priv)
Chris Wilson4d302442010-12-14 19:21:29 +000013822{
Ville Syrjälä646d5772016-10-31 22:37:14 +020013823 if (!IS_MOBILE(dev_priv))
Chris Wilson4d302442010-12-14 19:21:29 +000013824 return false;
13825
13826 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
13827 return false;
13828
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010013829 if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
Chris Wilson4d302442010-12-14 19:21:29 +000013830 return false;
13831
13832 return true;
13833}
13834
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000013835static bool intel_crt_present(struct drm_i915_private *dev_priv)
Jesse Barnes84b4e042014-06-25 08:24:29 -070013836{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000013837 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau884497e2013-12-03 13:56:23 +000013838 return false;
13839
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +010013840 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
Jesse Barnes84b4e042014-06-25 08:24:29 -070013841 return false;
13842
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010013843 if (IS_CHERRYVIEW(dev_priv))
Jesse Barnes84b4e042014-06-25 08:24:29 -070013844 return false;
13845
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010013846 if (HAS_PCH_LPT_H(dev_priv) &&
13847 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
Ville Syrjälä65e472e2015-12-01 23:28:55 +020013848 return false;
13849
Ville Syrjälä70ac54d2015-12-01 23:29:56 +020013850 /* DDI E can't be used if DDI A requires 4 lanes */
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010013851 if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
Ville Syrjälä70ac54d2015-12-01 23:29:56 +020013852 return false;
13853
Ville Syrjäläe4abb732015-12-01 23:31:33 +020013854 if (!dev_priv->vbt.int_crt_support)
Jesse Barnes84b4e042014-06-25 08:24:29 -070013855 return false;
13856
13857 return true;
13858}
13859
Imre Deak8090ba82016-08-10 14:07:33 +030013860void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
13861{
13862 int pps_num;
13863 int pps_idx;
13864
13865 if (HAS_DDI(dev_priv))
13866 return;
13867 /*
13868 * This w/a is needed at least on CPT/PPT, but to be sure apply it
13869 * everywhere where registers can be write protected.
13870 */
13871 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13872 pps_num = 2;
13873 else
13874 pps_num = 1;
13875
13876 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
13877 u32 val = I915_READ(PP_CONTROL(pps_idx));
13878
13879 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
13880 I915_WRITE(PP_CONTROL(pps_idx), val);
13881 }
13882}
13883
Imre Deak44cb7342016-08-10 14:07:29 +030013884static void intel_pps_init(struct drm_i915_private *dev_priv)
13885{
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +020013886 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
Imre Deak44cb7342016-08-10 14:07:29 +030013887 dev_priv->pps_mmio_base = PCH_PPS_BASE;
13888 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13889 dev_priv->pps_mmio_base = VLV_PPS_BASE;
13890 else
13891 dev_priv->pps_mmio_base = PPS_BASE;
Imre Deak8090ba82016-08-10 14:07:33 +030013892
13893 intel_pps_unlock_regs_wa(dev_priv);
Imre Deak44cb7342016-08-10 14:07:29 +030013894}
13895
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013896static void intel_setup_outputs(struct drm_i915_private *dev_priv)
Jesse Barnes79e53942008-11-07 14:24:08 -080013897{
Chris Wilson4ef69c72010-09-09 15:14:28 +010013898 struct intel_encoder *encoder;
Adam Jacksoncb0953d2010-07-16 14:46:29 -040013899 bool dpd_is_edp = false;
Jesse Barnes79e53942008-11-07 14:24:08 -080013900
Imre Deak44cb7342016-08-10 14:07:29 +030013901 intel_pps_init(dev_priv);
13902
Imre Deak97a824e12016-06-21 11:51:47 +030013903 /*
13904 * intel_edp_init_connector() depends on this completing first, to
13905 * prevent the registeration of both eDP and LVDS and the incorrect
13906 * sharing of the PPS.
13907 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013908 intel_lvds_init(dev_priv);
Jesse Barnes79e53942008-11-07 14:24:08 -080013909
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000013910 if (intel_crt_present(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013911 intel_crt_init(dev_priv);
Adam Jacksoncb0953d2010-07-16 14:46:29 -040013912
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +020013913 if (IS_GEN9_LP(dev_priv)) {
Vandana Kannanc776eb22014-08-19 12:05:01 +053013914 /*
13915 * FIXME: Broxton doesn't support port detection via the
13916 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
13917 * detect the ports.
13918 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013919 intel_ddi_init(dev_priv, PORT_A);
13920 intel_ddi_init(dev_priv, PORT_B);
13921 intel_ddi_init(dev_priv, PORT_C);
Shashank Sharmac6c794a2016-03-22 12:01:50 +020013922
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013923 intel_dsi_init(dev_priv);
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010013924 } else if (HAS_DDI(dev_priv)) {
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030013925 int found;
13926
Jesse Barnesde31fac2015-03-06 15:53:32 -080013927 /*
13928 * Haswell uses DDI functions to detect digital outputs.
13929 * On SKL pre-D0 the strap isn't connected, so we assume
13930 * it's there.
13931 */
Ville Syrjälä77179402015-09-18 20:03:35 +030013932 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
Jesse Barnesde31fac2015-03-06 15:53:32 -080013933 /* WaIgnoreDDIAStrap: skl */
Rodrigo Vivib976dc52017-01-23 10:32:37 -080013934 if (found || IS_GEN9_BC(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013935 intel_ddi_init(dev_priv, PORT_A);
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030013936
Rodrigo Vivi9787e832018-01-29 15:22:22 -080013937 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030013938 * register */
13939 found = I915_READ(SFUSE_STRAP);
13940
13941 if (found & SFUSE_STRAP_DDIB_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013942 intel_ddi_init(dev_priv, PORT_B);
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030013943 if (found & SFUSE_STRAP_DDIC_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013944 intel_ddi_init(dev_priv, PORT_C);
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030013945 if (found & SFUSE_STRAP_DDID_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013946 intel_ddi_init(dev_priv, PORT_D);
Rodrigo Vivi9787e832018-01-29 15:22:22 -080013947 if (found & SFUSE_STRAP_DDIF_DETECTED)
13948 intel_ddi_init(dev_priv, PORT_F);
Rodrigo Vivi2800e4c2015-08-07 17:35:21 -070013949 /*
13950 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
13951 */
Rodrigo Vivib976dc52017-01-23 10:32:37 -080013952 if (IS_GEN9_BC(dev_priv) &&
Rodrigo Vivi2800e4c2015-08-07 17:35:21 -070013953 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
13954 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
13955 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013956 intel_ddi_init(dev_priv, PORT_E);
Rodrigo Vivi2800e4c2015-08-07 17:35:21 -070013957
Tvrtko Ursulin6e266952016-10-13 11:02:53 +010013958 } else if (HAS_PCH_SPLIT(dev_priv)) {
Adam Jacksoncb0953d2010-07-16 14:46:29 -040013959 int found;
Jani Nikula7b91bf72017-08-18 12:30:19 +030013960 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
Daniel Vetter270b3042012-10-27 15:52:05 +020013961
Ville Syrjälä646d5772016-10-31 22:37:14 +020013962 if (has_edp_a(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013963 intel_dp_init(dev_priv, DP_A, PORT_A);
Adam Jacksoncb0953d2010-07-16 14:46:29 -040013964
Paulo Zanonidc0fa712013-02-19 16:21:46 -030013965 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
Zhao Yakui461ed3c2010-03-30 15:11:33 +080013966 /* PCH SDVOB multiplex with HDMIB */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013967 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080013968 if (!found)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013969 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +080013970 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013971 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080013972 }
13973
Paulo Zanonidc0fa712013-02-19 16:21:46 -030013974 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013975 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080013976
Paulo Zanonidc0fa712013-02-19 16:21:46 -030013977 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013978 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080013979
Zhenyu Wang5eb08b62009-07-24 01:00:31 +080013980 if (I915_READ(PCH_DP_C) & DP_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013981 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +080013982
Daniel Vetter270b3042012-10-27 15:52:05 +020013983 if (I915_READ(PCH_DP_D) & DP_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020013984 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010013985 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä22f350422016-06-03 12:17:43 +030013986 bool has_edp, has_port;
Chris Wilson457c52d2016-06-01 08:27:50 +010013987
Ville Syrjäläe17ac6d2014-10-09 19:37:15 +030013988 /*
13989 * The DP_DETECTED bit is the latched state of the DDC
13990 * SDA pin at boot. However since eDP doesn't require DDC
13991 * (no way to plug in a DP->HDMI dongle) the DDC pins for
13992 * eDP ports may have been muxed to an alternate function.
13993 * Thus we can't rely on the DP_DETECTED bit alone to detect
13994 * eDP ports. Consult the VBT as well as DP_DETECTED to
13995 * detect eDP ports.
Ville Syrjälä22f350422016-06-03 12:17:43 +030013996 *
13997 * Sadly the straps seem to be missing sometimes even for HDMI
13998 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
13999 * and VBT for the presence of the port. Additionally we can't
14000 * trust the port type the VBT declares as we've seen at least
14001 * HDMI ports that the VBT claim are DP or eDP.
Ville Syrjäläe17ac6d2014-10-09 19:37:15 +030014002 */
Jani Nikula7b91bf72017-08-18 12:30:19 +030014003 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014004 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14005 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014006 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014007 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014008 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
Artem Bityutskiy585a94b2013-10-16 18:10:41 +030014009
Jani Nikula7b91bf72017-08-18 12:30:19 +030014010 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014011 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14012 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014013 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014014 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014015 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
Gajanan Bhat19c03922012-09-27 19:13:07 +053014016
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014017 if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä22f350422016-06-03 12:17:43 +030014018 /*
14019 * eDP not supported on port D,
14020 * so no need to worry about it
14021 */
14022 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14023 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014024 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014025 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014026 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
Ville Syrjälä9418c1f2014-04-09 13:28:56 +030014027 }
14028
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014029 intel_dsi_init(dev_priv);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010014030 } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
Ma Ling27185ae2009-08-24 13:50:23 +080014031 bool found = false;
Eric Anholt7d573822009-01-02 13:33:00 -080014032
Paulo Zanonie2debe92013-02-18 19:00:27 -030014033 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014034 DRM_DEBUG_KMS("probing SDVOB\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014035 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014036 if (!found && IS_G4X(dev_priv)) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014037 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014038 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014039 }
Ma Ling27185ae2009-08-24 13:50:23 +080014040
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014041 if (!found && IS_G4X(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014042 intel_dp_init(dev_priv, DP_B, PORT_B);
Eric Anholt725e30a2009-01-22 13:01:02 -080014043 }
Kristian Høgsberg13520b02009-03-13 15:42:14 -040014044
14045 /* Before G4X SDVOC doesn't have its own detect register */
Kristian Høgsberg13520b02009-03-13 15:42:14 -040014046
Paulo Zanonie2debe92013-02-18 19:00:27 -030014047 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014048 DRM_DEBUG_KMS("probing SDVOC\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014049 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014050 }
Ma Ling27185ae2009-08-24 13:50:23 +080014051
Paulo Zanonie2debe92013-02-18 19:00:27 -030014052 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
Ma Ling27185ae2009-08-24 13:50:23 +080014053
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014054 if (IS_G4X(dev_priv)) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014055 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014056 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014057 }
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014058 if (IS_G4X(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014059 intel_dp_init(dev_priv, DP_C, PORT_C);
Eric Anholt725e30a2009-01-22 13:01:02 -080014060 }
Ma Ling27185ae2009-08-24 13:50:23 +080014061
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014062 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014063 intel_dp_init(dev_priv, DP_D, PORT_D);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010014064 } else if (IS_GEN2(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014065 intel_dvo_init(dev_priv);
Jesse Barnes79e53942008-11-07 14:24:08 -080014066
Tvrtko Ursulin56b857a2016-11-07 09:29:20 +000014067 if (SUPPORTS_TV(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014068 intel_tv_init(dev_priv);
Jesse Barnes79e53942008-11-07 14:24:08 -080014069
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014070 intel_psr_init(dev_priv);
Rodrigo Vivi7c8f8a72014-06-13 05:10:03 -070014071
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014072 for_each_intel_encoder(&dev_priv->drm, encoder) {
Chris Wilson4ef69c72010-09-09 15:14:28 +010014073 encoder->base.possible_crtcs = encoder->crtc_mask;
14074 encoder->base.possible_clones =
Daniel Vetter66a92782012-07-12 20:08:18 +020014075 intel_encoder_clones(encoder);
Jesse Barnes79e53942008-11-07 14:24:08 -080014076 }
Chris Wilson47356eb2011-01-11 17:06:04 +000014077
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014078 intel_init_pch_refclk(dev_priv);
Daniel Vetter270b3042012-10-27 15:52:05 +020014079
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014080 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
Jesse Barnes79e53942008-11-07 14:24:08 -080014081}
14082
14083static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14084{
14085 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
Jesse Barnes79e53942008-11-07 14:24:08 -080014086
Daniel Vetteref2d6332014-02-10 18:00:38 +010014087 drm_framebuffer_cleanup(fb);
Chris Wilson70001cd2017-02-16 09:46:21 +000014088
Chris Wilsondd689282017-03-01 15:41:28 +000014089 i915_gem_object_lock(intel_fb->obj);
14090 WARN_ON(!intel_fb->obj->framebuffer_references--);
14091 i915_gem_object_unlock(intel_fb->obj);
14092
Chris Wilsonf8c417c2016-07-20 13:31:53 +010014093 i915_gem_object_put(intel_fb->obj);
Chris Wilson70001cd2017-02-16 09:46:21 +000014094
Jesse Barnes79e53942008-11-07 14:24:08 -080014095 kfree(intel_fb);
14096}
14097
14098static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
Chris Wilson05394f32010-11-08 19:18:58 +000014099 struct drm_file *file,
Jesse Barnes79e53942008-11-07 14:24:08 -080014100 unsigned int *handle)
14101{
14102 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
Chris Wilson05394f32010-11-08 19:18:58 +000014103 struct drm_i915_gem_object *obj = intel_fb->obj;
Jesse Barnes79e53942008-11-07 14:24:08 -080014104
Chris Wilsoncc917ab2015-10-13 14:22:26 +010014105 if (obj->userptr.mm) {
14106 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14107 return -EINVAL;
14108 }
14109
Chris Wilson05394f32010-11-08 19:18:58 +000014110 return drm_gem_handle_create(file, &obj->base, handle);
Jesse Barnes79e53942008-11-07 14:24:08 -080014111}
14112
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014113static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14114 struct drm_file *file,
14115 unsigned flags, unsigned color,
14116 struct drm_clip_rect *clips,
14117 unsigned num_clips)
14118{
Chris Wilson5a97bcc2017-02-22 11:40:46 +000014119 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014120
Chris Wilson5a97bcc2017-02-22 11:40:46 +000014121 i915_gem_object_flush_if_display(obj);
Chris Wilsond59b21e2017-02-22 11:40:49 +000014122 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014123
14124 return 0;
14125}
14126
/* drm_framebuffer vfunc table shared by all i915 framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14132
Damien Lespiaub3218032015-02-27 11:15:18 +000014133static
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014134u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14135 uint64_t fb_modifier, uint32_t pixel_format)
Damien Lespiaub3218032015-02-27 11:15:18 +000014136{
Chris Wilson24dbf512017-02-15 10:59:18 +000014137 u32 gen = INTEL_GEN(dev_priv);
Damien Lespiaub3218032015-02-27 11:15:18 +000014138
14139 if (gen >= 9) {
Ville Syrjäläac484962016-01-20 21:05:26 +020014140 int cpp = drm_format_plane_cpp(pixel_format, 0);
14141
Damien Lespiaub3218032015-02-27 11:15:18 +000014142 /* "The stride in bytes must not exceed the of the size of 8K
14143 * pixels and 32K bytes."
14144 */
Ville Syrjäläac484962016-01-20 21:05:26 +020014145 return min(8192 * cpp, 32768);
Ville Syrjälä6401c372017-02-08 19:53:28 +020014146 } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
Damien Lespiaub3218032015-02-27 11:15:18 +000014147 return 32*1024;
14148 } else if (gen >= 4) {
14149 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14150 return 16*1024;
14151 else
14152 return 32*1024;
14153 } else if (gen >= 3) {
14154 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14155 return 8*1024;
14156 else
14157 return 16*1024;
14158 } else {
14159 /* XXX DSPC is limited to 4k tiled */
14160 return 8*1024;
14161 }
14162}
14163
Chris Wilson24dbf512017-02-15 10:59:18 +000014164static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14165 struct drm_i915_gem_object *obj,
14166 struct drm_mode_fb_cmd2 *mode_cmd)
Jesse Barnes79e53942008-11-07 14:24:08 -080014167{
Chris Wilson24dbf512017-02-15 10:59:18 +000014168 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014169 struct drm_framebuffer *fb = &intel_fb->base;
Eric Engestromb3c11ac2016-11-12 01:12:56 +000014170 struct drm_format_name_buf format_name;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014171 u32 pitch_limit;
Chris Wilsondd689282017-03-01 15:41:28 +000014172 unsigned int tiling, stride;
Chris Wilson24dbf512017-02-15 10:59:18 +000014173 int ret = -EINVAL;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014174 int i;
Jesse Barnes79e53942008-11-07 14:24:08 -080014175
Chris Wilsondd689282017-03-01 15:41:28 +000014176 i915_gem_object_lock(obj);
14177 obj->framebuffer_references++;
14178 tiling = i915_gem_object_get_tiling(obj);
14179 stride = i915_gem_object_get_stride(obj);
14180 i915_gem_object_unlock(obj);
Daniel Vetterdd4916c2013-10-09 21:23:51 +020014181
Daniel Vetter2a80ead2015-02-10 17:16:06 +000014182 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014183 /*
14184 * If there's a fence, enforce that
14185 * the fb modifier and tiling mode match.
14186 */
14187 if (tiling != I915_TILING_NONE &&
14188 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014189 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
Chris Wilson24dbf512017-02-15 10:59:18 +000014190 goto err;
Daniel Vetter2a80ead2015-02-10 17:16:06 +000014191 }
14192 } else {
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014193 if (tiling == I915_TILING_X) {
Daniel Vetter2a80ead2015-02-10 17:16:06 +000014194 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014195 } else if (tiling == I915_TILING_Y) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014196 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
Chris Wilson24dbf512017-02-15 10:59:18 +000014197 goto err;
Daniel Vetter2a80ead2015-02-10 17:16:06 +000014198 }
14199 }
14200
Tvrtko Ursulin9a8f0a12015-02-27 11:15:24 +000014201 /* Passed in modifier sanity checking. */
14202 switch (mode_cmd->modifier[0]) {
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014203 case I915_FORMAT_MOD_Y_TILED_CCS:
14204 case I915_FORMAT_MOD_Yf_TILED_CCS:
14205 switch (mode_cmd->pixel_format) {
14206 case DRM_FORMAT_XBGR8888:
14207 case DRM_FORMAT_ABGR8888:
14208 case DRM_FORMAT_XRGB8888:
14209 case DRM_FORMAT_ARGB8888:
14210 break;
14211 default:
14212 DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
14213 goto err;
14214 }
14215 /* fall through */
Tvrtko Ursulin9a8f0a12015-02-27 11:15:24 +000014216 case I915_FORMAT_MOD_Y_TILED:
14217 case I915_FORMAT_MOD_Yf_TILED:
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014218 if (INTEL_GEN(dev_priv) < 9) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014219 DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14220 mode_cmd->modifier[0]);
Chris Wilson24dbf512017-02-15 10:59:18 +000014221 goto err;
Tvrtko Ursulin9a8f0a12015-02-27 11:15:24 +000014222 }
Ben Widawsky2f075562017-03-24 14:29:48 -070014223 case DRM_FORMAT_MOD_LINEAR:
Tvrtko Ursulin9a8f0a12015-02-27 11:15:24 +000014224 case I915_FORMAT_MOD_X_TILED:
14225 break;
14226 default:
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014227 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
14228 mode_cmd->modifier[0]);
Chris Wilson24dbf512017-02-15 10:59:18 +000014229 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014230 }
Chris Wilson57cd6502010-08-08 12:34:44 +010014231
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014232 /*
14233 * gen2/3 display engine uses the fence if present,
14234 * so the tiling mode must match the fb modifier exactly.
14235 */
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +000014236 if (INTEL_GEN(dev_priv) < 4 &&
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014237 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014238 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014239 goto err;
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014240 }
14241
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014242 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
Damien Lespiaub3218032015-02-27 11:15:18 +000014243 mode_cmd->pixel_format);
Chris Wilsona35cdaa2013-06-25 17:26:45 +010014244 if (mode_cmd->pitches[0] > pitch_limit) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014245 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
Ben Widawsky2f075562017-03-24 14:29:48 -070014246 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014247 "tiled" : "linear",
14248 mode_cmd->pitches[0], pitch_limit);
Chris Wilson24dbf512017-02-15 10:59:18 +000014249 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014250 }
Ville Syrjälä5d7bd702012-10-31 17:50:18 +020014251
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014252 /*
14253 * If there's a fence, enforce that
14254 * the fb pitch and fence stride match.
14255 */
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014256 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
14257 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
14258 mode_cmd->pitches[0], stride);
Chris Wilson24dbf512017-02-15 10:59:18 +000014259 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014260 }
Ville Syrjälä5d7bd702012-10-31 17:50:18 +020014261
Ville Syrjälä57779d02012-10-31 17:50:14 +020014262 /* Reject formats not supported by any plane early. */
Jesse Barnes308e5bc2011-11-14 14:51:28 -080014263 switch (mode_cmd->pixel_format) {
Ville Syrjälä57779d02012-10-31 17:50:14 +020014264 case DRM_FORMAT_C8:
Ville Syrjälä04b39242011-11-17 18:05:13 +020014265 case DRM_FORMAT_RGB565:
14266 case DRM_FORMAT_XRGB8888:
14267 case DRM_FORMAT_ARGB8888:
Ville Syrjälä57779d02012-10-31 17:50:14 +020014268 break;
14269 case DRM_FORMAT_XRGB1555:
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014270 if (INTEL_GEN(dev_priv) > 3) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014271 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14272 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014273 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014274 }
Ville Syrjälä57779d02012-10-31 17:50:14 +020014275 break;
Ville Syrjälä57779d02012-10-31 17:50:14 +020014276 case DRM_FORMAT_ABGR8888:
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014277 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014278 INTEL_GEN(dev_priv) < 9) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014279 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14280 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014281 goto err;
Damien Lespiau6c0fd452015-05-19 12:29:16 +010014282 }
14283 break;
14284 case DRM_FORMAT_XBGR8888:
Ville Syrjälä04b39242011-11-17 18:05:13 +020014285 case DRM_FORMAT_XRGB2101010:
Ville Syrjälä57779d02012-10-31 17:50:14 +020014286 case DRM_FORMAT_XBGR2101010:
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014287 if (INTEL_GEN(dev_priv) < 4) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014288 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14289 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014290 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014291 }
Jesse Barnesb5626742011-06-24 12:19:27 -070014292 break;
Damien Lespiau75312082015-05-15 19:06:01 +010014293 case DRM_FORMAT_ABGR2101010:
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014294 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014295 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14296 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014297 goto err;
Damien Lespiau75312082015-05-15 19:06:01 +010014298 }
14299 break;
Ville Syrjälä04b39242011-11-17 18:05:13 +020014300 case DRM_FORMAT_YUYV:
14301 case DRM_FORMAT_UYVY:
14302 case DRM_FORMAT_YVYU:
14303 case DRM_FORMAT_VYUY:
Ville Syrjäläab330812017-04-21 21:14:32 +030014304 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014305 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14306 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014307 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014308 }
Chris Wilson57cd6502010-08-08 12:34:44 +010014309 break;
Chandra Kondurue44134f2018-05-12 03:03:15 +053014310 case DRM_FORMAT_NV12:
14311 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS ||
14312 mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) {
14313 DRM_DEBUG_KMS("RC not to be enabled with NV12\n");
14314 goto err;
14315 }
14316 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
14317 IS_BROXTON(dev_priv)) {
14318 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14319 drm_get_format_name(mode_cmd->pixel_format,
14320 &format_name));
14321 goto err;
14322 }
14323 break;
Chris Wilson57cd6502010-08-08 12:34:44 +010014324 default:
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014325 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14326 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014327 goto err;
Chris Wilson57cd6502010-08-08 12:34:44 +010014328 }
14329
Ville Syrjälä90f9a332012-10-31 17:50:19 +020014330 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14331 if (mode_cmd->offsets[0] != 0)
Chris Wilson24dbf512017-02-15 10:59:18 +000014332 goto err;
Ville Syrjälä90f9a332012-10-31 17:50:19 +020014333
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014334 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
Ville Syrjäläd88c4af2017-03-07 21:42:06 +020014335
Chandra Kondurue44134f2018-05-12 03:03:15 +053014336 if (fb->format->format == DRM_FORMAT_NV12 &&
14337 (fb->width < SKL_MIN_YUV_420_SRC_W ||
14338 fb->height < SKL_MIN_YUV_420_SRC_H ||
14339 (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
14340 DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
14341 return -EINVAL;
14342 }
14343
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014344 for (i = 0; i < fb->format->num_planes; i++) {
14345 u32 stride_alignment;
14346
14347 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
14348 DRM_DEBUG_KMS("bad plane %d handle\n", i);
Christophe JAILLET37875d62017-09-10 10:56:42 +020014349 goto err;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014350 }
14351
14352 stride_alignment = intel_fb_stride_alignment(fb, i);
14353
14354 /*
14355 * Display WA #0531: skl,bxt,kbl,glk
14356 *
14357 * Render decompression and plane width > 3840
14358 * combined with horizontal panning requires the
14359 * plane stride to be a multiple of 4. We'll just
14360 * require the entire fb to accommodate that to avoid
14361 * potential runtime errors at plane configuration time.
14362 */
14363 if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
14364 (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
14365 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
14366 stride_alignment *= 4;
14367
14368 if (fb->pitches[i] & (stride_alignment - 1)) {
14369 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
14370 i, fb->pitches[i], stride_alignment);
14371 goto err;
14372 }
Ville Syrjäläd88c4af2017-03-07 21:42:06 +020014373 }
14374
Daniel Vetterc7d73f62012-12-13 23:38:38 +010014375 intel_fb->obj = obj;
14376
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014377 ret = intel_fill_fb_info(dev_priv, fb);
Ville Syrjälä6687c902015-09-15 13:16:41 +030014378 if (ret)
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014379 goto err;
Ville Syrjälä2d7a2152016-02-15 22:54:47 +020014380
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014381 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
Jesse Barnes79e53942008-11-07 14:24:08 -080014382 if (ret) {
14383 DRM_ERROR("framebuffer init failed %d\n", ret);
Chris Wilson24dbf512017-02-15 10:59:18 +000014384 goto err;
Jesse Barnes79e53942008-11-07 14:24:08 -080014385 }
14386
Jesse Barnes79e53942008-11-07 14:24:08 -080014387 return 0;
Chris Wilson24dbf512017-02-15 10:59:18 +000014388
14389err:
Chris Wilsondd689282017-03-01 15:41:28 +000014390 i915_gem_object_lock(obj);
14391 obj->framebuffer_references--;
14392 i915_gem_object_unlock(obj);
Chris Wilson24dbf512017-02-15 10:59:18 +000014393 return ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080014394}
14395
Jesse Barnes79e53942008-11-07 14:24:08 -080014396static struct drm_framebuffer *
14397intel_user_framebuffer_create(struct drm_device *dev,
14398 struct drm_file *filp,
Ville Syrjälä1eb834512015-11-11 19:11:29 +020014399 const struct drm_mode_fb_cmd2 *user_mode_cmd)
Jesse Barnes79e53942008-11-07 14:24:08 -080014400{
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014401 struct drm_framebuffer *fb;
Chris Wilson05394f32010-11-08 19:18:58 +000014402 struct drm_i915_gem_object *obj;
Ville Syrjälä76dc3762015-11-11 19:11:28 +020014403 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
Jesse Barnes79e53942008-11-07 14:24:08 -080014404
Chris Wilson03ac0642016-07-20 13:31:51 +010014405 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14406 if (!obj)
Chris Wilsoncce13ff2010-08-08 13:36:38 +010014407 return ERR_PTR(-ENOENT);
Jesse Barnes79e53942008-11-07 14:24:08 -080014408
Chris Wilson24dbf512017-02-15 10:59:18 +000014409 fb = intel_framebuffer_create(obj, &mode_cmd);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014410 if (IS_ERR(fb))
Chris Wilsonf0cd5182016-10-28 13:58:43 +010014411 i915_gem_object_put(obj);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014412
14413 return fb;
Jesse Barnes79e53942008-11-07 14:24:08 -080014414}
14415
/*
 * atomic_state_free hook (intel_mode_funcs.atomic_state_free).
 *
 * Releases the base drm_atomic_state resources, finalizes the
 * i915-private commit_ready fence embedded in the containing
 * intel_atomic_state, then frees the whole allocation. The order
 * matters: the fence must be finished before kfree().
 */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	/* i915-private: tear down the commit fence before freeing. */
	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}
14426
/*
 * mode_valid hook (intel_mode_funcs.mode_valid): reject display modes
 * using flags/features the driver does not support.
 *
 * The checks are ordered; for a mode with several unsupported features
 * the first failing check determines the returned status code.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
		 const struct drm_display_mode *mode)
{
	/* No vertical scan duplication. */
	if (mode->vscan > 1)
		return MODE_NO_VSCAN;

	/* No doublescan modes. */
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	/* No horizontal skew. */
	if (mode->flags & DRM_MODE_FLAG_HSKEW)
		return MODE_H_ILLEGAL;

	/* No composite sync variants. */
	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
			   DRM_MODE_FLAG_NCSYNC |
			   DRM_MODE_FLAG_PCSYNC))
		return MODE_HSYNC;

	/* No broadcast / pixel-multiplex / half-clock modes. */
	if (mode->flags & (DRM_MODE_FLAG_BCAST |
			   DRM_MODE_FLAG_PIXMUX |
			   DRM_MODE_FLAG_CLKDIV2))
		return MODE_BAD;

	return MODE_OK;
}
14452
/*
 * drm_mode_config vfunc table for the device: framebuffer creation,
 * mode validation and the atomic check/commit/state entry points.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14464
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	/*
	 * Select the per-platform CRTC/plane hooks. The branches are
	 * ordered most-capable first (gen9+, then DDI, then PCH split,
	 * ...), so the order of the checks matters.
	 */
	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training is only set on platforms that have FDI. */
	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;
}
14557
Jesse Barnesb690e962010-07-19 13:53:12 -070014558/*
Keith Packard435793d2011-07-12 14:56:22 -070014559 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14560 */
14561static void quirk_ssc_force_disable(struct drm_device *dev)
14562{
Chris Wilsonfac5e232016-07-04 11:34:36 +010014563 struct drm_i915_private *dev_priv = to_i915(dev);
Keith Packard435793d2011-07-12 14:56:22 -070014564 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
Daniel Vetterbc0daf42012-04-01 13:16:49 +020014565 DRM_INFO("applying lvds SSC disable quirk\n");
Keith Packard435793d2011-07-12 14:56:22 -070014566}
14567
Carsten Emde4dca20e2012-03-15 15:56:26 +010014568/*
Carsten Emde5a15ab52012-03-15 15:56:27 +010014569 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14570 * brightness value
Carsten Emde4dca20e2012-03-15 15:56:26 +010014571 */
14572static void quirk_invert_brightness(struct drm_device *dev)
14573{
Chris Wilsonfac5e232016-07-04 11:34:36 +010014574 struct drm_i915_private *dev_priv = to_i915(dev);
Carsten Emde4dca20e2012-03-15 15:56:26 +010014575 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
Daniel Vetterbc0daf42012-04-01 13:16:49 +020014576 DRM_INFO("applying inverted panel brightness quirk\n");
Jesse Barnesb690e962010-07-19 13:53:12 -070014577}
14578
Scot Doyle9c72cc62014-07-03 23:27:50 +000014579/* Some VBT's incorrectly indicate no backlight is present */
14580static void quirk_backlight_present(struct drm_device *dev)
14581{
Chris Wilsonfac5e232016-07-04 11:34:36 +010014582 struct drm_i915_private *dev_priv = to_i915(dev);
Scot Doyle9c72cc62014-07-03 23:27:50 +000014583 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14584 DRM_INFO("applying backlight present quirk\n");
14585}
14586
Manasi Navarec99a2592017-06-30 09:33:48 -070014587/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
14588 * which is 300 ms greater than eDP spec T12 min.
14589 */
14590static void quirk_increase_t12_delay(struct drm_device *dev)
14591{
14592 struct drm_i915_private *dev_priv = to_i915(dev);
14593
14594 dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
14595 DRM_INFO("Applying T12 delay quirk\n");
14596}
14597
/* PCI-ID keyed quirk entry; matched against the GPU in intel_init_quirks(). */
struct intel_quirk {
	int device;		/* PCI device ID to match */
	int subsystem_vendor;	/* PCI subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;	/* PCI subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* called when the IDs match */
};
14604
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* called when a DMI entry matches */
	const struct dmi_system_id (*dmi_id_list)[];	/* empty-terminated DMI match table */
};
14610
/*
 * DMI match callback: log which system matched. The non-zero return is
 * counted by dmi_check_system() (see intel_init_quirks()), which then
 * runs the associated quirk hook.
 */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
14616
/* DMI-matched quirk table: NCR systems need inverted backlight polarity. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				/* Match any NCR product (empty product name matches all) */
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
14632
/*
 * PCI-ID keyed quirk table: { device, subsystem_vendor, subsystem_device,
 * hook }. Scanned on init by intel_init_quirks().
 */
static struct intel_quirk intel_quirks[] = {
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },

	/* Toshiba Satellite P50-C-18C */
	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
};
14685
14686static void intel_init_quirks(struct drm_device *dev)
14687{
14688 struct pci_dev *d = dev->pdev;
14689 int i;
14690
14691 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14692 struct intel_quirk *q = &intel_quirks[i];
14693
14694 if (d->device == q->device &&
14695 (d->subsystem_vendor == q->subsystem_vendor ||
14696 q->subsystem_vendor == PCI_ANY_ID) &&
14697 (d->subsystem_device == q->subsystem_device ||
14698 q->subsystem_device == PCI_ANY_ID))
14699 q->hook(dev);
14700 }
Egbert Eich5f85f172012-10-14 15:46:38 +020014701 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
14702 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
14703 intel_dmi_quirks[i].hook(dev);
14704 }
Jesse Barnesb690e962010-07-19 13:53:12 -070014705}
14706
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	/* Platform-specific VGACNTRL register location. */
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	/* Grab legacy VGA I/O from the VGA arbiter so the pokes below land. */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* Read-modify-write VGA sequencer register SR01, setting bit 5. */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	/* Let the sequencer change settle before touching VGACNTRL. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the write */
}
14725
/*
 * Read the current cdclk configuration out of the hardware and seed the
 * software cdclk state (logical and actual) from it.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
	/* Start out with software state mirroring what the hw reports. */
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
14734
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Another thread holds a needed lock; back off and retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	/* Duplicate the readout state so we can run it through atomic check. */
	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Copy the computed wm state onto the live crtc state. */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
14821
Chris Wilson58ecd9d2017-11-05 13:49:05 +000014822static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
14823{
14824 if (IS_GEN5(dev_priv)) {
14825 u32 fdi_pll_clk =
14826 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
14827
14828 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
14829 } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
14830 dev_priv->fdi_pll_freq = 270000;
14831 } else {
14832 return;
14833 }
14834
14835 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
14836}
14837
/*
 * One-time modeset initialization: set up the DRM mode config, apply quirks,
 * create crtcs/plls/outputs, read the BIOS-programmed hardware state back
 * into software state, and sanitize watermarks to match it.
 *
 * Returns 0 on success or a negative error code from crtc creation.
 */
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;

	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	init_llist_head(&dev_priv->atomic_helper.free_list);
	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(dev);

	intel_init_pm(dev_priv);

	/* Nothing more to do on display-less hardware. */
	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* Maximum framebuffer dimensions grow with hardware generation. */
	if (IS_GEN2(dev_priv)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev_priv)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor size limits are likewise platform-specific. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev_priv)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->gmadr.start;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

	/* Create a crtc for every pipe; tear it all down on failure. */
	for_each_pipe(dev_priv, pipe) {
		int ret;

		ret = intel_crtc_init(dev_priv, pipe);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(dev_priv);

	intel_update_czclk(dev_priv);
	intel_modeset_init_hw(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev_priv);

	/* Just disable it once at startup */
	i915_disable_vga(dev_priv);
	intel_setup_outputs(dev_priv);

	/* Take over the state the BIOS left the hardware in. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		sanitize_watermarks(dev);

	return 0;
}
Jesse Barnesd5bb0812011-01-05 12:01:26 -080014976
/*
 * Force-enable a pipe with a fixed 640x480 timing ("force quirk", see the
 * debug message below). Programs the DPLL, pipe timings and PIPECONF by
 * hand, then waits for the scanline counter to start moving.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the divider values produce the expected dotclock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve only the DVO 2X bit from the current DPLL value. */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* 640x480@60 timings: active | ((total - 1) << 16), all 0-based. */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15046
/*
 * Counterpart to i830_enable_pipe(): turn the force-quirk pipe back off.
 * All planes and cursors must already be disabled (asserted below) before
 * the pipe and its DPLL are shut down.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* No plane or cursor may still be enabled at this point. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Wait for scanout to stop before turning off the DPLL. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15068
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015069static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
Ville Syrjäläed150302017-11-17 21:19:10 +020015070 struct intel_plane *plane)
Daniel Vetterfa555832012-10-10 23:14:00 +020015071{
Tvrtko Ursulinb7f05d42016-11-09 11:30:45 +000015072 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ville Syrjäläed150302017-11-17 21:19:10 +020015073 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
15074 u32 val = I915_READ(DSPCNTR(i9xx_plane));
Daniel Vetterfa555832012-10-10 23:14:00 +020015075
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015076 return (val & DISPLAY_PLANE_ENABLE) == 0 ||
15077 (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
15078}
Daniel Vetterfa555832012-10-10 23:14:00 +020015079
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015080static void
15081intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15082{
15083 struct intel_crtc *crtc;
Daniel Vetterfa555832012-10-10 23:14:00 +020015084
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015085 if (INTEL_GEN(dev_priv) >= 4)
15086 return;
Daniel Vetterfa555832012-10-10 23:14:00 +020015087
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015088 for_each_intel_crtc(&dev_priv->drm, crtc) {
15089 struct intel_plane *plane =
15090 to_intel_plane(crtc->base.primary);
15091
15092 if (intel_plane_mapping_ok(crtc, plane))
15093 continue;
15094
15095 DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
15096 plane->base.name);
15097 intel_plane_disable_noatomic(crtc, plane);
15098 }
Daniel Vetterfa555832012-10-10 23:14:00 +020015099}
15100
Ville Syrjälä02e93c32015-08-26 19:39:19 +030015101static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15102{
15103 struct drm_device *dev = crtc->base.dev;
15104 struct intel_encoder *encoder;
15105
15106 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15107 return true;
15108
15109 return false;
15110}
15111
Maarten Lankhorst496b0fc2016-08-23 16:18:07 +020015112static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15113{
15114 struct drm_device *dev = encoder->base.dev;
15115 struct intel_connector *connector;
15116
15117 for_each_connector_on_encoder(dev, &encoder->base, connector)
15118 return connector;
15119
15120 return NULL;
15121}
15122
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015123static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
Ville Syrjäläecf837d92017-10-10 15:55:56 +030015124 enum pipe pch_transcoder)
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015125{
15126 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
Ville Syrjäläecf837d92017-10-10 15:55:56 +030015127 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015128}
15129
/*
 * Bring a crtc's software/hardware state into a sane, consistent shape
 * after BIOS takeover: clear debug frame-start delays, fix up vblank
 * bookkeeping, disable stray non-primary planes, shut the pipe down if
 * no encoder drives it, and initialize FIFO underrun reporting state.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15196
/*
 * Sanitize a single encoder after hardware state readout: if the encoder
 * still claims active connectors but has no active pipe behind it (fallout
 * from BIOS takeover or resume register restore), manually disable it and
 * clamp the associated connector state to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			/* disable must run before post_disable — same order as
			 * a regular modeset disable sequence. */
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}
}
15237
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015238void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
Krzysztof Mazur0fde9012012-12-19 11:03:41 +010015239{
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010015240 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
Krzysztof Mazur0fde9012012-12-19 11:03:41 +010015241
Imre Deak04098752014-02-18 00:02:16 +020015242 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15243 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015244 i915_disable_vga(dev_priv);
Imre Deak04098752014-02-18 00:02:16 +020015245 }
15246}
15247
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015248void i915_redisable_vga(struct drm_i915_private *dev_priv)
Imre Deak04098752014-02-18 00:02:16 +020015249{
Paulo Zanoni8dc8a272013-08-02 16:22:24 -030015250 /* This function can be called both from intel_modeset_setup_hw_state or
15251 * at a very early point in our resume sequence, where the power well
15252 * structures are not yet restored. Since this function is at a very
15253 * paranoid "someone might have enabled VGA while we were not looking"
15254 * level, just check if the power well is enabled instead of trying to
15255 * follow the "don't touch the power well if we don't need it" policy
15256 * the rest of the driver uses. */
Imre Deak6392f842016-02-12 18:55:13 +020015257 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
Paulo Zanoni8dc8a272013-08-02 16:22:24 -030015258 return;
15259
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015260 i915_redisable_vga_power_on(dev_priv);
Imre Deak6392f842016-02-12 18:55:13 +020015261
15262 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
Krzysztof Mazur0fde9012012-12-19 11:03:41 +010015263}
15264
Ville Syrjäläf9cd7b82015-09-10 18:59:08 +030015265/* FIXME read out full plane state for all planes */
15266static void readout_plane_state(struct intel_crtc *crtc)
Maarten Lankhorstd032ffa2015-06-15 12:33:51 +020015267{
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015268 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15269 struct intel_crtc_state *crtc_state =
15270 to_intel_crtc_state(crtc->base.state);
15271 struct intel_plane *plane;
Maarten Lankhorstd032ffa2015-06-15 12:33:51 +020015272
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015273 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
15274 struct intel_plane_state *plane_state =
15275 to_intel_plane_state(plane->base.state);
15276 bool visible = plane->get_hw_state(plane);
Maarten Lankhorstb26d3ea2015-09-23 16:11:41 +020015277
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015278 intel_set_plane_visible(crtc_state, plane_state, visible);
15279 }
Ville Syrjälä98ec7732014-04-30 17:43:01 +030015280}
15281
Daniel Vetter30e984d2013-06-05 13:34:17 +020015282static void intel_modeset_readout_hw_state(struct drm_device *dev)
Daniel Vetter24929352012-07-02 20:28:59 +020015283{
Chris Wilsonfac5e232016-07-04 11:34:36 +010015284 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter24929352012-07-02 20:28:59 +020015285 enum pipe pipe;
Daniel Vetter24929352012-07-02 20:28:59 +020015286 struct intel_crtc *crtc;
15287 struct intel_encoder *encoder;
15288 struct intel_connector *connector;
Daniel Vetterf9e905c2017-03-01 10:52:25 +010015289 struct drm_connector_list_iter conn_iter;
Daniel Vetter53589012013-06-05 13:34:16 +020015290 int i;
Daniel Vetter24929352012-07-02 20:28:59 +020015291
Maarten Lankhorst565602d2015-12-10 12:33:57 +010015292 dev_priv->active_crtcs = 0;
15293
Damien Lespiaud3fcc802014-05-13 23:32:22 +010015294 for_each_intel_crtc(dev, crtc) {
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015295 struct intel_crtc_state *crtc_state =
15296 to_intel_crtc_state(crtc->base.state);
Daniel Vetter3b117c82013-04-17 20:15:07 +020015297
Daniel Vetterec2dc6a2016-05-09 16:34:09 +020015298 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
Maarten Lankhorst565602d2015-12-10 12:33:57 +010015299 memset(crtc_state, 0, sizeof(*crtc_state));
15300 crtc_state->base.crtc = &crtc->base;
Daniel Vetter24929352012-07-02 20:28:59 +020015301
Maarten Lankhorst565602d2015-12-10 12:33:57 +010015302 crtc_state->base.active = crtc_state->base.enable =
15303 dev_priv->display.get_pipe_config(crtc, crtc_state);
15304
15305 crtc->base.enabled = crtc_state->base.enable;
15306 crtc->active = crtc_state->base.active;
15307
Ville Syrjäläaca1ebf2016-12-20 17:39:02 +020015308 if (crtc_state->base.active)
Maarten Lankhorst565602d2015-12-10 12:33:57 +010015309 dev_priv->active_crtcs |= 1 << crtc->pipe;
15310
Ville Syrjäläf9cd7b82015-09-10 18:59:08 +030015311 readout_plane_state(crtc);
Daniel Vetter24929352012-07-02 20:28:59 +020015312
Ville Syrjälä78108b72016-05-27 20:59:19 +030015313 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15314 crtc->base.base.id, crtc->base.name,
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015315 enableddisabled(crtc_state->base.active));
Daniel Vetter24929352012-07-02 20:28:59 +020015316 }
15317
Daniel Vetter53589012013-06-05 13:34:16 +020015318 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15319 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15320
Lucas De Marchiee1398b2018-03-20 15:06:33 -070015321 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
15322 &pll->state.hw_state);
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020015323 pll->state.crtc_mask = 0;
Damien Lespiaud3fcc802014-05-13 23:32:22 +010015324 for_each_intel_crtc(dev, crtc) {
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015325 struct intel_crtc_state *crtc_state =
15326 to_intel_crtc_state(crtc->base.state);
15327
15328 if (crtc_state->base.active &&
15329 crtc_state->shared_dpll == pll)
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020015330 pll->state.crtc_mask |= 1 << crtc->pipe;
Daniel Vetter53589012013-06-05 13:34:16 +020015331 }
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020015332 pll->active_mask = pll->state.crtc_mask;
Daniel Vetter53589012013-06-05 13:34:16 +020015333
Ander Conselvan de Oliveira1e6f2dd2014-10-29 11:32:31 +020015334 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
Lucas De Marchi72f775f2018-03-20 15:06:34 -070015335 pll->info->name, pll->state.crtc_mask, pll->on);
Daniel Vetter53589012013-06-05 13:34:16 +020015336 }
15337
Damien Lespiaub2784e12014-08-05 11:29:37 +010015338 for_each_intel_encoder(dev, encoder) {
Daniel Vetter24929352012-07-02 20:28:59 +020015339 pipe = 0;
15340
15341 if (encoder->get_hw_state(encoder, &pipe)) {
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015342 struct intel_crtc_state *crtc_state;
15343
Ville Syrjälä98187832016-10-31 22:37:10 +020015344 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015345 crtc_state = to_intel_crtc_state(crtc->base.state);
Ville Syrjäläe2af48c2016-10-31 22:37:05 +020015346
Jesse Barnes045ac3b2013-05-14 17:08:26 -070015347 encoder->base.crtc = &crtc->base;
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015348 encoder->get_config(encoder, crtc_state);
Daniel Vetter24929352012-07-02 20:28:59 +020015349 } else {
15350 encoder->base.crtc = NULL;
15351 }
15352
Damien Lespiau6f2bcce2013-10-16 12:29:54 +010015353 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +000015354 encoder->base.base.id, encoder->base.name,
15355 enableddisabled(encoder->base.crtc),
Damien Lespiau6f2bcce2013-10-16 12:29:54 +010015356 pipe_name(pipe));
Daniel Vetter24929352012-07-02 20:28:59 +020015357 }
15358
Daniel Vetterf9e905c2017-03-01 10:52:25 +010015359 drm_connector_list_iter_begin(dev, &conn_iter);
15360 for_each_intel_connector_iter(connector, &conn_iter) {
Daniel Vetter24929352012-07-02 20:28:59 +020015361 if (connector->get_hw_state(connector)) {
15362 connector->base.dpms = DRM_MODE_DPMS_ON;
Maarten Lankhorst2aa974c2016-01-06 14:53:25 +010015363
15364 encoder = connector->encoder;
15365 connector->base.encoder = &encoder->base;
15366
15367 if (encoder->base.crtc &&
15368 encoder->base.crtc->state->active) {
15369 /*
15370 * This has to be done during hardware readout
15371 * because anything calling .crtc_disable may
15372 * rely on the connector_mask being accurate.
15373 */
15374 encoder->base.crtc->state->connector_mask |=
15375 1 << drm_connector_index(&connector->base);
Maarten Lankhorste87a52b2016-01-28 15:04:58 +010015376 encoder->base.crtc->state->encoder_mask |=
15377 1 << drm_encoder_index(&encoder->base);
Maarten Lankhorst2aa974c2016-01-06 14:53:25 +010015378 }
15379
Daniel Vetter24929352012-07-02 20:28:59 +020015380 } else {
15381 connector->base.dpms = DRM_MODE_DPMS_OFF;
15382 connector->base.encoder = NULL;
15383 }
15384 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +000015385 connector->base.base.id, connector->base.name,
15386 enableddisabled(connector->base.encoder));
Daniel Vetter24929352012-07-02 20:28:59 +020015387 }
Daniel Vetterf9e905c2017-03-01 10:52:25 +010015388 drm_connector_list_iter_end(&conn_iter);
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015389
15390 for_each_intel_crtc(dev, crtc) {
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015391 struct intel_crtc_state *crtc_state =
15392 to_intel_crtc_state(crtc->base.state);
Ville Syrjäläd305e062017-08-30 21:57:03 +030015393 int min_cdclk = 0;
Ville Syrjäläaca1ebf2016-12-20 17:39:02 +020015394
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015395 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015396 if (crtc_state->base.active) {
15397 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
Ville Syrjäläbd4cd032018-04-26 19:30:15 +030015398 crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
15399 crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015400 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015401 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15402
15403 /*
15404 * The initial mode needs to be set in order to keep
15405 * the atomic core happy. It wants a valid mode if the
15406 * crtc's enabled, so we do the above call.
15407 *
Daniel Vetter7800fb62016-12-19 09:24:23 +010015408 * But we don't set all the derived state fully, hence
15409 * set a flag to indicate that a full recalculation is
15410 * needed on the next commit.
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015411 */
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015412 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
Ville Syrjälä9eca68322015-09-10 18:59:10 +030015413
Ville Syrjäläa7d1b3f2017-01-26 21:50:31 +020015414 intel_crtc_compute_pixel_rate(crtc_state);
15415
Ville Syrjälä9c61de42017-07-10 22:33:47 +030015416 if (dev_priv->display.modeset_calc_cdclk) {
Ville Syrjäläd305e062017-08-30 21:57:03 +030015417 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
Ville Syrjälä9c61de42017-07-10 22:33:47 +030015418 if (WARN_ON(min_cdclk < 0))
15419 min_cdclk = 0;
15420 }
Ville Syrjäläaca1ebf2016-12-20 17:39:02 +020015421
Daniel Vetter5caa0fe2017-05-09 16:03:29 +020015422 drm_calc_timestamping_constants(&crtc->base,
15423 &crtc_state->base.adjusted_mode);
Ville Syrjälä9eca68322015-09-10 18:59:10 +030015424 update_scanline_offset(crtc);
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015425 }
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020015426
Ville Syrjäläd305e062017-08-30 21:57:03 +030015427 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
Ville Syrjälä53e9bf52017-10-24 12:52:14 +030015428 dev_priv->min_voltage_level[crtc->pipe] =
15429 crtc_state->min_voltage_level;
Ville Syrjäläaca1ebf2016-12-20 17:39:02 +020015430
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015431 intel_pipe_config_sanity_check(dev_priv, crtc_state);
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015432 }
Daniel Vetter30e984d2013-06-05 13:34:17 +020015433}
15434
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +020015435static void
15436get_encoder_power_domains(struct drm_i915_private *dev_priv)
15437{
15438 struct intel_encoder *encoder;
15439
15440 for_each_intel_encoder(&dev_priv->drm, encoder) {
15441 u64 get_domains;
15442 enum intel_display_power_domain domain;
15443
15444 if (!encoder->get_power_domains)
15445 continue;
15446
15447 get_domains = encoder->get_power_domains(encoder);
15448 for_each_power_domain(domain, get_domains)
15449 intel_display_power_get(dev_priv, domain);
15450 }
15451}
15452
Rodrigo Vividf49ec82017-11-10 16:03:19 -080015453static void intel_early_display_was(struct drm_i915_private *dev_priv)
15454{
15455 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
15456 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
15457 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
15458 DARBF_GATING_DIS);
15459
15460 if (IS_HASWELL(dev_priv)) {
15461 /*
15462 * WaRsPkgCStateDisplayPMReq:hsw
15463 * System hang if this isn't done before disabling all planes!
15464 */
15465 I915_WRITE(CHICKEN_PAR1_1,
15466 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
15467 }
15468}
15469
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015470/* Scan out the current hw modeset state,
15471 * and sanitizes it to the current state
15472 */
15473static void
Ville Syrjäläaecd36b2017-06-01 17:36:13 +030015474intel_modeset_setup_hw_state(struct drm_device *dev,
15475 struct drm_modeset_acquire_ctx *ctx)
Daniel Vetter30e984d2013-06-05 13:34:17 +020015476{
Chris Wilsonfac5e232016-07-04 11:34:36 +010015477 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter30e984d2013-06-05 13:34:17 +020015478 enum pipe pipe;
Daniel Vetter30e984d2013-06-05 13:34:17 +020015479 struct intel_crtc *crtc;
15480 struct intel_encoder *encoder;
Daniel Vetter35c95372013-07-17 06:55:04 +020015481 int i;
Daniel Vetter30e984d2013-06-05 13:34:17 +020015482
Rodrigo Vividf49ec82017-11-10 16:03:19 -080015483 intel_early_display_was(dev_priv);
Daniel Vetter30e984d2013-06-05 13:34:17 +020015484 intel_modeset_readout_hw_state(dev);
Daniel Vetter24929352012-07-02 20:28:59 +020015485
15486 /* HW state is read out, now we need to sanitize this mess. */
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +020015487 get_encoder_power_domains(dev_priv);
15488
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015489 intel_sanitize_plane_mapping(dev_priv);
15490
Damien Lespiaub2784e12014-08-05 11:29:37 +010015491 for_each_intel_encoder(dev, encoder) {
Daniel Vetter24929352012-07-02 20:28:59 +020015492 intel_sanitize_encoder(encoder);
15493 }
15494
Damien Lespiau055e3932014-08-18 13:49:10 +010015495 for_each_pipe(dev_priv, pipe) {
Ville Syrjälä98187832016-10-31 22:37:10 +020015496 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Ville Syrjäläe2af48c2016-10-31 22:37:05 +020015497
Ville Syrjäläaecd36b2017-06-01 17:36:13 +030015498 intel_sanitize_crtc(crtc, ctx);
Ander Conselvan de Oliveira6e3c9712015-01-15 14:55:25 +020015499 intel_dump_pipe_config(crtc, crtc->config,
15500 "[setup_hw_state]");
Daniel Vetter24929352012-07-02 20:28:59 +020015501 }
Daniel Vetter9a935852012-07-05 22:34:27 +020015502
Ander Conselvan de Oliveirad29b2f92015-03-20 16:18:05 +020015503 intel_modeset_update_connector_atomic_state(dev);
15504
Daniel Vetter35c95372013-07-17 06:55:04 +020015505 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15506 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15507
Maarten Lankhorst2dd66ebd2016-03-14 09:27:52 +010015508 if (!pll->on || pll->active_mask)
Daniel Vetter35c95372013-07-17 06:55:04 +020015509 continue;
15510
Lucas De Marchi72f775f2018-03-20 15:06:34 -070015511 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
15512 pll->info->name);
Daniel Vetter35c95372013-07-17 06:55:04 +020015513
Lucas De Marchiee1398b2018-03-20 15:06:33 -070015514 pll->info->funcs->disable(dev_priv, pll);
Daniel Vetter35c95372013-07-17 06:55:04 +020015515 pll->on = false;
15516 }
15517
Ville Syrjälä04548cb2017-04-21 21:14:29 +030015518 if (IS_G4X(dev_priv)) {
15519 g4x_wm_get_hw_state(dev);
15520 g4x_wm_sanitize(dev_priv);
15521 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä6eb1a682015-06-24 22:00:03 +030015522 vlv_wm_get_hw_state(dev);
Ville Syrjälä602ae832017-03-02 19:15:02 +020015523 vlv_wm_sanitize(dev_priv);
Rodrigo Vivia029fa42017-08-09 13:52:48 -070015524 } else if (INTEL_GEN(dev_priv) >= 9) {
Pradeep Bhat30789992014-11-04 17:06:45 +000015525 skl_wm_get_hw_state(dev);
Ville Syrjälä602ae832017-03-02 19:15:02 +020015526 } else if (HAS_PCH_SPLIT(dev_priv)) {
Ville Syrjälä243e6a42013-10-14 14:55:24 +030015527 ilk_wm_get_hw_state(dev);
Ville Syrjälä602ae832017-03-02 19:15:02 +020015528 }
Maarten Lankhorst292b9902015-07-13 16:30:27 +020015529
15530 for_each_intel_crtc(dev, crtc) {
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +020015531 u64 put_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +020015532
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +010015533 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
Maarten Lankhorst292b9902015-07-13 16:30:27 +020015534 if (WARN_ON(put_domains))
15535 modeset_put_power_domains(dev_priv, put_domains);
15536 }
15537 intel_display_set_init_power(dev_priv, false);
Paulo Zanoni010cf732016-01-19 11:35:48 -020015538
Imre Deak8d8c3862017-02-17 17:39:46 +020015539 intel_power_domains_verify_state(dev_priv);
15540
Paulo Zanoni010cf732016-01-19 11:35:48 -020015541 intel_fbc_init_pipe_state(dev_priv);
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015542}
Ville Syrjälä7d0bc1e2013-09-16 17:38:33 +030015543
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015544void intel_display_resume(struct drm_device *dev)
15545{
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015546 struct drm_i915_private *dev_priv = to_i915(dev);
15547 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
15548 struct drm_modeset_acquire_ctx ctx;
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015549 int ret;
Daniel Vetterf30da182013-04-11 20:22:50 +020015550
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015551 dev_priv->modeset_restore_state = NULL;
Maarten Lankhorst73974892016-08-05 23:28:27 +030015552 if (state)
15553 state->acquire_ctx = &ctx;
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015554
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015555 drm_modeset_acquire_init(&ctx, 0);
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015556
Maarten Lankhorst73974892016-08-05 23:28:27 +030015557 while (1) {
15558 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15559 if (ret != -EDEADLK)
15560 break;
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015561
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015562 drm_modeset_backoff(&ctx);
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015563 }
15564
Maarten Lankhorst73974892016-08-05 23:28:27 +030015565 if (!ret)
Maarten Lankhorst581e49f2017-01-16 10:37:38 +010015566 ret = __intel_display_resume(dev, state, &ctx);
Maarten Lankhorst73974892016-08-05 23:28:27 +030015567
Kumar, Mahesh2503a0f2017-08-17 19:15:28 +053015568 intel_enable_ipc(dev_priv);
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015569 drm_modeset_drop_locks(&ctx);
15570 drm_modeset_acquire_fini(&ctx);
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015571
Chris Wilson08536952016-10-14 13:18:18 +010015572 if (ret)
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015573 DRM_ERROR("Restoring old state failed with %i\n", ret);
Chris Wilson3c5e37f2017-01-15 12:58:25 +000015574 if (state)
15575 drm_atomic_state_put(state);
Chris Wilson2c7111d2011-03-29 10:40:27 +010015576}
15577
Chris Wilson1ebaa0b2016-06-24 14:00:15 +010015578int intel_connector_register(struct drm_connector *connector)
15579{
15580 struct intel_connector *intel_connector = to_intel_connector(connector);
15581 int ret;
15582
15583 ret = intel_backlight_device_register(intel_connector);
15584 if (ret)
15585 goto err;
15586
15587 return 0;
15588
15589err:
15590 return ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080015591}
15592
Chris Wilsonc191eca2016-06-17 11:40:33 +010015593void intel_connector_unregister(struct drm_connector *connector)
Imre Deak4932e2c2014-02-11 17:12:48 +020015594{
Chris Wilsone63d87c2016-06-17 11:40:34 +010015595 struct intel_connector *intel_connector = to_intel_connector(connector);
Imre Deak4932e2c2014-02-11 17:12:48 +020015596
Chris Wilsone63d87c2016-06-17 11:40:34 +010015597 intel_backlight_device_unregister(intel_connector);
Imre Deak4932e2c2014-02-11 17:12:48 +020015598 intel_panel_destroy_backlight(connector);
Imre Deak4932e2c2014-02-11 17:12:48 +020015599}
15600
Manasi Navare886c6b82017-10-26 14:52:00 -070015601static void intel_hpd_poll_fini(struct drm_device *dev)
15602{
15603 struct intel_connector *connector;
15604 struct drm_connector_list_iter conn_iter;
15605
Chris Wilson448aa912017-11-28 11:01:47 +000015606 /* Kill all the work that may have been queued by hpd. */
Manasi Navare886c6b82017-10-26 14:52:00 -070015607 drm_connector_list_iter_begin(dev, &conn_iter);
15608 for_each_intel_connector_iter(connector, &conn_iter) {
15609 if (connector->modeset_retry_work.func)
15610 cancel_work_sync(&connector->modeset_retry_work);
Sean Paulee5e5e72018-01-08 14:55:39 -050015611 if (connector->hdcp_shim) {
15612 cancel_delayed_work_sync(&connector->hdcp_check_work);
15613 cancel_work_sync(&connector->hdcp_prop_work);
15614 }
Manasi Navare886c6b82017-10-26 14:52:00 -070015615 }
15616 drm_connector_list_iter_end(&conn_iter);
15617}
15618
Jesse Barnes79e53942008-11-07 14:24:08 -080015619void intel_modeset_cleanup(struct drm_device *dev)
15620{
Chris Wilsonfac5e232016-07-04 11:34:36 +010015621 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes652c3932009-08-17 13:31:43 -070015622
Chris Wilsoneb955ee2017-01-23 21:29:39 +000015623 flush_work(&dev_priv->atomic_helper.free_work);
15624 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
15625
Chris Wilsondc979972016-05-10 14:10:04 +010015626 intel_disable_gt_powersave(dev_priv);
Imre Deak2eb52522014-11-19 15:30:05 +020015627
Daniel Vetterfd0c0642013-04-24 11:13:35 +020015628 /*
15629 * Interrupts and polling as the first thing to avoid creating havoc.
Imre Deak2eb52522014-11-19 15:30:05 +020015630 * Too much stuff here (turning of connectors, ...) would
Daniel Vetterfd0c0642013-04-24 11:13:35 +020015631 * experience fancy races otherwise.
15632 */
Daniel Vetter2aeb7d32014-09-30 10:56:43 +020015633 intel_irq_uninstall(dev_priv);
Jesse Barneseb21b922014-06-20 11:57:33 -070015634
Daniel Vetterfd0c0642013-04-24 11:13:35 +020015635 /*
15636 * Due to the hpd irq storm handling the hotplug work can re-arm the
15637 * poll handlers. Hence disable polling after hpd handling is shut down.
15638 */
Manasi Navare886c6b82017-10-26 14:52:00 -070015639 intel_hpd_poll_fini(dev);
Daniel Vetterfd0c0642013-04-24 11:13:35 +020015640
Daniel Vetter4f256d82017-07-15 00:46:55 +020015641 /* poll work can call into fbdev, hence clean that up afterwards */
15642 intel_fbdev_fini(dev_priv);
15643
Jesse Barnes723bfd72010-10-07 16:01:13 -070015644 intel_unregister_dsm_handler();
15645
Paulo Zanonic937ab3e52016-01-19 11:35:46 -020015646 intel_fbc_global_disable(dev_priv);
Kristian Høgsberg69341a52009-11-11 12:19:17 -050015647
Chris Wilson1630fe72011-07-08 12:22:42 +010015648 /* flush any delayed tasks or pending work */
15649 flush_scheduled_work();
15650
Jesse Barnes79e53942008-11-07 14:24:08 -080015651 drm_mode_config_cleanup(dev);
Daniel Vetter4d7bb012012-12-18 15:24:37 +010015652
Chris Wilson1ee8da62016-05-12 12:43:23 +010015653 intel_cleanup_overlay(dev_priv);
Imre Deakae484342014-03-31 15:10:44 +030015654
Chris Wilsondc979972016-05-10 14:10:04 +010015655 intel_cleanup_gt_powersave(dev_priv);
Daniel Vetterf5949142016-01-13 11:55:28 +010015656
Tvrtko Ursulin40196442016-12-01 14:16:42 +000015657 intel_teardown_gmbus(dev_priv);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020015658
15659 destroy_workqueue(dev_priv->modeset_wq);
Jesse Barnes79e53942008-11-07 14:24:08 -080015660}
15661
Chris Wilsondf0e9242010-09-09 16:20:55 +010015662void intel_connector_attach_encoder(struct intel_connector *connector,
15663 struct intel_encoder *encoder)
15664{
15665 connector->encoder = encoder;
15666 drm_mode_connector_attach_encoder(&connector->base,
15667 &encoder->base);
Jesse Barnes79e53942008-11-07 14:24:08 -080015668}
Dave Airlie28d52042009-09-21 14:33:58 +100015669
15670/*
15671 * set vga decode state - true == enable VGA decode
15672 */
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000015673int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
Dave Airlie28d52042009-09-21 14:33:58 +100015674{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000015675 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
Dave Airlie28d52042009-09-21 14:33:58 +100015676 u16 gmch_ctrl;
15677
Chris Wilson75fa0412014-02-07 18:37:02 -020015678 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15679 DRM_ERROR("failed to read control word\n");
15680 return -EIO;
15681 }
15682
Chris Wilsonc0cc8a52014-02-07 18:37:03 -020015683 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15684 return 0;
15685
Dave Airlie28d52042009-09-21 14:33:58 +100015686 if (state)
15687 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
15688 else
15689 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
Chris Wilson75fa0412014-02-07 18:37:02 -020015690
15691 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
15692 DRM_ERROR("failed to write control word\n");
15693 return -EIO;
15694 }
15695
Dave Airlie28d52042009-09-21 14:33:58 +100015696 return 0;
15697}
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015698
Chris Wilson98a2f412016-10-12 10:05:18 +010015699#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
15700
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015701struct intel_display_error_state {
Paulo Zanoniff57f1b2013-05-03 12:15:37 -030015702
15703 u32 power_well_driver;
15704
Chris Wilson63b66e52013-08-08 15:12:06 +020015705 int num_transcoders;
15706
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015707 struct intel_cursor_error_state {
15708 u32 control;
15709 u32 position;
15710 u32 base;
15711 u32 size;
Damien Lespiau52331302012-08-15 19:23:25 +010015712 } cursor[I915_MAX_PIPES];
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015713
15714 struct intel_pipe_error_state {
Imre Deakddf9c532013-11-27 22:02:02 +020015715 bool power_domain_on;
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015716 u32 source;
Imre Deakf301b1e12014-04-18 15:55:04 +030015717 u32 stat;
Damien Lespiau52331302012-08-15 19:23:25 +010015718 } pipe[I915_MAX_PIPES];
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015719
15720 struct intel_plane_error_state {
15721 u32 control;
15722 u32 stride;
15723 u32 size;
15724 u32 pos;
15725 u32 addr;
15726 u32 surface;
15727 u32 tile_offset;
Damien Lespiau52331302012-08-15 19:23:25 +010015728 } plane[I915_MAX_PIPES];
Chris Wilson63b66e52013-08-08 15:12:06 +020015729
15730 struct intel_transcoder_error_state {
Imre Deakddf9c532013-11-27 22:02:02 +020015731 bool power_domain_on;
Chris Wilson63b66e52013-08-08 15:12:06 +020015732 enum transcoder cpu_transcoder;
15733
15734 u32 conf;
15735
15736 u32 htotal;
15737 u32 hblank;
15738 u32 hsync;
15739 u32 vtotal;
15740 u32 vblank;
15741 u32 vsync;
15742 } transcoder[4];
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015743};
15744
15745struct intel_display_error_state *
Chris Wilsonc0336662016-05-06 15:40:21 +010015746intel_display_capture_error_state(struct drm_i915_private *dev_priv)
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015747{
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015748 struct intel_display_error_state *error;
Chris Wilson63b66e52013-08-08 15:12:06 +020015749 int transcoders[] = {
15750 TRANSCODER_A,
15751 TRANSCODER_B,
15752 TRANSCODER_C,
15753 TRANSCODER_EDP,
15754 };
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015755 int i;
15756
Chris Wilsonc0336662016-05-06 15:40:21 +010015757 if (INTEL_INFO(dev_priv)->num_pipes == 0)
Chris Wilson63b66e52013-08-08 15:12:06 +020015758 return NULL;
15759
Paulo Zanoni9d1cb912013-11-01 13:32:08 -020015760 error = kzalloc(sizeof(*error), GFP_ATOMIC);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015761 if (error == NULL)
15762 return NULL;
15763
Chris Wilsonc0336662016-05-06 15:40:21 +010015764 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Imre Deak9c3a16c2017-08-14 18:15:30 +030015765 error->power_well_driver =
15766 I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL));
Paulo Zanoniff57f1b2013-05-03 12:15:37 -030015767
Damien Lespiau055e3932014-08-18 13:49:10 +010015768 for_each_pipe(dev_priv, i) {
Imre Deakddf9c532013-11-27 22:02:02 +020015769 error->pipe[i].power_domain_on =
Daniel Vetterf458ebb2014-09-30 10:56:39 +020015770 __intel_display_power_is_enabled(dev_priv,
15771 POWER_DOMAIN_PIPE(i));
Imre Deakddf9c532013-11-27 22:02:02 +020015772 if (!error->pipe[i].power_domain_on)
Paulo Zanoni9d1cb912013-11-01 13:32:08 -020015773 continue;
15774
Ville Syrjälä5efb3e22014-04-09 13:28:53 +030015775 error->cursor[i].control = I915_READ(CURCNTR(i));
15776 error->cursor[i].position = I915_READ(CURPOS(i));
15777 error->cursor[i].base = I915_READ(CURBASE(i));
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015778
15779 error->plane[i].control = I915_READ(DSPCNTR(i));
15780 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
Chris Wilsonc0336662016-05-06 15:40:21 +010015781 if (INTEL_GEN(dev_priv) <= 3) {
Paulo Zanoni51889b32013-03-06 20:03:13 -030015782 error->plane[i].size = I915_READ(DSPSIZE(i));
Paulo Zanoni80ca3782013-03-22 14:20:57 -030015783 error->plane[i].pos = I915_READ(DSPPOS(i));
15784 }
Chris Wilsonc0336662016-05-06 15:40:21 +010015785 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
Paulo Zanonica291362013-03-06 20:03:14 -030015786 error->plane[i].addr = I915_READ(DSPADDR(i));
Chris Wilsonc0336662016-05-06 15:40:21 +010015787 if (INTEL_GEN(dev_priv) >= 4) {
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015788 error->plane[i].surface = I915_READ(DSPSURF(i));
15789 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
15790 }
15791
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015792 error->pipe[i].source = I915_READ(PIPESRC(i));
Imre Deakf301b1e12014-04-18 15:55:04 +030015793
Chris Wilsonc0336662016-05-06 15:40:21 +010015794 if (HAS_GMCH_DISPLAY(dev_priv))
Imre Deakf301b1e12014-04-18 15:55:04 +030015795 error->pipe[i].stat = I915_READ(PIPESTAT(i));
Chris Wilson63b66e52013-08-08 15:12:06 +020015796 }
15797
Jani Nikula4d1de972016-03-18 17:05:42 +020015798 /* Note: this does not include DSI transcoders. */
Chris Wilsonc0336662016-05-06 15:40:21 +010015799 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +030015800 if (HAS_DDI(dev_priv))
Chris Wilson63b66e52013-08-08 15:12:06 +020015801 error->num_transcoders++; /* Account for eDP. */
15802
15803 for (i = 0; i < error->num_transcoders; i++) {
15804 enum transcoder cpu_transcoder = transcoders[i];
15805
Imre Deakddf9c532013-11-27 22:02:02 +020015806 error->transcoder[i].power_domain_on =
Daniel Vetterf458ebb2014-09-30 10:56:39 +020015807 __intel_display_power_is_enabled(dev_priv,
Paulo Zanoni38cc1da2013-12-20 15:09:41 -020015808 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
Imre Deakddf9c532013-11-27 22:02:02 +020015809 if (!error->transcoder[i].power_domain_on)
Paulo Zanoni9d1cb912013-11-01 13:32:08 -020015810 continue;
15811
Chris Wilson63b66e52013-08-08 15:12:06 +020015812 error->transcoder[i].cpu_transcoder = cpu_transcoder;
15813
15814 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
15815 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
15816 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
15817 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
15818 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
15819 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
15820 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015821 }
15822
15823 return error;
15824}
15825
/* Shorthand for appending formatted text to the error-state buffer @e. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
15827
/**
 * intel_display_print_error_state - dump a captured display state snapshot
 * @m: destination error-state buffer (also supplies the i915 device)
 * @error: snapshot from intel_display_capture_error_state(); may be NULL,
 *	   in which case nothing is printed
 *
 * The gen/platform conditions below mirror the ones used at capture time,
 * so only registers that were actually read are printed.  Pipes and
 * transcoders whose power domain was off at capture time still get their
 * "Power: off" line, but the register fields printed for them are the
 * zeroes left by kzalloc().
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			/* DSPSIZE/DSPPOS were only captured on gen2/3. */
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	/* num_transcoders was set at capture time (pipes, +1 for eDP). */
	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
Chris Wilson98a2f412016-10-12 10:05:18 +010015883
15884#endif