blob: 15d758cd0c1b69312cf311ea442c8a6008d164c3 [file] [log] [blame]
Jesse Barnes79e53942008-11-07 14:24:08 -08001/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
Jesse Barnesc1c7af62009-09-10 15:28:03 -070027#include <linux/module.h>
28#include <linux/input.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080029#include <linux/i2c.h>
Shaohua Li7662c8b2009-06-26 11:23:55 +080030#include <linux/kernel.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Jesse Barnes9cce37f2010-08-13 15:11:26 -070032#include <linux/vgaarb.h>
Wu Fengguange0dac652011-09-05 14:25:34 +080033#include <drm/drm_edid.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080034#include "intel_drv.h"
Chris Wilson5d723d72016-08-04 16:32:35 +010035#include "intel_frontbuffer.h"
David Howells760285e2012-10-02 18:01:07 +010036#include <drm/i915_drm.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080037#include "i915_drv.h"
Chris Wilson57822dc2017-02-22 11:40:48 +000038#include "i915_gem_clflush.h"
Imre Deakdb18b6a2016-03-24 12:41:40 +020039#include "intel_dsi.h"
Jesse Barnese5510fa2010-07-01 16:48:37 -070040#include "i915_trace.h"
Xi Ruoyao319c1d42015-03-12 20:16:32 +080041#include <drm/drm_atomic.h>
Matt Roperc196e1d2015-01-21 16:35:48 -080042#include <drm/drm_atomic_helper.h>
David Howells760285e2012-10-02 18:01:07 +010043#include <drm/drm_dp_helper.h>
Matt Roper465c1202014-05-29 08:06:54 -070044#include <drm/drm_plane_helper.h>
Daniel Vetterfcd70cd2019-01-17 22:03:34 +010045#include <drm/drm_probe_helper.h>
Matt Roper465c1202014-05-29 08:06:54 -070046#include <drm/drm_rect.h>
Daniel Vetter72fdb402018-09-05 15:57:11 +020047#include <drm/drm_atomic_uapi.h>
Lu Baoludaedaa32018-11-12 14:40:08 +080048#include <linux/intel-iommu.h>
Alex Goinsfd8e0582015-11-25 18:43:38 -080049#include <linux/reservation.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080050
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Modifiers advertised alongside the i9xx primary plane formats */
static const uint64_t i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only take linear surfaces */
static const uint64_t cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
84
Jesse Barnesf1f644d2013-06-27 00:39:25 +030085static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020086 struct intel_crtc_state *pipe_config);
Ville Syrjälä18442d02013-09-13 16:00:08 +030087static void ironlake_pch_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020088 struct intel_crtc_state *pipe_config);
Jesse Barnesf1f644d2013-06-27 00:39:25 +030089
Chris Wilson24dbf512017-02-15 10:59:18 +000090static int intel_framebuffer_init(struct intel_framebuffer *ifb,
91 struct drm_i915_gem_object *obj,
92 struct drm_mode_fb_cmd2 *mode_cmd);
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +020093static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
94static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
Maarten Lankhorst4c354752018-10-11 12:04:49 +020095static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
96 const struct intel_link_m_n *m_n,
97 const struct intel_link_m_n *m2_n2);
Maarten Lankhorstfdf73512018-10-04 11:45:52 +020098static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
99static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
100static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
101static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
Ville Syrjäläd288f652014-10-28 13:20:22 +0200102static void vlv_prepare_pll(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +0200103 const struct intel_crtc_state *pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +0200104static void chv_prepare_pll(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +0200105 const struct intel_crtc_state *pipe_config);
Daniel Vetter5a21b662016-05-24 17:13:53 +0200106static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
107static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
Nabendu Maiti1c74eea2016-11-29 11:23:14 +0530108static void intel_crtc_init_scalers(struct intel_crtc *crtc,
109 struct intel_crtc_state *crtc_state);
Maarten Lankhorstb2562712018-10-04 11:45:53 +0200110static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
111static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
112static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
Ville Syrjäläaecd36b2017-06-01 17:36:13 +0300113static void intel_modeset_setup_hw_state(struct drm_device *dev,
114 struct drm_modeset_acquire_ctx *ctx);
Ville Syrjälä2622a082016-03-09 19:07:26 +0200115static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
Damien Lespiaue7457a92013-08-08 22:28:59 +0100116
/*
 * Valid min/max range for each DPLL divisor, plus the dot-clock
 * threshold (dot_limit) that selects between the slow and fast
 * post-divider (p2) values.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
Jesse Barnes79e53942008-11-07 14:24:08 -0800127
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	/* HPLL VCO rates in MHz, indexed by the 2-bit fuse SKU field */
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	/* MHz -> kHz */
	return vco_freq[hpll_freq] * 1000;
}
141
/*
 * Read the CCK clock-control register @reg and derive the resulting
 * clock rate from @ref_freq: ref_freq * 2 / (divider + 1), rounded to
 * closest. @name is used only in the "change in progress" warning.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	/* the status field mirrors the divider unless an update is pending */
	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
160
/*
 * Like vlv_get_cck_clock(), but uses the HPLL VCO rate as the
 * reference clock, looking it up (and caching it in dev_priv) on
 * first use.
 */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}
170
/*
 * Refresh the cached CZ clock rate. Only VLV/CHV have a CZ clock;
 * this is a no-op on all other platforms.
 */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
181
Chris Wilson021357a2010-09-07 20:54:59 +0100182static inline u32 /* units of 100MHz */
Ville Syrjälä21a727b2016-02-17 21:41:10 +0200183intel_fdi_link_freq(struct drm_i915_private *dev_priv,
184 const struct intel_crtc_state *pipe_config)
Chris Wilson021357a2010-09-07 20:54:59 +0100185{
Ville Syrjälä21a727b2016-02-17 21:41:10 +0200186 if (HAS_DDI(dev_priv))
187 return pipe_config->port_clock; /* SPLL */
Ville Syrjäläe3b247d2016-02-17 21:41:09 +0200188 else
Chris Wilson58ecd9d2017-11-05 13:49:05 +0000189 return dev_priv->fdi_pll_freq;
Chris Wilson021357a2010-09-07 20:54:59 +0100190}
191
/* DPLL divisor limits for gen2 (i8xx), per output type */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
Eric Anholt273e27c2011-03-30 13:01:10 -0700230
/* DPLL divisor limits for gen3/4-era i9xx, per output type */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
256
Eric Anholt273e27c2011-03-30 13:01:10 -0700257
/* DPLL divisor limits for G4x, per output type */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* .dot_limit = 0 means the fast p2 is never selected (slow == fast anyway) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
313
/* DPLL divisor limits for Pineview, per output type */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
341
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
412
static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries 22 fractional bits on CHV */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
456
Vidya Srinivasc4a4efa2018-04-09 09:11:09 +0530457static void
458skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
459{
Vidya Srinivasc4a4efa2018-04-09 09:11:09 +0530460 if (enable)
461 I915_WRITE(CLKGATE_DIS_PSL(pipe),
462 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
463 else
464 I915_WRITE(CLKGATE_DIS_PSL(pipe),
465 I915_READ(CLKGATE_DIS_PSL(pipe)) &
466 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
467}
468
/* Thin wrapper: does this crtc_state require a full modeset? */
static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
474
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* bail (returning 0) rather than dividing by zero below */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
495
/* Effective i9xx loopback divider: M = 5*(m1+2) + (m2+2) */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
500
/* i9xx flavour of the dpll helpers documented above pnv_calc_dpll_params() */
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* n is programmed as n-2, hence the +2; guard the division */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
512
/*
 * VLV dpll parameter calculation; the fast clock is 5x the pipe
 * clock, hence the divide-by-5 on the returned rate.
 */
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
524
/*
 * CHV dpll parameter calculation. m2 (and thus m) carries 22
 * fractional bits (see intel_limits_chv), hence the 64-bit multiply
 * and the n << 22 divisor. Pipe clock is the fast dot clock / 5.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
537
/* Reject the candidate divisors with an (optionally logged) reason */
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is only required on platforms with separate m1/m2 dividers */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT-GLK limit tables don't define overall p and m ranges */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
580
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300581static int
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300582i9xx_select_p2_div(const struct intel_limit *limit,
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300583 const struct intel_crtc_state *crtc_state,
584 int target)
Jesse Barnes79e53942008-11-07 14:24:08 -0800585{
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300586 struct drm_device *dev = crtc_state->base.crtc->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -0800587
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +0300588 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Jesse Barnes79e53942008-11-07 14:24:08 -0800589 /*
Daniel Vettera210b022012-11-26 17:22:08 +0100590 * For LVDS just rely on its current settings for dual-channel.
591 * We haven't figured out how to reliably set up different
592 * single/dual channel state, if we even can.
Jesse Barnes79e53942008-11-07 14:24:08 -0800593 */
Daniel Vetter1974cad2012-11-26 17:22:09 +0100594 if (intel_is_dual_link_lvds(dev))
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300595 return limit->p2.p2_fast;
Jesse Barnes79e53942008-11-07 14:24:08 -0800596 else
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300597 return limit->p2.p2_slow;
Jesse Barnes79e53942008-11-07 14:24:08 -0800598 } else {
599 if (target < limit->p2.dot_limit)
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300600 return limit->p2.p2_slow;
Jesse Barnes79e53942008-11-07 14:24:08 -0800601 else
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300602 return limit->p2.p2_fast;
Jesse Barnes79e53942008-11-07 14:24:08 -0800603 }
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300604}
605
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
/*
 * i9xx PLL divisor search: exhaustively walk the legal m1/m2/n/p1 ranges
 * (p2 was pre-selected from the output type) and keep the combination whose
 * resulting dot clock is closest to @target.  If @match_clock is non-NULL,
 * only combinations whose P divider equals match_clock->p are considered
 * (LVDS downclocking).  Returns true iff at least one valid combination was
 * found; the winner is left in *best_clock.
 *
 * NOTE: the iteration order and the strict "<" comparison determine which
 * divisor set wins ties, so the loop structure must not be reordered.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;	/* initial "error": anything valid beats this */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/*
			 * intel_PLL_is_valid() rejects m1 <= m2 on these
			 * platforms, and m2 only grows from here — bail out
			 * of the m2 loop early.
			 */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* keep the candidate closest to target */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err shrank from its initial value iff some candidate was stored */
	return (err != target);
}
663
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
/*
 * Pineview PLL divisor search: the same exhaustive m1/m2/n/p1 walk as
 * i9xx_find_best_dpll(), but using the Pineview clock equation
 * (pnv_calc_dpll_params()) and without the early m2 >= m1 cutoff.
 * Returns true iff a valid divisor set was found (stored in *best_clock).
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;	/* initial "error": anything valid beats this */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* honor LVDS downclocking constraint */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
719
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
/*
 * G4x PLL divisor search.  Unlike the i9xx/pnv variants this does not keep
 * the single globally-closest match: candidates must beat err_most (about
 * 0.585% of @target), smaller n is preferred (max_n is clamped down to each
 * accepted candidate's n), and @match_clock is not consulted at all.
 * Returns true iff a suitable divisor set was found (left in *best_clock).
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* never revisit larger n values */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
Ma Lingd4906092009-03-18 20:13:27 +0800778
Imre Deakd5dd62b2015-03-17 11:40:03 +0200779/*
780 * Check if the calculated PLL configuration is more optimal compared to the
781 * best configuration and error found so far. Return the calculated error.
782 */
783static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300784 const struct dpll *calculated_clock,
785 const struct dpll *best_clock,
Imre Deakd5dd62b2015-03-17 11:40:03 +0200786 unsigned int best_error_ppm,
787 unsigned int *error_ppm)
788{
Imre Deak9ca3ba02015-03-17 11:40:05 +0200789 /*
790 * For CHV ignore the error and consider only the P value.
791 * Prefer a bigger P value based on HW requirements.
792 */
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +0100793 if (IS_CHERRYVIEW(to_i915(dev))) {
Imre Deak9ca3ba02015-03-17 11:40:05 +0200794 *error_ppm = 0;
795
796 return calculated_clock->p > best_clock->p;
797 }
798
Imre Deak24be4e42015-03-17 11:40:04 +0200799 if (WARN_ON_ONCE(!target_freq))
800 return false;
801
Imre Deakd5dd62b2015-03-17 11:40:03 +0200802 *error_ppm = div_u64(1000000ULL *
803 abs(target_freq - calculated_clock->dot),
804 target_freq);
805 /*
806 * Prefer a better P value over a better (smaller) error if the error
807 * is small. Ensure this preference for future configurations too by
808 * setting the error to 0.
809 */
810 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
811 *error_ppm = 0;
812
813 return true;
814 }
815
816 return *error_ppm + 10 < best_error_ppm;
817}
818
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
/*
 * VLV PLL divisor search.  Works on the "fast clock" (5x the target dot
 * clock).  Rather than iterating m2, it is computed directly from each
 * candidate n/p1/p2/m1 combination; candidates are accepted or rejected by
 * vlv_PLL_is_optimal() based on their error in ppm.  @match_clock is not
 * used on this path.  Returns true iff a valid divisor set was found
 * (left in *best_clock).
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 so the computed dot clock lands on target */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
Keith Packarda4fc5ed2009-04-07 16:16:42 -0700878
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300884static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300885chv_find_best_dpll(const struct intel_limit *limit,
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200886 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300887 int target, int refclk, struct dpll *match_clock,
888 struct dpll *best_clock)
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300889{
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200890 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ander Conselvan de Oliveiraa919ff12014-10-20 13:46:43 +0300891 struct drm_device *dev = crtc->base.dev;
Imre Deak9ca3ba02015-03-17 11:40:05 +0200892 unsigned int best_error_ppm;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300893 struct dpll clock;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300894 uint64_t m2;
895 int found = false;
896
897 memset(best_clock, 0, sizeof(*best_clock));
Imre Deak9ca3ba02015-03-17 11:40:05 +0200898 best_error_ppm = 1000000;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300899
900 /*
901 * Based on hardware doc, the n always set to 1, and m1 always
902 * set to 2. If requires to support 200Mhz refclk, we need to
903 * revisit this because n may not 1 anymore.
904 */
905 clock.n = 1, clock.m1 = 2;
906 target *= 5; /* fast clock */
907
908 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
909 for (clock.p2 = limit->p2.p2_fast;
910 clock.p2 >= limit->p2.p2_slow;
911 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
Imre Deak9ca3ba02015-03-17 11:40:05 +0200912 unsigned int error_ppm;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300913
914 clock.p = clock.p1 * clock.p2;
915
916 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
917 clock.n) << 22, refclk * clock.m1);
918
919 if (m2 > INT_MAX/clock.m1)
920 continue;
921
922 clock.m2 = m2;
923
Imre Deakdccbea32015-06-22 23:35:51 +0300924 chv_calc_dpll_params(refclk, &clock);
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300925
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100926 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300927 continue;
928
Imre Deak9ca3ba02015-03-17 11:40:05 +0200929 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
930 best_error_ppm, &error_ppm))
931 continue;
932
933 *best_clock = clock;
934 best_error_ppm = error_ppm;
935 found = true;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300936 }
937 }
938
939 return found;
940}
941
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200942bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300943 struct dpll *best_clock)
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200944{
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200945 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300946 const struct intel_limit *limit = &intel_limits_bxt;
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200947
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200948 return chv_find_best_dpll(limit, crtc_state,
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200949 target_clock, refclk, NULL, best_clock);
950}
951
Ville Syrjälä525b9312016-10-31 22:37:02 +0200952bool intel_crtc_active(struct intel_crtc *crtc)
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300953{
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300954 /* Be paranoid as we can arrive here with only partial
955 * state retrieved from the hardware during setup.
956 *
Damien Lespiau241bfc32013-09-25 16:45:37 +0100957 * We can ditch the adjusted_mode.crtc_clock check as soon
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300958 * as Haswell has gained clock readout/fastboot support.
959 *
Ville Syrjäläcd30fbc2018-05-25 21:50:40 +0300960 * We can ditch the crtc->primary->state->fb check as soon as we can
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300961 * properly reconstruct framebuffers.
Matt Roperc3d1f432015-03-09 10:19:23 -0700962 *
963 * FIXME: The intel_crtc->active here should be switched to
964 * crtc->state->active once we have proper CRTC states wired up
965 * for atomic.
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300966 */
Ville Syrjälä525b9312016-10-31 22:37:02 +0200967 return crtc->active && crtc->base.primary->state->fb &&
968 crtc->config->base.adjusted_mode.crtc_clock;
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300969}
970
Paulo Zanonia5c961d2012-10-24 15:59:34 -0200971enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
972 enum pipe pipe)
973{
Ville Syrjälä98187832016-10-31 22:37:10 +0200974 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Paulo Zanonia5c961d2012-10-24 15:59:34 -0200975
Ville Syrjäläe2af48c2016-10-31 22:37:05 +0200976 return crtc->config->cpu_transcoder;
Paulo Zanonia5c961d2012-10-24 15:59:34 -0200977}
978
Ville Syrjälä8fedd642017-11-29 17:37:30 +0200979static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
980 enum pipe pipe)
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300981{
Ville Syrjäläf0f59a02015-11-18 15:33:26 +0200982 i915_reg_t reg = PIPEDSL(pipe);
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300983 u32 line1, line2;
984 u32 line_mask;
985
Lucas De Marchicf819ef2018-12-12 10:10:43 -0800986 if (IS_GEN(dev_priv, 2))
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300987 line_mask = DSL_LINEMASK_GEN2;
988 else
989 line_mask = DSL_LINEMASK_GEN3;
990
991 line1 = I915_READ(reg) & line_mask;
Daniel Vetter6adfb1e2015-07-07 09:10:40 +0200992 msleep(5);
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300993 line2 = I915_READ(reg) & line_mask;
994
Ville Syrjälä8fedd642017-11-29 17:37:30 +0200995 return line1 != line2;
996}
997
/*
 * Poll (up to 100 ms) until the pipe's scanline counter is moving
 * (@state == true) or has stopped (@state == false); logs an error on
 * timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1008
/* Wait (up to 100 ms) for @crtc's scanline counter to stop moving. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1013
/* Wait (up to 100 ms) for @crtc's scanline counter to start moving. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1018
/*
 * Wait for the pipe described by @old_crtc_state to go off.  On gen4+ the
 * PIPECONF active status bit is polled (up to 100 ms); earlier platforms
 * lack that bit, so we instead wait for the pipe's scanline counter to
 * stop moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1038
Jesse Barnesb24e7172011-01-04 15:09:30 -08001039/* Only for pre-ILK configs */
Daniel Vetter55607e82013-06-16 21:42:39 +02001040void assert_pll(struct drm_i915_private *dev_priv,
1041 enum pipe pipe, bool state)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001042{
Jesse Barnesb24e7172011-01-04 15:09:30 -08001043 u32 val;
1044 bool cur_state;
1045
Ville Syrjälä649636e2015-09-22 19:50:01 +03001046 val = I915_READ(DPLL(pipe));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001047 cur_state = !!(val & DPLL_VCO_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001048 I915_STATE_WARN(cur_state != state,
Jesse Barnesb24e7172011-01-04 15:09:30 -08001049 "PLL state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001050 onoff(state), onoff(cur_state));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001051}
Jesse Barnesb24e7172011-01-04 15:09:30 -08001052
Jani Nikula23538ef2013-08-27 15:12:22 +03001053/* XXX: the dsi pll is shared between MIPI DSI ports */
Lionel Landwerlin8563b1e2016-03-16 10:57:14 +00001054void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
Jani Nikula23538ef2013-08-27 15:12:22 +03001055{
1056 u32 val;
1057 bool cur_state;
1058
Ville Syrjäläa5805162015-05-26 20:42:30 +03001059 mutex_lock(&dev_priv->sb_lock);
Jani Nikula23538ef2013-08-27 15:12:22 +03001060 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
Ville Syrjäläa5805162015-05-26 20:42:30 +03001061 mutex_unlock(&dev_priv->sb_lock);
Jani Nikula23538ef2013-08-27 15:12:22 +03001062
1063 cur_state = val & DSI_PLL_VCO_EN;
Rob Clarke2c719b2014-12-15 13:56:32 -05001064 I915_STATE_WARN(cur_state != state,
Jani Nikula23538ef2013-08-27 15:12:22 +03001065 "DSI PLL state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001066 onoff(state), onoff(cur_state));
Jani Nikula23538ef2013-08-27 15:12:22 +03001067}
Jani Nikula23538ef2013-08-27 15:12:22 +03001068
Jesse Barnes040484a2011-01-03 12:14:26 -08001069static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1070 enum pipe pipe, bool state)
1071{
Jesse Barnes040484a2011-01-03 12:14:26 -08001072 bool cur_state;
Paulo Zanoniad80a812012-10-24 16:06:19 -02001073 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1074 pipe);
Jesse Barnes040484a2011-01-03 12:14:26 -08001075
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001076 if (HAS_DDI(dev_priv)) {
Paulo Zanoniaffa9352012-11-23 15:30:39 -02001077 /* DDI does not have a specific FDI_TX register */
Ville Syrjälä649636e2015-09-22 19:50:01 +03001078 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
Paulo Zanoniad80a812012-10-24 16:06:19 -02001079 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001080 } else {
Ville Syrjälä649636e2015-09-22 19:50:01 +03001081 u32 val = I915_READ(FDI_TX_CTL(pipe));
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001082 cur_state = !!(val & FDI_TX_ENABLE);
1083 }
Rob Clarke2c719b2014-12-15 13:56:32 -05001084 I915_STATE_WARN(cur_state != state,
Jesse Barnes040484a2011-01-03 12:14:26 -08001085 "FDI TX state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001086 onoff(state), onoff(cur_state));
Jesse Barnes040484a2011-01-03 12:14:26 -08001087}
1088#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1089#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1090
1091static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1092 enum pipe pipe, bool state)
1093{
Jesse Barnes040484a2011-01-03 12:14:26 -08001094 u32 val;
1095 bool cur_state;
1096
Ville Syrjälä649636e2015-09-22 19:50:01 +03001097 val = I915_READ(FDI_RX_CTL(pipe));
Paulo Zanonid63fa0d2012-11-20 13:27:35 -02001098 cur_state = !!(val & FDI_RX_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001099 I915_STATE_WARN(cur_state != state,
Jesse Barnes040484a2011-01-03 12:14:26 -08001100 "FDI RX state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001101 onoff(state), onoff(cur_state));
Jesse Barnes040484a2011-01-03 12:14:26 -08001102}
1103#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1104#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1105
1106static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1107 enum pipe pipe)
1108{
Jesse Barnes040484a2011-01-03 12:14:26 -08001109 u32 val;
1110
1111 /* ILK FDI PLL is always enabled */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001112 if (IS_GEN(dev_priv, 5))
Jesse Barnes040484a2011-01-03 12:14:26 -08001113 return;
1114
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001115 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001116 if (HAS_DDI(dev_priv))
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001117 return;
1118
Ville Syrjälä649636e2015-09-22 19:50:01 +03001119 val = I915_READ(FDI_TX_CTL(pipe));
Rob Clarke2c719b2014-12-15 13:56:32 -05001120 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
Jesse Barnes040484a2011-01-03 12:14:26 -08001121}
1122
Daniel Vetter55607e82013-06-16 21:42:39 +02001123void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1124 enum pipe pipe, bool state)
Jesse Barnes040484a2011-01-03 12:14:26 -08001125{
Jesse Barnes040484a2011-01-03 12:14:26 -08001126 u32 val;
Daniel Vetter55607e82013-06-16 21:42:39 +02001127 bool cur_state;
Jesse Barnes040484a2011-01-03 12:14:26 -08001128
Ville Syrjälä649636e2015-09-22 19:50:01 +03001129 val = I915_READ(FDI_RX_CTL(pipe));
Daniel Vetter55607e82013-06-16 21:42:39 +02001130 cur_state = !!(val & FDI_RX_PLL_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001131 I915_STATE_WARN(cur_state != state,
Daniel Vetter55607e82013-06-16 21:42:39 +02001132 "FDI RX PLL assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001133 onoff(state), onoff(cur_state));
Jesse Barnes040484a2011-01-03 12:14:26 -08001134}
1135
/*
 * Assert that the panel power sequencer registers are not write-locked for
 * a powered-on panel on @pipe.  Determines which pipe the panel currently
 * lives on (via the PPS port-select bits and the LVDS/DP port registers)
 * and warns if that pipe matches @pipe while PP_CONTROL still reports the
 * registers as locked.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* not implemented for DDI platforms */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* resolve the panel's pipe from the selected port */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* pre-PCH-split platforms only drive the panel over LVDS */
		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* a panel that is off, or whose regs are unlocked, is fine */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1192
/*
 * Assert that @pipe is enabled/disabled as expected (@state).  PIPECONF is
 * read under the transcoder's power domain; if the power well cannot be
 * grabbed the pipe is considered disabled.  On 830 both pipes are always
 * kept enabled, so the expected state is forced to true there.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		/* balance the conditional power-domain grab above */
		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1219
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001220static void assert_plane(struct intel_plane *plane, bool state)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001221{
Ville Syrjäläeade6c82018-01-30 22:38:03 +02001222 enum pipe pipe;
1223 bool cur_state;
1224
1225 cur_state = plane->get_hw_state(plane, &pipe);
Jesse Barnesb24e7172011-01-04 15:09:30 -08001226
Rob Clarke2c719b2014-12-15 13:56:32 -05001227 I915_STATE_WARN(cur_state != state,
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001228 "%s assertion failure (expected %s, current %s)\n",
1229 plane->base.name, onoff(state), onoff(cur_state));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001230}
1231
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001232#define assert_plane_enabled(p) assert_plane(p, true)
1233#define assert_plane_disabled(p) assert_plane(p, false)
Chris Wilson931872f2012-01-16 23:01:13 +00001234
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001235static void assert_planes_disabled(struct intel_crtc *crtc)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001236{
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001237 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1238 struct intel_plane *plane;
Jesse Barnesb24e7172011-01-04 15:09:30 -08001239
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001240 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1241 assert_plane_disabled(plane);
Jesse Barnes19332d72013-03-28 09:55:38 -07001242}
1243
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	/*
	 * drm_crtc_vblank_get() returning 0 means vblank interrupts are
	 * (or can be) live, which they must not be here.  Drop the
	 * reference we just took so the check itself has no side effect.
	 */
	int ret = drm_crtc_vblank_get(crtc);

	if (I915_STATE_WARN_ON(ret == 0))
		drm_crtc_vblank_put(crtc);
}
1249
Ander Conselvan de Oliveira7abd4b32016-03-08 17:46:15 +02001250void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1251 enum pipe pipe)
Jesse Barnes92f25842011-01-04 15:09:34 -08001252{
Jesse Barnes92f25842011-01-04 15:09:34 -08001253 u32 val;
1254 bool enabled;
1255
Ville Syrjälä649636e2015-09-22 19:50:01 +03001256 val = I915_READ(PCH_TRANSCONF(pipe));
Jesse Barnes92f25842011-01-04 15:09:34 -08001257 enabled = !!(val & TRANS_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001258 I915_STATE_WARN(enabled,
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001259 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1260 pipe_name(pipe));
Jesse Barnes92f25842011-01-04 15:09:34 -08001261}
1262
Jesse Barnes291906f2011-02-02 12:28:03 -08001263static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001264 enum pipe pipe, enum port port,
1265 i915_reg_t dp_reg)
Jesse Barnes291906f2011-02-02 12:28:03 -08001266{
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001267 enum pipe port_pipe;
1268 bool state;
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001269
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001270 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1271
1272 I915_STATE_WARN(state && port_pipe == pipe,
1273 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1274 port_name(port), pipe_name(pipe));
1275
1276 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1277 "IBX PCH DP %c still using transcoder B\n",
1278 port_name(port));
Jesse Barnes291906f2011-02-02 12:28:03 -08001279}
1280
1281static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
Ville Syrjälä76203462018-05-14 20:24:21 +03001282 enum pipe pipe, enum port port,
1283 i915_reg_t hdmi_reg)
Jesse Barnes291906f2011-02-02 12:28:03 -08001284{
Ville Syrjälä76203462018-05-14 20:24:21 +03001285 enum pipe port_pipe;
1286 bool state;
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001287
Ville Syrjälä76203462018-05-14 20:24:21 +03001288 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1289
1290 I915_STATE_WARN(state && port_pipe == pipe,
1291 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1292 port_name(port), pipe_name(pipe));
1293
1294 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1295 "IBX PCH HDMI %c still using transcoder B\n",
1296 port_name(port));
Jesse Barnes291906f2011-02-02 12:28:03 -08001297}
1298
1299static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1300 enum pipe pipe)
1301{
Ville Syrjälä6102a8e2018-05-14 20:24:19 +03001302 enum pipe port_pipe;
Jesse Barnes291906f2011-02-02 12:28:03 -08001303
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001304 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1305 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1306 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
Jesse Barnes291906f2011-02-02 12:28:03 -08001307
Ville Syrjälä6102a8e2018-05-14 20:24:19 +03001308 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1309 port_pipe == pipe,
1310 "PCH VGA enabled on transcoder %c, should be disabled\n",
1311 pipe_name(pipe));
Jesse Barnes291906f2011-02-02 12:28:03 -08001312
Ville Syrjäläa44628b2018-05-14 21:28:27 +03001313 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1314 port_pipe == pipe,
1315 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1316 pipe_name(pipe));
Jesse Barnes291906f2011-02-02 12:28:03 -08001317
Ville Syrjälä3aefb672018-11-08 16:36:35 +02001318 /* PCH SDVOB multiplex with HDMIB */
Ville Syrjälä76203462018-05-14 20:24:21 +03001319 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1320 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1321 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
Jesse Barnes291906f2011-02-02 12:28:03 -08001322}
1323
/*
 * Low-level VLV DPLL enable: write the DPLL control value and wait for
 * the PLL to report lock.  Callers (vlv_enable_pll()) are responsible
 * for the pipe-disabled / panel-unlocked preconditions.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Program the full DPLL control value and give it time to spin up. */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The PLL is expected to report lock within 1 ms. */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1341
/*
 * Enable the DPLL for a VLV pipe according to @pipe_config.  The pipe
 * must be disabled and the panel power registers unlocked.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin the VCO up if the state actually wants it enabled. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	/* The MD (divider/multiplier) register is written unconditionally. */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1359
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03001360
/*
 * Low-level CHV DPLL enable: turn the 10-bit DPIO clock back on via the
 * sideband interface, then enable the PLL and wait for lock.  Callers
 * (chv_enable_pll()) handle the pipe/panel preconditions.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	/* Sideband (DPIO) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1392
/*
 * Enable the DPLL for a CHV pipe according to @pipe_config.  The pipe
 * must be disabled and the panel power registers unlocked.  Pipes B/C
 * need a chicken-bit workaround to program their MD register.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin the VCO up if the state actually wants it enabled. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* Remember what we wrote; DPLL_MD(pipe) can't be read back. */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1429
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001430static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03001431{
1432 struct intel_crtc *crtc;
1433 int count = 0;
1434
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001435 for_each_intel_crtc(&dev_priv->drm, crtc) {
Maarten Lankhorst3538b9d2015-06-01 12:50:10 +02001436 count += crtc->base.state->active &&
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03001437 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1438 }
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03001439
1440 return count;
1441}
1442
/*
 * Program and enable the i9xx-style DPLL for @crtc per @crtc_state.
 * The pipe must be disabled; on mobile (non-830) parts the PLL is
 * panel-protected so the panel regs must be unlocked first.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* Gen4+ has a separate register for the pixel multiplier. */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1502
/*
 * Disable the i9xx-style DPLL for the CRTC in @crtc_state.  On 830 the
 * PLLs are left running (both pipes stay enabled there); only the DVO
 * 2x clock bookkeeping is done in that case.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Keep only VGA-mode-disable set; this drops VCO enable. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1529
Jesse Barnesf6071162013-10-01 10:41:38 -07001530static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1531{
Ville Syrjäläb8afb912015-06-29 15:25:48 +03001532 u32 val;
Jesse Barnesf6071162013-10-01 10:41:38 -07001533
1534 /* Make sure the pipe isn't still relying on us */
1535 assert_pipe_disabled(dev_priv, pipe);
1536
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02001537 val = DPLL_INTEGRATED_REF_CLK_VLV |
1538 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1539 if (pipe != PIPE_A)
1540 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1541
Jesse Barnesf6071162013-10-01 10:41:38 -07001542 I915_WRITE(DPLL(pipe), val);
1543 POSTING_READ(DPLL(pipe));
Chon Ming Lee076ed3b2014-04-09 13:28:17 +03001544}
1545
/*
 * Disable the DPLL for a CHV pipe: drop VCO enable while keeping the
 * reference clocks running, then gate the 10-bit DPIO clock via the
 * sideband interface.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Keep the SSC reference clock and VGA-mode-disable bits set. */
	val = DPLL_SSC_REF_CLK_CHV |
	      DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* Sideband (DPIO) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1571
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001572void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03001573 struct intel_digital_port *dport,
1574 unsigned int expected_mask)
Jesse Barnes89b667f2013-04-18 14:51:36 -07001575{
1576 u32 port_mask;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001577 i915_reg_t dpll_reg;
Jesse Barnes89b667f2013-04-18 14:51:36 -07001578
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02001579 switch (dport->base.port) {
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001580 case PORT_B:
Jesse Barnes89b667f2013-04-18 14:51:36 -07001581 port_mask = DPLL_PORTB_READY_MASK;
Chon Ming Lee00fc31b2014-04-09 13:28:15 +03001582 dpll_reg = DPLL(0);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001583 break;
1584 case PORT_C:
Jesse Barnes89b667f2013-04-18 14:51:36 -07001585 port_mask = DPLL_PORTC_READY_MASK;
Chon Ming Lee00fc31b2014-04-09 13:28:15 +03001586 dpll_reg = DPLL(0);
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03001587 expected_mask <<= 4;
Chon Ming Lee00fc31b2014-04-09 13:28:15 +03001588 break;
1589 case PORT_D:
1590 port_mask = DPLL_PORTD_READY_MASK;
1591 dpll_reg = DPIO_PHY_STATUS;
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001592 break;
1593 default:
1594 BUG();
1595 }
Jesse Barnes89b667f2013-04-18 14:51:36 -07001596
Chris Wilson370004d2016-06-30 15:32:56 +01001597 if (intel_wait_for_register(dev_priv,
1598 dpll_reg, port_mask, expected_mask,
1599 1000))
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03001600 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02001601 port_name(dport->base.port),
1602 I915_READ(dpll_reg) & port_mask, expected_mask);
Jesse Barnes89b667f2013-04-18 14:51:36 -07001603}
1604
/*
 * Enable the PCH transcoder for the CRTC in @crtc_state.  Requires the
 * shared DPLL and both FDI TX/RX to already be enabled.  Mirrors the
 * pipe's BPC (IBX only) and interlace settings into the transcoder and
 * waits for the hardware to report the transcoder as enabled.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/*
	 * Copy the pipe's interlace mode into the transcoder; interlaced
	 * SDVO on IBX uses the legacy interlaced mode.
	 */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1662
/*
 * Enable the LPT PCH transcoder.  LPT has a single transcoder whose
 * registers live at the pipe A offsets (hence the PIPE_A accesses
 * regardless of @cpu_transcoder).  FDI must already be running.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Propagate the pipe's interlace mode to the PCH transcoder. */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1694
/*
 * Disable the PCH transcoder for @pipe and wait for it to shut down.
 * FDI and the PCH ports must already be off.  On CPT the timing
 * override workaround bit set at enable time is cleared again.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1726
/*
 * Disable the LPT PCH transcoder, wait for it to report off, and clear
 * the timing override workaround bit set at enable time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1745
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001746enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
Ville Syrjälä65f21302016-10-14 20:02:53 +03001747{
1748 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1749
Ville Syrjälä65f21302016-10-14 20:02:53 +03001750 if (HAS_PCH_LPT(dev_priv))
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001751 return PIPE_A;
Ville Syrjälä65f21302016-10-14 20:02:53 +03001752 else
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001753 return crtc->pipe;
Ville Syrjälä65f21302016-10-14 20:02:53 +03001754}
1755
/*
 * Enable the pipe for @new_crtc_state.  All planes must be off, and the
 * relevant PLL (DPLL/DSI PLL on GMCH platforms, FDI PLLs when driving a
 * PCH encoder) must already be running.  Returns early if the pipe is
 * already enabled, which is only expected on 830.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (dev_priv->drm.max_vblank_count == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1811
/*
 * intel_disable_pipe - disable a (CPU) pipe/transcoder
 * @old_crtc_state: state of the CRTC being shut down
 *
 * Clears PIPECONF_ENABLE for the CRTC's transcoder (and drops double
 * wide mode when it was in use) and, when the pipe actually goes down,
 * waits for the hardware to report it as off.  All planes must already
 * be disabled before calling this.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* Pipe already disabled? Nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/*
	 * On i830 PIPECONF_ENABLE stays set (both pipes are kept enabled,
	 * see intel_enable_pipe()), so there is nothing to wait for then.
	 */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1849
/* Size of one tile page in bytes: 2KiB on gen2, 4KiB on everything else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1854
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001855static unsigned int
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001856intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001857{
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001858 struct drm_i915_private *dev_priv = to_i915(fb->dev);
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001859 unsigned int cpp = fb->format->cpp[color_plane];
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001860
1861 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07001862 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001863 return cpp;
1864 case I915_FORMAT_MOD_X_TILED:
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001865 if (IS_GEN(dev_priv, 2))
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001866 return 128;
1867 else
1868 return 512;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001869 case I915_FORMAT_MOD_Y_TILED_CCS:
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001870 if (color_plane == 1)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001871 return 128;
1872 /* fall through */
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001873 case I915_FORMAT_MOD_Y_TILED:
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001874 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001875 return 128;
1876 else
1877 return 512;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001878 case I915_FORMAT_MOD_Yf_TILED_CCS:
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001879 if (color_plane == 1)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001880 return 128;
1881 /* fall through */
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001882 case I915_FORMAT_MOD_Yf_TILED:
1883 switch (cpp) {
1884 case 1:
1885 return 64;
1886 case 2:
1887 case 4:
1888 return 128;
1889 case 8:
1890 case 16:
1891 return 256;
1892 default:
1893 MISSING_CASE(cpp);
1894 return cpp;
1895 }
1896 break;
1897 default:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001898 MISSING_CASE(fb->modifier);
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001899 return cpp;
1900 }
1901}
1902
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001903static unsigned int
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001904intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
Jesse Barnesa57ce0b2014-02-07 12:10:35 -08001905{
Ben Widawsky2f075562017-03-24 14:29:48 -07001906 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
Ville Syrjälä832be822016-01-12 21:08:33 +02001907 return 1;
1908 else
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001909 return intel_tile_size(to_i915(fb->dev)) /
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001910 intel_tile_width_bytes(fb, color_plane);
Tvrtko Ursulin6761dd32015-03-23 11:10:32 +00001911}
1912
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001913/* Return the tile dimensions in pixel units */
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001914static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001915 unsigned int *tile_width,
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001916 unsigned int *tile_height)
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001917{
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001918 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1919 unsigned int cpp = fb->format->cpp[color_plane];
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001920
1921 *tile_width = tile_width_bytes / cpp;
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001922 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001923}
1924
/* Round @height up to a whole number of tile rows for this fb/plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1933
/*
 * Sum plane[i].width * plane[i].height over all planes of the rotated
 * view.  Units are those of rot_info->plane[] (presumably GTT pages of
 * the rotated mapping — confirm against the code that fills rot_info).
 */
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].width * rot_info->plane[i].height;

	return size;
}
1944
Daniel Vetter75c82a52015-10-14 16:51:04 +02001945static void
Ville Syrjälä3465c582016-02-15 22:54:43 +02001946intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1947 const struct drm_framebuffer *fb,
1948 unsigned int rotation)
Tvrtko Ursulinf64b98c2015-03-23 11:10:35 +00001949{
Chris Wilson7b92c042017-01-14 00:28:26 +00001950 view->type = I915_GGTT_VIEW_NORMAL;
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03001951 if (drm_rotation_90_or_270(rotation)) {
Chris Wilson7b92c042017-01-14 00:28:26 +00001952 view->type = I915_GGTT_VIEW_ROTATED;
Chris Wilson8bab11932017-01-14 00:28:25 +00001953 view->rotated = to_intel_framebuffer(fb)->rot_info;
Ville Syrjälä2d7a2152016-02-15 22:54:47 +02001954 }
1955}
1956
/* Minimum GTT alignment for a cursor surface on this platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	unsigned int alignment;

	if (IS_I830(dev_priv))
		alignment = 16 * 1024;
	else if (IS_I85X(dev_priv))
		alignment = 256;
	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		alignment = 32;
	else
		alignment = 4 * 1024;

	return alignment;
}
1968
/* Minimum GTT alignment for a linear scanout surface (0 = no requirement). */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	unsigned int alignment;

	if (INTEL_GEN(dev_priv) >= 9)
		alignment = 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		alignment = 128 * 1024;
	else if (INTEL_GEN(dev_priv) >= 4)
		alignment = 4 * 1024;
	else
		alignment = 0;

	return alignment;
}
1981
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001982static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001983 int color_plane)
Ville Syrjälä603525d2016-01-12 21:08:37 +02001984{
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001985 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1986
Ville Syrjäläb90c1ee2017-03-07 21:42:07 +02001987 /* AUX_DIST needs only 4K alignment */
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001988 if (color_plane == 1)
Ville Syrjäläb90c1ee2017-03-07 21:42:07 +02001989 return 4096;
1990
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001991 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07001992 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjälä603525d2016-01-12 21:08:37 +02001993 return intel_linear_alignment(dev_priv);
1994 case I915_FORMAT_MOD_X_TILED:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001995 if (INTEL_GEN(dev_priv) >= 9)
Ville Syrjälä603525d2016-01-12 21:08:37 +02001996 return 256 * 1024;
1997 return 0;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001998 case I915_FORMAT_MOD_Y_TILED_CCS:
1999 case I915_FORMAT_MOD_Yf_TILED_CCS:
Ville Syrjälä603525d2016-01-12 21:08:37 +02002000 case I915_FORMAT_MOD_Y_TILED:
2001 case I915_FORMAT_MOD_Yf_TILED:
2002 return 1 * 1024 * 1024;
2003 default:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002004 MISSING_CASE(fb->modifier);
Ville Syrjälä603525d2016-01-12 21:08:37 +02002005 return 0;
2006 }
2007}
2008
Ville Syrjäläf7a02ad2018-02-21 20:48:07 +02002009static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2010{
2011 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2012 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2013
Ville Syrjälä32febd92018-02-21 18:02:33 +02002014 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
Ville Syrjäläf7a02ad2018-02-21 20:48:07 +02002015}
2016
/**
 * intel_pin_and_fence_fb_obj - pin a framebuffer for scanout, try to fence it
 * @fb: framebuffer to pin
 * @view: GGTT view to pin it into
 * @uses_fence: whether the plane wants a fence register installed
 * @out_flags: PLANE_HAS_FENCE is OR'ed in when a fence was installed
 *
 * Pins @fb's backing object into the display-capable part of the GGTT
 * with the alignment the surface requires and, when requested and
 * possible, installs a fence register for it.  On gen2/3 a fence is
 * mandatory and failure to get one fails the pin; on gen4+ we carry on
 * without one.
 *
 * Returns the pinned vma, with an extra reference taken, or an ERR_PTR.
 * Caller must hold struct_mutex and undo with intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	/* Let the GPU error handling know an fb pin is in flight. */
	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		/* Pre-gen4 cannot scan out without a fence: hard failure. */
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference for the caller; dropped in intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv);
	return vma;
}
2107
/**
 * intel_unpin_fb_vma - undo intel_pin_and_fence_fb_obj()
 * @vma: scanout vma returned by intel_pin_and_fence_fb_obj()
 * @flags: the *out_flags value from the pin call (PLANE_HAS_FENCE etc.)
 *
 * Releases the fence (if one was installed), unpins the vma from the
 * display plane and drops the reference taken at pin time, in that
 * order.  Caller must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
2117
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002118static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002119 unsigned int rotation)
2120{
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002121 if (drm_rotation_90_or_270(rotation))
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002122 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002123 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002124 return fb->pitches[color_plane];
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002125}
2126
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002127/*
Ville Syrjälä6687c902015-09-15 13:16:41 +03002128 * Convert the x/y offsets into a linear offset.
2129 * Only valid with 0/180 degree rotation, which is fine since linear
2130 * offset is only used with linear buffers on pre-hsw and tiled buffers
2131 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2132 */
2133u32 intel_fb_xy_to_linear(int x, int y,
Ville Syrjälä29490562016-01-20 18:02:50 +02002134 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002135 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002136{
Ville Syrjälä29490562016-01-20 18:02:50 +02002137 const struct drm_framebuffer *fb = state->base.fb;
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002138 unsigned int cpp = fb->format->cpp[color_plane];
2139 unsigned int pitch = state->color_plane[color_plane].stride;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002140
2141 return y * pitch + x * cpp;
2142}
2143
2144/*
2145 * Add the x/y offsets derived from fb->offsets[] to the user
2146 * specified plane src x/y offsets. The resulting x/y offsets
2147 * specify the start of scanout from the beginning of the gtt mapping.
2148 */
2149void intel_add_fb_offsets(int *x, int *y,
Ville Syrjälä29490562016-01-20 18:02:50 +02002150 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002151 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002152
2153{
Ville Syrjälä29490562016-01-20 18:02:50 +02002154 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2155 unsigned int rotation = state->base.rotation;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002156
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002157 if (drm_rotation_90_or_270(rotation)) {
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002158 *x += intel_fb->rotated[color_plane].x;
2159 *y += intel_fb->rotated[color_plane].y;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002160 } else {
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002161 *x += intel_fb->normal[color_plane].x;
2162 *y += intel_fb->normal[color_plane].y;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002163 }
2164}
2165
/*
 * Re-express the difference between two tile-size-aligned surface
 * offsets (@old_offset - @new_offset) as additional *x/*y pixel
 * offsets, so the caller can use @new_offset as the base address
 * instead.  Both offsets must be multiples of @tile_size and
 * @new_offset must not exceed @old_offset (WARNed on otherwise).
 *
 * Returns @new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	/* whole tiles between the two offsets */
	tiles = (old_offset - new_offset) / tile_size;

	/* split into whole tile rows and tiles within a row */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2192
/* Is @color_plane of a surface with the given @modifier laid out linearly? */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	/*
	 * NOTE(review): @color_plane is currently unused — presumably kept
	 * in the signature for modifiers whose planes differ in layout;
	 * confirm with callers before relying on that.
	 */
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2197
/*
 * Convert a byte-offset difference (@old_offset - @new_offset) into
 * equivalent *x/*y pixel offset adjustments for @color_plane, handling
 * both tiled and linear layouts, so callers can switch to @new_offset
 * as the surface base.  For 90/270 rotation @pitch is the tile_height
 * aligned framebuffer height (see the comment above
 * intel_compute_aligned_offset()); otherwise it is the byte pitch.
 *
 * Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		/* express the pitch in tiles, using rotated dims for 90/270 */
		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* linear: plain byte arithmetic */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2236
/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 *
 * Plane-state convenience wrapper around intel_adjust_aligned_offset():
 * fb, rotation and per-plane stride are pulled out of @state.
 */
static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
					     const struct intel_plane_state *state,
					     int color_plane,
					     u32 old_offset, u32 new_offset)
{
	return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
					   state->base.rotation,
					   state->color_plane[color_plane].stride,
					   old_offset, new_offset);
}
2251
2252/*
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002253 * Computes the aligned offset to the base tile and adjusts
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002254 * x, y. bytes per pixel is assumed to be a power-of-two.
2255 *
2256 * In the 90/270 rotated case, x and y are assumed
2257 * to be already rotated to match the rotated GTT view, and
2258 * pitch is the tile_height aligned framebuffer height.
Ville Syrjälä6687c902015-09-15 13:16:41 +03002259 *
2260 * This function is used when computing the derived information
2261 * under intel_framebuffer, so using any of that information
2262 * here is not allowed. Anything under drm_framebuffer can be
2263 * used. This is why the user has to pass in the pitch since it
2264 * is specified in the rotated orientation.
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002265 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/* turn a power-of-two alignment into a bitmask (0 = no alignment) */
	if (alignment)
		alignment--;

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		/* express the pitch in tiles, using rotated dims for 90/270 */
		if (drm_rotation_90_or_270(rotation)) {
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* split x/y into whole-tile and intra-tile parts */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		/* byte offset of the containing tile, aligned down */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* fold the offset lost to alignment back into x/y */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* the unaligned remainder becomes the new x/y */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2316
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002317static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2318 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002319 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002320{
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03002321 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2322 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
Ville Syrjälä29490562016-01-20 18:02:50 +02002323 const struct drm_framebuffer *fb = state->base.fb;
2324 unsigned int rotation = state->base.rotation;
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002325 int pitch = state->color_plane[color_plane].stride;
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03002326 u32 alignment;
2327
2328 if (intel_plane->id == PLANE_CURSOR)
2329 alignment = intel_cursor_alignment(dev_priv);
2330 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002331 alignment = intel_surf_alignment(fb, color_plane);
Ville Syrjälä6687c902015-09-15 13:16:41 +03002332
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002333 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002334 pitch, rotation, alignment);
Ville Syrjälä6687c902015-09-15 13:16:41 +03002335}
2336
/*
 * Convert the fb->offsets[] byte offset of @color_plane into x/y pixel
 * offsets relative to byte offset 0.  Tiled surfaces require the byte
 * offset to be tile-size aligned.
 *
 * Returns 0 on success, -EINVAL for a misaligned tiled offset, or
 * -ERANGE when offset + aligned_height * pitch would overflow u32.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
			      fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	/* worst-case plane height in scanlines, for the overflow check */
	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
			      fb->offsets[color_plane], fb->pitches[color_plane],
			      color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* express fb->offsets[] as an x/y adjustment from offset 0 */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2374
Ville Syrjälä72618eb2016-02-04 20:38:20 +02002375static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2376{
2377 switch (fb_modifier) {
2378 case I915_FORMAT_MOD_X_TILED:
2379 return I915_TILING_X;
2380 case I915_FORMAT_MOD_Y_TILED:
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002381 case I915_FORMAT_MOD_Y_TILED_CCS:
Ville Syrjälä72618eb2016-02-04 20:38:20 +02002382 return I915_TILING_Y;
2383 default:
2384 return I915_TILING_NONE;
2385 }
2386}
2387
Ville Syrjälä16af25f2018-01-19 16:41:52 +02002388/*
2389 * From the Sky Lake PRM:
2390 * "The Color Control Surface (CCS) contains the compression status of
2391 * the cache-line pairs. The compression state of the cache-line pair
2392 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2393 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2394 * cache-line-pairs. CCS is always Y tiled."
2395 *
2396 * Since cache line pairs refers to horizontally adjacent cache lines,
2397 * each cache line in the CCS corresponds to an area of 32x16 cache
2398 * lines on the main surface. Since each pixel is 4 bytes, this gives
2399 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2400 * main surface.
2401 */
/*
 * Driver-private format descriptions for the CCS modifiers: one main
 * surface plane plus a 1 byte-per-element CCS plane with the 8x16
 * pixels-per-CCS-byte ratio derived in the comment above.
 */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2408
2409static const struct drm_format_info *
2410lookup_format_info(const struct drm_format_info formats[],
2411 int num_formats, u32 format)
2412{
2413 int i;
2414
2415 for (i = 0; i < num_formats; i++) {
2416 if (formats[i].format == format)
2417 return &formats[i];
2418 }
2419
2420 return NULL;
2421}
2422
2423static const struct drm_format_info *
2424intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2425{
2426 switch (cmd->modifier[0]) {
2427 case I915_FORMAT_MOD_Y_TILED_CCS:
2428 case I915_FORMAT_MOD_Yf_TILED_CCS:
2429 return lookup_format_info(ccs_formats,
2430 ARRAY_SIZE(ccs_formats),
2431 cmd->pixel_format);
2432 default:
2433 return NULL;
2434 }
2435}
2436
Dhinakaran Pandiyan63eaf9a2018-08-22 12:38:27 -07002437bool is_ccs_modifier(u64 modifier)
2438{
2439 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2440 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2441}
2442
/*
 * Pre-compute the layout of every color plane of @fb: the x/y position
 * of each plane's first pixel in the normal GTT view, the equivalent
 * position and per-plane info for the 90/270 rotated GTT view, and a
 * check that the whole layout fits inside the backing object.
 *
 * Returns 0 on success or a negative error code (-EINVAL/-ERANGE from
 * offset validation, -EINVAL on CCS mismatch or an undersized bo).
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0; /* in tile units */
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Convert the byte offset of this plane into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			/* Scale CCS tile dims to main surface pixel units. */
			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Tile-aligned start of the plane; remainder goes into x/y. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size; /* bytes -> tiles */

		if (!is_surface_linear(fb->modifier, i)) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			/* Linear surface: size in tiles straight from the pitch. */
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* 64-bit multiply to catch u32 overflow in the size check. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2607
Damien Lespiaub35d63f2015-01-20 12:51:50 +00002608static int i9xx_format_to_fourcc(int format)
Jesse Barnes46f297f2014-03-07 08:57:48 -08002609{
2610 switch (format) {
2611 case DISPPLANE_8BPP:
2612 return DRM_FORMAT_C8;
2613 case DISPPLANE_BGRX555:
2614 return DRM_FORMAT_XRGB1555;
2615 case DISPPLANE_BGRX565:
2616 return DRM_FORMAT_RGB565;
2617 default:
2618 case DISPPLANE_BGRX888:
2619 return DRM_FORMAT_XRGB8888;
2620 case DISPPLANE_RGBX888:
2621 return DRM_FORMAT_XBGR8888;
2622 case DISPPLANE_BGRX101010:
2623 return DRM_FORMAT_XRGB2101010;
2624 case DISPPLANE_RGBX101010:
2625 return DRM_FORMAT_XBGR2101010;
2626 }
2627}
2628
Mahesh Kumarddf34312018-04-09 09:11:03 +05302629int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00002630{
2631 switch (format) {
2632 case PLANE_CTL_FORMAT_RGB_565:
2633 return DRM_FORMAT_RGB565;
Mahesh Kumarf34a2912018-04-09 09:11:02 +05302634 case PLANE_CTL_FORMAT_NV12:
2635 return DRM_FORMAT_NV12;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00002636 default:
2637 case PLANE_CTL_FORMAT_XRGB_8888:
2638 if (rgb_order) {
2639 if (alpha)
2640 return DRM_FORMAT_ABGR8888;
2641 else
2642 return DRM_FORMAT_XBGR8888;
2643 } else {
2644 if (alpha)
2645 return DRM_FORMAT_ARGB8888;
2646 else
2647 return DRM_FORMAT_XRGB8888;
2648 }
2649 case PLANE_CTL_FORMAT_XRGB_2101010:
2650 if (rgb_order)
2651 return DRM_FORMAT_XBGR2101010;
2652 else
2653 return DRM_FORMAT_XRGB2101010;
2654 }
2655}
2656
Damien Lespiau5724dbd2015-01-20 12:51:52 +00002657static bool
Daniel Vetterf6936e22015-03-26 12:17:05 +01002658intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2659 struct intel_initial_plane_config *plane_config)
Jesse Barnes46f297f2014-03-07 08:57:48 -08002660{
2661 struct drm_device *dev = crtc->base.dev;
Paulo Zanoni3badb492015-09-23 12:52:23 -03002662 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes46f297f2014-03-07 08:57:48 -08002663 struct drm_i915_gem_object *obj = NULL;
2664 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
Damien Lespiau2d140302015-02-05 17:22:18 +00002665 struct drm_framebuffer *fb = &plane_config->fb->base;
Daniel Vetterf37b5c22015-02-10 23:12:27 +01002666 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2667 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2668 PAGE_SIZE);
2669
2670 size_aligned -= base_aligned;
Jesse Barnes46f297f2014-03-07 08:57:48 -08002671
Chris Wilsonff2652e2014-03-10 08:07:02 +00002672 if (plane_config->size == 0)
2673 return false;
2674
Paulo Zanoni3badb492015-09-23 12:52:23 -03002675 /* If the FB is too big, just don't use it since fbdev is not very
2676 * important and we should probably use that space with FBC or other
2677 * features. */
Matthew Auldb1ace602017-12-11 15:18:21 +00002678 if (size_aligned * 2 > dev_priv->stolen_usable_size)
Paulo Zanoni3badb492015-09-23 12:52:23 -03002679 return false;
2680
Imre Deak914a4fd2018-10-16 19:00:11 +03002681 switch (fb->modifier) {
2682 case DRM_FORMAT_MOD_LINEAR:
2683 case I915_FORMAT_MOD_X_TILED:
2684 case I915_FORMAT_MOD_Y_TILED:
2685 break;
2686 default:
2687 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2688 fb->modifier);
2689 return false;
2690 }
2691
Tvrtko Ursulin12c83d92016-02-11 10:27:29 +00002692 mutex_lock(&dev->struct_mutex);
Tvrtko Ursulin187685c2016-12-01 14:16:36 +00002693 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
Daniel Vetterf37b5c22015-02-10 23:12:27 +01002694 base_aligned,
2695 base_aligned,
2696 size_aligned);
Chris Wilson24dbf512017-02-15 10:59:18 +00002697 mutex_unlock(&dev->struct_mutex);
2698 if (!obj)
Jesse Barnes484b41d2014-03-07 08:57:55 -08002699 return false;
Jesse Barnes46f297f2014-03-07 08:57:48 -08002700
Imre Deak914a4fd2018-10-16 19:00:11 +03002701 switch (plane_config->tiling) {
2702 case I915_TILING_NONE:
2703 break;
2704 case I915_TILING_X:
2705 case I915_TILING_Y:
2706 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2707 break;
2708 default:
2709 MISSING_CASE(plane_config->tiling);
2710 return false;
2711 }
Jesse Barnes46f297f2014-03-07 08:57:48 -08002712
Ville Syrjälä438b74a2016-12-14 23:32:55 +02002713 mode_cmd.pixel_format = fb->format->format;
Damien Lespiau6bf129d2015-02-05 17:22:16 +00002714 mode_cmd.width = fb->width;
2715 mode_cmd.height = fb->height;
2716 mode_cmd.pitches[0] = fb->pitches[0];
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002717 mode_cmd.modifier[0] = fb->modifier;
Daniel Vetter18c52472015-02-10 17:16:09 +00002718 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
Jesse Barnes46f297f2014-03-07 08:57:48 -08002719
Chris Wilson24dbf512017-02-15 10:59:18 +00002720 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
Jesse Barnes46f297f2014-03-07 08:57:48 -08002721 DRM_DEBUG_KMS("intel fb init failed\n");
2722 goto out_unref_obj;
2723 }
Tvrtko Ursulin12c83d92016-02-11 10:27:29 +00002724
Jesse Barnes484b41d2014-03-07 08:57:55 -08002725
Daniel Vetterf6936e22015-03-26 12:17:05 +01002726 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
Jesse Barnes484b41d2014-03-07 08:57:55 -08002727 return true;
Jesse Barnes46f297f2014-03-07 08:57:48 -08002728
2729out_unref_obj:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01002730 i915_gem_object_put(obj);
Jesse Barnes484b41d2014-03-07 08:57:55 -08002731 return false;
2732}
2733
Damien Lespiau5724dbd2015-01-20 12:51:52 +00002734static void
Ville Syrjäläe9728bd2017-03-02 19:14:51 +02002735intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2736 struct intel_plane_state *plane_state,
2737 bool visible)
2738{
2739 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2740
2741 plane_state->base.visible = visible;
2742
Ville Syrjälä62358aa2018-10-03 17:50:17 +03002743 if (visible)
Ville Syrjälä40560e22018-06-26 22:47:11 +03002744 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
Ville Syrjälä62358aa2018-10-03 17:50:17 +03002745 else
Ville Syrjälä40560e22018-06-26 22:47:11 +03002746 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
Ville Syrjäläe9728bd2017-03-02 19:14:51 +02002747}
2748
/* Rebuild crtc_state->active_planes from the (unique-id) plane_mask. */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->base.plane_mask)
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
2765
/*
 * Turn off @plane outside of the atomic commit machinery (used during
 * initial hw state takeover/fixup): mark it invisible in the software
 * state, rebuild active_planes, and program the hardware disable.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);

	/* Extra bookkeeping (e.g. FBC) before the primary goes away. */
	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	trace_intel_disable_plane(&plane->base, crtc);
	plane->disable_plane(plane, crtc_state);
}
2787
/*
 * Take over the firmware-programmed framebuffer for this CRTC's primary
 * plane: either wrap the preallocated stolen memory in a new fb, or
 * share the fb of another active CRTC scanning out from the same GTT
 * address. If neither works, disable the primary plane so the software
 * state doesn't claim a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			/* Extra reference since two planes now use this fb. */
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->base.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		/* Drop the reference taken above (or by the alloc path). */
		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	obj = intel_fb_obj(fb);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	/* Fill in the plane state to cover the whole fb, unscaled. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
2896
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002897static int skl_max_plane_width(const struct drm_framebuffer *fb,
2898 int color_plane,
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002899 unsigned int rotation)
2900{
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002901 int cpp = fb->format->cpp[color_plane];
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002902
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002903 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07002904 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002905 case I915_FORMAT_MOD_X_TILED:
2906 switch (cpp) {
2907 case 8:
2908 return 4096;
2909 case 4:
2910 case 2:
2911 case 1:
2912 return 8192;
2913 default:
2914 MISSING_CASE(cpp);
2915 break;
2916 }
2917 break;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002918 case I915_FORMAT_MOD_Y_TILED_CCS:
2919 case I915_FORMAT_MOD_Yf_TILED_CCS:
2920 /* FIXME AUX plane? */
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002921 case I915_FORMAT_MOD_Y_TILED:
2922 case I915_FORMAT_MOD_Yf_TILED:
2923 switch (cpp) {
2924 case 8:
2925 return 2048;
2926 case 4:
2927 return 4096;
2928 case 2:
2929 case 1:
2930 return 8192;
2931 default:
2932 MISSING_CASE(cpp);
2933 break;
2934 }
2935 break;
2936 default:
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002937 MISSING_CASE(fb->modifier);
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002938 }
2939
2940 return 2048;
2941}
2942
/*
 * Try to line up the CCS AUX plane's intra-tile x/y with the main
 * surface's (main_x, main_y) by walking the AUX offset backwards one
 * alignment step at a time. On success the adjusted AUX offset/x/y are
 * written back to color_plane[1] and true is returned; false means no
 * matching AUX position exists for this main surface offset.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		/* Found a matching intra-tile position. */
		if (aux_x == main_x && aux_y == main_y)
			break;

		/* Can't back up any further. */
		if (aux_offset == 0)
			break;

		/* Work in AUX plane units, then fold the remainder back in. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
2980
/*
 * Compute and validate the main (color plane 0) surface offset and x/y
 * for a SKL+ plane: enforce the max source size, keep the offset below
 * the AUX offset (the hw AUX distance must be non-negative), dodge the
 * X-tiling stride limitation, and line up with the CCS AUX plane when
 * a CCS modifier is used. Results land in color_plane[0].
 * Must run after the AUX surface setup (reads color_plane[1]).
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset back until x fits within the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	return 0;
}
3057
/*
 * Compute the NV12 CbCr (color plane 1) surface offset and x/y.
 * src coordinates are 16.16 fixed point; >> 17 is (>> 16) / 2, i.e.
 * the chroma plane coordinates for the 2x2 subsampled CbCr plane.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3086
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003087static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3088{
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003089 const struct drm_framebuffer *fb = plane_state->base.fb;
3090 int src_x = plane_state->base.src.x1 >> 16;
3091 int src_y = plane_state->base.src.y1 >> 16;
3092 int hsub = fb->format->hsub;
3093 int vsub = fb->format->vsub;
3094 int x = src_x / hsub;
3095 int y = src_y / vsub;
3096 u32 offset;
3097
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003098 intel_add_fb_offsets(&x, &y, plane_state, 1);
Ville Syrjälä6d19a442018-09-07 18:24:01 +03003099 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003100
Ville Syrjäläc11ada02018-09-07 18:24:04 +03003101 plane_state->color_plane[1].offset = offset;
3102 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3103 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003104
3105 return 0;
3106}
3107
/*
 * Validate and finalize the SKL+ plane surface layout for a plane state:
 * fills in the GGTT view, per-color-plane strides, x/y positions and
 * surface offsets.
 *
 * Returns 0 on success or a negative error code (propagated from the
 * stride/surface checks).
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	/* Strides are computed for both color planes up front so that
	 * intel_plane_check_stride() can validate them even for the
	 * invisible case below.
	 */
	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
	plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* No need to compute offsets for a plane that won't be shown. */
	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_MODE_ROTATE_270);

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (fb->format->format == DRM_FORMAT_NV12) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/* No AUX plane: mark plane 1 with an out-of-band offset. */
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
3155
Ville Syrjäläddd57132018-09-07 18:24:02 +03003156unsigned int
3157i9xx_plane_max_stride(struct intel_plane *plane,
3158 u32 pixel_format, u64 modifier,
3159 unsigned int rotation)
3160{
3161 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3162
3163 if (!HAS_GMCH_DISPLAY(dev_priv)) {
3164 return 32*1024;
3165 } else if (INTEL_GEN(dev_priv) >= 4) {
3166 if (modifier == I915_FORMAT_MOD_X_TILED)
3167 return 16*1024;
3168 else
3169 return 32*1024;
3170 } else if (INTEL_GEN(dev_priv) >= 3) {
3171 if (modifier == I915_FORMAT_MOD_X_TILED)
3172 return 8*1024;
3173 else
3174 return 16*1024;
3175 } else {
3176 if (plane->i9xx_plane == PLANE_C)
3177 return 4*1024;
3178 else
3179 return 8*1024;
3180 }
3181}
3182
/*
 * Build the DSPCNTR register value for the i9xx primary plane from the
 * crtc/plane state: enable + gamma bits, per-platform workaround bits,
 * pixel format, tiling and rotation/reflection.
 *
 * Returns 0 (plane effectively not programmable) for an unexpected
 * pixel format, after logging via MISSING_CASE().
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

	/* Trickle feed is disabled on these platforms. */
	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Pre-ilk hardware selects the output pipe in DSPCNTR itself. */
	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* X-tiling is the only tiled scanout format here (gen4+). */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
Ville Syrjäläde1aa622013-06-07 10:47:01 +03003244
/*
 * Compute the i9xx primary plane surface layout: GGTT view, stride,
 * aligned surface offset (gen4+) and the x/y position within the
 * surface, including the coordinate fixups needed for rotated or
 * mirrored scanout on platforms that don't do this in hardware.
 *
 * Returns 0 on success or a negative error code from the stride check.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* Only gen4+ has a separate aligned surface offset register. */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/*
		 * With 180 degree rotation (or X mirroring) the hardware
		 * scans out from the opposite corner/edge, so point the
		 * coordinates at the last pixel of the source rect.
		 */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3290
/*
 * Atomic check for the i9xx primary plane: validate rotation, clip the
 * plane against the crtc (no scaling allowed), check the source
 * coordinates and surface layout, and precompute the DSPCNTR value
 * into plane_state->ctl for the commit phase.
 *
 * Returns 0 on success or the first negative error code encountered.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* This plane cannot scale: min == max == NO_SCALING. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	/* Nothing more to validate if the plane ended up fully clipped. */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
3324
/*
 * Program the i9xx primary plane registers from a precomputed plane
 * state (offsets/positions from i9xx_check_plane_surface(), control
 * bits from plane_state->ctl).
 *
 * All MMIO is done with I915_WRITE_FW under the uncore lock; the write
 * order matters — see the comment above the DSPCNTR write.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	unsigned long irqflags;
	u32 dspaddr_offset;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 has no aligned surface offset; use the linear offset. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane uses the PRIM* register set. */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3389
/*
 * Disable the i9xx primary plane: clear DSPCNTR, then poke the
 * surface/address register so the disable actually latches.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
	/* Gen4+ arms via DSPSURF, older hardware via DSPADDR. */
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3407
/*
 * Read back whether the i9xx primary plane is currently enabled in
 * hardware, and report which pipe it is scanning out on.
 *
 * Returns false (plane treated as off) if the power domain covering
 * the plane's registers cannot be acquired.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Pre-ilk encodes the pipe selection inside DSPCNTR itself. */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
3440
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003441static u32
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003442intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
Damien Lespiaub3218032015-02-27 11:15:18 +00003443{
Ben Widawsky2f075562017-03-24 14:29:48 -07003444 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
Ville Syrjälä7b49f942016-01-12 21:08:32 +02003445 return 64;
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003446 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003447 return intel_tile_width_bytes(fb, color_plane);
Damien Lespiaub3218032015-02-27 11:15:18 +00003448}
3449
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003450static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3451{
3452 struct drm_device *dev = intel_crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01003453 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003454
3455 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3456 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3457 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003458}
3459
/*
 * This function detaches (aka. unbinds) unused scalers in hardware,
 * i.e. every scaler of the crtc whose state does not mark it in_use.
 */
static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	int i;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}
3476
Ville Syrjäläb3cf5c02018-09-25 22:37:08 +03003477static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3478 int color_plane, unsigned int rotation)
3479{
3480 /*
3481 * The stride is either expressed as a multiple of 64 bytes chunks for
3482 * linear buffers or in number of tiles for tiled buffers.
3483 */
3484 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3485 return 64;
3486 else if (drm_rotation_90_or_270(rotation))
3487 return intel_tile_height(fb, color_plane);
3488 else
3489 return intel_tile_width_bytes(fb, color_plane);
3490}
3491
Ville Syrjälädf79cf42018-09-11 18:01:39 +03003492u32 skl_plane_stride(const struct intel_plane_state *plane_state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003493 int color_plane)
Ville Syrjäläd2196772016-01-28 18:33:11 +02003494{
Ville Syrjälädf79cf42018-09-11 18:01:39 +03003495 const struct drm_framebuffer *fb = plane_state->base.fb;
3496 unsigned int rotation = plane_state->base.rotation;
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003497 u32 stride = plane_state->color_plane[color_plane].stride;
Ville Syrjälä1b500532017-03-07 21:42:08 +02003498
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003499 if (color_plane >= fb->format->num_planes)
Ville Syrjälä1b500532017-03-07 21:42:08 +02003500 return 0;
3501
Ville Syrjäläb3cf5c02018-09-25 22:37:08 +03003502 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
Ville Syrjäläd2196772016-01-28 18:33:11 +02003503}
3504
/*
 * Translate a DRM fourcc pixel format into the SKL+ PLANE_CTL format
 * (and RGB/BGR channel order) bits. Unknown formats are logged via
 * MISSING_CASE() and yield 0.
 */
static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3538
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003539static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
James Ausmus4036c782017-11-13 10:11:28 -08003540{
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003541 if (!plane_state->base.fb->format->has_alpha)
3542 return PLANE_CTL_ALPHA_DISABLE;
3543
3544 switch (plane_state->base.pixel_blend_mode) {
3545 case DRM_MODE_BLEND_PIXEL_NONE:
3546 return PLANE_CTL_ALPHA_DISABLE;
3547 case DRM_MODE_BLEND_PREMULTI:
James Ausmus4036c782017-11-13 10:11:28 -08003548 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003549 case DRM_MODE_BLEND_COVERAGE:
3550 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
James Ausmus4036c782017-11-13 10:11:28 -08003551 default:
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003552 MISSING_CASE(plane_state->base.pixel_blend_mode);
James Ausmus4036c782017-11-13 10:11:28 -08003553 return PLANE_CTL_ALPHA_DISABLE;
3554 }
3555}
3556
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003557static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
James Ausmus4036c782017-11-13 10:11:28 -08003558{
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003559 if (!plane_state->base.fb->format->has_alpha)
3560 return PLANE_COLOR_ALPHA_DISABLE;
3561
3562 switch (plane_state->base.pixel_blend_mode) {
3563 case DRM_MODE_BLEND_PIXEL_NONE:
3564 return PLANE_COLOR_ALPHA_DISABLE;
3565 case DRM_MODE_BLEND_PREMULTI:
James Ausmus4036c782017-11-13 10:11:28 -08003566 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003567 case DRM_MODE_BLEND_COVERAGE:
3568 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
James Ausmus4036c782017-11-13 10:11:28 -08003569 default:
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003570 MISSING_CASE(plane_state->base.pixel_blend_mode);
James Ausmus4036c782017-11-13 10:11:28 -08003571 return PLANE_COLOR_ALPHA_DISABLE;
3572 }
3573}
3574
/*
 * Translate a framebuffer modifier into SKL+ PLANE_CTL tiling bits.
 * Linear gets no bits; CCS modifiers additionally enable render
 * decompression. Unknown modifiers are logged and yield 0.
 */
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
3596
/*
 * Translate a DRM rotation (single DRM_MODE_ROTATE_* value) into the
 * SKL+ PLANE_CTL rotation bits. Unknown values are logged and yield 0.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping of
	 * 90 and 270 below.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
3618
3619static u32 cnl_plane_ctl_flip(unsigned int reflect)
3620{
3621 switch (reflect) {
3622 case 0:
3623 break;
3624 case DRM_MODE_REFLECT_X:
3625 return PLANE_CTL_FLIP_HORIZONTAL;
3626 case DRM_MODE_REFLECT_Y:
3627 default:
3628 MISSING_CASE(reflect);
Chandra Konduru6156a452015-04-27 13:48:39 -07003629 }
3630
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003631 return 0;
Chandra Konduru6156a452015-04-27 13:48:39 -07003632}
3633
/*
 * Build the full SKL+ PLANE_CTL register value from the plane state:
 * enable bit, pre-GLK gamma/CSC/alpha bits, pixel format, tiling,
 * rotation/reflection and color-key mode.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/*
	 * On GLK+ the alpha/gamma/CSC controls live in PLANE_COLOR_CTL
	 * instead (see glk_plane_color_ctl()), so set them here only on
	 * older platforms.
	 */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Reflection (flip) bits only exist on CNL+. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
3675
/*
 * Build the GLK+ PLANE_COLOR_CTL register value: pipe gamma/CSC bits
 * (pre-ICL only), plane gamma disable, alpha mode and the YUV->RGB
 * CSC selection for YUV framebuffers.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	u32 plane_color_ctl = 0;

	if (INTEL_GEN(dev_priv) < 11) {
		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
	}
	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
		/* Fixed-function YUV->RGB CSC, per the state's encoding. */
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR planes use the programmable input CSC instead. */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
3706
/*
 * Bring the display back up after a reset: re-read the current hardware
 * state, re-disable VGA, and (if a duplicated atomic state was captured
 * beforehand) commit it back to the hardware.
 *
 * Returns 0 on success or a negative error code from the atomic commit.
 * Callers must already hold the modeset locks tracked by @ctx.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	/* Nothing was saved before the reset; the takeover above is enough. */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* The locks are already held, so a deadlock here would be a bug. */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
3745
Ville Syrjälä4ac2ba22016-08-05 23:28:29 +03003746static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3747{
Chris Wilson55277e12019-01-03 11:21:04 +00003748 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
3749 intel_has_gpu_reset(dev_priv));
Ville Syrjälä4ac2ba22016-08-05 23:28:29 +03003750}
3751
/*
 * Prepare the display for a GPU reset: take the modeset locks, duplicate
 * the current atomic state into dev_priv->modeset_restore_state and
 * gracefully disable all CRTCs.
 *
 * The mode_config mutex and the locks acquired under @ctx are deliberately
 * left held; intel_finish_reset() drops them after the reset completes.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	/* A stuck framebuffer pin can stall the modeset; wedge to unblock it. */
	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		/* Deadlock with another acquire context: back off and retry. */
		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_finish_reset() to restore. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3807
/*
 * Counterpart of intel_prepare_reset(): restore the display state saved
 * before the GPU reset, re-initializing the display hardware fully if the
 * reset clobbered it, then drop the modeset locks taken in prepare.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	/* Take ownership of the state saved by intel_prepare_reset(). */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Release the locks held since intel_prepare_reset(). */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3861
/*
 * Fastset path: push the new pipe source size and panel fitter state to
 * the hardware without a full modeset, disabling the old pfit if the new
 * state no longer uses it (pre-gen9 PCH split platforms only).
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC holds (width - 1) in the high half, (height - 1) in the low. */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}
}
3897
/*
 * Switch the FDI TX/RX link out of the training patterns into normal
 * pixel transmission, with enhanced framing, once link training is done.
 * The exact bits differ between IVB, CPT PCH and older parts.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3938
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Run the two-pattern FDI link training sequence: enable TX/RX with
 * training pattern 1, poll the RX IIR for bit lock, switch both sides to
 * pattern 2 and poll for symbol lock. Failures are only logged; the
 * caller proceeds regardless.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll up to 5 times (no delay between reads) for bit lock. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write the bit back to clear the sticky status. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll up to 5 times for symbol lock. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4032
/*
 * Voltage-swing / pre-emphasis levels tried in order during SNB/IVB FDI
 * link training (indexed by training attempt).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4039
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Two-pattern FDI training for SNB: for each pattern, step through the
 * four vswing/pre-emphasis levels in snb_b_fdi_train_param[], polling the
 * RX IIR up to 5 times per level until bit lock (pattern 1) and symbol
 * lock (pattern 2) are reported. Failures are only logged.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Try each vswing/pre-emphasis level until bit lock is reported. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Write the bit back to clear the sticky status. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on bit lock. */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same level sweep as above, this time waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4172
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Like gen6_fdi_link_train() but fully manual: each vswing/pre-emphasis
 * level is attempted twice (j/2 indexes snb_b_fdi_train_param), with the
 * link disabled and re-enabled between attempts. On a pattern-1 failure
 * the next level is tried; a pattern-2 success jumps to train_done.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll (with a double read per pass) for bit lock. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				/* Write the bit back to clear the sticky status. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock; success ends the whole sweep. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4292
/*
 * Enable the FDI PLLs for the pipe: bring up the PCH FDI RX PLL (with the
 * configured lane count and the pipe's BPC), switch the RX clock from
 * Rawclk to PCDclk, and make sure the CPU FDI TX PLL is running.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Mirror the pipe's BPC setting into the FDI RX control bits. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4329
/*
 * Disable the FDI PLLs for the pipe, reversing ironlake_fdi_pll_enable():
 * switch the RX clock back to Rawclk, then shut down the CPU TX PLL and
 * finally the PCH RX PLL, waiting for each to settle.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4359
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004360static void ironlake_fdi_disable(struct drm_crtc *crtc)
4361{
4362 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004363 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004364 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4365 int pipe = intel_crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004366 i915_reg_t reg;
4367 u32 temp;
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004368
4369 /* disable CPU FDI tx and PCH FDI rx */
4370 reg = FDI_TX_CTL(pipe);
4371 temp = I915_READ(reg);
4372 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4373 POSTING_READ(reg);
4374
4375 reg = FDI_RX_CTL(pipe);
4376 temp = I915_READ(reg);
4377 temp &= ~(0x7 << 16);
Daniel Vetterdfd07d72012-12-17 11:21:38 +01004378 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004379 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4380
4381 POSTING_READ(reg);
4382 udelay(100);
4383
4384 /* Ironlake workaround, disable clock pointer after downing FDI */
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004385 if (HAS_PCH_IBX(dev_priv))
Jesse Barnes6f06ce12011-01-04 15:09:38 -08004386 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004387
4388 /* still set train pattern 1 */
4389 reg = FDI_TX_CTL(pipe);
4390 temp = I915_READ(reg);
4391 temp &= ~FDI_LINK_TRAIN_NONE;
4392 temp |= FDI_LINK_TRAIN_PATTERN_1;
4393 I915_WRITE(reg, temp);
4394
4395 reg = FDI_RX_CTL(pipe);
4396 temp = I915_READ(reg);
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004397 if (HAS_PCH_CPT(dev_priv)) {
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004398 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4399 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4400 } else {
4401 temp &= ~FDI_LINK_TRAIN_NONE;
4402 temp |= FDI_LINK_TRAIN_PATTERN_1;
4403 }
4404 /* BPC in FDI rx is consistent with that in PIPECONF */
4405 temp &= ~(0x07 << 16);
Daniel Vetterdfd07d72012-12-17 11:21:38 +01004406 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004407 I915_WRITE(reg, temp);
4408
4409 POSTING_READ(reg);
4410 udelay(100);
4411}
4412
Chris Wilson49d73912016-11-29 09:50:08 +00004413bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
Chris Wilson5dce5b932014-01-20 10:17:36 +00004414{
Daniel Vetterfa058872017-07-20 19:57:52 +02004415 struct drm_crtc *crtc;
4416 bool cleanup_done;
Chris Wilson5dce5b932014-01-20 10:17:36 +00004417
Daniel Vetterfa058872017-07-20 19:57:52 +02004418 drm_for_each_crtc(crtc, &dev_priv->drm) {
4419 struct drm_crtc_commit *commit;
4420 spin_lock(&crtc->commit_lock);
4421 commit = list_first_entry_or_null(&crtc->commit_list,
4422 struct drm_crtc_commit, commit_entry);
4423 cleanup_done = commit ?
4424 try_wait_for_completion(&commit->cleanup_done) : true;
4425 spin_unlock(&crtc->commit_lock);
4426
4427 if (cleanup_done)
Chris Wilson5dce5b932014-01-20 10:17:36 +00004428 continue;
4429
Daniel Vetterfa058872017-07-20 19:57:52 +02004430 drm_crtc_wait_one_vblank(crtc);
Chris Wilson5dce5b932014-01-20 10:17:36 +00004431
4432 return true;
4433 }
4434
4435 return false;
4436}
4437
Maarten Lankhorstb7076542016-08-23 16:18:08 +02004438void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
Ville Syrjälä060f02d2015-12-04 22:21:34 +02004439{
4440 u32 temp;
4441
4442 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4443
4444 mutex_lock(&dev_priv->sb_lock);
4445
4446 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4447 temp |= SBI_SSCCTL_DISABLE;
4448 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4449
4450 mutex_unlock(&dev_priv->sb_lock);
4451}
4452
/*
 * Program the iCLKIP clock to the pixel clock required by @crtc_state.
 *
 * The clock is first gated and the SSC modulator disabled (via
 * lpt_disable_iclkip()), then the divider/phase settings are computed
 * and written over the sideband interface, and finally the modulator
 * is re-enabled and the clock ungated.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 * (retry with the next auxdiv if so).
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock,
		      auxdiv,
		      divsel,
		      phasedir,
		      phaseinc);

	/* All sideband accesses are serialized by sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4531
Ville Syrjälä8802e5b2016-02-17 21:41:12 +02004532int lpt_get_iclkip(struct drm_i915_private *dev_priv)
4533{
4534 u32 divsel, phaseinc, auxdiv;
4535 u32 iclk_virtual_root_freq = 172800 * 1000;
4536 u32 iclk_pi_range = 64;
4537 u32 desired_divisor;
4538 u32 temp;
4539
4540 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
4541 return 0;
4542
4543 mutex_lock(&dev_priv->sb_lock);
4544
4545 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4546 if (temp & SBI_SSCCTL_DISABLE) {
4547 mutex_unlock(&dev_priv->sb_lock);
4548 return 0;
4549 }
4550
4551 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4552 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
4553 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
4554 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
4555 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
4556
4557 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4558 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
4559 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
4560
4561 mutex_unlock(&dev_priv->sb_lock);
4562
4563 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
4564
4565 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4566 desired_divisor << auxdiv);
4567}
4568
Maarten Lankhorst5e1cdf52018-10-04 11:45:58 +02004569static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
Daniel Vetter275f01b22013-05-03 11:49:47 +02004570 enum pipe pch_transcoder)
4571{
Maarten Lankhorst5e1cdf52018-10-04 11:45:58 +02004572 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4573 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4574 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
Daniel Vetter275f01b22013-05-03 11:49:47 +02004575
4576 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4577 I915_READ(HTOTAL(cpu_transcoder)));
4578 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4579 I915_READ(HBLANK(cpu_transcoder)));
4580 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4581 I915_READ(HSYNC(cpu_transcoder)));
4582
4583 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4584 I915_READ(VTOTAL(cpu_transcoder)));
4585 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4586 I915_READ(VBLANK(cpu_transcoder)));
4587 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4588 I915_READ(VSYNC(cpu_transcoder)));
4589 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4590 I915_READ(VSYNCSHIFT(cpu_transcoder)));
4591}
4592
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004593static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004594{
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004595 uint32_t temp;
4596
4597 temp = I915_READ(SOUTH_CHICKEN1);
Ander Conselvan de Oliveira003632d2015-03-11 13:35:43 +02004598 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004599 return;
4600
4601 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4602 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4603
Ander Conselvan de Oliveira003632d2015-03-11 13:35:43 +02004604 temp &= ~FDI_BC_BIFURCATION_SELECT;
4605 if (enable)
4606 temp |= FDI_BC_BIFURCATION_SELECT;
4607
4608 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004609 I915_WRITE(SOUTH_CHICKEN1, temp);
4610 POSTING_READ(SOUTH_CHICKEN1);
4611}
4612
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004613static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004614{
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004615 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4616 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004617
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004618 switch (crtc->pipe) {
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004619 case PIPE_A:
4620 break;
4621 case PIPE_B:
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004622 if (crtc_state->fdi_lanes > 2)
4623 cpt_set_fdi_bc_bifurcation(dev_priv, false);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004624 else
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004625 cpt_set_fdi_bc_bifurcation(dev_priv, true);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004626
4627 break;
4628 case PIPE_C:
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004629 cpt_set_fdi_bc_bifurcation(dev_priv, true);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004630
4631 break;
4632 default:
4633 BUG();
4634 }
4635}
4636
Ville Syrjäläf606bc62018-05-18 18:29:25 +03004637/*
4638 * Finds the encoder associated with the given CRTC. This can only be
4639 * used when we know that the CRTC isn't feeding multiple encoders!
4640 */
4641static struct intel_encoder *
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03004642intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4643 const struct intel_crtc_state *crtc_state)
Ville Syrjäläf606bc62018-05-18 18:29:25 +03004644{
4645 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ville Syrjäläf606bc62018-05-18 18:29:25 +03004646 const struct drm_connector_state *connector_state;
4647 const struct drm_connector *connector;
4648 struct intel_encoder *encoder = NULL;
4649 int num_encoders = 0;
4650 int i;
4651
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03004652 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
Ville Syrjäläf606bc62018-05-18 18:29:25 +03004653 if (connector_state->crtc != &crtc->base)
4654 continue;
4655
4656 encoder = to_intel_encoder(connector_state->best_encoder);
4657 num_encoders++;
4658 }
4659
4660 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4661 num_encoders, pipe_name(crtc->pipe));
4662
4663 return encoder;
4664}
4665
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 *
 * The ordering of the steps below is mandated by the hardware enable
 * sequence; do not reorder.
 */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB shares FDI B/C lanes; pick the bifurcation first */
	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		/* Route PLL A or B to this pipe's PCH transcoder */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc_state, pipe);

	/* Training done; switch FDI to the normal (non-training) pattern */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		/* Mirror the mode's sync polarities into the transcoder */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Safe: a CRTC with a PCH DP encoder feeds exactly one port */
		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(crtc_state);
}
4757
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03004758static void lpt_pch_enable(const struct intel_atomic_state *state,
4759 const struct intel_crtc_state *crtc_state)
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004760{
Ander Conselvan de Oliveira2ce42272017-03-02 14:58:53 +02004761 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ander Conselvan de Oliveira0dcdc382017-03-02 14:58:52 +02004762 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ander Conselvan de Oliveira2ce42272017-03-02 14:58:53 +02004763 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004764
Matthias Kaehlckea2196032017-07-17 11:14:03 -07004765 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004766
Maarten Lankhorstc5b36fa2018-10-11 12:04:55 +02004767 lpt_program_iclkip(crtc_state);
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004768
Paulo Zanoni0540e482012-10-31 18:12:40 -02004769 /* Set transcoder timing. */
Maarten Lankhorst5e1cdf52018-10-04 11:45:58 +02004770 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004771
Paulo Zanoni937bb612012-10-31 18:12:47 -02004772 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
Jesse Barnesf67a5592011-01-05 10:31:48 -08004773}
4774
Daniel Vettera1520312013-05-03 11:49:50 +02004775static void cpt_verify_modeset(struct drm_device *dev, int pipe)
Jesse Barnesd4270e52011-10-11 10:43:02 -07004776{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004777 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004778 i915_reg_t dslreg = PIPEDSL(pipe);
Jesse Barnesd4270e52011-10-11 10:43:02 -07004779 u32 temp;
4780
4781 temp = I915_READ(dslreg);
4782 udelay(500);
4783 if (wait_for(I915_READ(dslreg) != temp, 5)) {
Jesse Barnesd4270e52011-10-11 10:43:02 -07004784 if (wait_for(I915_READ(dslreg) != temp, 5))
Ville Syrjälä84f44ce2013-04-17 17:48:49 +03004785 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
Jesse Barnesd4270e52011-10-11 10:43:02 -07004786 }
4787}
4788
Ville Syrjälä0a599522018-05-21 21:56:13 +03004789/*
4790 * The hardware phase 0.0 refers to the center of the pixel.
4791 * We want to start from the top/left edge which is phase
4792 * -0.5. That matches how the hardware calculates the scaling
4793 * factors (from top-left of the first pixel to bottom-right
4794 * of the last pixel, as opposed to the pixel centers).
4795 *
4796 * For 4:2:0 subsampled chroma planes we obviously have to
4797 * adjust that so that the chroma sample position lands in
4798 * the right spot.
4799 *
4800 * Note that for packed YCbCr 4:2:2 formats there is no way to
4801 * control chroma siting. The hardware simply replicates the
4802 * chroma samples for both of the luma samples, and thus we don't
4803 * actually get the expected MPEG2 chroma siting convention :(
4804 * The same behaviour is observed on pre-SKL platforms as well.
Ville Syrjäläe7a278a2018-10-29 20:18:20 +02004805 *
4806 * Theory behind the formula (note that we ignore sub-pixel
4807 * source coordinates):
4808 * s = source sample position
4809 * d = destination sample position
4810 *
4811 * Downscaling 4:1:
4812 * -0.5
4813 * | 0.0
4814 * | | 1.5 (initial phase)
4815 * | | |
4816 * v v v
4817 * | s | s | s | s |
4818 * | d |
4819 *
4820 * Upscaling 1:4:
4821 * -0.5
4822 * | -0.375 (initial phase)
4823 * | | 0.0
4824 * | | |
4825 * v v v
4826 * | s |
4827 * | d | d | d | d |
Ville Syrjälä0a599522018-05-21 21:56:13 +03004828 */
Ville Syrjäläe7a278a2018-10-29 20:18:20 +02004829u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
Ville Syrjälä0a599522018-05-21 21:56:13 +03004830{
4831 int phase = -0x8000;
4832 u16 trip = 0;
4833
4834 if (chroma_cosited)
4835 phase += (sub - 1) * 0x8000 / sub;
4836
Ville Syrjäläe7a278a2018-10-29 20:18:20 +02004837 phase += scale / (2 * sub);
4838
4839 /*
4840 * Hardware initial phase limited to [-0.5:1.5].
4841 * Since the max hardware scale factor is 3.0, we
4842 * should never actually excdeed 1.0 here.
4843 */
4844 WARN_ON(phase < -0x8000 || phase > 0x18000);
4845
Ville Syrjälä0a599522018-05-21 21:56:13 +03004846 if (phase < 0)
4847 phase = 0x10000 + phase;
4848 else
4849 trip = PS_PHASE_TRIP;
4850
4851 return ((phase >> 2) & PS_PHASE_MASK) | trip;
4852}
4853
/*
 * Stage an update to the CRTC's scaler state for one scaler user
 * (the CRTC itself or one of its planes).
 *
 * @crtc_state: CRTC state whose scaler_state is updated
 * @force_detach: release the user's scaler regardless of need
 * @scaler_user: index of this user in scaler_state->scaler_users
 * @scaler_id: in/out; currently assigned scaler id (-1 when none)
 * @src_w/@src_h, @dst_w/@dst_h: source and destination dimensions
 * @format: framebuffer format (NULL for CRTC/panel-fitter scaling)
 * @need_scaler: caller already determined a scaler is required
 *
 * Returns 0 on success, -EINVAL if the requested scaling cannot be
 * supported. Only bookkeeping is done here; the actual scaler
 * registers are programmed during plane/panel-fitter programming.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* NV12 sources have a hardware minimum size when scaled */
	if (format && format->format == DRM_FORMAT_NV12 &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("NV12: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks (ICL/gen11 has wider limits than SKL) */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (IS_GEN(dev_priv, 11) &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (!IS_GEN(dev_priv, 11) &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4942
4943/**
4944 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4945 *
4946 * @state: crtc's scaler state
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004947 *
4948 * Return
4949 * 0 - scaler_usage updated successfully
4950 * error - requested scaling cannot be supported or other error condition
4951 */
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02004952int skl_update_scaler_crtc(struct intel_crtc_state *state)
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004953{
Ville Syrjälä7c5f93b2015-09-08 13:40:49 +03004954 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004955 bool need_scaler = false;
4956
4957 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4958 need_scaler = true;
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004959
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02004960 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
Chandra Konduru77224cd2018-04-09 09:11:13 +05304961 &state->scaler_state.scaler_id,
4962 state->pipe_src_w, state->pipe_src_h,
4963 adjusted_mode->crtc_hdisplay,
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004964 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004965}
4966
4967/**
4968 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
Chris Wilsonc38c1452018-02-14 13:49:22 +00004969 * @crtc_state: crtc's scaler state
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004970 * @plane_state: atomic plane state to update
4971 *
4972 * Return
4973 * 0 - scaler_usage updated successfully
4974 * error - requested scaling cannot be supported or other error condition
4975 */
Maarten Lankhorstda20eab2015-06-15 12:33:44 +02004976static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4977 struct intel_plane_state *plane_state)
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004978{
Maarten Lankhorstda20eab2015-06-15 12:33:44 +02004979 struct intel_plane *intel_plane =
4980 to_intel_plane(plane_state->base.plane);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004981 struct drm_framebuffer *fb = plane_state->base.fb;
4982 int ret;
Ville Syrjälä936e71e2016-07-26 19:06:59 +03004983 bool force_detach = !fb || !plane_state->base.visible;
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004984 bool need_scaler = false;
4985
4986 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
4987 if (!icl_is_hdr_plane(intel_plane) &&
4988 fb && fb->format->format == DRM_FORMAT_NV12)
4989 need_scaler = true;
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004990
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004991 ret = skl_update_scaler(crtc_state, force_detach,
4992 drm_plane_index(&intel_plane->base),
4993 &plane_state->scaler_id,
Ville Syrjälä936e71e2016-07-26 19:06:59 +03004994 drm_rect_width(&plane_state->base.src) >> 16,
4995 drm_rect_height(&plane_state->base.src) >> 16,
4996 drm_rect_width(&plane_state->base.dst),
Chandra Konduru77224cd2018-04-09 09:11:13 +05304997 drm_rect_height(&plane_state->base.dst),
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004998 fb ? fb->format : NULL, need_scaler);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004999
5000 if (ret || plane_state->scaler_id < 0)
5001 return ret;
5002
Chandra Kondurua1b22782015-04-07 15:28:45 -07005003 /* check colorkey */
Ville Syrjälä6ec5bd32018-02-02 22:42:31 +02005004 if (plane_state->ckey.flags) {
Ville Syrjälä72660ce2016-05-27 20:59:20 +03005005 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5006 intel_plane->base.base.id,
5007 intel_plane->base.name);
Chandra Kondurua1b22782015-04-07 15:28:45 -07005008 return -EINVAL;
5009 }
5010
5011 /* Check src format */
Ville Syrjälä438b74a2016-12-14 23:32:55 +02005012 switch (fb->format->format) {
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02005013 case DRM_FORMAT_RGB565:
5014 case DRM_FORMAT_XBGR8888:
5015 case DRM_FORMAT_XRGB8888:
5016 case DRM_FORMAT_ABGR8888:
5017 case DRM_FORMAT_ARGB8888:
5018 case DRM_FORMAT_XRGB2101010:
5019 case DRM_FORMAT_XBGR2101010:
5020 case DRM_FORMAT_YUYV:
5021 case DRM_FORMAT_YVYU:
5022 case DRM_FORMAT_UYVY:
5023 case DRM_FORMAT_VYUY:
Chandra Konduru77224cd2018-04-09 09:11:13 +05305024 case DRM_FORMAT_NV12:
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02005025 break;
5026 default:
Ville Syrjälä72660ce2016-05-27 20:59:20 +03005027 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5028 intel_plane->base.base.id, intel_plane->base.name,
Ville Syrjälä438b74a2016-12-14 23:32:55 +02005029 fb->base.id, fb->format->format);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02005030 return -EINVAL;
Chandra Kondurua1b22782015-04-07 15:28:45 -07005031 }
5032
Chandra Kondurua1b22782015-04-07 15:28:45 -07005033 return 0;
5034}
5035
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02005036static void skylake_scaler_disable(struct intel_crtc *crtc)
5037{
5038 int i;
5039
5040 for (i = 0; i < crtc->num_scalers; i++)
5041 skl_detach_scaler(crtc, i);
5042}
5043
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005044static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
Jesse Barnesbd2e2442014-11-13 17:51:47 +00005045{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005046 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5047 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5048 enum pipe pipe = crtc->pipe;
5049 const struct intel_crtc_scaler_state *scaler_state =
5050 &crtc_state->scaler_state;
Chandra Kondurua1b22782015-04-07 15:28:45 -07005051
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005052 if (crtc_state->pch_pfit.enabled) {
Ville Syrjälä0a599522018-05-21 21:56:13 +03005053 u16 uv_rgb_hphase, uv_rgb_vphase;
Ville Syrjäläe7a278a2018-10-29 20:18:20 +02005054 int pfit_w, pfit_h, hscale, vscale;
Chandra Kondurua1b22782015-04-07 15:28:45 -07005055 int id;
5056
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005057 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
Chandra Kondurua1b22782015-04-07 15:28:45 -07005058 return;
Chandra Kondurua1b22782015-04-07 15:28:45 -07005059
Ville Syrjäläe7a278a2018-10-29 20:18:20 +02005060 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5061 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5062
5063 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5064 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5065
5066 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5067 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
Ville Syrjälä0a599522018-05-21 21:56:13 +03005068
Chandra Kondurua1b22782015-04-07 15:28:45 -07005069 id = scaler_state->scaler_id;
5070 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5071 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
Ville Syrjälä0a599522018-05-21 21:56:13 +03005072 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5073 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5074 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5075 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005076 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5077 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
Jesse Barnesbd2e2442014-11-13 17:51:47 +00005078 }
5079}
5080
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005081static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
Jesse Barnesb074cec2013-04-25 12:55:02 -07005082{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005083 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5084 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesb074cec2013-04-25 12:55:02 -07005085 int pipe = crtc->pipe;
5086
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005087 if (crtc_state->pch_pfit.enabled) {
Jesse Barnesb074cec2013-04-25 12:55:02 -07005088 /* Force use of hard-coded filter coefficients
5089 * as some pre-programmed values are broken,
5090 * e.g. x201.
5091 */
Tvrtko Ursulinfd6b8f42016-10-14 10:13:06 +01005092 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
Jesse Barnesb074cec2013-04-25 12:55:02 -07005093 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5094 PF_PIPE_SEL_IVB(pipe));
5095 else
5096 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005097 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5098 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
Jesse Barnes040484a2011-01-03 12:14:26 -08005099 }
Jesse Barnesf67a5592011-01-05 10:31:48 -08005100}
5101
Maarten Lankhorst199ea382017-11-10 12:35:00 +01005102void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
Paulo Zanonid77e4532013-09-24 13:52:55 -03005103{
Maarten Lankhorst199ea382017-11-10 12:35:00 +01005104 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ville Syrjäläcea165c2014-04-15 21:41:35 +03005105 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005106 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanonid77e4532013-09-24 13:52:55 -03005107
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005108 if (!crtc_state->ips_enabled)
Paulo Zanonid77e4532013-09-24 13:52:55 -03005109 return;
5110
Maarten Lankhorst307e4492016-03-23 14:33:28 +01005111 /*
5112 * We can only enable IPS after we enable a plane and wait for a vblank
5113 * This function is called from post_plane_update, which is run after
5114 * a vblank wait.
5115 */
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005116 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02005117
Tvrtko Ursulin86527442016-10-13 11:03:00 +01005118 if (IS_BROADWELL(dev_priv)) {
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01005119 mutex_lock(&dev_priv->pcu_lock);
Ville Syrjälä61843f02017-09-12 18:34:11 +03005120 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5121 IPS_ENABLE | IPS_PCODE_CONTROL));
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01005122 mutex_unlock(&dev_priv->pcu_lock);
Ben Widawsky2a114cc2013-11-02 21:07:47 -07005123 /* Quoting Art Runyan: "its not safe to expect any particular
5124 * value in IPS_CTL bit 31 after enabling IPS through the
Jesse Barnese59150d2014-01-07 13:30:45 -08005125 * mailbox." Moreover, the mailbox may return a bogus state,
5126 * so we need to just enable it and continue on.
Ben Widawsky2a114cc2013-11-02 21:07:47 -07005127 */
5128 } else {
5129 I915_WRITE(IPS_CTL, IPS_ENABLE);
5130 /* The bit only becomes 1 in the next vblank, so this wait here
5131 * is essentially intel_wait_for_vblank. If we don't have this
5132 * and don't wait for vblanks until the end of crtc_enable, then
5133 * the HW state readout code will complain that the expected
5134 * IPS_CTL value is not the one we read. */
Chris Wilson2ec9ba32016-06-30 15:33:01 +01005135 if (intel_wait_for_register(dev_priv,
5136 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
5137 50))
Ben Widawsky2a114cc2013-11-02 21:07:47 -07005138 DRM_ERROR("Timed out waiting for IPS enable\n");
5139 }
Paulo Zanonid77e4532013-09-24 13:52:55 -03005140}
5141
Maarten Lankhorst199ea382017-11-10 12:35:00 +01005142void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
Paulo Zanonid77e4532013-09-24 13:52:55 -03005143{
Maarten Lankhorst199ea382017-11-10 12:35:00 +01005144 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Paulo Zanonid77e4532013-09-24 13:52:55 -03005145 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005146 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanonid77e4532013-09-24 13:52:55 -03005147
Maarten Lankhorst199ea382017-11-10 12:35:00 +01005148 if (!crtc_state->ips_enabled)
Paulo Zanonid77e4532013-09-24 13:52:55 -03005149 return;
5150
Tvrtko Ursulin86527442016-10-13 11:03:00 +01005151 if (IS_BROADWELL(dev_priv)) {
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01005152 mutex_lock(&dev_priv->pcu_lock);
Ben Widawsky2a114cc2013-11-02 21:07:47 -07005153 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01005154 mutex_unlock(&dev_priv->pcu_lock);
Imre Deakacb3ef02018-09-05 13:00:05 +03005155 /*
5156 * Wait for PCODE to finish disabling IPS. The BSpec specified
5157 * 42ms timeout value leads to occasional timeouts so use 100ms
5158 * instead.
5159 */
Chris Wilsonb85c1ec2016-06-30 15:33:02 +01005160 if (intel_wait_for_register(dev_priv,
5161 IPS_CTL, IPS_ENABLE, 0,
Imre Deakacb3ef02018-09-05 13:00:05 +03005162 100))
Ben Widawsky23d0b132014-04-10 14:32:41 -07005163 DRM_ERROR("Timed out waiting for IPS disable\n");
Jesse Barnese59150d2014-01-07 13:30:45 -08005164 } else {
Ben Widawsky2a114cc2013-11-02 21:07:47 -07005165 I915_WRITE(IPS_CTL, 0);
Jesse Barnese59150d2014-01-07 13:30:45 -08005166 POSTING_READ(IPS_CTL);
5167 }
Paulo Zanonid77e4532013-09-24 13:52:55 -03005168
5169 /* We need to wait for a vblank before we can disable the plane. */
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02005170 intel_wait_for_vblank(dev_priv, crtc->pipe);
Paulo Zanonid77e4532013-09-24 13:52:55 -03005171}
5172
Maarten Lankhorst7cac9452015-04-21 17:12:55 +03005173static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005174{
Maarten Lankhorst7cac9452015-04-21 17:12:55 +03005175 if (intel_crtc->overlay) {
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005176 struct drm_device *dev = intel_crtc->base.dev;
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005177
5178 mutex_lock(&dev->struct_mutex);
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005179 (void) intel_overlay_switch_off(intel_crtc->overlay);
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005180 mutex_unlock(&dev->struct_mutex);
5181 }
5182
5183 /* Let userspace switch the overlay on again. In most cases userspace
5184 * has to recompute where to put it anyway.
5185 */
5186}
5187
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005188/**
5189 * intel_post_enable_primary - Perform operations after enabling primary plane
5190 * @crtc: the CRTC whose primary plane was just enabled
Chris Wilsonc38c1452018-02-14 13:49:22 +00005191 * @new_crtc_state: the enabling state
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005192 *
5193 * Performs potentially sleeping operations that must be done after the primary
5194 * plane is enabled, such as updating FBC and IPS. Note that this may be
5195 * called due to an explicit primary plane update, or due to an implicit
5196 * re-enable that is caused when a sprite plane is updated to no longer
5197 * completely hide the primary plane.
5198 */
5199static void
Maarten Lankhorst199ea382017-11-10 12:35:00 +01005200intel_post_enable_primary(struct drm_crtc *crtc,
5201 const struct intel_crtc_state *new_crtc_state)
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005202{
5203 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005204 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005205 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5206 int pipe = intel_crtc->pipe;
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005207
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005208 /*
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005209 * Gen2 reports pipe underruns whenever all planes are disabled.
5210 * So don't enable underrun reporting before at least some planes
5211 * are enabled.
5212 * FIXME: Need to fix the logic to work when we turn off all planes
5213 * but leave the pipe running.
Daniel Vetterf99d7062014-06-19 16:01:59 +02005214 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08005215 if (IS_GEN(dev_priv, 2))
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005216 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5217
Ville Syrjäläaca7b682015-10-30 19:22:21 +02005218 /* Underruns don't always raise interrupts, so check manually. */
5219 intel_check_cpu_fifo_underruns(dev_priv);
5220 intel_check_pch_fifo_underruns(dev_priv);
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005221}
5222
Ville Syrjälä2622a082016-03-09 19:07:26 +02005223/* FIXME get rid of this and use pre_plane_update */
5224static void
5225intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5226{
5227 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005228 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä2622a082016-03-09 19:07:26 +02005229 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5230 int pipe = intel_crtc->pipe;
5231
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005232 /*
5233 * Gen2 reports pipe underruns whenever all planes are disabled.
5234 * So disable underrun reporting before all the planes get disabled.
5235 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08005236 if (IS_GEN(dev_priv, 2))
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005237 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5238
5239 hsw_disable_ips(to_intel_crtc_state(crtc->state));
Ville Syrjälä2622a082016-03-09 19:07:26 +02005240
5241 /*
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005242 * Vblank time updates from the shadow to live plane control register
5243 * are blocked if the memory self-refresh mode is active at that
5244 * moment. So to make sure the plane gets truly disabled, disable
5245 * first the self-refresh mode. The self-refresh enable bit in turn
5246 * will be checked/applied by the HW only at the next frame start
5247 * event which is after the vblank start event, so we need to have a
5248 * wait-for-vblank between disabling the plane and the pipe.
5249 */
Ville Syrjälä11a85d62016-11-28 19:37:12 +02005250 if (HAS_GMCH_DISPLAY(dev_priv) &&
5251 intel_set_memory_cxsr(dev_priv, false))
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02005252 intel_wait_for_vblank(dev_priv, pipe);
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005253}
5254
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005255static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5256 const struct intel_crtc_state *new_crtc_state)
5257{
5258 if (!old_crtc_state->ips_enabled)
5259 return false;
5260
5261 if (needs_modeset(&new_crtc_state->base))
5262 return true;
5263
5264 return !new_crtc_state->ips_enabled;
5265}
5266
5267static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5268 const struct intel_crtc_state *new_crtc_state)
5269{
5270 if (!new_crtc_state->ips_enabled)
5271 return false;
5272
5273 if (needs_modeset(&new_crtc_state->base))
5274 return true;
5275
5276 /*
5277 * We can't read out IPS on broadwell, assume the worst and
5278 * forcibly enable IPS on the first fastset.
5279 */
5280 if (new_crtc_state->update_pipe &&
5281 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5282 return true;
5283
5284 return !old_crtc_state->ips_enabled;
5285}
5286
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305287static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5288 const struct intel_crtc_state *crtc_state)
5289{
5290 if (!crtc_state->nv12_planes)
5291 return false;
5292
Rodrigo Vivi1347d3c2018-10-31 09:28:45 -07005293 /* WA Display #0827: Gen9:all */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08005294 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305295 return true;
5296
5297 return false;
5298}
5299
Daniel Vetter5a21b662016-05-24 17:13:53 +02005300static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5301{
5302 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
Vidya Srinivasc4a4efa2018-04-09 09:11:09 +05305303 struct drm_device *dev = crtc->base.dev;
5304 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter5a21b662016-05-24 17:13:53 +02005305 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5306 struct intel_crtc_state *pipe_config =
Ville Syrjäläf9a8c142017-08-23 18:22:24 +03005307 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
5308 crtc);
Daniel Vetter5a21b662016-05-24 17:13:53 +02005309 struct drm_plane *primary = crtc->base.primary;
Maarten Lankhorst8b694492018-04-09 14:46:55 +02005310 struct drm_plane_state *old_primary_state =
5311 drm_atomic_get_old_plane_state(old_state, primary);
Daniel Vetter5a21b662016-05-24 17:13:53 +02005312
Chris Wilson5748b6a2016-08-04 16:32:38 +01005313 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
Daniel Vetter5a21b662016-05-24 17:13:53 +02005314
Daniel Vetter5a21b662016-05-24 17:13:53 +02005315 if (pipe_config->update_wm_post && pipe_config->base.active)
Ville Syrjälä432081b2016-10-31 22:37:03 +02005316 intel_update_watermarks(crtc);
Daniel Vetter5a21b662016-05-24 17:13:53 +02005317
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005318 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5319 hsw_enable_ips(pipe_config);
5320
Maarten Lankhorst8b694492018-04-09 14:46:55 +02005321 if (old_primary_state) {
5322 struct drm_plane_state *new_primary_state =
5323 drm_atomic_get_new_plane_state(old_state, primary);
Daniel Vetter5a21b662016-05-24 17:13:53 +02005324
5325 intel_fbc_post_update(crtc);
5326
Maarten Lankhorst8b694492018-04-09 14:46:55 +02005327 if (new_primary_state->visible &&
Daniel Vetter5a21b662016-05-24 17:13:53 +02005328 (needs_modeset(&pipe_config->base) ||
Maarten Lankhorst8b694492018-04-09 14:46:55 +02005329 !old_primary_state->visible))
Maarten Lankhorst199ea382017-11-10 12:35:00 +01005330 intel_post_enable_primary(&crtc->base, pipe_config);
Daniel Vetter5a21b662016-05-24 17:13:53 +02005331 }
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305332
5333 /* Display WA 827 */
5334 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
Vidya Srinivas6deef9b2018-05-12 03:03:13 +05305335 !needs_nv12_wa(dev_priv, pipe_config)) {
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305336 skl_wa_clkgate(dev_priv, crtc->pipe, false);
Vidya Srinivas6deef9b2018-05-12 03:03:13 +05305337 }
Daniel Vetter5a21b662016-05-24 17:13:53 +02005338}
5339
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005340static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5341 struct intel_crtc_state *pipe_config)
Maarten Lankhorstac21b222015-06-15 12:33:49 +02005342{
Maarten Lankhorst5c74cd72016-02-03 16:53:24 +01005343 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
Maarten Lankhorstac21b222015-06-15 12:33:49 +02005344 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005345 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorst5c74cd72016-02-03 16:53:24 +01005346 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5347 struct drm_plane *primary = crtc->base.primary;
Maarten Lankhorst8b694492018-04-09 14:46:55 +02005348 struct drm_plane_state *old_primary_state =
5349 drm_atomic_get_old_plane_state(old_state, primary);
Maarten Lankhorst5c74cd72016-02-03 16:53:24 +01005350 bool modeset = needs_modeset(&pipe_config->base);
Maarten Lankhorstccf010f2016-11-08 13:55:32 +01005351 struct intel_atomic_state *old_intel_state =
5352 to_intel_atomic_state(old_state);
Maarten Lankhorstac21b222015-06-15 12:33:49 +02005353
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005354 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5355 hsw_disable_ips(old_crtc_state);
5356
Maarten Lankhorst8b694492018-04-09 14:46:55 +02005357 if (old_primary_state) {
5358 struct intel_plane_state *new_primary_state =
Ville Syrjäläf9a8c142017-08-23 18:22:24 +03005359 intel_atomic_get_new_plane_state(old_intel_state,
5360 to_intel_plane(primary));
Maarten Lankhorst5c74cd72016-02-03 16:53:24 +01005361
Maarten Lankhorst8b694492018-04-09 14:46:55 +02005362 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005363 /*
5364 * Gen2 reports pipe underruns whenever all planes are disabled.
5365 * So disable underrun reporting before all the planes get disabled.
5366 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08005367 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
Maarten Lankhorst8b694492018-04-09 14:46:55 +02005368 (modeset || !new_primary_state->base.visible))
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005369 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
Maarten Lankhorst5c74cd72016-02-03 16:53:24 +01005370 }
Ville Syrjälä852eb002015-06-24 22:00:07 +03005371
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305372 /* Display WA 827 */
5373 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
Vidya Srinivas6deef9b2018-05-12 03:03:13 +05305374 needs_nv12_wa(dev_priv, pipe_config)) {
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305375 skl_wa_clkgate(dev_priv, crtc->pipe, true);
Vidya Srinivas6deef9b2018-05-12 03:03:13 +05305376 }
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305377
Ville Syrjälä5eeb7982017-03-02 19:15:00 +02005378 /*
5379 * Vblank time updates from the shadow to live plane control register
5380 * are blocked if the memory self-refresh mode is active at that
5381 * moment. So to make sure the plane gets truly disabled, disable
5382 * first the self-refresh mode. The self-refresh enable bit in turn
5383 * will be checked/applied by the HW only at the next frame start
5384 * event which is after the vblank start event, so we need to have a
5385 * wait-for-vblank between disabling the plane and the pipe.
5386 */
5387 if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
5388 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5389 intel_wait_for_vblank(dev_priv, crtc->pipe);
Maarten Lankhorst92826fc2015-12-03 13:49:13 +01005390
Matt Ropered4a6a72016-02-23 17:20:13 -08005391 /*
5392 * IVB workaround: must disable low power watermarks for at least
5393 * one frame before enabling scaling. LP watermarks can be re-enabled
5394 * when scaling is disabled.
5395 *
5396 * WaCxSRDisabledForSpriteScaling:ivb
5397 */
Ville Syrjälä8e7a4422018-10-04 15:15:27 +03005398 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5399 old_crtc_state->base.active)
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02005400 intel_wait_for_vblank(dev_priv, crtc->pipe);
Matt Ropered4a6a72016-02-23 17:20:13 -08005401
5402 /*
5403 * If we're doing a modeset, we're done. No need to do any pre-vblank
5404 * watermark programming here.
5405 */
5406 if (needs_modeset(&pipe_config->base))
5407 return;
5408
5409 /*
5410 * For platforms that support atomic watermarks, program the
5411 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
5412 * will be the intermediate values that are safe for both pre- and
5413 * post- vblank; when vblank happens, the 'active' values will be set
5414 * to the final 'target' values and we'll do this again to get the
5415 * optimal watermarks. For gen9+ platforms, the values we program here
5416 * will be the final target values which will get automatically latched
5417 * at vblank time; no further programming will be necessary.
5418 *
5419 * If a platform hasn't been transitioned to atomic watermarks yet,
5420 * we'll continue to update watermarks the old way, if flags tell
5421 * us to.
5422 */
5423 if (dev_priv->display.initial_watermarks != NULL)
Maarten Lankhorstccf010f2016-11-08 13:55:32 +01005424 dev_priv->display.initial_watermarks(old_intel_state,
5425 pipe_config);
Ville Syrjäläcaed3612016-03-09 19:07:25 +02005426 else if (pipe_config->update_wm_pre)
Ville Syrjälä432081b2016-10-31 22:37:03 +02005427 intel_update_watermarks(crtc);
Maarten Lankhorstac21b222015-06-15 12:33:49 +02005428}
5429
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005430static void intel_crtc_disable_planes(struct intel_atomic_state *state,
5431 struct intel_crtc *crtc)
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005432{
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005433 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5434 const struct intel_crtc_state *new_crtc_state =
5435 intel_atomic_get_new_crtc_state(state, crtc);
5436 unsigned int update_mask = new_crtc_state->update_planes;
5437 const struct intel_plane_state *old_plane_state;
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005438 struct intel_plane *plane;
5439 unsigned fb_bits = 0;
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005440 int i;
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005441
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005442 intel_crtc_dpms_overlay_disable(crtc);
Maarten Lankhorst27321ae2015-04-21 17:12:52 +03005443
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005444 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
5445 if (crtc->pipe != plane->pipe ||
5446 !(update_mask & BIT(plane->id)))
5447 continue;
Ville Syrjäläf98551a2014-05-22 17:48:06 +03005448
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005449 plane->disable_plane(plane, new_crtc_state);
5450
5451 if (old_plane_state->base.visible)
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005452 fb_bits |= plane->frontbuffer_bit;
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005453 }
5454
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005455 intel_frontbuffer_flip(dev_priv, fb_bits);
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005456}
5457
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005458static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005459 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005460 struct drm_atomic_state *old_state)
5461{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005462 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005463 struct drm_connector *conn;
5464 int i;
5465
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005466 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005467 struct intel_encoder *encoder =
5468 to_intel_encoder(conn_state->best_encoder);
5469
5470 if (conn_state->crtc != crtc)
5471 continue;
5472
5473 if (encoder->pre_pll_enable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005474 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005475 }
5476}
5477
5478static void intel_encoders_pre_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005479 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005480 struct drm_atomic_state *old_state)
5481{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005482 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005483 struct drm_connector *conn;
5484 int i;
5485
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005486 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005487 struct intel_encoder *encoder =
5488 to_intel_encoder(conn_state->best_encoder);
5489
5490 if (conn_state->crtc != crtc)
5491 continue;
5492
5493 if (encoder->pre_enable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005494 encoder->pre_enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005495 }
5496}
5497
5498static void intel_encoders_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005499 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005500 struct drm_atomic_state *old_state)
5501{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005502 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005503 struct drm_connector *conn;
5504 int i;
5505
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005506 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005507 struct intel_encoder *encoder =
5508 to_intel_encoder(conn_state->best_encoder);
5509
5510 if (conn_state->crtc != crtc)
5511 continue;
5512
Jani Nikulac84c6fe2018-10-16 15:41:34 +03005513 if (encoder->enable)
5514 encoder->enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005515 intel_opregion_notify_encoder(encoder, true);
5516 }
5517}
5518
5519static void intel_encoders_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005520 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005521 struct drm_atomic_state *old_state)
5522{
5523 struct drm_connector_state *old_conn_state;
5524 struct drm_connector *conn;
5525 int i;
5526
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005527 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005528 struct intel_encoder *encoder =
5529 to_intel_encoder(old_conn_state->best_encoder);
5530
5531 if (old_conn_state->crtc != crtc)
5532 continue;
5533
5534 intel_opregion_notify_encoder(encoder, false);
Jani Nikulac84c6fe2018-10-16 15:41:34 +03005535 if (encoder->disable)
5536 encoder->disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005537 }
5538}
5539
5540static void intel_encoders_post_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005541 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005542 struct drm_atomic_state *old_state)
5543{
5544 struct drm_connector_state *old_conn_state;
5545 struct drm_connector *conn;
5546 int i;
5547
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005548 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005549 struct intel_encoder *encoder =
5550 to_intel_encoder(old_conn_state->best_encoder);
5551
5552 if (old_conn_state->crtc != crtc)
5553 continue;
5554
5555 if (encoder->post_disable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005556 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005557 }
5558}
5559
5560static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005561 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005562 struct drm_atomic_state *old_state)
5563{
5564 struct drm_connector_state *old_conn_state;
5565 struct drm_connector *conn;
5566 int i;
5567
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005568 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005569 struct intel_encoder *encoder =
5570 to_intel_encoder(old_conn_state->best_encoder);
5571
5572 if (old_conn_state->crtc != crtc)
5573 continue;
5574
5575 if (encoder->post_pll_disable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005576 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005577 }
5578}
5579
Hans de Goede608ed4a2018-12-20 14:21:18 +01005580static void intel_encoders_update_pipe(struct drm_crtc *crtc,
5581 struct intel_crtc_state *crtc_state,
5582 struct drm_atomic_state *old_state)
5583{
5584 struct drm_connector_state *conn_state;
5585 struct drm_connector *conn;
5586 int i;
5587
5588 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5589 struct intel_encoder *encoder =
5590 to_intel_encoder(conn_state->best_encoder);
5591
5592 if (conn_state->crtc != crtc)
5593 continue;
5594
5595 if (encoder->update_pipe)
5596 encoder->update_pipe(encoder, crtc_state, conn_state);
5597 }
5598}
5599
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005600static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5601 struct drm_atomic_state *old_state)
Jesse Barnesf67a5592011-01-05 10:31:48 -08005602{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005603 struct drm_crtc *crtc = pipe_config->base.crtc;
Jesse Barnesf67a5592011-01-05 10:31:48 -08005604 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005605 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005606 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5607 int pipe = intel_crtc->pipe;
Maarten Lankhorstccf010f2016-11-08 13:55:32 +01005608 struct intel_atomic_state *old_intel_state =
5609 to_intel_atomic_state(old_state);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005610
Maarten Lankhorst53d9f4e2015-06-01 12:49:52 +02005611 if (WARN_ON(intel_crtc->active))
Jesse Barnesf67a5592011-01-05 10:31:48 -08005612 return;
5613
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005614 /*
5615 * Sometimes spurious CPU pipe underruns happen during FDI
5616 * training, at least with VGA+HDMI cloning. Suppress them.
5617 *
5618 * On ILK we get an occasional spurious CPU pipe underruns
5619 * between eDP port A enable and vdd enable. Also PCH port
5620 * enable seems to result in the occasional CPU pipe underrun.
5621 *
5622 * Spurious PCH underruns also occur during PCH enabling.
5623 */
Ville Syrjälä2b5b6312018-05-24 22:04:06 +03005624 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5625 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
Ville Syrjälä81b088c2015-10-30 19:21:31 +02005626
Maarten Lankhorst65c307f2018-10-05 11:52:44 +02005627 if (pipe_config->has_pch_encoder)
5628 intel_prepare_shared_dpll(pipe_config);
Daniel Vetterb14b1052014-04-24 23:55:13 +02005629
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005630 if (intel_crtc_has_dp_encoder(pipe_config))
Maarten Lankhorst4c354752018-10-11 12:04:49 +02005631 intel_dp_set_m_n(pipe_config, M1_N1);
Daniel Vetter29407aa2014-04-24 23:55:08 +02005632
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02005633 intel_set_pipe_timings(pipe_config);
5634 intel_set_pipe_src_size(pipe_config);
Daniel Vetter29407aa2014-04-24 23:55:08 +02005635
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005636 if (pipe_config->has_pch_encoder) {
Maarten Lankhorst4c354752018-10-11 12:04:49 +02005637 intel_cpu_transcoder_set_m_n(pipe_config,
5638 &pipe_config->fdi_m_n, NULL);
Daniel Vetter29407aa2014-04-24 23:55:08 +02005639 }
5640
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02005641 ironlake_set_pipeconf(pipe_config);
Daniel Vetter29407aa2014-04-24 23:55:08 +02005642
Jesse Barnesf67a5592011-01-05 10:31:48 -08005643 intel_crtc->active = true;
Paulo Zanoni86642812013-04-12 17:57:57 -03005644
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005645 intel_encoders_pre_enable(crtc, pipe_config, old_state);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005646
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005647 if (pipe_config->has_pch_encoder) {
Daniel Vetterfff367c2012-10-27 15:50:28 +02005648 /* Note: FDI PLL enabling _must_ be done before we enable the
5649 * cpu pipes, hence this is separate from all the other fdi/pch
5650 * enabling. */
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02005651 ironlake_fdi_pll_enable(pipe_config);
Daniel Vetter46b6f812012-09-06 22:08:33 +02005652 } else {
5653 assert_fdi_tx_disabled(dev_priv, pipe);
5654 assert_fdi_rx_disabled(dev_priv, pipe);
5655 }
Jesse Barnesf67a5592011-01-05 10:31:48 -08005656
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005657 ironlake_pfit_enable(pipe_config);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005658
Jesse Barnes9c54c0d2011-06-15 23:32:33 +02005659 /*
5660 * On ILK+ LUT must be loaded before the pipe is running but with
5661 * clocks enabled
5662 */
Matt Roper302da0c2018-12-10 13:54:15 -08005663 intel_color_load_luts(pipe_config);
Jesse Barnes9c54c0d2011-06-15 23:32:33 +02005664
Imre Deak1d5bf5d2016-02-29 22:10:33 +02005665 if (dev_priv->display.initial_watermarks != NULL)
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005666 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
Ville Syrjälä4972f702017-11-29 17:37:32 +02005667 intel_enable_pipe(pipe_config);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005668
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005669 if (pipe_config->has_pch_encoder)
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03005670 ironlake_pch_enable(old_intel_state, pipe_config);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005671
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01005672 assert_vblank_disabled(crtc);
5673 drm_crtc_vblank_on(crtc);
5674
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005675 intel_encoders_enable(crtc, pipe_config, old_state);
Daniel Vetter61b77dd2012-07-02 00:16:19 +02005676
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01005677 if (HAS_PCH_CPT(dev_priv))
Daniel Vettera1520312013-05-03 11:49:50 +02005678 cpt_verify_modeset(dev, intel_crtc->pipe);
Ville Syrjälä37ca8d42015-10-30 19:20:27 +02005679
Ville Syrjäläea80a662018-05-24 22:04:05 +03005680 /*
5681 * Must wait for vblank to avoid spurious PCH FIFO underruns.
5682 * And a second vblank wait is needed at least on ILK with
5683 * some interlaced HDMI modes. Let's do the double wait always
5684 * in case there are more corner cases we don't know about.
5685 */
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005686 if (pipe_config->has_pch_encoder) {
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02005687 intel_wait_for_vblank(dev_priv, pipe);
Ville Syrjäläea80a662018-05-24 22:04:05 +03005688 intel_wait_for_vblank(dev_priv, pipe);
5689 }
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005690 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
Ville Syrjälä37ca8d42015-10-30 19:20:27 +02005691 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005692}
5693
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005694/* IPS only exists on ULT machines and is tied to pipe A. */
5695static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5696{
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01005697 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005698}
5699
Imre Deaked69cd42017-10-02 10:55:57 +03005700static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5701 enum pipe pipe, bool apply)
5702{
5703 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5704 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5705
5706 if (apply)
5707 val |= mask;
5708 else
5709 val &= ~mask;
5710
5711 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5712}
5713
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005714static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5715{
5716 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5717 enum pipe pipe = crtc->pipe;
5718 uint32_t val;
5719
Rodrigo Vivi443d5e32018-10-04 08:18:14 -07005720 val = MBUS_DBOX_A_CREDIT(2);
5721 val |= MBUS_DBOX_BW_CREDIT(1);
5722 val |= MBUS_DBOX_B_CREDIT(8);
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005723
5724 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5725}
5726
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005727static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
5728 struct drm_atomic_state *old_state)
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005729{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005730 struct drm_crtc *crtc = pipe_config->base.crtc;
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00005731 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005732 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Maarten Lankhorst99d736a2015-06-01 12:50:09 +02005733 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005734 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
Maarten Lankhorstccf010f2016-11-08 13:55:32 +01005735 struct intel_atomic_state *old_intel_state =
5736 to_intel_atomic_state(old_state);
Imre Deaked69cd42017-10-02 10:55:57 +03005737 bool psl_clkgate_wa;
Vandita Kulkarnie16a3752018-06-21 20:43:56 +05305738 u32 pipe_chicken;
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005739
Maarten Lankhorst53d9f4e2015-06-01 12:49:52 +02005740 if (WARN_ON(intel_crtc->active))
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005741 return;
5742
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005743 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
Imre Deak95a7a2a2016-06-13 16:44:35 +03005744
Maarten Lankhorst65c307f2018-10-05 11:52:44 +02005745 if (pipe_config->shared_dpll)
5746 intel_enable_shared_dpll(pipe_config);
Daniel Vetterdf8ad702014-06-25 22:02:03 +03005747
Paulo Zanonic8af5272018-05-02 14:58:51 -07005748 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5749
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005750 if (intel_crtc_has_dp_encoder(pipe_config))
Maarten Lankhorst4c354752018-10-11 12:04:49 +02005751 intel_dp_set_m_n(pipe_config, M1_N1);
Daniel Vetter229fca92014-04-24 23:55:09 +02005752
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005753 if (!transcoder_is_dsi(cpu_transcoder))
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02005754 intel_set_pipe_timings(pipe_config);
Jani Nikula4d1de972016-03-18 17:05:42 +02005755
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02005756 intel_set_pipe_src_size(pipe_config);
Daniel Vetter229fca92014-04-24 23:55:09 +02005757
Jani Nikula4d1de972016-03-18 17:05:42 +02005758 if (cpu_transcoder != TRANSCODER_EDP &&
5759 !transcoder_is_dsi(cpu_transcoder)) {
5760 I915_WRITE(PIPE_MULT(cpu_transcoder),
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005761 pipe_config->pixel_multiplier - 1);
Clint Taylorebb69c92014-09-30 10:30:22 -07005762 }
5763
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005764 if (pipe_config->has_pch_encoder) {
Maarten Lankhorst4c354752018-10-11 12:04:49 +02005765 intel_cpu_transcoder_set_m_n(pipe_config,
5766 &pipe_config->fdi_m_n, NULL);
Daniel Vetter229fca92014-04-24 23:55:09 +02005767 }
5768
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005769 if (!transcoder_is_dsi(cpu_transcoder))
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02005770 haswell_set_pipeconf(pipe_config);
Jani Nikula4d1de972016-03-18 17:05:42 +02005771
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02005772 haswell_set_pipemisc(pipe_config);
Daniel Vetter229fca92014-04-24 23:55:09 +02005773
Matt Roper302da0c2018-12-10 13:54:15 -08005774 intel_color_set_csc(pipe_config);
Daniel Vetter229fca92014-04-24 23:55:09 +02005775
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005776 intel_crtc->active = true;
Paulo Zanoni86642812013-04-12 17:57:57 -03005777
Imre Deaked69cd42017-10-02 10:55:57 +03005778 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
5779 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005780 pipe_config->pch_pfit.enabled;
Imre Deaked69cd42017-10-02 10:55:57 +03005781 if (psl_clkgate_wa)
5782 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
5783
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00005784 if (INTEL_GEN(dev_priv) >= 9)
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005785 skylake_pfit_enable(pipe_config);
Jesse Barnesff6d9f52015-01-21 17:19:54 -08005786 else
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005787 ironlake_pfit_enable(pipe_config);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005788
5789 /*
5790 * On ILK+ LUT must be loaded before the pipe is running but with
5791 * clocks enabled
5792 */
Matt Roper302da0c2018-12-10 13:54:15 -08005793 intel_color_load_luts(pipe_config);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005794
Vandita Kulkarnie16a3752018-06-21 20:43:56 +05305795 /*
5796 * Display WA #1153: enable hardware to bypass the alpha math
5797 * and rounding for per-pixel values 00 and 0xff
5798 */
5799 if (INTEL_GEN(dev_priv) >= 11) {
5800 pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
5801 if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
5802 I915_WRITE_FW(PIPE_CHICKEN(pipe),
5803 pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
5804 }
5805
Ander Conselvan de Oliveira3dc38ee2017-03-02 14:58:56 +02005806 intel_ddi_set_pipe_settings(pipe_config);
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005807 if (!transcoder_is_dsi(cpu_transcoder))
Ander Conselvan de Oliveira3dc38ee2017-03-02 14:58:56 +02005808 intel_ddi_enable_transcoder_func(pipe_config);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005809
Imre Deak1d5bf5d2016-02-29 22:10:33 +02005810 if (dev_priv->display.initial_watermarks != NULL)
Ville Syrjälä3125d392016-11-28 19:37:03 +02005811 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
Jani Nikula4d1de972016-03-18 17:05:42 +02005812
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005813 if (INTEL_GEN(dev_priv) >= 11)
5814 icl_pipe_mbus_enable(intel_crtc);
5815
Jani Nikula4d1de972016-03-18 17:05:42 +02005816 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005817 if (!transcoder_is_dsi(cpu_transcoder))
Ville Syrjälä4972f702017-11-29 17:37:32 +02005818 intel_enable_pipe(pipe_config);
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005819
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005820 if (pipe_config->has_pch_encoder)
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03005821 lpt_pch_enable(old_intel_state, pipe_config);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005822
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005823 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
Ander Conselvan de Oliveira3dc38ee2017-03-02 14:58:56 +02005824 intel_ddi_set_vc_payload_alloc(pipe_config, true);
Dave Airlie0e32b392014-05-02 14:02:48 +10005825
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01005826 assert_vblank_disabled(crtc);
5827 drm_crtc_vblank_on(crtc);
5828
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005829 intel_encoders_enable(crtc, pipe_config, old_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005830
Imre Deaked69cd42017-10-02 10:55:57 +03005831 if (psl_clkgate_wa) {
5832 intel_wait_for_vblank(dev_priv, pipe);
5833 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
5834 }
5835
Paulo Zanonie4916942013-09-20 16:21:19 -03005836 /* If we change the relative order between pipe/planes enabling, we need
5837 * to change the workaround. */
Maarten Lankhorst99d736a2015-06-01 12:50:09 +02005838 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01005839 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02005840 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
5841 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
Maarten Lankhorst99d736a2015-06-01 12:50:09 +02005842 }
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005843}
5844
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005845static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005846{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005847 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5848 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5849 enum pipe pipe = crtc->pipe;
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005850
5851 /* To avoid upsetting the power well on haswell only disable the pfit if
5852 * it's in use. The hw state code will make sure we get this right. */
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005853 if (old_crtc_state->pch_pfit.enabled) {
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005854 I915_WRITE(PF_CTL(pipe), 0);
5855 I915_WRITE(PF_WIN_POS(pipe), 0);
5856 I915_WRITE(PF_WIN_SZ(pipe), 0);
5857 }
5858}
5859
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005860static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
5861 struct drm_atomic_state *old_state)
Jesse Barnes6be4a602010-09-10 10:26:01 -07005862{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005863 struct drm_crtc *crtc = old_crtc_state->base.crtc;
Jesse Barnes6be4a602010-09-10 10:26:01 -07005864 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005865 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005866 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5867 int pipe = intel_crtc->pipe;
Jesse Barnes6be4a602010-09-10 10:26:01 -07005868
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005869 /*
5870 * Sometimes spurious CPU pipe underruns happen when the
5871 * pipe is already disabled, but FDI RX/TX is still enabled.
5872 * Happens at least with VGA+HDMI cloning. Suppress them.
5873 */
Ville Syrjälä2b5b6312018-05-24 22:04:06 +03005874 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5875 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
Ville Syrjälä37ca8d42015-10-30 19:20:27 +02005876
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005877 intel_encoders_disable(crtc, old_crtc_state, old_state);
Daniel Vetterea9d7582012-07-10 10:42:52 +02005878
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01005879 drm_crtc_vblank_off(crtc);
5880 assert_vblank_disabled(crtc);
5881
Ville Syrjälä4972f702017-11-29 17:37:32 +02005882 intel_disable_pipe(old_crtc_state);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005883
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005884 ironlake_pfit_disable(old_crtc_state);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005885
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005886 if (old_crtc_state->has_pch_encoder)
Ville Syrjälä5a74f702015-05-05 17:17:38 +03005887 ironlake_fdi_disable(crtc);
5888
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005889 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005890
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005891 if (old_crtc_state->has_pch_encoder) {
Daniel Vetterd925c592013-06-05 13:34:04 +02005892 ironlake_disable_pch_transcoder(dev_priv, pipe);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005893
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01005894 if (HAS_PCH_CPT(dev_priv)) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02005895 i915_reg_t reg;
5896 u32 temp;
5897
Daniel Vetterd925c592013-06-05 13:34:04 +02005898 /* disable TRANS_DP_CTL */
5899 reg = TRANS_DP_CTL(pipe);
5900 temp = I915_READ(reg);
5901 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5902 TRANS_DP_PORT_SEL_MASK);
5903 temp |= TRANS_DP_PORT_SEL_NONE;
5904 I915_WRITE(reg, temp);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005905
Daniel Vetterd925c592013-06-05 13:34:04 +02005906 /* disable DPLL_SEL */
5907 temp = I915_READ(PCH_DPLL_SEL);
Daniel Vetter11887392013-06-05 13:34:09 +02005908 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
Daniel Vetterd925c592013-06-05 13:34:04 +02005909 I915_WRITE(PCH_DPLL_SEL, temp);
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08005910 }
Daniel Vetterd925c592013-06-05 13:34:04 +02005911
Daniel Vetterd925c592013-06-05 13:34:04 +02005912 ironlake_fdi_pll_disable(intel_crtc);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005913 }
Ville Syrjälä81b088c2015-10-30 19:21:31 +02005914
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005915 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
Ville Syrjälä81b088c2015-10-30 19:21:31 +02005916 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005917}
5918
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005919static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
5920 struct drm_atomic_state *old_state)
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005921{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005922 struct drm_crtc *crtc = old_crtc_state->base.crtc;
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00005923 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005924 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Imre Deak24a28172018-06-13 20:07:06 +03005925 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005926
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005927 intel_encoders_disable(crtc, old_crtc_state, old_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005928
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01005929 drm_crtc_vblank_off(crtc);
5930 assert_vblank_disabled(crtc);
5931
Jani Nikula4d1de972016-03-18 17:05:42 +02005932 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005933 if (!transcoder_is_dsi(cpu_transcoder))
Ville Syrjälä4972f702017-11-29 17:37:32 +02005934 intel_disable_pipe(old_crtc_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005935
Imre Deak24a28172018-06-13 20:07:06 +03005936 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
5937 intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
Ville Syrjäläa4bf2142014-08-18 21:27:34 +03005938
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03005939 if (!transcoder_is_dsi(cpu_transcoder))
Clint Taylor90c3e212018-07-10 13:02:05 -07005940 intel_ddi_disable_transcoder_func(old_crtc_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005941
Manasi Navarea6006222018-11-28 12:26:23 -08005942 intel_dsc_disable(old_crtc_state);
5943
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00005944 if (INTEL_GEN(dev_priv) >= 9)
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02005945 skylake_scaler_disable(intel_crtc);
Jesse Barnesff6d9f52015-01-21 17:19:54 -08005946 else
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005947 ironlake_pfit_disable(old_crtc_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005948
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005949 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
Paulo Zanonic27e9172018-04-27 16:14:36 -07005950
Imre Deakbdaa29b2018-11-01 16:04:24 +02005951 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
Paulo Zanoni4f771f12012-10-23 18:29:51 -02005952}
5953
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005954static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
Jesse Barnes2dd24552013-04-25 12:55:01 -07005955{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005956 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5957 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnes2dd24552013-04-25 12:55:01 -07005958
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005959 if (!crtc_state->gmch_pfit.control)
Jesse Barnes2dd24552013-04-25 12:55:01 -07005960 return;
5961
Daniel Vetterc0b03412013-05-28 12:05:54 +02005962 /*
5963 * The panel fitter should only be adjusted whilst the pipe is disabled,
5964 * according to register description and PRM.
5965 */
Jesse Barnes2dd24552013-04-25 12:55:01 -07005966 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5967 assert_pipe_disabled(dev_priv, crtc->pipe);
5968
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005969 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
5970 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
Daniel Vetter5a80c452013-04-25 22:52:18 +02005971
5972 /* Border color in case we don't scale up to the full screen. Black by
5973 * default, change to something else for debugging. */
5974 I915_WRITE(BCLRPAT(crtc->pipe), 0);
Jesse Barnes2dd24552013-04-25 12:55:01 -07005975}
5976
Mahesh Kumar176597a2018-10-04 14:20:43 +05305977bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
5978{
5979 if (port == PORT_NONE)
5980 return false;
5981
5982 if (IS_ICELAKE(dev_priv))
5983 return port <= PORT_B;
5984
5985 return false;
5986}
5987
Paulo Zanoniac213c12018-05-21 17:25:37 -07005988bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
5989{
5990 if (IS_ICELAKE(dev_priv))
5991 return port >= PORT_C && port <= PORT_F;
5992
5993 return false;
5994}
5995
5996enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
5997{
5998 if (!intel_port_is_tc(dev_priv, port))
5999 return PORT_TC_NONE;
6000
6001 return port - PORT_C;
6002}
6003
Ander Conselvan de Oliveira79f255a2017-02-22 08:34:27 +02006004enum intel_display_power_domain intel_port_to_power_domain(enum port port)
Dave Airlied05410f2014-06-05 13:22:59 +10006005{
6006 switch (port) {
6007 case PORT_A:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01006008 return POWER_DOMAIN_PORT_DDI_A_LANES;
Dave Airlied05410f2014-06-05 13:22:59 +10006009 case PORT_B:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01006010 return POWER_DOMAIN_PORT_DDI_B_LANES;
Dave Airlied05410f2014-06-05 13:22:59 +10006011 case PORT_C:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01006012 return POWER_DOMAIN_PORT_DDI_C_LANES;
Dave Airlied05410f2014-06-05 13:22:59 +10006013 case PORT_D:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01006014 return POWER_DOMAIN_PORT_DDI_D_LANES;
Xiong Zhangd8e19f92015-08-13 18:00:12 +08006015 case PORT_E:
Patrik Jakobsson6331a702015-11-09 16:48:21 +01006016 return POWER_DOMAIN_PORT_DDI_E_LANES;
Rodrigo Vivi9787e832018-01-29 15:22:22 -08006017 case PORT_F:
6018 return POWER_DOMAIN_PORT_DDI_F_LANES;
Dave Airlied05410f2014-06-05 13:22:59 +10006019 default:
Imre Deakb9fec162015-11-18 15:57:25 +02006020 MISSING_CASE(port);
Dave Airlied05410f2014-06-05 13:22:59 +10006021 return POWER_DOMAIN_PORT_OTHER;
6022 }
6023}
6024
Imre Deak337837a2018-11-01 16:04:23 +02006025enum intel_display_power_domain
6026intel_aux_power_domain(struct intel_digital_port *dig_port)
6027{
6028 switch (dig_port->aux_ch) {
6029 case AUX_CH_A:
6030 return POWER_DOMAIN_AUX_A;
6031 case AUX_CH_B:
6032 return POWER_DOMAIN_AUX_B;
6033 case AUX_CH_C:
6034 return POWER_DOMAIN_AUX_C;
6035 case AUX_CH_D:
6036 return POWER_DOMAIN_AUX_D;
6037 case AUX_CH_E:
6038 return POWER_DOMAIN_AUX_E;
6039 case AUX_CH_F:
6040 return POWER_DOMAIN_AUX_F;
6041 default:
6042 MISSING_CASE(dig_port->aux_ch);
6043 return POWER_DOMAIN_AUX_A;
6044 }
6045}
6046
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006047static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6048 struct intel_crtc_state *crtc_state)
Imre Deak319be8a2014-03-04 19:22:57 +02006049{
6050 struct drm_device *dev = crtc->dev;
Maarten Lankhorst37255d82016-12-15 15:29:43 +01006051 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006052 struct drm_encoder *encoder;
Imre Deak319be8a2014-03-04 19:22:57 +02006053 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6054 enum pipe pipe = intel_crtc->pipe;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006055 u64 mask;
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006056 enum transcoder transcoder = crtc_state->cpu_transcoder;
Imre Deak77d22dc2014-03-05 16:20:52 +02006057
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006058 if (!crtc_state->base.active)
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006059 return 0;
6060
Imre Deak17bd6e62018-01-09 14:20:40 +02006061 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6062 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006063 if (crtc_state->pch_pfit.enabled ||
6064 crtc_state->pch_pfit.force_thru)
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006065 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
Imre Deak77d22dc2014-03-05 16:20:52 +02006066
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006067 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6068 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6069
Ander Conselvan de Oliveira79f255a2017-02-22 08:34:27 +02006070 mask |= BIT_ULL(intel_encoder->power_domain);
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006071 }
Imre Deak319be8a2014-03-04 19:22:57 +02006072
Maarten Lankhorst37255d82016-12-15 15:29:43 +01006073 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
Imre Deak17bd6e62018-01-09 14:20:40 +02006074 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
Maarten Lankhorst37255d82016-12-15 15:29:43 +01006075
Maarten Lankhorst15e7ec22016-03-14 09:27:54 +01006076 if (crtc_state->shared_dpll)
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006077 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
Maarten Lankhorst15e7ec22016-03-14 09:27:54 +01006078
Imre Deak77d22dc2014-03-05 16:20:52 +02006079 return mask;
6080}
6081
Ander Conselvan de Oliveirad2d15012017-02-13 16:57:33 +02006082static u64
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006083modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6084 struct intel_crtc_state *crtc_state)
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006085{
Chris Wilsonfac5e232016-07-04 11:34:36 +01006086 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006087 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6088 enum intel_display_power_domain domain;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006089 u64 domains, new_domains, old_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006090
6091 old_domains = intel_crtc->enabled_power_domains;
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006092 intel_crtc->enabled_power_domains = new_domains =
6093 get_crtc_power_domains(crtc, crtc_state);
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006094
Daniel Vetter5a21b662016-05-24 17:13:53 +02006095 domains = new_domains & ~old_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006096
6097 for_each_power_domain(domain, domains)
6098 intel_display_power_get(dev_priv, domain);
6099
Daniel Vetter5a21b662016-05-24 17:13:53 +02006100 return old_domains & ~new_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006101}
6102
/* Release one power reference for each domain set in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
6111
/*
 * Crtc enable sequence for VLV/CHV: program pipe timings, pipeconf and
 * CSC, bring up the platform DPLL, run the encoder enable hooks in
 * their required order, then enable the pipe, vblanks and encoders.
 * The statement order below mirrors the hardware enable sequence and
 * must not be changed casually.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/* The crtc must be off before it can be (re)enabled. */
	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* CHV pipe B: select legacy blending, clear the canvas color. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_color_set_csc(pipe_config);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Encoder hooks that must run before the PLL is programmed. */
	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	/* CHV and VLV use different DPLL prepare/enable sequences. */
	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, pipe_config);
		chv_enable_pll(intel_crtc, pipe_config);
	} else {
		vlv_prepare_pll(intel_crtc, pipe_config);
		vlv_enable_pll(intel_crtc, pipe_config);
	}

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);

	/* Program watermarks before the pipe starts pumping pixels. */
	dev_priv->display.initial_watermarks(old_intel_state,
					    pipe_config);
	intel_enable_pipe(pipe_config);

	/* Vblanks are only turned on once the pipe is running. */
	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6170
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006171static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006172{
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006173 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6174 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006175
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006176 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6177 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006178}
6179
/*
 * Crtc enable sequence for pre-ILK gmch platforms (excluding VLV/CHV):
 * program PLL dividers, pipe timings and pipeconf, enable the DPLL,
 * panel fitter and LUTs, then the pipe, vblanks and finally encoders.
 * The statement order mirrors the hardware enable sequence.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* The crtc must be off before it can be (re)enabled. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* FIFO underrun reporting is skipped on gen2. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(pipe_config);

	/* Not every platform provides an initial_watermarks() hook. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	/* Vblanks are only turned on once the pipe is running. */
	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6229
/*
 * Turn off the GMCH panel fitter if the old crtc state had it enabled.
 * The pipe must already be disabled (asserted below).
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do if the panel fitter wasn't in use. */
	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
		      I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6244
/*
 * Crtc disable sequence for pre-ILK gmch platforms (including VLV/CHV):
 * encoders off, vblanks off, pipe off, panel fitter off, then the
 * platform-specific DPLL. The statement order mirrors the hardware
 * disable sequence and must not be changed casually.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	/* Vblanks must be off before the pipe goes down. */
	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* The DPLL is left alone for DSI outputs. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	/* FIFO underrun reporting is skipped on gen2. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
6293
/*
 * Force a crtc off outside of the normal atomic commit flow: disable
 * all visible planes, build a throw-away atomic state to run the
 * platform crtc_disable hook, then clear the crtc/encoder software
 * state and drop the crtc's power domain references.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off every plane that is still visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Bring the software state in line with the now-disabled hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	/* Detach every encoder that was feeding this crtc. */
	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Release the power domain references this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	/* Clear this pipe's contributions to the global cdclk/voltage state. */
	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6363
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006364/*
6365 * turn all crtc's off, but do not adjust state
6366 * This has to be paired with a call to intel_modeset_setup_hw_state.
6367 */
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006368int intel_display_suspend(struct drm_device *dev)
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006369{
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006370 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006371 struct drm_atomic_state *state;
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006372 int ret;
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006373
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006374 state = drm_atomic_helper_suspend(dev);
6375 ret = PTR_ERR_OR_ZERO(state);
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006376 if (ret)
6377 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006378 else
6379 dev_priv->modeset_restore_state = state;
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006380 return ret;
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006381}
6382
/*
 * Tear down the DRM core's encoder bookkeeping, then free the
 * containing intel_encoder object.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6390
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		/* An enabled connector must have a crtc state attached. */
		I915_STATE_WARN(!crtc_state,
				"connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
				"connector is active, but attached crtc isn't\n");

		/* Encoder checks are skipped without an encoder or for DP MST. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Disabled connector: no active crtc, no dangling encoder. */
		I915_STATE_WARN(crtc_state && crtc_state->active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
				"best encoder set without crtc!\n");
	}
}
6429
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006430static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
Ville Syrjäläd272ddf2015-03-11 18:52:31 +02006431{
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006432 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6433 return crtc_state->fdi_lanes;
Ville Syrjäläd272ddf2015-03-11 18:52:31 +02006434
6435 return 0;
6436}
6437
/*
 * Validate the FDI lane count requested for @pipe against per-platform
 * limits and against the lanes already claimed by the other pipes
 * sharing the FDI link.
 *
 * Returns 0 when the configuration fits, -EINVAL when it cannot work,
 * or the error from looking up the other crtc's atomic state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum accepted here. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW are capped at 2 lanes; no sharing rules beyond that. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to validate. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* More than 2 lanes on B is only OK if pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is capped at 2 lanes, and only if B uses at most 2. */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6509
#define RETRY 1
/*
 * Compute the FDI lane count and link M/N values for the mode. When
 * the lane configuration is impossible, reduce pipe_bpp and retry.
 *
 * Returns 0 on success, RETRY when the caller must recompute with the
 * reduced bpp, or a negative error code.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* -EDEADLK must be propagated so the atomic core can back off. */
	if (ret == -EDEADLK)
		return ret;

	/* Impossible lane config: drop 2 bits per channel and try again. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6558
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006559bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006560{
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006561 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6562 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6563
6564 /* IPS only exists on ULT machines and is tied to pipe A. */
6565 if (!hsw_crtc_supports_ips(crtc))
Ville Syrjälä6e644622017-08-17 17:55:09 +03006566 return false;
6567
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006568 if (!i915_modparams.enable_ips)
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006569 return false;
6570
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006571 if (crtc_state->pipe_bpp > 24)
6572 return false;
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006573
6574 /*
Ville Syrjäläb432e5c2015-06-03 15:45:13 +03006575 * We compare against max which means we must take
6576 * the increased cdclk requirement into account when
6577 * calculating the new cdclk.
6578 *
6579 * Should measure whether using a lower cdclk w/o IPS
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006580 */
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006581 if (IS_BROADWELL(dev_priv) &&
6582 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6583 return false;
6584
6585 return true;
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006586}
6587
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006588static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
Paulo Zanoni42db64e2013-05-31 16:33:22 -03006589{
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006590 struct drm_i915_private *dev_priv =
6591 to_i915(crtc_state->base.crtc->dev);
6592 struct intel_atomic_state *intel_state =
6593 to_intel_atomic_state(crtc_state->base.state);
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006594
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006595 if (!hsw_crtc_state_ips_capable(crtc_state))
6596 return false;
6597
6598 if (crtc_state->ips_force_disable)
6599 return false;
6600
Maarten Lankhorstadbe5c52017-11-22 19:39:06 +01006601 /* IPS should be fine as long as at least one plane is enabled. */
6602 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006603 return false;
6604
6605 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6606 if (IS_BROADWELL(dev_priv) &&
6607 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6608 return false;
6609
6610 return true;
Paulo Zanoni42db64e2013-05-31 16:33:22 -03006611}
6612
Ville Syrjälä39acb4a2015-10-30 23:39:38 +02006613static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6614{
6615 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6616
6617 /* GDG double wide on either pipe, otherwise pipe A only */
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +00006618 return INTEL_GEN(dev_priv) < 4 &&
Ville Syrjälä39acb4a2015-10-30 23:39:38 +02006619 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6620}
6621
Ville Syrjäläceb99322017-01-20 20:22:05 +02006622static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6623{
6624 uint32_t pixel_rate;
6625
6626 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6627
6628 /*
6629 * We only use IF-ID interlacing. If we ever use
6630 * PF-ID we'll need to adjust the pixel_rate here.
6631 */
6632
6633 if (pipe_config->pch_pfit.enabled) {
6634 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
6635 uint32_t pfit_size = pipe_config->pch_pfit.size;
6636
6637 pipe_w = pipe_config->pipe_src_w;
6638 pipe_h = pipe_config->pipe_src_h;
6639
6640 pfit_w = (pfit_size >> 16) & 0xFFFF;
6641 pfit_h = pfit_size & 0xFFFF;
6642 if (pipe_w < pfit_w)
6643 pipe_w = pfit_w;
6644 if (pipe_h < pfit_h)
6645 pipe_h = pfit_h;
6646
6647 if (WARN_ON(!pfit_w || !pfit_h))
6648 return pixel_rate;
6649
6650 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
6651 pfit_w * pfit_h);
6652 }
6653
6654 return pixel_rate;
6655}
6656
Ville Syrjäläa7d1b3f2017-01-26 21:50:31 +02006657static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6658{
6659 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6660
6661 if (HAS_GMCH_DISPLAY(dev_priv))
6662 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6663 crtc_state->pixel_rate =
6664 crtc_state->base.adjusted_mode.crtc_clock;
6665 else
6666 crtc_state->pixel_rate =
6667 ilk_pipe_pixel_rate(crtc_state);
6668}
6669
/*
 * Validate and finalize the CRTC-level parts of @pipe_config:
 * dot clock limits (enabling double wide mode on gen2/3 when needed),
 * pipe CSC availability for YCbCr output, odd source-width
 * restrictions, the Cantiga+ hsync workaround, the derived pixel
 * rate, and finally the FDI link config for PCH encoders.
 *
 * Returns 0 on success, -EINVAL if the mode cannot be supported.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Single-wide pipe on gen2/3 is limited to 90% of cdclk */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders additionally need a valid FDI link configuration */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6744
Zhenyu Wang2c072452009-06-05 15:38:42 +08006745static void
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006746intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
Zhenyu Wang2c072452009-06-05 15:38:42 +08006747{
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006748 while (*num > DATA_LINK_M_N_MASK ||
6749 *den > DATA_LINK_M_N_MASK) {
Zhenyu Wang2c072452009-06-05 15:38:42 +08006750 *num >>= 1;
6751 *den >>= 1;
6752 }
6753}
6754
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006755static void compute_m_n(unsigned int m, unsigned int n,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006756 uint32_t *ret_m, uint32_t *ret_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006757 bool constant_n)
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006758{
Jani Nikula9a86cda2017-03-27 14:33:25 +03006759 /*
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006760 * Several DP dongles in particular seem to be fussy about
6761 * too large link M/N values. Give N value as 0x8000 that
6762 * should be acceptable by specific devices. 0x8000 is the
6763 * specified fixed N value for asynchronous clock mode,
6764 * which the devices expect also in synchronous clock mode.
Jani Nikula9a86cda2017-03-27 14:33:25 +03006765 */
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006766 if (constant_n)
6767 *ret_n = 0x8000;
6768 else
6769 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
Jani Nikula9a86cda2017-03-27 14:33:25 +03006770
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006771 *ret_m = div_u64((uint64_t) m * *ret_n, n);
6772 intel_reduce_m_n_ratio(ret_m, ret_n);
6773}
6774
Daniel Vettere69d0bc2012-11-29 15:59:36 +01006775void
Manasi Navarea4a15772018-11-28 13:36:21 -08006776intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
Daniel Vettere69d0bc2012-11-29 15:59:36 +01006777 int pixel_clock, int link_clock,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006778 struct intel_link_m_n *m_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006779 bool constant_n)
Zhenyu Wang2c072452009-06-05 15:38:42 +08006780{
Daniel Vettere69d0bc2012-11-29 15:59:36 +01006781 m_n->tu = 64;
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006782
6783 compute_m_n(bits_per_pixel * pixel_clock,
6784 link_clock * nlanes * 8,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006785 &m_n->gmch_m, &m_n->gmch_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006786 constant_n);
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006787
6788 compute_m_n(pixel_clock, link_clock,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006789 &m_n->link_m, &m_n->link_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006790 constant_n);
Zhenyu Wang2c072452009-06-05 15:38:42 +08006791}
6792
Chris Wilsona7615032011-01-12 17:04:08 +00006793static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6794{
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00006795 if (i915_modparams.panel_use_ssc >= 0)
6796 return i915_modparams.panel_use_ssc != 0;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03006797 return dev_priv->vbt.lvds_use_ssc
Keith Packard435793d2011-07-12 14:56:22 -07006798 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
Chris Wilsona7615032011-01-12 17:04:08 +00006799}
6800
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006801static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
Jesse Barnesc65d77d2011-12-15 12:30:36 -08006802{
Daniel Vetter7df00d72013-05-21 21:54:55 +02006803 return (1 << dpll->n) << 16 | dpll->m2;
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006804}
Daniel Vetterf47709a2013-03-28 10:42:02 +01006805
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006806static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6807{
6808 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
Jesse Barnesc65d77d2011-12-15 12:30:36 -08006809}
6810
Daniel Vetterf47709a2013-03-28 10:42:02 +01006811static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006812 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03006813 struct dpll *reduced_clock)
Jesse Barnesa7516a02011-12-15 12:30:37 -08006814{
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02006815 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006816 u32 fp, fp2 = 0;
6817
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02006818 if (IS_PINEVIEW(dev_priv)) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006819 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006820 if (reduced_clock)
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006821 fp2 = pnv_dpll_compute_fp(reduced_clock);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006822 } else {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006823 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006824 if (reduced_clock)
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006825 fp2 = i9xx_dpll_compute_fp(reduced_clock);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006826 }
6827
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006828 crtc_state->dpll_hw_state.fp0 = fp;
Jesse Barnesa7516a02011-12-15 12:30:37 -08006829
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03006830 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
Rodrigo Viviab585de2015-03-24 12:40:09 -07006831 reduced_clock) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006832 crtc_state->dpll_hw_state.fp1 = fp2;
Jesse Barnesa7516a02011-12-15 12:30:37 -08006833 } else {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006834 crtc_state->dpll_hw_state.fp1 = fp;
Jesse Barnesa7516a02011-12-15 12:30:37 -08006835 }
6836}
6837
/*
 * Work around PLLB opamp calibration on VLV via the DPIO sideband.
 * Called with sb_lock held (see vlv_prepare_pll).
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* NOTE(review): magic values below come from the VBIOS/DPIO notes
	 * referenced in vlv_prepare_pll — exact bit meanings not documented
	 * here. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte of PLL_DW9 again after the REF_DW13 update */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6866
/*
 * Write the TU size plus data (gmch) and link M/N values into the
 * PCH transcoder M1/N1 registers for this crtc's pipe.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
6879
Maarten Lankhorst4207c8b2018-10-15 11:40:23 +02006880static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
6881 enum transcoder transcoder)
6882{
6883 if (IS_HASWELL(dev_priv))
6884 return transcoder == TRANSCODER_EDP;
6885
6886 /*
6887 * Strictly speaking some registers are available before
6888 * gen7, but we only support DRRS on gen7+
6889 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08006890 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
Maarten Lankhorst4207c8b2018-10-15 11:40:23 +02006891}
6892
/*
 * Write the M/N values for the CPU transcoder. On gen5+ the registers
 * are indexed by transcoder and the M2/N2 set is additionally written
 * when DRRS applies; older (G4X-style) hardware uses pipe-indexed
 * registers and has no M2/N2.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
				   TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
6926
Maarten Lankhorst4c354752018-10-11 12:04:49 +02006927void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
Daniel Vetter03afc4a2013-04-02 23:42:31 +02006928{
Maarten Lankhorst4c354752018-10-11 12:04:49 +02006929 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
Ramalingam Cfe3cd482015-02-13 15:32:59 +05306930
6931 if (m_n == M1_N1) {
Maarten Lankhorst4c354752018-10-11 12:04:49 +02006932 dp_m_n = &crtc_state->dp_m_n;
6933 dp_m2_n2 = &crtc_state->dp_m2_n2;
Ramalingam Cfe3cd482015-02-13 15:32:59 +05306934 } else if (m_n == M2_N2) {
6935
6936 /*
6937 * M2_N2 registers are not supported. Hence m2_n2 divider value
6938 * needs to be programmed into M1_N1.
6939 */
Maarten Lankhorst4c354752018-10-11 12:04:49 +02006940 dp_m_n = &crtc_state->dp_m2_n2;
Ramalingam Cfe3cd482015-02-13 15:32:59 +05306941 } else {
6942 DRM_ERROR("Unsupported divider value\n");
6943 return;
6944 }
6945
Maarten Lankhorst4c354752018-10-11 12:04:49 +02006946 if (crtc_state->has_pch_encoder)
6947 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
Daniel Vetter03afc4a2013-04-02 23:42:31 +02006948 else
Maarten Lankhorst4c354752018-10-11 12:04:49 +02006949 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
Daniel Vetter03afc4a2013-04-02 23:42:31 +02006950}
6951
Daniel Vetter251ac862015-06-18 10:30:24 +02006952static void vlv_compute_dpll(struct intel_crtc *crtc,
6953 struct intel_crtc_state *pipe_config)
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006954{
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006955 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006956 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006957 if (crtc->pipe != PIPE_A)
6958 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006959
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006960 /* DPLL not used with DSI, but still need the rest set up */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03006961 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006962 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
6963 DPLL_EXT_BUFFER_ENABLE_VLV;
6964
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006965 pipe_config->dpll_hw_state.dpll_md =
6966 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6967}
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006968
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006969static void chv_compute_dpll(struct intel_crtc *crtc,
6970 struct intel_crtc_state *pipe_config)
6971{
6972 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006973 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006974 if (crtc->pipe != PIPE_A)
6975 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6976
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006977 /* DPLL not used with DSI, but still need the rest set up */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03006978 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006979 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
6980
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006981 pipe_config->dpll_hw_state.dpll_md =
6982 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006983}
6984
/*
 * Program the VLV DPLL dividers and analog tuning values through the
 * DPIO sideband prior to enabling the PLL. The refclk is enabled
 * first; if the precomputed state has VCO enable clear (the DSI case
 * from vlv_compute_dpll), nothing else needs setting up.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* All DPIO sideband accesses below require sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write dividers first, then enable calibration on top of them */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock selection; the extra bit is set only for DP outputs */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
7083
/*
 * Program the CHV DPLL dividers, loop filter and lock-detect settings
 * through the DPIO sideband prior to enabling the PLL. Refclk and SSC
 * are enabled first; if the precomputed state has VCO enable clear
 * (the DSI case from chv_compute_dpll), nothing else needs setting up.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* M2 is split: low 22 bits are the fraction, the rest the integer */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/* All DPIO sideband accesses below require sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients are picked by the target VCO frequency */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7188
Ville Syrjäläd288f652014-10-28 13:20:22 +02007189/**
7190 * vlv_force_pll_on - forcibly enable just the PLL
7191 * @dev_priv: i915 private structure
7192 * @pipe: pipe PLL to enable
7193 * @dpll: PLL configuration
7194 *
7195 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7196 * in cases where we need the PLL enabled even when @pipe is not going to
7197 * be enabled.
7198 */
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007199int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007200 const struct dpll *dpll)
Ville Syrjäläd288f652014-10-28 13:20:22 +02007201{
Ville Syrjäläb91eb5c2016-10-31 22:37:09 +02007202 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007203 struct intel_crtc_state *pipe_config;
7204
7205 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7206 if (!pipe_config)
7207 return -ENOMEM;
7208
7209 pipe_config->base.crtc = &crtc->base;
7210 pipe_config->pixel_multiplier = 1;
7211 pipe_config->dpll = *dpll;
Ville Syrjäläd288f652014-10-28 13:20:22 +02007212
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007213 if (IS_CHERRYVIEW(dev_priv)) {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007214 chv_compute_dpll(crtc, pipe_config);
7215 chv_prepare_pll(crtc, pipe_config);
7216 chv_enable_pll(crtc, pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007217 } else {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007218 vlv_compute_dpll(crtc, pipe_config);
7219 vlv_prepare_pll(crtc, pipe_config);
7220 vlv_enable_pll(crtc, pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007221 }
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007222
7223 kfree(pipe_config);
7224
7225 return 0;
Ville Syrjäläd288f652014-10-28 13:20:22 +02007226}
7227
7228/**
7229 * vlv_force_pll_off - forcibly disable just the PLL
7230 * @dev_priv: i915 private structure
7231 * @pipe: pipe PLL to disable
7232 *
7233 * Disable the PLL for @pipe. To be used in cases where we need
7234 * the PLL enabled even when @pipe is not going to be enabled.
7235 */
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007236void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
Ville Syrjäläd288f652014-10-28 13:20:22 +02007237{
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007238 if (IS_CHERRYVIEW(dev_priv))
7239 chv_disable_pll(dev_priv, pipe);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007240 else
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007241 vlv_disable_pll(dev_priv, pipe);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007242}
7243
Daniel Vetter251ac862015-06-18 10:30:24 +02007244static void i9xx_compute_dpll(struct intel_crtc *crtc,
7245 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007246 struct dpll *reduced_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007247{
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02007248 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007249 u32 dpll;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007250 struct dpll *clock = &crtc_state->dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007251
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007252 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
Vijay Purushothaman2a8f64c2012-09-27 19:13:06 +05307253
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007254 dpll = DPLL_VGA_MODE_DIS;
7255
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007256 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007257 dpll |= DPLLB_MODE_LVDS;
7258 else
7259 dpll |= DPLLB_MODE_DAC_SERIAL;
Daniel Vetter6cc5f342013-03-27 00:44:53 +01007260
Jani Nikula73f67aa2016-12-07 22:48:09 +02007261 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7262 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007263 dpll |= (crtc_state->pixel_multiplier - 1)
Daniel Vetter198a037f2013-04-19 11:14:37 +02007264 << SDVO_MULTIPLIER_SHIFT_HIRES;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007265 }
Daniel Vetter198a037f2013-04-19 11:14:37 +02007266
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03007267 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7268 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
Daniel Vetter4a33e482013-07-06 12:52:05 +02007269 dpll |= DPLL_SDVO_HIGH_SPEED;
Daniel Vetter198a037f2013-04-19 11:14:37 +02007270
Ville Syrjälä37a56502016-06-22 21:57:04 +03007271 if (intel_crtc_has_dp_encoder(crtc_state))
Daniel Vetter4a33e482013-07-06 12:52:05 +02007272 dpll |= DPLL_SDVO_HIGH_SPEED;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007273
7274 /* compute bitmask from p1 value */
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02007275 if (IS_PINEVIEW(dev_priv))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007276 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7277 else {
7278 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +01007279 if (IS_G4X(dev_priv) && reduced_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007280 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7281 }
7282 switch (clock->p2) {
7283 case 5:
7284 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7285 break;
7286 case 7:
7287 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7288 break;
7289 case 10:
7290 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7291 break;
7292 case 14:
7293 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7294 break;
7295 }
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02007296 if (INTEL_GEN(dev_priv) >= 4)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007297 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7298
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007299 if (crtc_state->sdvo_tv_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007300 dpll |= PLL_REF_INPUT_TVCLKINBC;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007301 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
Ander Conselvan de Oliveiraceb41002016-03-21 18:00:02 +02007302 intel_panel_use_ssc(dev_priv))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007303 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7304 else
7305 dpll |= PLL_REF_INPUT_DREFCLK;
7306
7307 dpll |= DPLL_VCO_ENABLE;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007308 crtc_state->dpll_hw_state.dpll = dpll;
Daniel Vetter8bcc2792013-06-05 13:34:28 +02007309
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02007310 if (INTEL_GEN(dev_priv) >= 4) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007311 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
Daniel Vetteref1b4602013-06-01 17:17:04 +02007312 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007313 crtc_state->dpll_hw_state.dpll_md = dpll_md;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007314 }
7315}
7316
Daniel Vetter251ac862015-06-18 10:30:24 +02007317static void i8xx_compute_dpll(struct intel_crtc *crtc,
7318 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007319 struct dpll *reduced_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007320{
Daniel Vetterf47709a2013-03-28 10:42:02 +01007321 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007322 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007323 u32 dpll;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007324 struct dpll *clock = &crtc_state->dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007325
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007326 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
Vijay Purushothaman2a8f64c2012-09-27 19:13:06 +05307327
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007328 dpll = DPLL_VGA_MODE_DIS;
7329
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007330 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007331 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7332 } else {
7333 if (clock->p1 == 2)
7334 dpll |= PLL_P1_DIVIDE_BY_TWO;
7335 else
7336 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7337 if (clock->p2 == 4)
7338 dpll |= PLL_P2_DIVIDE_BY_4;
7339 }
7340
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01007341 if (!IS_I830(dev_priv) &&
7342 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
Daniel Vetter4a33e482013-07-06 12:52:05 +02007343 dpll |= DPLL_DVO_2X_MODE;
7344
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007345 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
Ander Conselvan de Oliveiraceb41002016-03-21 18:00:02 +02007346 intel_panel_use_ssc(dev_priv))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007347 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7348 else
7349 dpll |= PLL_REF_INPUT_DREFCLK;
7350
7351 dpll |= DPLL_VCO_ENABLE;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007352 crtc_state->dpll_hw_state.dpll = dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007353}
7354
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007355static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007356{
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007357 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7358 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7359 enum pipe pipe = crtc->pipe;
7360 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7361 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
Ville Syrjälä1caea6e2014-03-28 23:29:32 +02007362 uint32_t crtc_vtotal, crtc_vblank_end;
7363 int vsyncshift = 0;
Daniel Vetter4d8a62e2013-05-03 11:49:51 +02007364
7365 /* We need to be careful not to changed the adjusted mode, for otherwise
7366 * the hw state checker will get angry at the mismatch. */
7367 crtc_vtotal = adjusted_mode->crtc_vtotal;
7368 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007369
Ville Syrjälä609aeac2014-03-28 23:29:30 +02007370 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007371 /* the chip adds 2 halflines automatically */
Daniel Vetter4d8a62e2013-05-03 11:49:51 +02007372 crtc_vtotal -= 1;
7373 crtc_vblank_end -= 1;
Ville Syrjälä609aeac2014-03-28 23:29:30 +02007374
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007375 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
Ville Syrjälä609aeac2014-03-28 23:29:30 +02007376 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7377 else
7378 vsyncshift = adjusted_mode->crtc_hsync_start -
7379 adjusted_mode->crtc_htotal / 2;
Ville Syrjälä1caea6e2014-03-28 23:29:32 +02007380 if (vsyncshift < 0)
7381 vsyncshift += adjusted_mode->crtc_htotal;
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007382 }
7383
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00007384 if (INTEL_GEN(dev_priv) > 3)
Paulo Zanonife2b8f92012-10-23 18:30:02 -02007385 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007386
Paulo Zanonife2b8f92012-10-23 18:30:02 -02007387 I915_WRITE(HTOTAL(cpu_transcoder),
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007388 (adjusted_mode->crtc_hdisplay - 1) |
7389 ((adjusted_mode->crtc_htotal - 1) << 16));
Paulo Zanonife2b8f92012-10-23 18:30:02 -02007390 I915_WRITE(HBLANK(cpu_transcoder),
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007391 (adjusted_mode->crtc_hblank_start - 1) |
7392 ((adjusted_mode->crtc_hblank_end - 1) << 16));
Paulo Zanonife2b8f92012-10-23 18:30:02 -02007393 I915_WRITE(HSYNC(cpu_transcoder),
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007394 (adjusted_mode->crtc_hsync_start - 1) |
7395 ((adjusted_mode->crtc_hsync_end - 1) << 16));
7396
Paulo Zanonife2b8f92012-10-23 18:30:02 -02007397 I915_WRITE(VTOTAL(cpu_transcoder),
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007398 (adjusted_mode->crtc_vdisplay - 1) |
Daniel Vetter4d8a62e2013-05-03 11:49:51 +02007399 ((crtc_vtotal - 1) << 16));
Paulo Zanonife2b8f92012-10-23 18:30:02 -02007400 I915_WRITE(VBLANK(cpu_transcoder),
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007401 (adjusted_mode->crtc_vblank_start - 1) |
Daniel Vetter4d8a62e2013-05-03 11:49:51 +02007402 ((crtc_vblank_end - 1) << 16));
Paulo Zanonife2b8f92012-10-23 18:30:02 -02007403 I915_WRITE(VSYNC(cpu_transcoder),
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007404 (adjusted_mode->crtc_vsync_start - 1) |
7405 ((adjusted_mode->crtc_vsync_end - 1) << 16));
7406
Paulo Zanonib5e508d2012-10-24 11:34:43 -02007407 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7408 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7409 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7410 * bits. */
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01007411 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
Paulo Zanonib5e508d2012-10-24 11:34:43 -02007412 (pipe == PIPE_B || pipe == PIPE_C))
7413 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7414
Jani Nikulabc58be62016-03-18 17:05:39 +02007415}
7416
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007417static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
Jani Nikulabc58be62016-03-18 17:05:39 +02007418{
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007419 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7420 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7421 enum pipe pipe = crtc->pipe;
Jani Nikulabc58be62016-03-18 17:05:39 +02007422
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007423 /* pipesrc controls the size that is scaled from, which should
7424 * always be the user's requested size.
7425 */
7426 I915_WRITE(PIPESRC(pipe),
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007427 ((crtc_state->pipe_src_w - 1) << 16) |
7428 (crtc_state->pipe_src_h - 1));
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007429}
7430
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007431static void intel_get_pipe_timings(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007432 struct intel_crtc_state *pipe_config)
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007433{
7434 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007435 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007436 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7437 uint32_t tmp;
7438
7439 tmp = I915_READ(HTOTAL(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007440 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7441 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007442 tmp = I915_READ(HBLANK(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007443 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7444 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007445 tmp = I915_READ(HSYNC(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007446 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7447 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007448
7449 tmp = I915_READ(VTOTAL(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007450 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7451 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007452 tmp = I915_READ(VBLANK(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007453 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7454 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007455 tmp = I915_READ(VSYNC(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007456 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7457 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007458
7459 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007460 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7461 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7462 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007463 }
Jani Nikulabc58be62016-03-18 17:05:39 +02007464}
7465
7466static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7467 struct intel_crtc_state *pipe_config)
7468{
7469 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007470 struct drm_i915_private *dev_priv = to_i915(dev);
Jani Nikulabc58be62016-03-18 17:05:39 +02007471 u32 tmp;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007472
7473 tmp = I915_READ(PIPESRC(crtc->pipe));
Ville Syrjälä37327ab2013-09-04 18:25:28 +03007474 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7475 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7476
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007477 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7478 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007479}
7480
Daniel Vetterf6a83282014-02-11 15:28:57 -08007481void intel_mode_from_pipe_config(struct drm_display_mode *mode,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007482 struct intel_crtc_state *pipe_config)
Jesse Barnesbabea612013-06-26 18:57:38 +03007483{
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007484 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7485 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7486 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7487 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
Jesse Barnesbabea612013-06-26 18:57:38 +03007488
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007489 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7490 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7491 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7492 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
Jesse Barnesbabea612013-06-26 18:57:38 +03007493
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007494 mode->flags = pipe_config->base.adjusted_mode.flags;
Maarten Lankhorstcd13f5a2015-07-14 14:12:02 +02007495 mode->type = DRM_MODE_TYPE_DRIVER;
Jesse Barnesbabea612013-06-26 18:57:38 +03007496
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007497 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
Maarten Lankhorstcd13f5a2015-07-14 14:12:02 +02007498
7499 mode->hsync = drm_mode_hsync(mode);
7500 mode->vrefresh = drm_mode_vrefresh(mode);
7501 drm_mode_set_name(mode);
Jesse Barnesbabea612013-06-26 18:57:38 +03007502}
7503
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007504static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
Daniel Vetter84b046f2013-02-19 18:48:54 +01007505{
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007506 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7507 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetter84b046f2013-02-19 18:48:54 +01007508 uint32_t pipeconf;
7509
Daniel Vetter9f11a9e2013-06-13 00:54:58 +02007510 pipeconf = 0;
Daniel Vetter84b046f2013-02-19 18:48:54 +01007511
Ville Syrjäläe56134b2017-06-01 17:36:19 +03007512 /* we keep both pipes enabled on 830 */
7513 if (IS_I830(dev_priv))
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007514 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
Daniel Vetter67c72a12013-09-24 11:46:14 +02007515
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007516 if (crtc_state->double_wide)
Ville Syrjäläcf532bb2013-09-04 18:30:02 +03007517 pipeconf |= PIPECONF_DOUBLE_WIDE;
Daniel Vetter84b046f2013-02-19 18:48:54 +01007518
Daniel Vetterff9ce462013-04-24 14:57:17 +02007519 /* only g4x and later have fancy bpc/dither controls */
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +01007520 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7521 IS_CHERRYVIEW(dev_priv)) {
Daniel Vetterff9ce462013-04-24 14:57:17 +02007522 /* Bspec claims that we can't use dithering for 30bpp pipes. */
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007523 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
Daniel Vetterff9ce462013-04-24 14:57:17 +02007524 pipeconf |= PIPECONF_DITHER_EN |
7525 PIPECONF_DITHER_TYPE_SP;
7526
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007527 switch (crtc_state->pipe_bpp) {
Daniel Vetterff9ce462013-04-24 14:57:17 +02007528 case 18:
7529 pipeconf |= PIPECONF_6BPC;
7530 break;
7531 case 24:
7532 pipeconf |= PIPECONF_8BPC;
7533 break;
7534 case 30:
7535 pipeconf |= PIPECONF_10BPC;
7536 break;
7537 default:
7538 /* Case prevented by intel_choose_pipe_bpp_dither. */
7539 BUG();
Daniel Vetter84b046f2013-02-19 18:48:54 +01007540 }
7541 }
7542
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007543 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00007544 if (INTEL_GEN(dev_priv) < 4 ||
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007545 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
Ville Syrjäläefc2cff2014-03-28 23:29:31 +02007546 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7547 else
7548 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7549 } else
Daniel Vetter84b046f2013-02-19 18:48:54 +01007550 pipeconf |= PIPECONF_PROGRESSIVE;
7551
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01007552 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007553 crtc_state->limited_color_range)
Daniel Vetter9f11a9e2013-06-13 00:54:58 +02007554 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
Ville Syrjälä9c8e09b2013-04-02 16:10:09 +03007555
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02007556 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
7557 POSTING_READ(PIPECONF(crtc->pipe));
Daniel Vetter84b046f2013-02-19 18:48:54 +01007558}
7559
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007560static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7561 struct intel_crtc_state *crtc_state)
7562{
7563 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007564 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007565 const struct intel_limit *limit;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007566 int refclk = 48000;
7567
7568 memset(&crtc_state->dpll_hw_state, 0,
7569 sizeof(crtc_state->dpll_hw_state));
7570
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007571 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007572 if (intel_panel_use_ssc(dev_priv)) {
7573 refclk = dev_priv->vbt.lvds_ssc_freq;
7574 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7575 }
7576
7577 limit = &intel_limits_i8xx_lvds;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007578 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007579 limit = &intel_limits_i8xx_dvo;
7580 } else {
7581 limit = &intel_limits_i8xx_dac;
7582 }
7583
7584 if (!crtc_state->clock_set &&
7585 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7586 refclk, NULL, &crtc_state->dpll)) {
7587 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7588 return -EINVAL;
7589 }
7590
7591 i8xx_compute_dpll(crtc, crtc_state, NULL);
7592
7593 return 0;
7594}
7595
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007596static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7597 struct intel_crtc_state *crtc_state)
7598{
7599 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007600 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007601 const struct intel_limit *limit;
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007602 int refclk = 96000;
7603
7604 memset(&crtc_state->dpll_hw_state, 0,
7605 sizeof(crtc_state->dpll_hw_state));
7606
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007607 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007608 if (intel_panel_use_ssc(dev_priv)) {
7609 refclk = dev_priv->vbt.lvds_ssc_freq;
7610 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7611 }
7612
7613 if (intel_is_dual_link_lvds(dev))
7614 limit = &intel_limits_g4x_dual_channel_lvds;
7615 else
7616 limit = &intel_limits_g4x_single_channel_lvds;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007617 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7618 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007619 limit = &intel_limits_g4x_hdmi;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007620 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007621 limit = &intel_limits_g4x_sdvo;
7622 } else {
7623 /* The option is for other outputs */
7624 limit = &intel_limits_i9xx_sdvo;
7625 }
7626
7627 if (!crtc_state->clock_set &&
7628 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7629 refclk, NULL, &crtc_state->dpll)) {
7630 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7631 return -EINVAL;
7632 }
7633
7634 i9xx_compute_dpll(crtc, crtc_state, NULL);
7635
7636 return 0;
7637}
7638
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007639static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7640 struct intel_crtc_state *crtc_state)
Jesse Barnes79e53942008-11-07 14:24:08 -08007641{
Ander Conselvan de Oliveirac7653192014-10-20 13:46:44 +03007642 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007643 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007644 const struct intel_limit *limit;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007645 int refclk = 96000;
Jesse Barnes79e53942008-11-07 14:24:08 -08007646
Ander Conselvan de Oliveiradd3cd742015-05-15 13:34:29 +03007647 memset(&crtc_state->dpll_hw_state, 0,
7648 sizeof(crtc_state->dpll_hw_state));
7649
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007650 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007651 if (intel_panel_use_ssc(dev_priv)) {
7652 refclk = dev_priv->vbt.lvds_ssc_freq;
7653 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7654 }
Jesse Barnes79e53942008-11-07 14:24:08 -08007655
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007656 limit = &intel_limits_pineview_lvds;
7657 } else {
7658 limit = &intel_limits_pineview_sdvo;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007659 }
Jani Nikulaf2335332013-09-13 11:03:09 +03007660
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007661 if (!crtc_state->clock_set &&
7662 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7663 refclk, NULL, &crtc_state->dpll)) {
7664 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7665 return -EINVAL;
7666 }
7667
7668 i9xx_compute_dpll(crtc, crtc_state, NULL);
7669
7670 return 0;
7671}
7672
7673static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7674 struct intel_crtc_state *crtc_state)
7675{
7676 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007677 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007678 const struct intel_limit *limit;
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007679 int refclk = 96000;
7680
7681 memset(&crtc_state->dpll_hw_state, 0,
7682 sizeof(crtc_state->dpll_hw_state));
7683
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007684 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007685 if (intel_panel_use_ssc(dev_priv)) {
7686 refclk = dev_priv->vbt.lvds_ssc_freq;
7687 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
Jani Nikulae9fd1c02013-08-27 15:12:23 +03007688 }
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007689
7690 limit = &intel_limits_i9xx_lvds;
7691 } else {
7692 limit = &intel_limits_i9xx_sdvo;
7693 }
7694
7695 if (!crtc_state->clock_set &&
7696 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7697 refclk, NULL, &crtc_state->dpll)) {
7698 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7699 return -EINVAL;
Daniel Vetterf47709a2013-03-28 10:42:02 +01007700 }
Eric Anholtf564048e2011-03-30 13:01:02 -07007701
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007702 i9xx_compute_dpll(crtc, crtc_state, NULL);
Eric Anholtf564048e2011-03-30 13:01:02 -07007703
Daniel Vetterc8f7a0d2014-04-24 23:55:04 +02007704 return 0;
Eric Anholtf564048e2011-03-30 13:01:02 -07007705}
7706
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007707static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7708 struct intel_crtc_state *crtc_state)
7709{
7710 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007711 const struct intel_limit *limit = &intel_limits_chv;
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007712
7713 memset(&crtc_state->dpll_hw_state, 0,
7714 sizeof(crtc_state->dpll_hw_state));
7715
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007716 if (!crtc_state->clock_set &&
7717 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7718 refclk, NULL, &crtc_state->dpll)) {
7719 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7720 return -EINVAL;
7721 }
7722
7723 chv_compute_dpll(crtc, crtc_state);
7724
7725 return 0;
7726}
7727
7728static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7729 struct intel_crtc_state *crtc_state)
7730{
7731 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007732 const struct intel_limit *limit = &intel_limits_vlv;
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007733
7734 memset(&crtc_state->dpll_hw_state, 0,
7735 sizeof(crtc_state->dpll_hw_state));
7736
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007737 if (!crtc_state->clock_set &&
7738 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7739 refclk, NULL, &crtc_state->dpll)) {
7740 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7741 return -EINVAL;
7742 }
7743
7744 vlv_compute_dpll(crtc, crtc_state);
7745
7746 return 0;
7747}
7748
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007749static void i9xx_get_pfit_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007750 struct intel_crtc_state *pipe_config)
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007751{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00007752 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007753 uint32_t tmp;
7754
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01007755 if (INTEL_GEN(dev_priv) <= 3 &&
7756 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
Ville Syrjälädc9e7dec2014-01-10 14:06:45 +02007757 return;
7758
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007759 tmp = I915_READ(PFIT_CONTROL);
Daniel Vetter06922822013-07-11 13:35:40 +02007760 if (!(tmp & PFIT_ENABLE))
7761 return;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007762
Daniel Vetter06922822013-07-11 13:35:40 +02007763 /* Check whether the pfit is attached to our pipe. */
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00007764 if (INTEL_GEN(dev_priv) < 4) {
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007765 if (crtc->pipe != PIPE_B)
7766 return;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007767 } else {
7768 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7769 return;
7770 }
7771
Daniel Vetter06922822013-07-11 13:35:40 +02007772 pipe_config->gmch_pfit.control = tmp;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007773 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007774}
7775
Jesse Barnesacbec812013-09-20 11:29:32 -07007776static void vlv_crtc_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007777 struct intel_crtc_state *pipe_config)
Jesse Barnesacbec812013-09-20 11:29:32 -07007778{
7779 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007780 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesacbec812013-09-20 11:29:32 -07007781 int pipe = pipe_config->cpu_transcoder;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007782 struct dpll clock;
Jesse Barnesacbec812013-09-20 11:29:32 -07007783 u32 mdiv;
Chris Wilson662c6ec2013-09-25 14:24:01 -07007784 int refclk = 100000;
Jesse Barnesacbec812013-09-20 11:29:32 -07007785
Ville Syrjäläb5219732016-03-15 16:40:01 +02007786 /* In case of DSI, DPLL will not be used */
7787 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
Shobhit Kumarf573de52014-07-30 20:32:37 +05307788 return;
7789
Ville Syrjäläa5805162015-05-26 20:42:30 +03007790 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08007791 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
Ville Syrjäläa5805162015-05-26 20:42:30 +03007792 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnesacbec812013-09-20 11:29:32 -07007793
7794 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
7795 clock.m2 = mdiv & DPIO_M2DIV_MASK;
7796 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
7797 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
7798 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
7799
Imre Deakdccbea32015-06-22 23:35:51 +03007800 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
Jesse Barnesacbec812013-09-20 11:29:32 -07007801}
7802
/*
 * Read out the currently-programmed state of the crtc's primary plane into
 * *plane_config: base address, tiling, rotation, pixel format, size and
 * pitch, as found in the plane/pipe registers. An intel_framebuffer is
 * allocated to describe it (ownership passes to plane_config->fb).
 * Bails out silently if the plane is disabled or the allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Plane disabled: nothing to read out. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	/* The primary plane is expected to be attached to its own pipe. */
	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Tiling and 180° rotation bits only exist from gen4 onwards. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B additionally has a horizontal mirror bit. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/*
	 * The surface base register layout depends on the generation;
	 * the low 12 bits of DSPSURF are not part of the address and
	 * are masked off.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* PIPESRC stores width-1/height-1, hence the +1. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
7885
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007886static void chv_crtc_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007887 struct intel_crtc_state *pipe_config)
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007888{
7889 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007890 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007891 int pipe = pipe_config->cpu_transcoder;
7892 enum dpio_channel port = vlv_pipe_to_channel(pipe);
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007893 struct dpll clock;
Imre Deak0d7b6b12015-07-02 14:29:58 +03007894 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007895 int refclk = 100000;
7896
Ville Syrjäläb5219732016-03-15 16:40:01 +02007897 /* In case of DSI, DPLL will not be used */
7898 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7899 return;
7900
Ville Syrjäläa5805162015-05-26 20:42:30 +03007901 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007902 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
7903 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
7904 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
7905 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
Imre Deak0d7b6b12015-07-02 14:29:58 +03007906 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
Ville Syrjäläa5805162015-05-26 20:42:30 +03007907 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007908
7909 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
Imre Deak0d7b6b12015-07-02 14:29:58 +03007910 clock.m2 = (pll_dw0 & 0xff) << 22;
7911 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
7912 clock.m2 |= pll_dw2 & 0x3fffff;
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007913 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
7914 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
7915 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
7916
Imre Deakdccbea32015-06-22 23:35:51 +03007917 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007918}
7919
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05307920static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
7921 struct intel_crtc_state *pipe_config)
7922{
7923 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7924 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
7925
Shashank Sharma668b6c12018-10-12 11:53:14 +05307926 pipe_config->lspcon_downsampling = false;
7927
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05307928 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
7929 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
7930
7931 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
7932 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
7933 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
7934
7935 if (ycbcr420_enabled) {
7936 /* We support 4:2:0 in full blend mode only */
7937 if (!blend)
7938 output = INTEL_OUTPUT_FORMAT_INVALID;
7939 else if (!(IS_GEMINILAKE(dev_priv) ||
7940 INTEL_GEN(dev_priv) >= 10))
7941 output = INTEL_OUTPUT_FORMAT_INVALID;
7942 else
7943 output = INTEL_OUTPUT_FORMAT_YCBCR420;
Shashank Sharma8c79f842018-10-12 11:53:09 +05307944 } else {
Shashank Sharma668b6c12018-10-12 11:53:14 +05307945 /*
7946 * Currently there is no interface defined to
7947 * check user preference between RGB/YCBCR444
7948 * or YCBCR420. So the only possible case for
7949 * YCBCR444 usage is driving YCBCR420 output
7950 * with LSPCON, when pipe is configured for
7951 * YCBCR444 output and LSPCON takes care of
7952 * downsampling it.
7953 */
7954 pipe_config->lspcon_downsampling = true;
Shashank Sharma8c79f842018-10-12 11:53:09 +05307955 output = INTEL_OUTPUT_FORMAT_YCBCR444;
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05307956 }
7957 }
7958 }
7959
7960 pipe_config->output_format = output;
7961}
7962
/*
 * Read out the current hardware state of @crtc into @pipe_config.
 * Returns false if the pipe's power domain is not enabled or the pipe
 * itself is disabled; returns true on success with timings, pfit,
 * DPLL hardware state and port clock filled in. The power reference
 * taken here is dropped again before returning.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Decode the pipe bpp from PIPECONF where the hardware exposes it. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier location varies by generation/platform. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Pick the platform-specific DPLL clock readout. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
8077
/*
 * Program the PCH display reference clock (PCH_DREF_CONTROL) on Ironlake.
 * Scans all encoders to determine whether a panel (LVDS/eDP) and/or a CPU
 * attached eDP (port A) is present, computes the desired final register
 * value, and then walks the hardware to that state one source at a time
 * (nonspread source, SSC source, CPU output) with a 200us settle delay
 * after each write, as the sources must be switched carefully and slowly.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX the external CK505 clock is described by the VBT;
	 * SSC is only usable when it is present. Later PCHs always
	 * support SSC and have no CK505. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep the SSC source alive for the DPLL still using it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the hardware is already in the desired state. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The walk above must have landed exactly on the precomputed state. */
	BUG_ON(val != final);
}
8244
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait (up to 100us) for the status bit to reflect it, then de-assert
 * and wait for the status to clear. Timeouts are logged but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8265
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the sideband (SBI_MPHY)
 * interface. Each read-modify-write pair below applies to one lane's
 * register and its counterpart at the +0x100 offset. The addresses and
 * values are mandated by the workaround above; do not reorder or alter
 * them without consulting that documentation.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8340
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	uint32_t reg, tmp;

	/* Sanitize impossible parameter combinations, warning loudly. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-gate the SSC block but keep the path alternate bit set. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clear the path alternate bit to route the spread clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* The buffer-enable-disable bit lives in SBI_GEN0 on LP PCHs. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8385
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* The buffer-enable-disable bit lives in SBI_GEN0 on LP PCHs. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/*
	 * If the SSC block is still enabled, first set the path alternate
	 * bit (with a 32us settle) and only then gate the block off.
	 */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8411
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008412#define BEND_IDX(steps) ((50 + (steps)) / 5)
8413
8414static const uint16_t sscdivintphase[] = {
8415 [BEND_IDX( 50)] = 0x3B23,
8416 [BEND_IDX( 45)] = 0x3B23,
8417 [BEND_IDX( 40)] = 0x3C23,
8418 [BEND_IDX( 35)] = 0x3C23,
8419 [BEND_IDX( 30)] = 0x3D23,
8420 [BEND_IDX( 25)] = 0x3D23,
8421 [BEND_IDX( 20)] = 0x3E23,
8422 [BEND_IDX( 15)] = 0x3E23,
8423 [BEND_IDX( 10)] = 0x3F23,
8424 [BEND_IDX( 5)] = 0x3F23,
8425 [BEND_IDX( 0)] = 0x0025,
8426 [BEND_IDX( -5)] = 0x0025,
8427 [BEND_IDX(-10)] = 0x0125,
8428 [BEND_IDX(-15)] = 0x0125,
8429 [BEND_IDX(-20)] = 0x0225,
8430 [BEND_IDX(-25)] = 0x0225,
8431 [BEND_IDX(-30)] = 0x0325,
8432 [BEND_IDX(-35)] = 0x0325,
8433 [BEND_IDX(-40)] = 0x0425,
8434 [BEND_IDX(-45)] = 0x0425,
8435 [BEND_IDX(-50)] = 0x0525,
8436};
8437
8438/*
8439 * Bend CLKOUT_DP
8440 * steps -50 to 50 inclusive, in steps of 5
8441 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8442 * change in clock period = -(steps / 10) * 5.787 ps
8443 */
8444static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8445{
8446 uint32_t tmp;
8447 int idx = BEND_IDX(steps);
8448
8449 if (WARN_ON(steps % 5 != 0))
8450 return;
8451
8452 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8453 return;
8454
8455 mutex_lock(&dev_priv->sb_lock);
8456
8457 if (steps % 10 != 0)
8458 tmp = 0xAAAAAAAB;
8459 else
8460 tmp = 0x00000000;
8461 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8462
8463 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8464 tmp &= 0xffff0000;
8465 tmp |= sscdivintphase[idx];
8466 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8467
8468 mutex_unlock(&dev_priv->sb_lock);
8469}
8470
8471#undef BEND_IDX
8472
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008473static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008474{
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008475 struct intel_encoder *encoder;
8476 bool has_vga = false;
8477
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008478 for_each_intel_encoder(&dev_priv->drm, encoder) {
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008479 switch (encoder->type) {
8480 case INTEL_OUTPUT_ANALOG:
8481 has_vga = true;
8482 break;
Paulo Zanoni6847d71b2014-10-27 17:47:52 -02008483 default:
8484 break;
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008485 }
8486 }
8487
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008488 if (has_vga) {
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008489 lpt_bend_clkout_dp(dev_priv, 0);
8490 lpt_enable_clkout_dp(dev_priv, true, true);
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008491 } else {
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008492 lpt_disable_clkout_dp(dev_priv);
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008493 }
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008494}
8495
Paulo Zanonidde86e22012-12-01 12:04:25 -02008496/*
8497 * Initialize reference clocks when the driver loads
8498 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008499void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
Paulo Zanonidde86e22012-12-01 12:04:25 -02008500{
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01008501 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008502 ironlake_init_pch_refclk(dev_priv);
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01008503 else if (HAS_PCH_LPT(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008504 lpt_init_pch_refclk(dev_priv);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008505}
8506
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008507static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
Paulo Zanonic8203562012-09-12 10:06:29 -03008508{
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008509 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8510 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8511 enum pipe pipe = crtc->pipe;
Paulo Zanonic8203562012-09-12 10:06:29 -03008512 uint32_t val;
8513
Daniel Vetter78114072013-06-13 00:54:57 +02008514 val = 0;
Paulo Zanonic8203562012-09-12 10:06:29 -03008515
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008516 switch (crtc_state->pipe_bpp) {
Paulo Zanonic8203562012-09-12 10:06:29 -03008517 case 18:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008518 val |= PIPECONF_6BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008519 break;
8520 case 24:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008521 val |= PIPECONF_8BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008522 break;
8523 case 30:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008524 val |= PIPECONF_10BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008525 break;
8526 case 36:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008527 val |= PIPECONF_12BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008528 break;
8529 default:
Paulo Zanonicc769b62012-09-20 18:36:03 -03008530 /* Case prevented by intel_choose_pipe_bpp_dither. */
8531 BUG();
Paulo Zanonic8203562012-09-12 10:06:29 -03008532 }
8533
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008534 if (crtc_state->dither)
Paulo Zanonic8203562012-09-12 10:06:29 -03008535 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8536
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008537 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
Paulo Zanonic8203562012-09-12 10:06:29 -03008538 val |= PIPECONF_INTERLACED_ILK;
8539 else
8540 val |= PIPECONF_PROGRESSIVE;
8541
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008542 if (crtc_state->limited_color_range)
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02008543 val |= PIPECONF_COLOR_RANGE_SELECT;
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02008544
Paulo Zanonic8203562012-09-12 10:06:29 -03008545 I915_WRITE(PIPECONF(pipe), val);
8546 POSTING_READ(PIPECONF(pipe));
8547}
8548
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008549static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008550{
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008551 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8552 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8553 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
Jani Nikula391bf042016-03-18 17:05:40 +02008554 u32 val = 0;
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008555
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008556 if (IS_HASWELL(dev_priv) && crtc_state->dither)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008557 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8558
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008559 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008560 val |= PIPECONF_INTERLACED_ILK;
8561 else
8562 val |= PIPECONF_PROGRESSIVE;
8563
Paulo Zanoni702e7a52012-10-23 18:29:59 -02008564 I915_WRITE(PIPECONF(cpu_transcoder), val);
8565 POSTING_READ(PIPECONF(cpu_transcoder));
Jani Nikula391bf042016-03-18 17:05:40 +02008566}
8567
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008568static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
Jani Nikula391bf042016-03-18 17:05:40 +02008569{
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008570 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
8571 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
Jani Nikula391bf042016-03-18 17:05:40 +02008572
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +00008573 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
Jani Nikula391bf042016-03-18 17:05:40 +02008574 u32 val = 0;
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008575
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008576 switch (crtc_state->pipe_bpp) {
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008577 case 18:
8578 val |= PIPEMISC_DITHER_6_BPC;
8579 break;
8580 case 24:
8581 val |= PIPEMISC_DITHER_8_BPC;
8582 break;
8583 case 30:
8584 val |= PIPEMISC_DITHER_10_BPC;
8585 break;
8586 case 36:
8587 val |= PIPEMISC_DITHER_12_BPC;
8588 break;
8589 default:
8590 /* Case prevented by pipe_config_set_bpp. */
8591 BUG();
8592 }
8593
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008594 if (crtc_state->dither)
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008595 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8596
Shashank Sharma8c79f842018-10-12 11:53:09 +05308597 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
8598 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05308599 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
Shashank Sharma8c79f842018-10-12 11:53:09 +05308600
8601 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05308602 val |= PIPEMISC_YUV420_ENABLE |
Shashank Sharmab22ca992017-07-24 19:19:32 +05308603 PIPEMISC_YUV420_MODE_FULL_BLEND;
Shashank Sharmab22ca992017-07-24 19:19:32 +05308604
Jani Nikula391bf042016-03-18 17:05:40 +02008605 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008606 }
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008607}
8608
Paulo Zanonid4b19312012-11-29 11:29:32 -02008609int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8610{
8611 /*
8612 * Account for spread spectrum to avoid
8613 * oversubscribing the link. Max center spread
8614 * is 2.5%; use 5% for safety's sake.
8615 */
8616 u32 bps = target_clock * bpp * 21 / 20;
Ville Syrjälä619d4d02014-02-27 14:23:14 +02008617 return DIV_ROUND_UP(bps, link_bw * 8);
Paulo Zanonid4b19312012-11-29 11:29:32 -02008618}
8619
Daniel Vetter7429e9d2013-04-20 17:19:46 +02008620static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
Daniel Vetter6cf86a52013-04-02 23:38:10 +02008621{
Daniel Vetter7429e9d2013-04-20 17:19:46 +02008622 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
Paulo Zanonif48d8f22012-09-20 18:36:04 -03008623}
8624
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008625static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8626 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03008627 struct dpll *reduced_clock)
Paulo Zanonide13a2e2012-09-20 18:36:05 -03008628{
8629 struct drm_crtc *crtc = &intel_crtc->base;
8630 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008631 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008632 u32 dpll, fp, fp2;
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008633 int factor;
Jesse Barnes79e53942008-11-07 14:24:08 -08008634
Chris Wilsonc1858122010-12-03 21:35:48 +00008635 /* Enable autotuning of the PLL clock (if permissible) */
Eric Anholt8febb292011-03-30 13:01:07 -07008636 factor = 21;
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008637 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Eric Anholt8febb292011-03-30 13:01:07 -07008638 if ((intel_panel_use_ssc(dev_priv) &&
Ville Syrjäläe91e9412013-12-09 18:54:16 +02008639 dev_priv->vbt.lvds_ssc_freq == 100000) ||
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01008640 (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
Eric Anholt8febb292011-03-30 13:01:07 -07008641 factor = 25;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008642 } else if (crtc_state->sdvo_tv_clock)
Eric Anholt8febb292011-03-30 13:01:07 -07008643 factor = 20;
Chris Wilsonc1858122010-12-03 21:35:48 +00008644
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008645 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
Chris Wilsonc1858122010-12-03 21:35:48 +00008646
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008647 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8648 fp |= FP_CB_TUNE;
8649
8650 if (reduced_clock) {
8651 fp2 = i9xx_dpll_compute_fp(reduced_clock);
8652
8653 if (reduced_clock->m < factor * reduced_clock->n)
8654 fp2 |= FP_CB_TUNE;
8655 } else {
8656 fp2 = fp;
8657 }
Daniel Vetter9a7c7892013-04-04 22:20:34 +02008658
Chris Wilson5eddb702010-09-11 13:48:45 +01008659 dpll = 0;
Zhenyu Wang2c072452009-06-05 15:38:42 +08008660
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008661 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
Eric Anholta07d6782011-03-30 13:01:08 -07008662 dpll |= DPLLB_MODE_LVDS;
8663 else
8664 dpll |= DPLLB_MODE_DAC_SERIAL;
Daniel Vetter198a037f2013-04-19 11:14:37 +02008665
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008666 dpll |= (crtc_state->pixel_multiplier - 1)
Daniel Vetteref1b4602013-06-01 17:17:04 +02008667 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
Daniel Vetter198a037f2013-04-19 11:14:37 +02008668
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008669 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8670 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
Daniel Vetter4a33e482013-07-06 12:52:05 +02008671 dpll |= DPLL_SDVO_HIGH_SPEED;
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008672
Ville Syrjälä37a56502016-06-22 21:57:04 +03008673 if (intel_crtc_has_dp_encoder(crtc_state))
Daniel Vetter4a33e482013-07-06 12:52:05 +02008674 dpll |= DPLL_SDVO_HIGH_SPEED;
Jesse Barnes79e53942008-11-07 14:24:08 -08008675
Ville Syrjälä7d7f8632016-09-26 11:30:46 +03008676 /*
8677 * The high speed IO clock is only really required for
8678 * SDVO/HDMI/DP, but we also enable it for CRT to make it
8679 * possible to share the DPLL between CRT and HDMI. Enabling
8680 * the clock needlessly does no real harm, except use up a
8681 * bit of power potentially.
8682 *
8683 * We'll limit this to IVB with 3 pipes, since it has only two
8684 * DPLLs and so DPLL sharing is the only way to get three pipes
8685 * driving PCH ports at the same time. On SNB we could do this,
8686 * and potentially avoid enabling the second DPLL, but it's not
8687 * clear if it''s a win or loss power wise. No point in doing
8688 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
8689 */
8690 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
8691 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
8692 dpll |= DPLL_SDVO_HIGH_SPEED;
8693
Eric Anholta07d6782011-03-30 13:01:08 -07008694 /* compute bitmask from p1 value */
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008695 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
Eric Anholta07d6782011-03-30 13:01:08 -07008696 /* also FPA1 */
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008697 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
Eric Anholta07d6782011-03-30 13:01:08 -07008698
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008699 switch (crtc_state->dpll.p2) {
Eric Anholta07d6782011-03-30 13:01:08 -07008700 case 5:
8701 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8702 break;
8703 case 7:
8704 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8705 break;
8706 case 10:
8707 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8708 break;
8709 case 14:
8710 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8711 break;
Jesse Barnes79e53942008-11-07 14:24:08 -08008712 }
8713
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008714 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8715 intel_panel_use_ssc(dev_priv))
Kristian Høgsberg43565a02009-02-13 20:56:52 -05008716 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
Jesse Barnes79e53942008-11-07 14:24:08 -08008717 else
8718 dpll |= PLL_REF_INPUT_DREFCLK;
8719
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008720 dpll |= DPLL_VCO_ENABLE;
8721
8722 crtc_state->dpll_hw_state.dpll = dpll;
8723 crtc_state->dpll_hw_state.fp0 = fp;
8724 crtc_state->dpll_hw_state.fp1 = fp2;
Paulo Zanonide13a2e2012-09-20 18:36:05 -03008725}
8726
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008727static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8728 struct intel_crtc_state *crtc_state)
Jesse Barnes79e53942008-11-07 14:24:08 -08008729{
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008730 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008731 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03008732 const struct intel_limit *limit;
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008733 int refclk = 120000;
Jesse Barnes79e53942008-11-07 14:24:08 -08008734
Ander Conselvan de Oliveiradd3cd742015-05-15 13:34:29 +03008735 memset(&crtc_state->dpll_hw_state, 0,
8736 sizeof(crtc_state->dpll_hw_state));
8737
Ander Conselvan de Oliveiraded220e2016-03-21 18:00:09 +02008738 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8739 if (!crtc_state->has_pch_encoder)
8740 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08008741
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03008742 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008743 if (intel_panel_use_ssc(dev_priv)) {
8744 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8745 dev_priv->vbt.lvds_ssc_freq);
8746 refclk = dev_priv->vbt.lvds_ssc_freq;
8747 }
8748
8749 if (intel_is_dual_link_lvds(dev)) {
8750 if (refclk == 100000)
8751 limit = &intel_limits_ironlake_dual_lvds_100m;
8752 else
8753 limit = &intel_limits_ironlake_dual_lvds;
8754 } else {
8755 if (refclk == 100000)
8756 limit = &intel_limits_ironlake_single_lvds_100m;
8757 else
8758 limit = &intel_limits_ironlake_single_lvds;
8759 }
8760 } else {
8761 limit = &intel_limits_ironlake_dac;
8762 }
8763
Ander Conselvan de Oliveira364ee292016-03-21 18:00:10 +02008764 if (!crtc_state->clock_set &&
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008765 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8766 refclk, NULL, &crtc_state->dpll)) {
Ander Conselvan de Oliveira364ee292016-03-21 18:00:10 +02008767 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8768 return -EINVAL;
Daniel Vetterf47709a2013-03-28 10:42:02 +01008769 }
Jesse Barnes79e53942008-11-07 14:24:08 -08008770
Gustavo A. R. Silvacbaa3312017-05-15 16:56:05 -05008771 ironlake_compute_dpll(crtc, crtc_state, NULL);
Daniel Vetter66e985c2013-06-05 13:34:20 +02008772
Gustavo A. R. Silvaefd38b62017-05-15 17:00:28 -05008773 if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
Chris Wilson43031782018-09-13 14:16:26 +01008774 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
8775 pipe_name(crtc->pipe));
Ander Conselvan de Oliveiraded220e2016-03-21 18:00:09 +02008776 return -EINVAL;
Ander Conselvan de Oliveira3fb37702014-10-29 11:32:35 +02008777 }
Jesse Barnes79e53942008-11-07 14:24:08 -08008778
Daniel Vetterc8f7a0d2014-04-24 23:55:04 +02008779 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08008780}
8781
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008782static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8783 struct intel_link_m_n *m_n)
Daniel Vetter72419202013-04-04 13:28:53 +02008784{
8785 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008786 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008787 enum pipe pipe = crtc->pipe;
Daniel Vetter72419202013-04-04 13:28:53 +02008788
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008789 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8790 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8791 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8792 & ~TU_SIZE_MASK;
8793 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8794 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8795 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8796}
8797
/*
 * Read back the link and data M/N values from the CPU transcoder
 * registers (gen5+), or from the pipe-based G4X registers on older
 * hardware. @m2_n2, when non-NULL, receives the second M/N set on
 * transcoders that have one.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size fields are stored minus one in the hardware. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
8834
8835void intel_dp_get_m_n(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008836 struct intel_crtc_state *pipe_config)
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008837{
Ander Conselvan de Oliveira681a8502015-01-15 14:55:24 +02008838 if (pipe_config->has_pch_encoder)
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008839 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8840 else
8841 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
Vandana Kannanb95af8b2014-08-05 07:51:23 -07008842 &pipe_config->dp_m_n,
8843 &pipe_config->dp_m2_n2);
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008844}
8845
Daniel Vetter72419202013-04-04 13:28:53 +02008846static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008847 struct intel_crtc_state *pipe_config)
Daniel Vetter72419202013-04-04 13:28:53 +02008848{
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008849 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
Vandana Kannanb95af8b2014-08-05 07:51:23 -07008850 &pipe_config->fdi_m_n, NULL);
Daniel Vetter72419202013-04-04 13:28:53 +02008851}
8852
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008853static void skylake_get_pfit_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008854 struct intel_crtc_state *pipe_config)
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008855{
8856 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008857 struct drm_i915_private *dev_priv = to_i915(dev);
Chandra Kondurua1b22782015-04-07 15:28:45 -07008858 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
8859 uint32_t ps_ctrl = 0;
8860 int id = -1;
8861 int i;
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008862
Chandra Kondurua1b22782015-04-07 15:28:45 -07008863 /* find scaler attached to this pipe */
8864 for (i = 0; i < crtc->num_scalers; i++) {
8865 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
8866 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
8867 id = i;
8868 pipe_config->pch_pfit.enabled = true;
8869 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
8870 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
8871 break;
8872 }
8873 }
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008874
Chandra Kondurua1b22782015-04-07 15:28:45 -07008875 scaler_state->scaler_id = id;
8876 if (id >= 0) {
8877 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
8878 } else {
8879 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008880 }
8881}
8882
Damien Lespiau5724dbd2015-01-20 12:51:52 +00008883static void
8884skylake_get_initial_plane_config(struct intel_crtc *crtc,
8885 struct intel_initial_plane_config *plane_config)
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008886{
8887 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008888 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008889 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8890 enum plane_id plane_id = plane->id;
Ville Syrjäläeade6c82018-01-30 22:38:03 +02008891 enum pipe pipe;
James Ausmus4036c782017-11-13 10:11:28 -08008892 u32 val, base, offset, stride_mult, tiling, alpha;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008893 int fourcc, pixel_format;
Tvrtko Ursulin6761dd32015-03-23 11:10:32 +00008894 unsigned int aligned_height;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008895 struct drm_framebuffer *fb;
Damien Lespiau1b842c82015-01-21 13:50:54 +00008896 struct intel_framebuffer *intel_fb;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008897
Ville Syrjäläeade6c82018-01-30 22:38:03 +02008898 if (!plane->get_hw_state(plane, &pipe))
Ville Syrjälä2924b8c2017-11-17 21:19:16 +02008899 return;
8900
Ville Syrjäläeade6c82018-01-30 22:38:03 +02008901 WARN_ON(pipe != crtc->pipe);
8902
Damien Lespiaud9806c92015-01-21 14:07:19 +00008903 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
Damien Lespiau1b842c82015-01-21 13:50:54 +00008904 if (!intel_fb) {
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008905 DRM_DEBUG_KMS("failed to alloc fb\n");
8906 return;
8907 }
8908
Damien Lespiau1b842c82015-01-21 13:50:54 +00008909 fb = &intel_fb->base;
8910
Ville Syrjäläd2e9f5f2016-11-18 21:52:53 +02008911 fb->dev = dev;
8912
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008913 val = I915_READ(PLANE_CTL(pipe, plane_id));
Damien Lespiau42a7b082015-02-05 19:35:13 +00008914
James Ausmusb5972772018-01-30 11:49:16 -02008915 if (INTEL_GEN(dev_priv) >= 11)
8916 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
8917 else
8918 pixel_format = val & PLANE_CTL_FORMAT_MASK;
James Ausmus4036c782017-11-13 10:11:28 -08008919
8920 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008921 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
James Ausmus4036c782017-11-13 10:11:28 -08008922 alpha &= PLANE_COLOR_ALPHA_MASK;
8923 } else {
8924 alpha = val & PLANE_CTL_ALPHA_MASK;
8925 }
8926
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008927 fourcc = skl_format_to_fourcc(pixel_format,
James Ausmus4036c782017-11-13 10:11:28 -08008928 val & PLANE_CTL_ORDER_RGBX, alpha);
Ville Syrjälä2f3f4762016-11-18 21:52:57 +02008929 fb->format = drm_format_info(fourcc);
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008930
Damien Lespiau40f46282015-02-27 11:15:21 +00008931 tiling = val & PLANE_CTL_TILED_MASK;
8932 switch (tiling) {
8933 case PLANE_CTL_TILED_LINEAR:
Ben Widawsky2f075562017-03-24 14:29:48 -07008934 fb->modifier = DRM_FORMAT_MOD_LINEAR;
Damien Lespiau40f46282015-02-27 11:15:21 +00008935 break;
8936 case PLANE_CTL_TILED_X:
8937 plane_config->tiling = I915_TILING_X;
Ville Syrjäläbae781b2016-11-16 13:33:16 +02008938 fb->modifier = I915_FORMAT_MOD_X_TILED;
Damien Lespiau40f46282015-02-27 11:15:21 +00008939 break;
8940 case PLANE_CTL_TILED_Y:
Imre Deak914a4fd2018-10-16 19:00:11 +03008941 plane_config->tiling = I915_TILING_Y;
Dhinakaran Pandiyan53867b42018-08-21 18:50:53 -07008942 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07008943 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
8944 else
8945 fb->modifier = I915_FORMAT_MOD_Y_TILED;
Damien Lespiau40f46282015-02-27 11:15:21 +00008946 break;
8947 case PLANE_CTL_TILED_YF:
Dhinakaran Pandiyan53867b42018-08-21 18:50:53 -07008948 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07008949 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
8950 else
8951 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
Damien Lespiau40f46282015-02-27 11:15:21 +00008952 break;
8953 default:
8954 MISSING_CASE(tiling);
8955 goto error;
8956 }
8957
Ville Syrjäläf43348a2018-11-20 15:54:50 +02008958 /*
8959 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
8960 * while i915 HW rotation is clockwise, thats why this swapping.
8961 */
8962 switch (val & PLANE_CTL_ROTATE_MASK) {
8963 case PLANE_CTL_ROTATE_0:
8964 plane_config->rotation = DRM_MODE_ROTATE_0;
8965 break;
8966 case PLANE_CTL_ROTATE_90:
8967 plane_config->rotation = DRM_MODE_ROTATE_270;
8968 break;
8969 case PLANE_CTL_ROTATE_180:
8970 plane_config->rotation = DRM_MODE_ROTATE_180;
8971 break;
8972 case PLANE_CTL_ROTATE_270:
8973 plane_config->rotation = DRM_MODE_ROTATE_90;
8974 break;
8975 }
8976
8977 if (INTEL_GEN(dev_priv) >= 10 &&
8978 val & PLANE_CTL_FLIP_HORIZONTAL)
8979 plane_config->rotation |= DRM_MODE_REFLECT_X;
8980
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008981 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008982 plane_config->base = base;
8983
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008984 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008985
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008986 val = I915_READ(PLANE_SIZE(pipe, plane_id));
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008987 fb->height = ((val >> 16) & 0xfff) + 1;
8988 fb->width = ((val >> 0) & 0x1fff) + 1;
8989
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008990 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
Ville Syrjäläb3cf5c02018-09-25 22:37:08 +03008991 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008992 fb->pitches[0] = (val & 0x3ff) * stride_mult;
8993
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02008994 aligned_height = intel_fb_align_height(fb, 0, fb->height);
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008995
Daniel Vetterf37b5c22015-02-10 23:12:27 +01008996 plane_config->size = fb->pitches[0] * aligned_height;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008997
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008998 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8999 crtc->base.name, plane->base.name, fb->width, fb->height,
Ville Syrjälä272725c2016-12-14 23:32:20 +02009000 fb->format->cpp[0] * 8, base, fb->pitches[0],
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00009001 plane_config->size);
9002
Damien Lespiau2d140302015-02-05 17:22:18 +00009003 plane_config->fb = intel_fb;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00009004 return;
9005
9006error:
Matthew Auldd1a3a032016-08-23 16:00:44 +01009007 kfree(intel_fb);
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00009008}
9009
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009010static void ironlake_get_pfit_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02009011 struct intel_crtc_state *pipe_config)
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009012{
9013 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01009014 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009015 uint32_t tmp;
9016
9017 tmp = I915_READ(PF_CTL(crtc->pipe));
9018
9019 if (tmp & PF_ENABLE) {
Chris Wilsonfd4daa92013-08-27 17:04:17 +01009020 pipe_config->pch_pfit.enabled = true;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009021 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9022 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
Daniel Vettercb8b2a32013-06-01 17:16:23 +02009023
9024 /* We currently do not free assignements of panel fitters on
9025 * ivb/hsw (since we don't use the higher upscaling modes which
9026 * differentiates them) so just WARN about this case for now. */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08009027 if (IS_GEN(dev_priv, 7)) {
Daniel Vettercb8b2a32013-06-01 17:16:23 +02009028 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9029 PF_PIPE_SEL_IVB(crtc->pipe));
9030 }
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009031 }
Jesse Barnes79e53942008-11-07 14:24:08 -08009032}
9033
/*
 * Read out the complete hardware state of @crtc on ILK-class hardware into
 * @pipe_config.
 *
 * Takes a reference on the pipe's power domain (bailing out if the domain is
 * off), then reads PIPECONF, the FDI/PCH transcoder state, the shared DPLL,
 * pipe timings and the panel fitter.
 *
 * Returns true if the pipe is enabled and the state was read out, false
 * otherwise. The power reference is dropped again before returning.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* On this platform the pipe->cpu transcoder mapping is 1:1. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Decode bits-per-component from PIPECONF into total pipe bpp. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT/PPT: read the pipe->PLL routing register. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		/* The PLL feeding an active PCH transcoder must be enabled. */
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
							&pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9131
/*
 * Sanity-check (via I915_STATE_WARN) that nothing which depends on LCPLL is
 * still active before we disable it: all CRTCs, the display power well,
 * SPLL/WRPLLs, panel power, the CPU and PCH backlight PWMs, the utility pin,
 * PCH GTC and interrupts. Warnings only — this does not block the disable.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		/* The second CPU PWM exists on HSW only. */
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9166
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03009167static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9168{
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01009169 if (IS_HASWELL(dev_priv))
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03009170 return I915_READ(D_COMP_HSW);
9171 else
9172 return I915_READ(D_COMP_BDW);
9173}
9174
/*
 * Write @val to the D_COMP register.
 *
 * On HSW the write goes through the pcode mailbox (GEN6_PCODE_WRITE_D_COMP),
 * serialized by pcu_lock; a mailbox failure is only logged. On BDW it is a
 * plain MMIO write, posted with a read-back.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	if (IS_HASWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->pcu_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
9188
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 *
 * @switch_to_fclk: first move the CD clock source to FCLK before disabling
 *	the PLL.
 * @allow_power_down: set LCPLL_POWER_DOWN_ALLOW afterwards so the PLL may be
 *	powered down.
 *
 * NOTE: the individual steps below are order-critical per BSpec; timeouts on
 * the intermediate waits are only logged, the sequence continues regardless.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move CDCLK off LCPLL and wait for the switch to complete. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		/* Re-read: hardware may have updated other bits. */
		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait for the PLL to report unlocked (LOCK bit clear). */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the D_COMP compensation and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9240
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 *
 * Reverses hsw_disable_lcpll(): clears POWER_DOWN_ALLOW, re-enables D_COMP
 * compensation, re-locks the PLL and moves the CD clock source back from
 * FCLK. Returns early if LCPLL is already fully up. Finally refreshes the
 * software cdclk state, which may have gone stale while on FCLK.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already locked, enabled, on LCPLL and power-down disallowed? */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Force D_COMP compensation back on. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		/* Switch CDCLK back from FCLK to LCPLL. */
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9296
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		/* Allow the PCH LP partition to power down. */
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	/* Stop CLKOUT_DP and shut down LCPLL (FCLK + power-down allowed). */
	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}
9335
Paulo Zanonia14cb6f2014-03-07 20:08:17 -03009336void hsw_disable_pc8(struct drm_i915_private *dev_priv)
Paulo Zanonic67a4702013-08-19 13:18:09 -03009337{
Paulo Zanonic67a4702013-08-19 13:18:09 -03009338 uint32_t val;
9339
Paulo Zanonic67a4702013-08-19 13:18:09 -03009340 DRM_DEBUG_KMS("Disabling package C8+\n");
9341
9342 hsw_restore_lcpll(dev_priv);
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02009343 lpt_init_pch_refclk(dev_priv);
Paulo Zanonic67a4702013-08-19 13:18:09 -03009344
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01009345 if (HAS_PCH_LPT_LP(dev_priv)) {
Paulo Zanonic67a4702013-08-19 13:18:09 -03009346 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9347 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9348 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9349 }
Paulo Zanonic67a4702013-08-19 13:18:09 -03009350}
9351
/*
 * Compute/reserve the clock source for @crtc on HSW+.
 *
 * Reserves a shared DPLL for the crtc, except for DSI outputs on pre-ICL
 * platforms (their DSI PLL is handled elsewhere; on ICL even DSI goes
 * through the shared DPLL framework).
 *
 * Returns 0 on success, -EINVAL if no suitable PLL could be found.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
	    IS_ICELAKE(dev_priv)) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
				      pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}
9373
Kahola, Mika8b0f7e02017-06-09 15:26:03 -07009374static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9375 enum port port,
9376 struct intel_crtc_state *pipe_config)
9377{
9378 enum intel_dpll_id id;
9379 u32 temp;
9380
9381 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
Paulo Zanonidfbd4502017-08-25 16:40:04 -03009382 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
Kahola, Mika8b0f7e02017-06-09 15:26:03 -07009383
9384 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9385 return;
9386
9387 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9388}
9389
Paulo Zanoni970888e2018-05-21 17:25:44 -07009390static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9391 enum port port,
9392 struct intel_crtc_state *pipe_config)
9393{
9394 enum intel_dpll_id id;
9395 u32 temp;
9396
9397 /* TODO: TBT pll not implemented. */
Vandita Kulkarni8ea59e62018-10-03 12:51:59 +05309398 if (intel_port_is_combophy(dev_priv, port)) {
Paulo Zanoni970888e2018-05-21 17:25:44 -07009399 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9400 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9401 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9402
Vandita Kulkarnia54270d2018-10-03 12:52:00 +05309403 if (WARN_ON(!intel_dpll_is_combophy(id)))
Paulo Zanoni970888e2018-05-21 17:25:44 -07009404 return;
Vandita Kulkarni8ea59e62018-10-03 12:51:59 +05309405 } else if (intel_port_is_tc(dev_priv, port)) {
Vandita Kulkarnicb6caf72018-10-03 12:51:58 +05309406 id = icl_port_to_mg_pll_id(port);
Vandita Kulkarni8ea59e62018-10-03 12:51:59 +05309407 } else {
9408 WARN(1, "Invalid port %x\n", port);
Paulo Zanoni970888e2018-05-21 17:25:44 -07009409 return;
9410 }
9411
9412 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9413}
9414
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309415static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9416 enum port port,
9417 struct intel_crtc_state *pipe_config)
9418{
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009419 enum intel_dpll_id id;
9420
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309421 switch (port) {
9422 case PORT_A:
Imre Deak08250c42016-03-14 19:55:34 +02009423 id = DPLL_ID_SKL_DPLL0;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309424 break;
9425 case PORT_B:
Imre Deak08250c42016-03-14 19:55:34 +02009426 id = DPLL_ID_SKL_DPLL1;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309427 break;
9428 case PORT_C:
Imre Deak08250c42016-03-14 19:55:34 +02009429 id = DPLL_ID_SKL_DPLL2;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309430 break;
9431 default:
9432 DRM_ERROR("Incorrect port type\n");
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009433 return;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309434 }
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009435
9436 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309437}
9438
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009439static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9440 enum port port,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02009441 struct intel_crtc_state *pipe_config)
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009442{
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009443 enum intel_dpll_id id;
Ander Conselvan de Oliveiraa3c988e2016-03-08 17:46:27 +02009444 u32 temp;
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009445
9446 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
Ander Conselvan de Oliveirac8560522016-09-01 15:08:07 -07009447 id = temp >> (port * 3 + 1);
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009448
Ander Conselvan de Oliveirac8560522016-09-01 15:08:07 -07009449 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009450 return;
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009451
9452 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009453}
9454
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009455static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9456 enum port port,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02009457 struct intel_crtc_state *pipe_config)
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009458{
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009459 enum intel_dpll_id id;
Ander Conselvan de Oliveirac8560522016-09-01 15:08:07 -07009460 uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009461
Ander Conselvan de Oliveirac8560522016-09-01 15:08:07 -07009462 switch (ddi_pll_sel) {
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009463 case PORT_CLK_SEL_WRPLL1:
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009464 id = DPLL_ID_WRPLL1;
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009465 break;
9466 case PORT_CLK_SEL_WRPLL2:
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009467 id = DPLL_ID_WRPLL2;
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009468 break;
Maarten Lankhorst00490c22015-11-16 14:42:12 +01009469 case PORT_CLK_SEL_SPLL:
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009470 id = DPLL_ID_SPLL;
Ville Syrjälä79bd23d2015-12-01 23:32:07 +02009471 break;
Ander Conselvan de Oliveira9d16da62016-03-08 17:46:26 +02009472 case PORT_CLK_SEL_LCPLL_810:
9473 id = DPLL_ID_LCPLL_810;
9474 break;
9475 case PORT_CLK_SEL_LCPLL_1350:
9476 id = DPLL_ID_LCPLL_1350;
9477 break;
9478 case PORT_CLK_SEL_LCPLL_2700:
9479 id = DPLL_ID_LCPLL_2700;
9480 break;
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009481 default:
Ander Conselvan de Oliveirac8560522016-09-01 15:08:07 -07009482 MISSING_CASE(ddi_pll_sel);
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009483 /* fall through */
9484 case PORT_CLK_SEL_NONE:
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009485 return;
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009486 }
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009487
9488 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009489}
9490
/*
 * Determine which CPU transcoder feeds @crtc on HSW+ and whether it is
 * enabled, filling in @pipe_config->cpu_transcoder.
 *
 * Defaults to the 1:1 pipe->transcoder mapping, then scans the panel
 * transcoders (eDP, plus DSI0/DSI1 on ICL) to see if one of them is routed
 * to this pipe instead. Takes a reference on the transcoder's power domain
 * and records it in @power_domain_mask so the caller can release it.
 *
 * Returns true if the (resolved) transcoder's pipe is enabled, false if the
 * power domain is off or the pipe is disabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	if (IS_ICELAKE(dev_priv))
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe this panel transcoder is attached to. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		}

		if (trans_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = panel_transcoder;
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9569
/*
 * Check whether @crtc is driven by one of the BXT DSI transcoders
 * (TRANSCODER_DSI_A/C) and, if so, record it in
 * @pipe_config->cpu_transcoder.
 *
 * Power-domain references taken while probing are recorded in
 * @power_domain_mask for the caller to release.
 *
 * Returns true if a DSI transcoder is wired to this crtc's pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* Port A -> DSI transcoder A, port C -> DSI transcoder C. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports routed to a different pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9617
/*
 * Read out the DDI port state for @crtc's transcoder: which port it drives,
 * which shared DPLL feeds that port (dispatched per platform), that PLL's
 * hardware state, and — on pre-gen9 hardware — whether the PCH/FDI
 * transcoder is in use.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* Per-platform port -> DPLL readout. */
	if (IS_ICELAKE(dev_priv))
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		/* A PLL feeding an active port must itself be enabled. */
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
							&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A, which is connected to DDI E.
	 * So just check whether this pipe is wired to DDI E and whether the
	 * PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9663
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01009664static bool haswell_get_pipe_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02009665 struct intel_crtc_state *pipe_config)
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01009666{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00009667 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Imre Deak17290502016-02-12 18:55:11 +02009668 enum intel_display_power_domain power_domain;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02009669 u64 power_domain_mask;
Jani Nikulacf304292016-03-18 17:05:41 +02009670 bool active;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01009671
Imre Deake79dfb52017-07-20 01:50:57 +03009672 intel_crtc_init_scalers(crtc, pipe_config);
Imre Deak5fb9dad2017-07-20 14:28:20 +03009673
Imre Deak17290502016-02-12 18:55:11 +02009674 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9675 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
Imre Deakb5482bd2014-03-05 16:20:55 +02009676 return false;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02009677 power_domain_mask = BIT_ULL(power_domain);
Imre Deak17290502016-02-12 18:55:11 +02009678
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009679 pipe_config->shared_dpll = NULL;
Daniel Vetterc0d43d62013-06-07 23:11:08 +02009680
Jani Nikulacf304292016-03-18 17:05:41 +02009681 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
Daniel Vettereccb1402013-05-22 00:50:22 +02009682
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02009683 if (IS_GEN9_LP(dev_priv) &&
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03009684 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
9685 WARN_ON(active);
9686 active = true;
Jani Nikula4d1de972016-03-18 17:05:42 +02009687 }
9688
Jani Nikulacf304292016-03-18 17:05:41 +02009689 if (!active)
Imre Deak17290502016-02-12 18:55:11 +02009690 goto out;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01009691
Madhav Chauhan2eae5d62018-11-29 16:12:28 +02009692 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
9693 IS_ICELAKE(dev_priv)) {
Jani Nikula4d1de972016-03-18 17:05:42 +02009694 haswell_get_ddi_port_state(crtc, pipe_config);
9695 intel_get_pipe_timings(crtc, pipe_config);
9696 }
Daniel Vetter627eb5a2013-04-29 19:33:42 +02009697
Jani Nikulabc58be62016-03-18 17:05:39 +02009698 intel_get_pipe_src_size(crtc, pipe_config);
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05309699 intel_get_crtc_ycbcr_config(crtc, pipe_config);
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02009700
Lionel Landwerlin05dc6982016-03-16 10:57:15 +00009701 pipe_config->gamma_mode =
9702 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
9703
Imre Deak17290502016-02-12 18:55:11 +02009704 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
9705 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02009706 power_domain_mask |= BIT_ULL(power_domain);
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00009707 if (INTEL_GEN(dev_priv) >= 9)
Jesse Barnesbd2e2442014-11-13 17:51:47 +00009708 skylake_get_pfit_config(crtc, pipe_config);
Jesse Barnesff6d9f52015-01-21 17:19:54 -08009709 else
Rodrigo Vivi1c132b42015-09-02 15:19:26 -07009710 ironlake_get_pfit_config(crtc, pipe_config);
Jesse Barnesbd2e2442014-11-13 17:51:47 +00009711 }
Daniel Vetter88adfff2013-03-28 10:42:01 +01009712
Maarten Lankhorst24f28452017-11-22 19:39:01 +01009713 if (hsw_crtc_supports_ips(crtc)) {
9714 if (IS_HASWELL(dev_priv))
9715 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
9716 else {
9717 /*
9718 * We cannot readout IPS state on broadwell, set to
9719 * true so we can set it to a defined state on first
9720 * commit.
9721 */
9722 pipe_config->ips_enabled = true;
9723 }
9724 }
9725
Jani Nikula4d1de972016-03-18 17:05:42 +02009726 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
9727 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
Clint Taylorebb69c92014-09-30 10:30:22 -07009728 pipe_config->pixel_multiplier =
9729 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
9730 } else {
9731 pipe_config->pixel_multiplier = 1;
9732 }
Daniel Vetter6c49f242013-06-06 12:45:25 +02009733
Imre Deak17290502016-02-12 18:55:11 +02009734out:
9735 for_each_power_domain(power_domain, power_domain_mask)
9736 intel_display_power_put(dev_priv, power_domain);
9737
Jani Nikulacf304292016-03-18 17:05:41 +02009738 return active;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01009739}
9740
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03009741static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
Ville Syrjälä1cecc832017-03-27 21:55:34 +03009742{
9743 struct drm_i915_private *dev_priv =
9744 to_i915(plane_state->base.plane->dev);
9745 const struct drm_framebuffer *fb = plane_state->base.fb;
9746 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9747 u32 base;
9748
José Roberto de Souzad53db442018-11-30 15:20:48 -08009749 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
Ville Syrjälä1cecc832017-03-27 21:55:34 +03009750 base = obj->phys_handle->busaddr;
9751 else
9752 base = intel_plane_ggtt_offset(plane_state);
9753
Ville Syrjäläc11ada02018-09-07 18:24:04 +03009754 base += plane_state->color_plane[0].offset;
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03009755
Ville Syrjälä1cecc832017-03-27 21:55:34 +03009756 /* ILK+ do this automagically */
9757 if (HAS_GMCH_DISPLAY(dev_priv) &&
Dave Airliea82256b2017-05-30 15:25:28 +10009758 plane_state->base.rotation & DRM_MODE_ROTATE_180)
Ville Syrjälä1cecc832017-03-27 21:55:34 +03009759 base += (plane_state->base.crtc_h *
9760 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
9761
9762 return base;
9763}
9764
Ville Syrjäläed270222017-03-27 21:55:36 +03009765static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9766{
9767 int x = plane_state->base.crtc_x;
9768 int y = plane_state->base.crtc_y;
9769 u32 pos = 0;
9770
9771 if (x < 0) {
9772 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9773 x = -x;
9774 }
9775 pos |= x << CURSOR_X_SHIFT;
9776
9777 if (y < 0) {
9778 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9779 y = -y;
9780 }
9781 pos |= y << CURSOR_Y_SHIFT;
9782
9783 return pos;
9784}
9785
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009786static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9787{
9788 const struct drm_mode_config *config =
9789 &plane_state->base.plane->dev->mode_config;
9790 int width = plane_state->base.crtc_w;
9791 int height = plane_state->base.crtc_h;
9792
9793 return width > 0 && width <= config->cursor_width &&
9794 height > 0 && height <= config->cursor_height;
9795}
9796
Ville Syrjäläfce8d232018-09-07 18:24:13 +03009797static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
Ville Syrjälä659056f2017-03-27 21:55:39 +03009798{
9799 const struct drm_framebuffer *fb = plane_state->base.fb;
Ville Syrjälädf79cf42018-09-11 18:01:39 +03009800 unsigned int rotation = plane_state->base.rotation;
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03009801 int src_x, src_y;
9802 u32 offset;
Ville Syrjäläfc3fed52018-09-18 17:02:43 +03009803 int ret;
Ville Syrjäläfce8d232018-09-07 18:24:13 +03009804
9805 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
9806 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
9807
Ville Syrjäläfc3fed52018-09-18 17:02:43 +03009808 ret = intel_plane_check_stride(plane_state);
9809 if (ret)
9810 return ret;
9811
Ville Syrjäläfce8d232018-09-07 18:24:13 +03009812 src_x = plane_state->base.src_x >> 16;
9813 src_y = plane_state->base.src_y >> 16;
9814
9815 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
9816 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
9817 plane_state, 0);
9818
9819 if (src_x != 0 || src_y != 0) {
9820 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
9821 return -EINVAL;
9822 }
9823
9824 plane_state->color_plane[0].offset = offset;
9825
9826 return 0;
9827}
9828
/*
 * Common atomic check for cursor planes: rejects tiled fbs, runs the
 * core plane-state check (cursors cannot scale), and, when the plane
 * ends up visible, validates the src coordinates and surface layout.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* A plane turned invisible by clipping needs no further checks. */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
9861
Ville Syrjäläddd57132018-09-07 18:24:02 +03009862static unsigned int
9863i845_cursor_max_stride(struct intel_plane *plane,
9864 u32 pixel_format, u64 modifier,
9865 unsigned int rotation)
9866{
9867 return 2048;
9868}
9869
Ville Syrjälä292889e2017-03-17 23:18:01 +02009870static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9871 const struct intel_plane_state *plane_state)
9872{
Ville Syrjälä292889e2017-03-17 23:18:01 +02009873 return CURSOR_ENABLE |
9874 CURSOR_GAMMA_ENABLE |
9875 CURSOR_FORMAT_ARGB |
Ville Syrjälädf79cf42018-09-11 18:01:39 +03009876 CURSOR_STRIDE(plane_state->color_plane[0].stride);
Ville Syrjälä292889e2017-03-17 23:18:01 +02009877}
9878
Ville Syrjälä659056f2017-03-27 21:55:39 +03009879static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9880{
Ville Syrjälä659056f2017-03-27 21:55:39 +03009881 int width = plane_state->base.crtc_w;
Ville Syrjälä659056f2017-03-27 21:55:39 +03009882
9883 /*
9884 * 845g/865g are only limited by the width of their cursors,
9885 * the height is arbitrary up to the precision of the register.
9886 */
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009887 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
Ville Syrjälä659056f2017-03-27 21:55:39 +03009888}
9889
Ville Syrjäläeb0f5042018-08-28 17:27:06 +03009890static int i845_check_cursor(struct intel_crtc_state *crtc_state,
Ville Syrjälä659056f2017-03-27 21:55:39 +03009891 struct intel_plane_state *plane_state)
9892{
9893 const struct drm_framebuffer *fb = plane_state->base.fb;
Ville Syrjälä659056f2017-03-27 21:55:39 +03009894 int ret;
9895
9896 ret = intel_check_cursor(crtc_state, plane_state);
9897 if (ret)
9898 return ret;
9899
9900 /* if we want to turn off the cursor ignore width and height */
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009901 if (!fb)
Ville Syrjälä659056f2017-03-27 21:55:39 +03009902 return 0;
9903
9904 /* Check for which cursor types we support */
9905 if (!i845_cursor_size_ok(plane_state)) {
9906 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
9907 plane_state->base.crtc_w,
9908 plane_state->base.crtc_h);
9909 return -EINVAL;
9910 }
9911
Ville Syrjälädf79cf42018-09-11 18:01:39 +03009912 WARN_ON(plane_state->base.visible &&
9913 plane_state->color_plane[0].stride != fb->pitches[0]);
9914
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009915 switch (fb->pitches[0]) {
Chris Wilson560b85b2010-08-07 11:01:38 +01009916 case 256:
9917 case 512:
9918 case 1024:
9919 case 2048:
Ville Syrjälädc41c152014-08-13 11:57:05 +03009920 break;
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009921 default:
9922 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
9923 fb->pitches[0]);
9924 return -EINVAL;
Chris Wilson560b85b2010-08-07 11:01:38 +01009925 }
Maarten Lankhorst55a08b3f2016-01-07 11:54:10 +01009926
Ville Syrjälä659056f2017-03-27 21:55:39 +03009927 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
9928
9929 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08009930}
9931
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009932static void i845_update_cursor(struct intel_plane *plane,
9933 const struct intel_crtc_state *crtc_state,
Chris Wilson560b85b2010-08-07 11:01:38 +01009934 const struct intel_plane_state *plane_state)
9935{
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +03009936 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009937 u32 cntl = 0, base = 0, pos = 0, size = 0;
9938 unsigned long irqflags;
Chris Wilson560b85b2010-08-07 11:01:38 +01009939
Ville Syrjälä936e71e2016-07-26 19:06:59 +03009940 if (plane_state && plane_state->base.visible) {
Maarten Lankhorst55a08b3f2016-01-07 11:54:10 +01009941 unsigned int width = plane_state->base.crtc_w;
9942 unsigned int height = plane_state->base.crtc_h;
Ville Syrjälädc41c152014-08-13 11:57:05 +03009943
Ville Syrjäläa0864d52017-03-23 21:27:09 +02009944 cntl = plane_state->ctl;
Ville Syrjälädc41c152014-08-13 11:57:05 +03009945 size = (height << 12) | width;
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009946
9947 base = intel_cursor_base(plane_state);
9948 pos = intel_cursor_position(plane_state);
Chris Wilson4b0e3332014-05-30 16:35:26 +03009949 }
Chris Wilson560b85b2010-08-07 11:01:38 +01009950
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009951 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
9952
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +03009953 /* On these chipsets we can only modify the base/size/stride
9954 * whilst the cursor is disabled.
9955 */
9956 if (plane->cursor.base != base ||
9957 plane->cursor.size != size ||
9958 plane->cursor.cntl != cntl) {
Ville Syrjälädd584fc2017-03-09 17:44:33 +02009959 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
Ville Syrjälädd584fc2017-03-09 17:44:33 +02009960 I915_WRITE_FW(CURBASE(PIPE_A), base);
Ville Syrjälädd584fc2017-03-09 17:44:33 +02009961 I915_WRITE_FW(CURSIZE, size);
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009962 I915_WRITE_FW(CURPOS(PIPE_A), pos);
Ville Syrjälädd584fc2017-03-09 17:44:33 +02009963 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
Ville Syrjälä75343a42017-03-27 21:55:38 +03009964
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +03009965 plane->cursor.base = base;
9966 plane->cursor.size = size;
9967 plane->cursor.cntl = cntl;
9968 } else {
9969 I915_WRITE_FW(CURPOS(PIPE_A), pos);
Ville Syrjälädc41c152014-08-13 11:57:05 +03009970 }
9971
Ville Syrjäläb2d03b02017-03-27 21:55:37 +03009972 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
9973}
9974
/* Disable the 845g/865g cursor by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
9980
Ville Syrjäläeade6c82018-01-30 22:38:03 +02009981static bool i845_cursor_get_hw_state(struct intel_plane *plane,
9982 enum pipe *pipe)
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02009983{
9984 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9985 enum intel_display_power_domain power_domain;
9986 bool ret;
9987
9988 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
9989 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9990 return false;
9991
9992 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
9993
Ville Syrjäläeade6c82018-01-30 22:38:03 +02009994 *pipe = PIPE_A;
9995
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02009996 intel_display_power_put(dev_priv, power_domain);
9997
9998 return ret;
9999}
10000
Ville Syrjäläddd57132018-09-07 18:24:02 +030010001static unsigned int
10002i9xx_cursor_max_stride(struct intel_plane *plane,
10003 u32 pixel_format, u64 modifier,
10004 unsigned int rotation)
10005{
10006 return plane->base.dev->mode_config.cursor_width * 4;
10007}
10008
Ville Syrjälä292889e2017-03-17 23:18:01 +020010009static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10010 const struct intel_plane_state *plane_state)
10011{
10012 struct drm_i915_private *dev_priv =
10013 to_i915(plane_state->base.plane->dev);
10014 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
José Roberto de Souzac894d632018-05-18 13:15:47 -070010015 u32 cntl = 0;
Ville Syrjälä292889e2017-03-17 23:18:01 +020010016
Lucas De Marchicf819ef2018-12-12 10:10:43 -080010017 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
Ville Syrjäläe876b782018-01-30 22:38:05 +020010018 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10019
José Roberto de Souzac894d632018-05-18 13:15:47 -070010020 if (INTEL_GEN(dev_priv) <= 10) {
10021 cntl |= MCURSOR_GAMMA_ENABLE;
Ville Syrjälä292889e2017-03-17 23:18:01 +020010022
José Roberto de Souzac894d632018-05-18 13:15:47 -070010023 if (HAS_DDI(dev_priv))
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +020010024 cntl |= MCURSOR_PIPE_CSC_ENABLE;
José Roberto de Souzac894d632018-05-18 13:15:47 -070010025 }
Ville Syrjälä292889e2017-03-17 23:18:01 +020010026
Ville Syrjälä32ea06b2018-01-30 22:38:01 +020010027 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10028 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
Ville Syrjälä292889e2017-03-17 23:18:01 +020010029
10030 switch (plane_state->base.crtc_w) {
10031 case 64:
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +020010032 cntl |= MCURSOR_MODE_64_ARGB_AX;
Ville Syrjälä292889e2017-03-17 23:18:01 +020010033 break;
10034 case 128:
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +020010035 cntl |= MCURSOR_MODE_128_ARGB_AX;
Ville Syrjälä292889e2017-03-17 23:18:01 +020010036 break;
10037 case 256:
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +020010038 cntl |= MCURSOR_MODE_256_ARGB_AX;
Ville Syrjälä292889e2017-03-17 23:18:01 +020010039 break;
10040 default:
10041 MISSING_CASE(plane_state->base.crtc_w);
10042 return 0;
10043 }
10044
Robert Fossc2c446a2017-05-19 16:50:17 -040010045 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +020010046 cntl |= MCURSOR_ROTATE_180;
Ville Syrjälä292889e2017-03-17 23:18:01 +020010047
10048 return cntl;
10049}
10050
Ville Syrjälä659056f2017-03-27 21:55:39 +030010051static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
Chris Wilson560b85b2010-08-07 11:01:38 +010010052{
Ville Syrjälä024faac2017-03-27 21:55:42 +030010053 struct drm_i915_private *dev_priv =
10054 to_i915(plane_state->base.plane->dev);
Ville Syrjälä659056f2017-03-27 21:55:39 +030010055 int width = plane_state->base.crtc_w;
10056 int height = plane_state->base.crtc_h;
Chris Wilson560b85b2010-08-07 11:01:38 +010010057
Ville Syrjälä3637ecf2017-03-27 21:55:40 +030010058 if (!intel_cursor_size_ok(plane_state))
Ville Syrjälädc41c152014-08-13 11:57:05 +030010059 return false;
10060
Ville Syrjälä024faac2017-03-27 21:55:42 +030010061 /* Cursor width is limited to a few power-of-two sizes */
10062 switch (width) {
Ville Syrjälä659056f2017-03-27 21:55:39 +030010063 case 256:
10064 case 128:
Ville Syrjälä659056f2017-03-27 21:55:39 +030010065 case 64:
10066 break;
10067 default:
10068 return false;
10069 }
10070
Ville Syrjälädc41c152014-08-13 11:57:05 +030010071 /*
Ville Syrjälä024faac2017-03-27 21:55:42 +030010072 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10073 * height from 8 lines up to the cursor width, when the
10074 * cursor is not rotated. Everything else requires square
10075 * cursors.
Ville Syrjälädc41c152014-08-13 11:57:05 +030010076 */
Ville Syrjälä024faac2017-03-27 21:55:42 +030010077 if (HAS_CUR_FBC(dev_priv) &&
Dave Airliea82256b2017-05-30 15:25:28 +100010078 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
Ville Syrjälä024faac2017-03-27 21:55:42 +030010079 if (height < 8 || height > width)
Ville Syrjälädc41c152014-08-13 11:57:05 +030010080 return false;
10081 } else {
Ville Syrjälä024faac2017-03-27 21:55:42 +030010082 if (height != width)
Ville Syrjälädc41c152014-08-13 11:57:05 +030010083 return false;
Ville Syrjälädc41c152014-08-13 11:57:05 +030010084 }
10085
10086 return true;
10087}
10088
Ville Syrjäläeb0f5042018-08-28 17:27:06 +030010089static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
Ville Syrjälä659056f2017-03-27 21:55:39 +030010090 struct intel_plane_state *plane_state)
10091{
Ville Syrjäläeb0f5042018-08-28 17:27:06 +030010092 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
Ville Syrjälä659056f2017-03-27 21:55:39 +030010093 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10094 const struct drm_framebuffer *fb = plane_state->base.fb;
Ville Syrjälä659056f2017-03-27 21:55:39 +030010095 enum pipe pipe = plane->pipe;
Ville Syrjälä659056f2017-03-27 21:55:39 +030010096 int ret;
10097
10098 ret = intel_check_cursor(crtc_state, plane_state);
10099 if (ret)
10100 return ret;
10101
10102 /* if we want to turn off the cursor ignore width and height */
Ville Syrjälä1e1bb872017-03-27 21:55:41 +030010103 if (!fb)
Ville Syrjälä659056f2017-03-27 21:55:39 +030010104 return 0;
10105
10106 /* Check for which cursor types we support */
10107 if (!i9xx_cursor_size_ok(plane_state)) {
10108 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10109 plane_state->base.crtc_w,
10110 plane_state->base.crtc_h);
10111 return -EINVAL;
10112 }
10113
Ville Syrjälädf79cf42018-09-11 18:01:39 +030010114 WARN_ON(plane_state->base.visible &&
10115 plane_state->color_plane[0].stride != fb->pitches[0]);
10116
Ville Syrjälä1e1bb872017-03-27 21:55:41 +030010117 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10118 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10119 fb->pitches[0], plane_state->base.crtc_w);
10120 return -EINVAL;
Ville Syrjälä659056f2017-03-27 21:55:39 +030010121 }
10122
10123 /*
10124 * There's something wrong with the cursor on CHV pipe C.
10125 * If it straddles the left edge of the screen then
10126 * moving it away from the edge or disabling it often
10127 * results in a pipe underrun, and often that can lead to
10128 * dead pipe (constant underrun reported, and it scans
10129 * out just a solid color). To recover from that, the
10130 * display power well must be turned off and on again.
10131 * Refuse the put the cursor into that compromised position.
10132 */
10133 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10134 plane_state->base.visible && plane_state->base.crtc_x < 0) {
10135 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10136 return -EINVAL;
10137 }
10138
10139 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
10140
10141 return 0;
10142}
10143
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030010144static void i9xx_update_cursor(struct intel_plane *plane,
10145 const struct intel_crtc_state *crtc_state,
Sagar Kamble4726e0b2014-03-10 17:06:23 +053010146 const struct intel_plane_state *plane_state)
10147{
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +030010148 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10149 enum pipe pipe = plane->pipe;
Ville Syrjälä024faac2017-03-27 21:55:42 +030010150 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030010151 unsigned long irqflags;
Sagar Kamble4726e0b2014-03-10 17:06:23 +053010152
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030010153 if (plane_state && plane_state->base.visible) {
Ville Syrjäläa0864d52017-03-23 21:27:09 +020010154 cntl = plane_state->ctl;
Chris Wilson4b0e3332014-05-30 16:35:26 +030010155
Ville Syrjälä024faac2017-03-27 21:55:42 +030010156 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10157 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
10158
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030010159 base = intel_cursor_base(plane_state);
10160 pos = intel_cursor_position(plane_state);
10161 }
10162
10163 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10164
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +030010165 /*
10166 * On some platforms writing CURCNTR first will also
10167 * cause CURPOS to be armed by the CURBASE write.
10168 * Without the CURCNTR write the CURPOS write would
Ville Syrjälä83234d12018-11-14 23:07:17 +020010169 * arm itself. Thus we always update CURCNTR before
10170 * CURPOS.
Ville Syrjälä8753d2b2017-07-14 18:52:27 +030010171 *
10172 * On other platforms CURPOS always requires the
10173 * CURBASE write to arm the update. Additonally
10174 * a write to any of the cursor register will cancel
10175 * an already armed cursor update. Thus leaving out
10176 * the CURBASE write after CURPOS could lead to a
10177 * cursor that doesn't appear to move, or even change
10178 * shape. Thus we always write CURBASE.
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +030010179 *
Ville Syrjälä83234d12018-11-14 23:07:17 +020010180 * The other registers are armed by by the CURBASE write
10181 * except when the plane is getting enabled at which time
10182 * the CURCNTR write arms the update.
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +030010183 */
Ville Syrjäläff43bc32018-11-27 18:59:00 +020010184
10185 if (INTEL_GEN(dev_priv) >= 9)
10186 skl_write_cursor_wm(plane, crtc_state);
10187
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +030010188 if (plane->cursor.base != base ||
Ville Syrjälä024faac2017-03-27 21:55:42 +030010189 plane->cursor.size != fbc_ctl ||
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +030010190 plane->cursor.cntl != cntl) {
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +030010191 if (HAS_CUR_FBC(dev_priv))
10192 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
Ville Syrjälä83234d12018-11-14 23:07:17 +020010193 I915_WRITE_FW(CURCNTR(pipe), cntl);
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +030010194 I915_WRITE_FW(CURPOS(pipe), pos);
Ville Syrjälä75343a42017-03-27 21:55:38 +030010195 I915_WRITE_FW(CURBASE(pipe), base);
10196
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +030010197 plane->cursor.base = base;
10198 plane->cursor.size = fbc_ctl;
10199 plane->cursor.cntl = cntl;
10200 } else {
10201 I915_WRITE_FW(CURPOS(pipe), pos);
Ville Syrjälä8753d2b2017-07-14 18:52:27 +030010202 I915_WRITE_FW(CURBASE(pipe), base);
Ville Syrjäläe11ffdd2017-03-27 21:55:46 +030010203 }
10204
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030010205 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
Jesse Barnes65a21cd2011-10-12 11:10:21 -070010206}
Ville Syrjälä5efb3e22014-04-09 13:28:53 +030010207
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030010208static void i9xx_disable_cursor(struct intel_plane *plane,
Ville Syrjälä0dd14be2018-11-14 23:07:20 +020010209 const struct intel_crtc_state *crtc_state)
Chris Wilsoncda4b7d2010-07-09 08:45:04 +010010210{
Ville Syrjälä0dd14be2018-11-14 23:07:20 +020010211 i9xx_update_cursor(plane, crtc_state, NULL);
Chris Wilsoncda4b7d2010-07-09 08:45:04 +010010212}
Ville Syrjäläd6e4db12013-09-04 18:25:31 +030010213
Ville Syrjäläeade6c82018-01-30 22:38:03 +020010214static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10215 enum pipe *pipe)
Ville Syrjälä51f5a0962017-11-17 21:19:08 +020010216{
10217 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10218 enum intel_display_power_domain power_domain;
Ville Syrjälä51f5a0962017-11-17 21:19:08 +020010219 bool ret;
Ville Syrjäläeade6c82018-01-30 22:38:03 +020010220 u32 val;
Ville Syrjälä51f5a0962017-11-17 21:19:08 +020010221
10222 /*
10223 * Not 100% correct for planes that can move between pipes,
10224 * but that's only the case for gen2-3 which don't have any
10225 * display power wells.
10226 */
Ville Syrjäläeade6c82018-01-30 22:38:03 +020010227 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
Ville Syrjälä51f5a0962017-11-17 21:19:08 +020010228 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10229 return false;
10230
Ville Syrjäläeade6c82018-01-30 22:38:03 +020010231 val = I915_READ(CURCNTR(plane->pipe));
10232
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +020010233 ret = val & MCURSOR_MODE;
Ville Syrjäläeade6c82018-01-30 22:38:03 +020010234
10235 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10236 *pipe = plane->pipe;
10237 else
10238 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10239 MCURSOR_PIPE_SELECT_SHIFT;
Ville Syrjälä51f5a0962017-11-17 21:19:08 +020010240
10241 intel_display_power_put(dev_priv, power_domain);
10242
10243 return ret;
10244}
Chris Wilsoncda4b7d2010-07-09 08:45:04 +010010245
Jesse Barnes79e53942008-11-07 14:24:08 -080010246/* VESA 640x480x72Hz mode to set on the pipe */
Ville Syrjäläbacdcd52017-05-18 22:38:37 +030010247static const struct drm_display_mode load_detect_mode = {
Jesse Barnes79e53942008-11-07 14:24:08 -080010248 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10249 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10250};
10251
Daniel Vettera8bb6812014-02-10 18:00:39 +010010252struct drm_framebuffer *
Chris Wilson24dbf512017-02-15 10:59:18 +000010253intel_framebuffer_create(struct drm_i915_gem_object *obj,
10254 struct drm_mode_fb_cmd2 *mode_cmd)
Chris Wilsond2dff872011-04-19 08:36:26 +010010255{
10256 struct intel_framebuffer *intel_fb;
10257 int ret;
10258
10259 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020010260 if (!intel_fb)
Chris Wilsond2dff872011-04-19 08:36:26 +010010261 return ERR_PTR(-ENOMEM);
Chris Wilsond2dff872011-04-19 08:36:26 +010010262
Chris Wilson24dbf512017-02-15 10:59:18 +000010263 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
Daniel Vetterdd4916c2013-10-09 21:23:51 +020010264 if (ret)
10265 goto err;
Chris Wilsond2dff872011-04-19 08:36:26 +010010266
10267 return &intel_fb->base;
Daniel Vetterdd4916c2013-10-09 21:23:51 +020010268
Lukas Wunnerdcb13942015-07-04 11:50:58 +020010269err:
10270 kfree(intel_fb);
Daniel Vetterdd4916c2013-10-09 21:23:51 +020010271 return ERR_PTR(ret);
Chris Wilsond2dff872011-04-19 08:36:26 +010010272}
10273
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010274static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10275 struct drm_crtc *crtc)
Chris Wilsond2dff872011-04-19 08:36:26 +010010276{
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010277 struct drm_plane *plane;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010278 struct drm_plane_state *plane_state;
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010279 int ret, i;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010280
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010281 ret = drm_atomic_add_affected_planes(state, crtc);
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010282 if (ret)
10283 return ret;
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010284
10285 for_each_new_plane_in_state(state, plane, plane_state, i) {
10286 if (plane_state->crtc != crtc)
10287 continue;
10288
10289 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10290 if (ret)
10291 return ret;
10292
10293 drm_atomic_set_fb_for_plane(plane_state, NULL);
10294 }
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010295
10296 return 0;
10297}
10298
/*
 * intel_get_load_detect_pipe - light up a pipe for load-based connector probing
 * @connector: connector to probe
 * @mode: mode to program, or NULL to fall back to load_detect_mode
 * @old: storage for the undo state consumed by intel_release_load_detect_pipe()
 * @ctx: modeset acquire context; connection_mutex must already be locked
 *
 * Temporarily commits a minimal configuration (all planes detached) that
 * drives @connector from either its current CRTC or the first unused CRTC
 * the encoder can use.  A duplicate of the pre-existing state is saved in
 * @old->restore_state so the caller can undo the probe later.
 *
 * Returns "true" (1) on success, "false" (0) on failure, or -EDEADLK when
 * the caller must back off and restart the locking sequence.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;	/* i indexes encoder->possible_crtcs bits below */

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	/* Nothing to restore yet; only set on successful commit below. */
	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Skip CRTCs that are already in use; drop their lock again. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state for the probe config, one duplicate for restoring. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Probe with a bare pipe: no planes enabled. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Snapshot the current connector/crtc/plane state for restoring. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* Success: hand the restore state to the caller, drop our probe ref. */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must be propagated so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
10456
Daniel Vetterd2434ab2012-08-12 21:20:10 +020010457void intel_release_load_detect_pipe(struct drm_connector *connector,
Ander Conselvan de Oliveira49172fe2015-03-20 16:18:02 +020010458 struct intel_load_detect_pipe *old,
10459 struct drm_modeset_acquire_ctx *ctx)
Jesse Barnes79e53942008-11-07 14:24:08 -080010460{
Daniel Vetterd2434ab2012-08-12 21:20:10 +020010461 struct intel_encoder *intel_encoder =
10462 intel_attached_encoder(connector);
Chris Wilson4ef69c72010-09-09 15:14:28 +010010463 struct drm_encoder *encoder = &intel_encoder->base;
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010464 struct drm_atomic_state *state = old->restore_state;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010465 int ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080010466
Chris Wilsond2dff872011-04-19 08:36:26 +010010467 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +030010468 connector->base.id, connector->name,
Jani Nikula8e329a032014-06-03 14:56:21 +030010469 encoder->base.id, encoder->name);
Chris Wilsond2dff872011-04-19 08:36:26 +010010470
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010471 if (!state)
Chris Wilson0622a532011-04-21 09:32:11 +010010472 return;
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010473
Maarten Lankhorst581e49f2017-01-16 10:37:38 +010010474 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
Chris Wilson08536952016-10-14 13:18:18 +010010475 if (ret)
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010476 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
Chris Wilson08536952016-10-14 13:18:18 +010010477 drm_atomic_state_put(state);
Jesse Barnes79e53942008-11-07 14:24:08 -080010478}
10479
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010480static int i9xx_pll_refclk(struct drm_device *dev,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010481 const struct intel_crtc_state *pipe_config)
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010482{
Chris Wilsonfac5e232016-07-04 11:34:36 +010010483 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010484 u32 dpll = pipe_config->dpll_hw_state.dpll;
10485
10486 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
Ville Syrjäläe91e9412013-12-09 18:54:16 +020010487 return dev_priv->vbt.lvds_ssc_freq;
Tvrtko Ursulin6e266952016-10-13 11:02:53 +010010488 else if (HAS_PCH_SPLIT(dev_priv))
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010489 return 120000;
Lucas De Marchicf819ef2018-12-12 10:10:43 -080010490 else if (!IS_GEN(dev_priv, 2))
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010491 return 96000;
10492 else
10493 return 48000;
10494}
10495
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Reconstructs the PLL output frequency from the DPLL/FP divisor values
 * captured in pipe_config->dpll_hw_state and stores the result (which
 * still includes the pixel multiplier) in pipe_config->port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/* NOTE(review): assumes cpu_transcoder maps 1:1 to pipe here — confirm */
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP0/FP1 divisor set selected by the DPLL rate-select bit. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	/* Pineview encodes N as a one-hot field (hence the ffs() - 1). */
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* Gen3+: P1 is a one-hot bitfield, P2 depends on the DPLL mode. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Unknown mode: bail without touching port_clock. */
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: different P1/P2 encoding; LVDS (pipe B) has own rules. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10585
Ville Syrjälä6878da02013-09-13 15:59:11 +030010586int intel_dotclock_calculate(int link_freq,
10587 const struct intel_link_m_n *m_n)
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010588{
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010589 /*
10590 * The calculation for the data clock is:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010591 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010592 * But we want to avoid losing precison if possible, so:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010593 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010594 *
10595 * and the link clock is simpler:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010596 * link_clock = (m * link_clock) / n
Jesse Barnes79e53942008-11-07 14:24:08 -080010597 */
10598
Ville Syrjälä6878da02013-09-13 15:59:11 +030010599 if (!m_n->link_n)
10600 return 0;
10601
Chris Wilson31236982017-09-13 11:51:53 +010010602 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
Ville Syrjälä6878da02013-09-13 15:59:11 +030010603}
10604
Ville Syrjälä18442d02013-09-13 16:00:08 +030010605static void ironlake_pch_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010606 struct intel_crtc_state *pipe_config)
Ville Syrjälä6878da02013-09-13 15:59:11 +030010607{
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020010608 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ville Syrjälä18442d02013-09-13 16:00:08 +030010609
10610 /* read out port_clock from the DPLL */
10611 i9xx_crtc_clock_get(crtc, pipe_config);
Ville Syrjälä6878da02013-09-13 15:59:11 +030010612
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010613 /*
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020010614 * In case there is an active pipe without active ports,
10615 * we may need some idea for the dotclock anyway.
10616 * Calculate one based on the FDI configuration.
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010617 */
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020010618 pipe_config->base.adjusted_mode.crtc_clock =
Ville Syrjälä21a727b2016-02-17 21:41:10 +020010619 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
Ville Syrjälä18442d02013-09-13 16:00:08 +030010620 &pipe_config->fdi_m_n);
Jesse Barnes79e53942008-11-07 14:24:08 -080010621}
10622
Ville Syrjäläde330812017-10-09 19:19:50 +030010623/* Returns the currently programmed mode of the given encoder. */
10624struct drm_display_mode *
10625intel_encoder_current_mode(struct intel_encoder *encoder)
Jesse Barnes79e53942008-11-07 14:24:08 -080010626{
Ville Syrjäläde330812017-10-09 19:19:50 +030010627 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10628 struct intel_crtc_state *crtc_state;
Jesse Barnes79e53942008-11-07 14:24:08 -080010629 struct drm_display_mode *mode;
Ville Syrjäläde330812017-10-09 19:19:50 +030010630 struct intel_crtc *crtc;
10631 enum pipe pipe;
10632
10633 if (!encoder->get_hw_state(encoder, &pipe))
10634 return NULL;
10635
10636 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Jesse Barnes79e53942008-11-07 14:24:08 -080010637
10638 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10639 if (!mode)
10640 return NULL;
10641
Ville Syrjäläde330812017-10-09 19:19:50 +030010642 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10643 if (!crtc_state) {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +000010644 kfree(mode);
10645 return NULL;
10646 }
10647
Ville Syrjäläde330812017-10-09 19:19:50 +030010648 crtc_state->base.crtc = &crtc->base;
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010649
Ville Syrjäläde330812017-10-09 19:19:50 +030010650 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10651 kfree(crtc_state);
10652 kfree(mode);
10653 return NULL;
10654 }
Ville Syrjäläe30a1542016-04-01 18:37:25 +030010655
Ville Syrjäläde330812017-10-09 19:19:50 +030010656 encoder->get_config(encoder, crtc_state);
Ville Syrjäläe30a1542016-04-01 18:37:25 +030010657
Ville Syrjäläde330812017-10-09 19:19:50 +030010658 intel_mode_from_pipe_config(mode, crtc_state);
Jesse Barnes79e53942008-11-07 14:24:08 -080010659
Ville Syrjäläde330812017-10-09 19:19:50 +030010660 kfree(crtc_state);
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +000010661
Jesse Barnes79e53942008-11-07 14:24:08 -080010662 return mode;
10663}
10664
/* drm_crtc_funcs .destroy hook: unregister the CRTC and free its memory. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(to_intel_crtc(crtc));
}
10672
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010673/**
10674 * intel_wm_need_update - Check whether watermarks need updating
Chris Wilson6bf19812018-12-31 14:35:05 +000010675 * @cur: current plane state
10676 * @new: new plane state
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010677 *
10678 * Check current plane state versus the new one to determine whether
10679 * watermarks need to be recalculated.
10680 *
10681 * Returns true or false.
10682 */
Matt Ropercd1d3ee2018-12-10 13:54:14 -080010683static bool intel_wm_need_update(struct intel_plane_state *cur,
10684 struct intel_plane_state *new)
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010685{
Matt Roperd21fbe82015-09-24 15:53:12 -070010686 /* Update watermarks on tiling or size changes. */
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010687 if (new->base.visible != cur->base.visible)
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010688 return true;
10689
10690 if (!cur->base.fb || !new->base.fb)
10691 return false;
10692
Ville Syrjäläbae781b2016-11-16 13:33:16 +020010693 if (cur->base.fb->modifier != new->base.fb->modifier ||
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010694 cur->base.rotation != new->base.rotation ||
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010695 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10696 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10697 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10698 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010699 return true;
10700
10701 return false;
10702}
10703
Ville Syrjäläb2b55502017-08-23 18:22:23 +030010704static bool needs_scaling(const struct intel_plane_state *state)
Matt Roperd21fbe82015-09-24 15:53:12 -070010705{
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010706 int src_w = drm_rect_width(&state->base.src) >> 16;
10707 int src_h = drm_rect_height(&state->base.src) >> 16;
10708 int dst_w = drm_rect_width(&state->base.dst);
10709 int dst_h = drm_rect_height(&state->base.dst);
Matt Roperd21fbe82015-09-24 15:53:12 -070010710
10711 return (src_w != dst_w || src_h != dst_h);
10712}
10713
/*
 * intel_plane_atomic_calc_changes - derive CRTC bookkeeping flags for a plane update
 * @old_crtc_state: CRTC state before this commit
 * @crtc_state: new CRTC state; derived flags are accumulated into it
 * @old_plane_state: plane state before this commit
 * @plane_state: new plane state
 *
 * Based on the old vs. new plane/CRTC state, set the flags in the new crtc
 * state that the commit code consumes later: watermark pre/post updates,
 * cxsr/lp_wm disables, frontbuffer bits, fb_changed and active_planes.
 *
 * Returns 0 on success or a negative error code (e.g. from the skl+
 * scaler setup).
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct drm_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->base.active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* skl+: set up the pipe scaler for every plane except the cursor. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	/* A plane cannot have been visible on a CRTC that was off. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	/* Invisible before and after: nothing to account for. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	/* A full modeset implicitly turns the plane off and back on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
					to_intel_plane_state(plane_state))) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	/* Track which frontbuffer bits this CRTC update touches. */
	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 * plane will be internally buffered and delayed while Big FIFO
	 * mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 * frame before enabling sprite scaling, and kept disabled
	 * until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 * masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 */
	if (plane->id == PLANE_SPRITE0 &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(to_intel_plane_state(plane_state)))))
		pipe_config->disable_lp_wm = true;

	return 0;
}
10845
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010846static bool encoders_cloneable(const struct intel_encoder *a,
10847 const struct intel_encoder *b)
10848{
10849 /* masks could be asymmetric, so check both ways */
10850 return a == b || (a->cloneable & (1 << b->type) &&
10851 b->cloneable & (1 << a->type));
10852}
10853
10854static bool check_single_encoder_cloning(struct drm_atomic_state *state,
10855 struct intel_crtc *crtc,
10856 struct intel_encoder *encoder)
10857{
10858 struct intel_encoder *source_encoder;
10859 struct drm_connector *connector;
10860 struct drm_connector_state *connector_state;
10861 int i;
10862
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010010863 for_each_new_connector_in_state(state, connector, connector_state, i) {
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010864 if (connector_state->crtc != &crtc->base)
10865 continue;
10866
10867 source_encoder =
10868 to_intel_encoder(connector_state->best_encoder);
10869 if (!encoders_cloneable(encoder, source_encoder))
10870 return false;
10871 }
10872
10873 return true;
10874}
10875
Maarten Lankhorst1ab554b2018-10-22 15:51:52 +020010876static int icl_add_linked_planes(struct intel_atomic_state *state)
10877{
10878 struct intel_plane *plane, *linked;
10879 struct intel_plane_state *plane_state, *linked_plane_state;
10880 int i;
10881
10882 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10883 linked = plane_state->linked_plane;
10884
10885 if (!linked)
10886 continue;
10887
10888 linked_plane_state = intel_atomic_get_plane_state(state, linked);
10889 if (IS_ERR(linked_plane_state))
10890 return PTR_ERR(linked_plane_state);
10891
10892 WARN_ON(linked_plane_state->linked_plane != plane);
10893 WARN_ON(linked_plane_state->slave == plane_state->slave);
10894 }
10895
10896 return 0;
10897}
10898
/*
 * Set up master/slave plane links for NV12 scanout on gen11+: every
 * plane in crtc_state->nv12_planes gets a currently-unused Y-capable
 * plane assigned as its slave. Pre-gen11 platforms need no linking.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or
 * the PTR_ERR() from acquiring a plane state.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
			continue;

		plane_state->linked_plane = NULL;
		if (plane_state->slave && !plane_state->base.visible) {
			/* Slave with no visible master: drop it from the
			 * active set but make sure it still gets updated. */
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->slave = false;
	}

	/* Nothing scanning out NV12 on this crtc: no links needed. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a Y-capable plane on this crtc that is not in use. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		/* Wire up the mutual link and activate the slave. */
		plane_state->linked_plane = linked;

		linked_state->slave = true;
		linked_state->linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
	}

	return 0;
}
10970
/*
 * The drm_crtc_helper_funcs.atomic_check hook (see intel_helper_funcs):
 * validates and fills in the derived CRTC state — clocks, color
 * management, watermarks, scalers, NV12 plane links and IPS — for a
 * proposed atomic commit.
 *
 * Returns 0 on success or a negative error code when the configuration
 * cannot be supported.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Crtc being turned off: only post-update watermarks needed. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(pipe_config);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate watermarks make no sense without target ones. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		/* Gen9+ extras: scalers, NV12 links, pixel-rate limits.
		 * Each step only runs if the previous one succeeded. */
		if (mode_changed || pipe_config->update_pipe)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = icl_check_nv12_planes(pipe_config);
		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
11049
/* CRTC helper vtable: only the atomic_check hook is implemented here. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_check = intel_crtc_atomic_check,
};
11053
/*
 * Sync every connector's atomic state (best_encoder/crtc) with the
 * legacy connector->encoder pointers, keeping the connector reference
 * counts consistent: a connector whose state is bound to a crtc holds
 * one reference, so the old reference is dropped before the state is
 * rewritten and a new one taken when a binding is (re)established.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* New binding: take a reference on the connector. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
11078
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011079static int
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011080compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11081 struct intel_crtc_state *pipe_config)
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011082{
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011083 struct drm_connector *connector = conn_state->connector;
11084 const struct drm_display_info *info = &connector->display_info;
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011085 int bpp;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011086
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011087 switch (conn_state->max_bpc) {
11088 case 6 ... 7:
11089 bpp = 6 * 3;
11090 break;
11091 case 8 ... 9:
11092 bpp = 8 * 3;
11093 break;
11094 case 10 ... 11:
11095 bpp = 10 * 3;
11096 break;
11097 case 12:
11098 bpp = 12 * 3;
11099 break;
11100 default:
11101 return -EINVAL;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011102 }
11103
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011104 if (bpp < pipe_config->pipe_bpp) {
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011105 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11106 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11107 connector->base.id, connector->name,
11108 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011109 pipe_config->pipe_bpp);
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011110
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011111 pipe_config->pipe_bpp = bpp;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011112 }
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011113
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011114 return 0;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011115}
11116
11117static int
11118compute_baseline_pipe_bpp(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020011119 struct intel_crtc_state *pipe_config)
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011120{
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010011121 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011122 struct drm_atomic_state *state = pipe_config->base.state;
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030011123 struct drm_connector *connector;
11124 struct drm_connector_state *connector_state;
Ander Conselvan de Oliveira14860172015-03-20 16:18:09 +020011125 int bpp, i;
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011126
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010011127 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11128 IS_CHERRYVIEW(dev_priv)))
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011129 bpp = 10*3;
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010011130 else if (INTEL_GEN(dev_priv) >= 5)
Daniel Vetterd328c9d2015-04-10 16:22:37 +020011131 bpp = 12*3;
11132 else
11133 bpp = 8*3;
11134
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011135 pipe_config->pipe_bpp = bpp;
11136
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011137 /* Clamp display bpp to connector max bpp */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011138 for_each_new_connector_in_state(state, connector, connector_state, i) {
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011139 int ret;
11140
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030011141 if (connector_state->crtc != &crtc->base)
Ander Conselvan de Oliveira14860172015-03-20 16:18:09 +020011142 continue;
11143
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011144 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11145 if (ret)
11146 return ret;
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011147 }
11148
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011149 return 0;
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011150}
11151
/*
 * Dump the hardware ("crtc_*") timing fields of @mode to the KMS debug
 * log: clock, the h/v display/sync/total values, then the mode type
 * and flags bitmasks.
 */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
11162
/*
 * Dump one set of link M/N values (plus lane count and TU) for the
 * link named by @id (e.g. "fdi", "dp m_n") to the KMS debug log.
 */
static inline void
intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
		      unsigned int lane_count, struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
11172
/* Expands to a designated initializer mapping INTEL_OUTPUT_x to "x". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names for INTEL_OUTPUT_* values, used by debug dumps. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
11191
11192static void snprintf_output_types(char *buf, size_t len,
11193 unsigned int output_types)
11194{
11195 char *str = buf;
11196 int i;
11197
11198 str[0] = '\0';
11199
11200 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11201 int r;
11202
11203 if ((output_types & BIT(i)) == 0)
11204 continue;
11205
11206 r = snprintf(str, len, "%s%s",
11207 str != buf ? "," : "", output_type_str[i]);
11208 if (r >= len)
11209 break;
11210 str += r;
11211 len -= r;
11212
11213 output_types &= ~BIT(i);
11214 }
11215
11216 WARN_ON_ONCE(output_types != 0);
11217}
11218
/* Human-readable names for enum intel_output_format, used by debug dumps. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
11225
11226static const char *output_formats(enum intel_output_format format)
11227{
Shashank Sharma33b7f3e2018-10-12 11:53:08 +053011228 if (format >= ARRAY_SIZE(output_format_str))
Shashank Sharmad9facae2018-10-12 11:53:07 +053011229 format = INTEL_OUTPUT_FORMAT_INVALID;
11230 return output_format_str[format];
11231}
11232
/*
 * Dump the full contents of @pipe_config to the KMS debug log: output
 * types/format, transcoder, bpp, link M/N values, modes and timings,
 * scaler/pfit state, DPLL state and per-plane framebuffer/scaler info.
 * @context is appended to the leading [CRTC:...] line to identify the
 * caller.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("output format: %s\n",
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the second link rate used for seamless DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have a pipe-internal panel fitter, others use
	 * the PCH panel fitter. */
	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		/* src coordinates are 16.16 fixed point, hence the shifts. */
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
11339
/*
 * Verify that the atomic @state does not route two digital outputs to
 * the same port, and does not mix MST with SST/HDMI on one port.
 * Returns true when the configuration is valid.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state from @state; fall back to the
		 * connector's current state if it isn't in this commit. */
		connector_state = drm_atomic_get_new_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
11403
/*
 * Zero the i915-specific part of @crtc_state (everything after the
 * embedded drm_crtc_state base), while preserving a whitelist of
 * fields that must survive recomputation: scaler state, shared DPLL
 * and its hw state, pfit force_thru, ips_force_disable and — on
 * G4X/VLV/CHV only — the watermark state.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru, ips_force_disable;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	/* Save the fields that must survive the memset below. */
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	ips_force_disable = crtc_state->ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	/* Restore the preserved fields. */
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	crtc_state->ips_force_disable = ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}
11443
Ander Conselvan de Oliveira548ee152015-04-21 17:13:02 +030011444static int
Daniel Vetterb8cecdf2013-03-27 00:44:50 +010011445intel_modeset_pipe_config(struct drm_crtc *crtc,
Maarten Lankhorstb3592832015-06-15 12:33:38 +020011446 struct intel_crtc_state *pipe_config)
Daniel Vetter7758a112012-07-08 19:40:39 +020011447{
Maarten Lankhorstb3592832015-06-15 12:33:38 +020011448 struct drm_atomic_state *state = pipe_config->base.state;
Daniel Vetter7758a112012-07-08 19:40:39 +020011449 struct intel_encoder *encoder;
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030011450 struct drm_connector *connector;
Ander Conselvan de Oliveira0b901872015-03-20 16:18:08 +020011451 struct drm_connector_state *connector_state;
Ville Syrjäläd26592c2018-11-07 23:35:21 +020011452 int base_bpp, ret;
Ander Conselvan de Oliveira0b901872015-03-20 16:18:08 +020011453 int i;
Daniel Vettere29c22c2013-02-21 00:00:16 +010011454 bool retry = true;
Daniel Vetter7758a112012-07-08 19:40:39 +020011455
Ander Conselvan de Oliveira83a57152015-03-20 16:18:03 +020011456 clear_intel_crtc_state(pipe_config);
Daniel Vetter7758a112012-07-08 19:40:39 +020011457
Daniel Vettere143a212013-07-04 12:01:15 +020011458 pipe_config->cpu_transcoder =
11459 (enum transcoder) to_intel_crtc(crtc)->pipe;
Daniel Vetterb8cecdf2013-03-27 00:44:50 +010011460
Imre Deak2960bc92013-07-30 13:36:32 +030011461 /*
11462 * Sanitize sync polarity flags based on requested ones. If neither
11463 * positive or negative polarity is requested, treat this as meaning
11464 * negative polarity.
11465 */
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011466 if (!(pipe_config->base.adjusted_mode.flags &
Imre Deak2960bc92013-07-30 13:36:32 +030011467 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011468 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
Imre Deak2960bc92013-07-30 13:36:32 +030011469
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011470 if (!(pipe_config->base.adjusted_mode.flags &
Imre Deak2960bc92013-07-30 13:36:32 +030011471 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011472 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
Imre Deak2960bc92013-07-30 13:36:32 +030011473
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011474 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
11475 pipe_config);
11476 if (ret)
11477 return ret;
11478
11479 base_bpp = pipe_config->pipe_bpp;
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011480
Ville Syrjäläe41a56b2013-10-01 22:52:14 +030011481 /*
11482 * Determine the real pipe dimensions. Note that stereo modes can
11483 * increase the actual pipe size due to the frame doubling and
11484 * insertion of additional space for blanks between the frame. This
11485 * is stored in the crtc timings. We use the requested mode to do this
11486 * computation to clearly distinguish it from the adjusted mode, which
11487 * can be changed by the connectors in the below retry loop.
11488 */
Daniel Vetter196cd5d2017-01-25 07:26:56 +010011489 drm_mode_get_hv_timing(&pipe_config->base.mode,
Gustavo Padovanecb7e162014-12-01 15:40:09 -080011490 &pipe_config->pipe_src_w,
11491 &pipe_config->pipe_src_h);
Ville Syrjäläe41a56b2013-10-01 22:52:14 +030011492
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011493 for_each_new_connector_in_state(state, connector, connector_state, i) {
Ville Syrjälä253c84c2016-06-22 21:57:01 +030011494 if (connector_state->crtc != crtc)
11495 continue;
11496
11497 encoder = to_intel_encoder(connector_state->best_encoder);
11498
Ville Syrjäläe25148d2016-06-22 21:57:09 +030011499 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
11500 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
Ville Syrjäläd26592c2018-11-07 23:35:21 +020011501 return -EINVAL;
Ville Syrjäläe25148d2016-06-22 21:57:09 +030011502 }
11503
Ville Syrjälä253c84c2016-06-22 21:57:01 +030011504 /*
11505 * Determine output_types before calling the .compute_config()
11506 * hooks so that the hooks can use this information safely.
11507 */
Ville Syrjälä7e732ca2017-10-27 22:31:24 +030011508 if (encoder->compute_output_type)
11509 pipe_config->output_types |=
11510 BIT(encoder->compute_output_type(encoder, pipe_config,
11511 connector_state));
11512 else
11513 pipe_config->output_types |= BIT(encoder->type);
Ville Syrjälä253c84c2016-06-22 21:57:01 +030011514 }
11515
Daniel Vettere29c22c2013-02-21 00:00:16 +010011516encoder_retry:
Daniel Vetteref1b4602013-06-01 17:17:04 +020011517 /* Ensure the port clock defaults are reset when retrying. */
Daniel Vetterff9a6752013-06-01 17:16:21 +020011518 pipe_config->port_clock = 0;
Daniel Vetteref1b4602013-06-01 17:17:04 +020011519 pipe_config->pixel_multiplier = 1;
Daniel Vetterff9a6752013-06-01 17:16:21 +020011520
Daniel Vetter135c81b2013-07-21 21:37:09 +020011521 /* Fill in default crtc timings, allow encoders to overwrite them. */
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011522 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
11523 CRTC_STEREO_DOUBLE);
Daniel Vetter135c81b2013-07-21 21:37:09 +020011524
Daniel Vetter7758a112012-07-08 19:40:39 +020011525 /* Pass our mode to the connectors and the CRTC to give them a chance to
11526 * adjust it according to limitations or connector properties, and also
11527 * a chance to reject the mode entirely.
11528 */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011529 for_each_new_connector_in_state(state, connector, connector_state, i) {
Ander Conselvan de Oliveira0b901872015-03-20 16:18:08 +020011530 if (connector_state->crtc != crtc)
11531 continue;
11532
11533 encoder = to_intel_encoder(connector_state->best_encoder);
Lyude Paul96550552019-01-15 15:08:00 -050011534 ret = encoder->compute_config(encoder, pipe_config,
11535 connector_state);
11536 if (ret < 0) {
11537 if (ret != -EDEADLK)
11538 DRM_DEBUG_KMS("Encoder config failure: %d\n",
11539 ret);
11540 return ret;
Daniel Vetter7758a112012-07-08 19:40:39 +020011541 }
11542 }
11543
Daniel Vetterff9a6752013-06-01 17:16:21 +020011544 /* Set default port clock if not overwritten by the encoder. Needs to be
11545 * done afterwards in case the encoder adjusts the mode. */
11546 if (!pipe_config->port_clock)
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011547 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
Damien Lespiau241bfc32013-09-25 16:45:37 +010011548 * pipe_config->pixel_multiplier;
Daniel Vetterff9a6752013-06-01 17:16:21 +020011549
Daniel Vettera43f6e02013-06-07 23:10:32 +020011550 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
Ville Syrjälä8e2b4df2018-11-07 23:35:20 +020011551 if (ret == -EDEADLK)
Ville Syrjäläd26592c2018-11-07 23:35:21 +020011552 return ret;
Daniel Vettere29c22c2013-02-21 00:00:16 +010011553 if (ret < 0) {
Daniel Vetter7758a112012-07-08 19:40:39 +020011554 DRM_DEBUG_KMS("CRTC fixup failed\n");
Ville Syrjäläd26592c2018-11-07 23:35:21 +020011555 return ret;
Daniel Vetter7758a112012-07-08 19:40:39 +020011556 }
Daniel Vettere29c22c2013-02-21 00:00:16 +010011557
11558 if (ret == RETRY) {
Ville Syrjäläd26592c2018-11-07 23:35:21 +020011559 if (WARN(!retry, "loop in pipe configuration computation\n"))
11560 return -EINVAL;
Daniel Vettere29c22c2013-02-21 00:00:16 +010011561
11562 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
11563 retry = false;
11564 goto encoder_retry;
11565 }
11566
Daniel Vettere8fa4272015-08-12 11:43:34 +020011567 /* Dithering seems to not pass-through bits correctly when it should, so
Manasi Navare611032b2017-01-24 08:21:49 -080011568 * only enable it on 6bpc panels and when its not a compliance
11569 * test requesting 6bpc video pattern.
11570 */
11571 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
11572 !pipe_config->dither_force_disable;
Daniel Vetter62f0ace2015-08-26 18:57:26 +020011573 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
Daniel Vetterd328c9d2015-04-10 16:22:37 +020011574 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011575
Ville Syrjäläd26592c2018-11-07 23:35:21 +020011576 return 0;
Daniel Vetter7758a112012-07-08 19:40:39 +020011577}
11578
/*
 * Report whether two clock values (in kHz) agree within tolerance.
 *
 * Identical values always match, a zero on either side never matches
 * (unless both are zero, caught by the equality test), and otherwise
 * the relative difference must satisfy
 * (|c1 - c2| + c1 + c2) * 100 / (c1 + c2) < 105, i.e. the delta is
 * below roughly 5% of the sum of the two clocks.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	return (delta + sum) * 100 / sum < 105;
}
11596
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011597static bool
11598intel_compare_m_n(unsigned int m, unsigned int n,
11599 unsigned int m2, unsigned int n2,
11600 bool exact)
11601{
11602 if (m == m2 && n == n2)
11603 return true;
11604
11605 if (exact || !m || !n || !m2 || !n2)
11606 return false;
11607
11608 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11609
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011610 if (n > n2) {
11611 while (n > n2) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011612 m2 <<= 1;
11613 n2 <<= 1;
11614 }
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011615 } else if (n < n2) {
11616 while (n < n2) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011617 m <<= 1;
11618 n <<= 1;
11619 }
11620 }
11621
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011622 if (n != n2)
11623 return false;
11624
11625 return intel_fuzzy_clock_check(m, m2);
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011626}
11627
11628static bool
11629intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11630 struct intel_link_m_n *m2_n2,
11631 bool adjust)
11632{
11633 if (m_n->tu == m2_n2->tu &&
11634 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11635 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11636 intel_compare_m_n(m_n->link_m, m_n->link_n,
11637 m2_n2->link_m, m2_n2->link_n, !adjust)) {
11638 if (adjust)
11639 *m2_n2 = *m_n;
11640
11641 return true;
11642 }
11643
11644 return false;
11645}
11646
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011647static void __printf(3, 4)
11648pipe_config_err(bool adjust, const char *name, const char *format, ...)
11649{
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011650 struct va_format vaf;
11651 va_list args;
11652
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011653 va_start(args, format);
11654 vaf.fmt = format;
11655 vaf.va = &args;
11656
Joe Perches99a95482018-03-13 15:02:15 -070011657 if (adjust)
11658 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
11659 else
11660 drm_err("mismatch in %s %pV", name, &vaf);
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011661
11662 va_end(args);
11663}
11664
/*
 * intel_pipe_config_compare - compare two pipe (CRTC) configurations
 * @dev_priv: i915 device
 * @current_config: the expected (sw-tracked) pipe config
 * @pipe_config: the pipe config to compare against it
 * @adjust: fastset mode; mismatches are debug-logged instead of being
 *	errors (see pipe_config_err()), and the link M/N checks may
 *	rewrite @pipe_config's values to match @current_config
 *	(see intel_compare_link_m_n())
 *
 * Returns true when every checked field matches, false otherwise.
 * Checking continues past the first mismatch so that all differences
 * are reported in a single pass.
 */
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;
	/*
	 * True when adjusting a state inherited from the BIOS
	 * (I915_MODE_FLAG_INHERITED set on the old mode but not the new
	 * one); PIPE_CONF_CHECK_BOOL_INCOMPLETE then forces a modeset
	 * for fields whose state could not be fully read out.
	 */
	bool fixup_inherited = adjust &&
		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);

/* Compare a field, reporting mismatches in hex (registers/bitmasks). */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a field, reporting mismatches as decimal integers. */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Compare a boolean field, reporting mismatches as yes/no. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %s, found %s)\n", \
			  yesno(current_config->name), \
			  yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_err(adjust, __stringify(name), \
			  "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
			  yesno(current_config->name), \
			  yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* Compare a pointer field (e.g. the shared DPLL) by identity. */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %p, found %p)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/*
 * Compare a struct intel_link_m_n field; fuzzy in adjust mode, and may
 * then rewrite pipe_config's copy (see intel_compare_link_m_n()).
 */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "or tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  current_config->alt_name.tu, \
			  current_config->alt_name.gmch_m, \
			  current_config->alt_name.gmch_n, \
			  current_config->alt_name.link_m, \
			  current_config->alt_name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* Compare a field only under the given flag mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(%x) (expected %i, found %i)\n", \
			  (mask), \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Compare a clock field with intel_fuzzy_clock_check() tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* True if either state carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* Horizontal timings of the adjusted mode. */
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	/* Vertical timings of the adjusted mode. */
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/* These checks are skipped entirely in fastset/adjust mode. */
	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	/* Shared DPLL selection and its raw hw state words. */
	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return ret;
}
11948
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020011949static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11950 const struct intel_crtc_state *pipe_config)
11951{
11952 if (pipe_config->has_pch_encoder) {
Ville Syrjälä21a727b2016-02-17 21:41:10 +020011953 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020011954 &pipe_config->fdi_m_n);
11955 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11956
11957 /*
11958 * FDI already provided one idea for the dotclock.
11959 * Yell if the encoder disagrees.
11960 */
11961 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11962 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11963 fdi_dotclock, dotclock);
11964 }
11965}
11966
/*
 * verify_wm_state - cross-check SKL+ watermark/DDB hw state against sw state
 * @crtc: the CRTC being verified
 * @new_state: the just-committed crtc state whose sw watermarks are compared
 *
 * Reads the pipe's watermark levels, transition watermarks and DDB
 * allocations back from the hardware and compares them, plane by plane
 * (cursor included), against the values tracked in software. Every
 * mismatch is reported via DRM_ERROR; nothing is corrected here.
 * Does nothing on pre-gen9 hardware or when the crtc is inactive.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
	struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	/* Watermark readout/verification only applies to gen9+ active pipes. */
	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	/* Snapshot hw watermark and DDB state for this pipe. */
	skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* Gen11+: the number of enabled DBUF slices must also agree. */
	if (INTEL_GEN(dev_priv) >= 11)
		if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
			DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
				  sw_ddb->enabled_slices,
				  hw_ddb.enabled_slices);
	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark. */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb_y[plane];
		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated it's ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	/*
	 * NOTE(review): the condition below was flattened to "if (1)", so
	 * the skip described above never happens anymore — the cursor is
	 * always verified. The comment above appears stale; confirm and
	 * either restore a real condition or drop the comment.
	 */
	if (1) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark. */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}
12092
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012093static void
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012094verify_connector_state(struct drm_device *dev,
12095 struct drm_atomic_state *state,
12096 struct drm_crtc *crtc)
Daniel Vetter8af6cf82012-07-10 09:50:11 +020012097{
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +020012098 struct drm_connector *connector;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012099 struct drm_connector_state *new_conn_state;
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012100 int i;
Daniel Vetter8af6cf82012-07-10 09:50:11 +020012101
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012102 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +020012103 struct drm_encoder *encoder = connector->encoder;
Maarten Lankhorst749d98b2017-05-11 10:28:43 +020012104 struct drm_crtc_state *crtc_state = NULL;
Maarten Lankhorstad3c5582015-07-13 16:30:26 +020012105
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012106 if (new_conn_state->crtc != crtc)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012107 continue;
12108
Maarten Lankhorst749d98b2017-05-11 10:28:43 +020012109 if (crtc)
12110 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12111
12112 intel_connector_verify_state(crtc_state, new_conn_state);
Daniel Vetter8af6cf82012-07-10 09:50:11 +020012113
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012114 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +020012115 "connector's atomic encoder doesn't match legacy encoder\n");
Daniel Vetter8af6cf82012-07-10 09:50:11 +020012116 }
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012117}
12118
/*
 * Cross-check every encoder's software state against the connector states
 * in @state and, for encoders that ended up detached, against the hardware:
 * a detached encoder must not report itself enabled via get_hw_state().
 */
static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(dev, encoder) {
		/*
		 * found: some connector referenced this encoder in either its
		 *        old or new state (i.e. the encoder is affected).
		 * enabled: a connector uses this encoder in its new state.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			/* Connector and its encoder must agree on the crtc. */
			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
				"connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this state are not checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
			"encoder's enabled state mismatch "
			"(expected %i, found %i)\n",
			!!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* Detached in software: hardware must be off too. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
				"encoder detached but still enabled on pipe %c.\n",
				pipe_name(pipe));
		}
	}
}
12167
/*
 * Read the pipe configuration back from the hardware and compare it against
 * the software state computed during the atomic check phase, shouting via
 * I915_STATE_WARN on any mismatch.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/*
	 * Reuse old_crtc_state's storage as scratch space for the hw
	 * readout: free its internal references first, then wipe it and
	 * re-seed only the crtc and state back-pointers.
	 */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	/* Every encoder on the crtc must agree with the crtc's state. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Fold the encoder's hw config into the readout state. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Full sw-vs-hw comparison only makes sense for an active crtc. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
12237
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012238static void
Ville Syrjäläcff109f2017-11-17 21:19:17 +020012239intel_verify_planes(struct intel_atomic_state *state)
12240{
12241 struct intel_plane *plane;
12242 const struct intel_plane_state *plane_state;
12243 int i;
12244
12245 for_each_new_intel_plane_in_state(state, plane,
12246 plane_state, i)
12247 assert_plane(plane, plane_state->base.visible);
12248}
12249
/*
 * Compare one shared DPLL's software tracking (on, active_mask,
 * state.crtc_mask, cached hw state) with the hardware state read via
 * get_hw_state(). With a NULL @crtc only the global bookkeeping is
 * checked; otherwise @crtc's membership in the pll's masks is verified
 * against @new_state->active as well.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls have no meaningful on/off tracking to verify. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		/* No crtc given: only check the masks against each other. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(crtc);

	/* An active crtc must be in active_mask, an inactive one must not. */
	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	/* Active or not, the crtc must hold a reference on the pll. */
	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* Only compare the cached hw state while the pll is actually on. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
12304
12305static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012306verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12307 struct drm_crtc_state *old_crtc_state,
12308 struct drm_crtc_state *new_crtc_state)
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012309{
Chris Wilsonfac5e232016-07-04 11:34:36 +010012310 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012311 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12312 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12313
12314 if (new_state->shared_dpll)
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012315 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012316
12317 if (old_state->shared_dpll &&
12318 old_state->shared_dpll != new_state->shared_dpll) {
Ville Syrjälä40560e22018-06-26 22:47:11 +030012319 unsigned int crtc_mask = drm_crtc_mask(crtc);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012320 struct intel_shared_dpll *pll = old_state->shared_dpll;
12321
12322 I915_STATE_WARN(pll->active_mask & crtc_mask,
12323 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12324 pipe_name(drm_crtc_index(crtc)));
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020012325 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012326 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12327 pipe_name(drm_crtc_index(crtc)));
12328 }
12329}
12330
12331static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012332intel_modeset_verify_crtc(struct drm_crtc *crtc,
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012333 struct drm_atomic_state *state,
12334 struct drm_crtc_state *old_state,
12335 struct drm_crtc_state *new_state)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012336{
Daniel Vetter5a21b662016-05-24 17:13:53 +020012337 if (!needs_modeset(new_state) &&
12338 !to_intel_crtc_state(new_state)->update_pipe)
12339 return;
12340
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012341 verify_wm_state(crtc, new_state);
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012342 verify_connector_state(crtc->dev, state, crtc);
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012343 verify_crtc_state(crtc, old_state, new_state);
12344 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012345}
12346
12347static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012348verify_disabled_dpll_state(struct drm_device *dev)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012349{
Chris Wilsonfac5e232016-07-04 11:34:36 +010012350 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012351 int i;
Daniel Vetter53589012013-06-05 13:34:16 +020012352
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012353 for (i = 0; i < dev_priv->num_shared_dpll; i++)
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012354 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012355}
Daniel Vetter53589012013-06-05 13:34:16 +020012356
/*
 * Verify the parts of the state that are not tied to a specific enabled
 * crtc: all encoders, the connectors left without a crtc, and all DPLLs'
 * global bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}
12365
/*
 * Compute the per-platform fudge factor between the hardware scanline
 * counter and the actual scanline, and store it in crtc->scanline_offset.
 */
static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		/* Interlaced modes count in field lines, i.e. half vtotal. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}
12413
Maarten Lankhorstad421372015-06-15 12:33:42 +020012414static void intel_modeset_clear_plls(struct drm_atomic_state *state)
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012415{
Ander Conselvan de Oliveira225da592015-04-02 14:47:57 +030012416 struct drm_device *dev = state->dev;
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012417 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira0a9ab302015-04-21 17:13:04 +030012418 struct drm_crtc *crtc;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012419 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Ander Conselvan de Oliveira0a9ab302015-04-21 17:13:04 +030012420 int i;
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012421
12422 if (!dev_priv->display.crtc_compute_clock)
Maarten Lankhorstad421372015-06-15 12:33:42 +020012423 return;
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012424
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012425 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
Maarten Lankhorstfb1a38a2016-02-09 13:02:17 +010012426 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020012427 struct intel_shared_dpll *old_dpll =
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012428 to_intel_crtc_state(old_crtc_state)->shared_dpll;
Maarten Lankhorstad421372015-06-15 12:33:42 +020012429
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012430 if (!needs_modeset(new_crtc_state))
Ander Conselvan de Oliveira225da592015-04-02 14:47:57 +030012431 continue;
12432
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012433 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
Maarten Lankhorstfb1a38a2016-02-09 13:02:17 +010012434
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020012435 if (!old_dpll)
Maarten Lankhorstfb1a38a2016-02-09 13:02:17 +010012436 continue;
Ander Conselvan de Oliveira0a9ab302015-04-21 17:13:04 +030012437
Ander Conselvan de Oliveiraa1c414e2016-12-29 17:22:07 +020012438 intel_release_shared_dpll(old_dpll, intel_crtc, state);
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012439 }
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012440}
12441
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	/* First (and second, if any) crtc being enabled by this modeset. */
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* Two enabling crtcs found; no need to look further. */
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* Pulls the crtc into the state (and takes its lock). */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		/* Only count crtcs that stay enabled through this commit. */
		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/*
	 * Exactly one pipe already running: the first newly-enabled pipe
	 * waits on it. Otherwise (none running, two being enabled): the
	 * second newly-enabled pipe waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
12506
Ville Syrjälä8d965612016-11-14 18:35:10 +020012507static int intel_lock_all_pipes(struct drm_atomic_state *state)
12508{
12509 struct drm_crtc *crtc;
12510
12511 /* Add all pipes to the state */
12512 for_each_crtc(state->dev, crtc) {
12513 struct drm_crtc_state *crtc_state;
12514
12515 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12516 if (IS_ERR(crtc_state))
12517 return PTR_ERR(crtc_state);
12518 }
12519
12520 return 0;
12521}
12522
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012523static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12524{
12525 struct drm_crtc *crtc;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012526
Ville Syrjälä8d965612016-11-14 18:35:10 +020012527 /*
12528 * Add all pipes to the state, and force
12529 * a modeset on all the active ones.
12530 */
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012531 for_each_crtc(state->dev, crtc) {
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012532 struct drm_crtc_state *crtc_state;
12533 int ret;
12534
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012535 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12536 if (IS_ERR(crtc_state))
12537 return PTR_ERR(crtc_state);
12538
12539 if (!crtc_state->active || needs_modeset(crtc_state))
12540 continue;
12541
12542 crtc_state->mode_changed = true;
12543
12544 ret = drm_atomic_add_affected_connectors(state, crtc);
12545 if (ret)
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012546 return ret;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012547
12548 ret = drm_atomic_add_affected_planes(state, crtc);
12549 if (ret)
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012550 return ret;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012551 }
12552
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012553 return 0;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012554}
12555
/*
 * Extra check phase work needed only when at least one crtc does a full
 * modeset: reject conflicting digital ports, recompute the active-crtc
 * bitmask and the cdclk state, clear plls on modeset crtcs, and apply the
 * Haswell plane-enable workaround.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Start from the current device state and fold in this commit. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
					      &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	} else {
		/* No cdclk hook: just carry the current logical state over. */
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
12633
Matt Roperaa363132015-09-24 15:53:18 -070012634/*
12635 * Handle calculation of various watermark data at the end of the atomic check
12636 * phase. The code here should be run after the per-crtc and per-plane 'check'
12637 * handlers to ensure that all derived state has been updated.
12638 */
Matt Ropercd1d3ee2018-12-10 13:54:14 -080012639static int calc_watermark_data(struct intel_atomic_state *state)
Matt Roperaa363132015-09-24 15:53:18 -070012640{
Matt Ropercd1d3ee2018-12-10 13:54:14 -080012641 struct drm_device *dev = state->base.dev;
Matt Roper98d39492016-05-12 07:06:03 -070012642 struct drm_i915_private *dev_priv = to_i915(dev);
Matt Roper98d39492016-05-12 07:06:03 -070012643
12644 /* Is there platform-specific watermark information to calculate? */
12645 if (dev_priv->display.compute_global_watermarks)
Matt Roper55994c22016-05-12 07:06:08 -070012646 return dev_priv->display.compute_global_watermarks(state);
12647
12648 return 0;
Matt Roperaa363132015-09-24 15:53:18 -070012649}
12650
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * i915's atomic ->atomic_check() hook: computes derived pipe state, decides
 * between full modesets and fastsets, and validates plane/watermark/cdclk
 * state.  Returns 0 on success or a negative error code (-EDEADLK must be
 * propagated untouched so the caller can back off and retry).
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *crtc_state;
	int ret, i;
	bool any_ms = false;

	/* Catch I915_MODE_FLAG_INHERITED */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      crtc_state, i) {
		/*
		 * A private_flags delta (e.g. the INHERITED flag set at boot)
		 * must force a full modeset even though the user mode looks
		 * unchanged.
		 */
		if (crtc_state->mode.private_flags !=
		    old_crtc_state->mode.private_flags)
			crtc_state->mode_changed = true;
	}

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		if (!needs_modeset(crtc_state))
			continue;

		/* A disabled pipe needs no config computation, only a modeset. */
		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		/* Compute the full hardware pipe config for the new mode. */
		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret == -EDEADLK)
			return ret;
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		/*
		 * Fastboot: if the newly computed config matches the current
		 * hardware state, downgrade the full modeset to a fastset.
		 */
		if (i915_modparams.fastboot &&
		    intel_pipe_config_compare(dev_priv,
					to_intel_crtc_state(old_crtc_state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			pipe_config->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		return ret;

	if (any_ms) {
		/* Full modeset: recompute cdclk, PLLs, etc. */
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else {
		/* No modeset anywhere: carry over the current logical cdclk. */
		intel_state->cdclk.logical = dev_priv->cdclk.logical;
	}

	ret = icl_add_linked_planes(intel_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	/* FBC crtc selection and watermarks depend on final plane state. */
	intel_fbc_choose_crtc(dev_priv, intel_state);
	return calc_watermark_data(intel_state);
}
12739
/*
 * Prepare (pin) all plane framebuffers ahead of the commit; currently a
 * straight pass-through to the DRM helper.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
12745
Maarten Lankhorsta2991412016-05-17 15:07:48 +020012746u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12747{
12748 struct drm_device *dev = crtc->base.dev;
12749
12750 if (!dev->max_vblank_count)
Dhinakaran Pandiyan734cbbf2018-02-02 21:12:54 -080012751 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
Maarten Lankhorsta2991412016-05-17 15:07:48 +020012752
12753 return dev->driver->get_vblank_counter(dev, crtc->pipe);
12754}
12755
/*
 * Commit the new state for a single CRTC: either a full modeset enable or a
 * fastset/plane-only update, followed by the plane programming for the pipe.
 * Call order (pre-plane update, begin/update/finish) is hardware-mandated.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);
	/* New state of the primary plane, if it is part of this commit. */
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
						 to_intel_plane(crtc->primary));

	if (modeset) {
		update_scanline_offset(pipe_config);
		dev_priv->display.crtc_enable(pipe_config, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(intel_crtc);
	} else {
		/* Fastset path: no full enable, just pre-plane adjustments. */
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);

		if (pipe_config->update_pipe)
			intel_encoders_update_pipe(crtc, pipe_config, state);
	}

	/* Keep FBC in sync with whether the new config allows it. */
	if (pipe_config->update_pipe && !pipe_config->enable_fbc)
		intel_fbc_disable(intel_crtc);
	else if (new_plane_state)
		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

	intel_begin_crtc_commit(crtc, old_crtc_state);

	/* Gen9+ has its own plane programming path (skl_*). */
	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
	else
		i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);

	intel_finish_crtc_commit(crtc, old_crtc_state);
}
12798
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012799static void intel_update_crtcs(struct drm_atomic_state *state)
Lyude896e5bb2016-08-24 07:48:09 +020012800{
12801 struct drm_crtc *crtc;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012802 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Lyude896e5bb2016-08-24 07:48:09 +020012803 int i;
12804
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012805 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12806 if (!new_crtc_state->active)
Lyude896e5bb2016-08-24 07:48:09 +020012807 continue;
12808
12809 intel_update_crtc(crtc, state, old_crtc_state,
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012810 new_crtc_state);
Lyude896e5bb2016-08-24 07:48:09 +020012811 }
12812}
12813
/*
 * Gen9+ CRTC update path.  DDB (display buffer) allocations of different
 * pipes must never overlap while the hardware is scanning out, so CRTCs are
 * updated in an order where each pipe's new allocation doesn't clash with
 * any allocation still in use, iterating until every active pipe is done.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;	/* mask of crtcs already committed */
	bool progress;
	enum pipe pipe;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
	/* entries[i] tracks the DDB range each pipe currently occupies. */
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(new_crtc_state);
			pipe = intel_crtc->pipe;

			/* Skip pipes already done or not active. */
			if (updated & cmask || !cstate->base.active)
				continue;

			/*
			 * Defer this pipe if its new allocation would overlap
			 * a range another pipe is still occupying.
			 */
			if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
							entries,
							INTEL_INFO(dev_priv)->num_pipes, i))
				continue;

			updated |= cmask;
			entries[i] = cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
12893
Chris Wilsonba318c62017-02-02 20:47:41 +000012894static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12895{
12896 struct intel_atomic_state *state, *next;
12897 struct llist_node *freed;
12898
12899 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12900 llist_for_each_entry_safe(state, next, freed, freed)
12901 drm_atomic_state_put(&state->base);
12902}
12903
12904static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12905{
12906 struct drm_i915_private *dev_priv =
12907 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12908
12909 intel_atomic_helper_free_state(dev_priv);
12910}
12911
/*
 * Block until the commit_ready fence signals, while also waking up on GPU
 * reset (I915_RESET_MODESET) so a reset doesn't deadlock against a commit
 * waiting on GPU work.  Uses the open-coded prepare_to_wait/finish_wait
 * pattern because we wait on two queues at once.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue ourselves on both wait queues before checking. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
				&wait_reset, TASK_UNINTERRUPTIBLE);

		/* Done if the fence completed or a reset was flagged. */
		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
12935
Chris Wilson8d52e442018-06-23 11:39:51 +010012936static void intel_atomic_cleanup_work(struct work_struct *work)
12937{
12938 struct drm_atomic_state *state =
12939 container_of(work, struct drm_atomic_state, commit_work);
12940 struct drm_i915_private *i915 = to_i915(state->dev);
12941
12942 drm_atomic_helper_cleanup_planes(&i915->drm, state);
12943 drm_atomic_helper_commit_cleanup_done(state);
12944 drm_atomic_state_put(state);
12945
12946 intel_atomic_helper_free_state(i915);
12947}
12948
/*
 * The hardware phase of an atomic commit.  Runs either inline (blocking
 * commits) or from a worker (nonblocking).  Sequence: wait for fences and
 * dependencies, disable outgoing pipes, reprogram global state (cdclk,
 * SAGV), enable/update pipes, wait for flips, then do the two-step
 * watermark optimization and hand cleanup off to a worker.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	int i;

	/* Wait for GPU work (or reset) before touching the hardware. */
	intel_atomic_commit_fence_wait(intel_state);

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset)
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* Phase 1: grab power domains and disable pipes doing a modeset. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
		intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {

			/* Released again in the post-plane loop below. */
			put_domains[intel_crtc->pipe] =
				modeset_get_crtc_power_domains(crtc,
					new_intel_crtc_state);
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(intel_state, intel_crtc);

			/*
			 * We need to disable pipe CRC before disabling the pipe,
			 * or we race against vblank off.
			 */
			intel_crtc_disable_pipe_crc(intel_crtc);

			dev_priv->display.crtc_disable(old_intel_crtc_state, state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(old_intel_crtc_state);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			/* FIXME unify this for all platforms */
			if (!new_crtc_state->active &&
			    !HAS_GMCH_DISPLAY(dev_priv) &&
			    dev_priv->display.initial_watermarks)
				dev_priv->display.initial_watermarks(intel_state,
								     new_intel_crtc_state);
		}
	}

	/* FIXME: Eventually get rid of our intel_crtc->config pointer */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

	/* Phase 2: global state that must change while pipes are down. */
	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more then one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      new_intel_crtc_state);
	}

	/* Post-plane updates and release of the per-pipe power domains. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (intel_state->modeset)
		intel_verify_planes(intel_state);

	/* Re-enable SAGV only once the new pipe configuration allows it. */
	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
	}

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->commit_work);
}
13117
13118static void intel_atomic_commit_work(struct work_struct *work)
13119{
Chris Wilsonc004a902016-10-28 13:58:45 +010013120 struct drm_atomic_state *state =
13121 container_of(work, struct drm_atomic_state, commit_work);
13122
Daniel Vetter94f05022016-06-14 18:01:00 +020013123 intel_atomic_commit_tail(state);
13124}
13125
Chris Wilsonc004a902016-10-28 13:58:45 +010013126static int __i915_sw_fence_call
13127intel_atomic_commit_ready(struct i915_sw_fence *fence,
13128 enum i915_sw_fence_notify notify)
13129{
13130 struct intel_atomic_state *state =
13131 container_of(fence, struct intel_atomic_state, commit_ready);
13132
13133 switch (notify) {
13134 case FENCE_COMPLETE:
Daniel Vetter42b062b2017-08-08 10:08:27 +020013135 /* we do blocking waits in the worker, nothing to do here */
Chris Wilsonc004a902016-10-28 13:58:45 +010013136 break;
Chris Wilsonc004a902016-10-28 13:58:45 +010013137 case FENCE_FREE:
Chris Wilsoneb955ee2017-01-23 21:29:39 +000013138 {
13139 struct intel_atomic_helper *helper =
13140 &to_i915(state->base.dev)->atomic_helper;
13141
13142 if (llist_add(&state->freed, &helper->free_list))
13143 schedule_work(&helper->free_work);
13144 break;
13145 }
Chris Wilsonc004a902016-10-28 13:58:45 +010013146 }
13147
13148 return NOTIFY_DONE;
13149}
13150
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020013151static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13152{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010013153 struct drm_plane_state *old_plane_state, *new_plane_state;
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020013154 struct drm_plane *plane;
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020013155 int i;
13156
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010013157 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
Chris Wilsonfaf5bf02016-08-04 16:32:37 +010013158 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010013159 intel_fb_obj(new_plane_state->fb),
Chris Wilsonfaf5bf02016-08-04 16:32:37 +010013160 to_intel_plane(plane)->frontbuffer_bit);
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020013161}
13162
Daniel Vetter94f05022016-06-14 18:01:00 +020013163/**
13164 * intel_atomic_commit - commit validated state object
13165 * @dev: DRM device
13166 * @state: the top-level driver state object
13167 * @nonblock: nonblocking commit
13168 *
13169 * This function commits a top-level state object that has been validated
13170 * with drm_atomic_helper_check().
13171 *
Daniel Vetter94f05022016-06-14 18:01:00 +020013172 * RETURNS
13173 * Zero for success or -errno.
13174 */
13175static int intel_atomic_commit(struct drm_device *dev,
13176 struct drm_atomic_state *state,
13177 bool nonblock)
13178{
13179 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
Chris Wilsonfac5e232016-07-04 11:34:36 +010013180 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter94f05022016-06-14 18:01:00 +020013181 int ret = 0;
13182
Chris Wilsonc004a902016-10-28 13:58:45 +010013183 drm_atomic_state_get(state);
13184 i915_sw_fence_init(&intel_state->commit_ready,
13185 intel_atomic_commit_ready);
Daniel Vetter94f05022016-06-14 18:01:00 +020013186
Ville Syrjälä440df932017-03-29 17:21:23 +030013187 /*
13188 * The intel_legacy_cursor_update() fast path takes care
13189 * of avoiding the vblank waits for simple cursor
13190 * movement and flips. For cursor on/off and size changes,
13191 * we want to perform the vblank waits so that watermark
13192 * updates happen during the correct frames. Gen9+ have
13193 * double buffered watermarks and so shouldn't need this.
13194 *
Maarten Lankhorst3cf50c62017-09-19 14:14:18 +020013195 * Unset state->legacy_cursor_update before the call to
13196 * drm_atomic_helper_setup_commit() because otherwise
13197 * drm_atomic_helper_wait_for_flip_done() is a noop and
13198 * we get FIFO underruns because we didn't wait
13199 * for vblank.
Ville Syrjälä440df932017-03-29 17:21:23 +030013200 *
13201 * FIXME doing watermarks and fb cleanup from a vblank worker
13202 * (assuming we had any) would solve these problems.
13203 */
Maarten Lankhorst213f1bd2017-09-19 14:14:19 +020013204 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
13205 struct intel_crtc_state *new_crtc_state;
13206 struct intel_crtc *crtc;
13207 int i;
13208
13209 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
13210 if (new_crtc_state->wm.need_postvbl_update ||
13211 new_crtc_state->update_wm_post)
13212 state->legacy_cursor_update = false;
13213 }
Ville Syrjälä440df932017-03-29 17:21:23 +030013214
Maarten Lankhorst3cf50c62017-09-19 14:14:18 +020013215 ret = intel_atomic_prepare_commit(dev, state);
13216 if (ret) {
13217 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13218 i915_sw_fence_commit(&intel_state->commit_ready);
13219 return ret;
13220 }
13221
13222 ret = drm_atomic_helper_setup_commit(state, nonblock);
13223 if (!ret)
13224 ret = drm_atomic_helper_swap_state(state, true);
13225
Maarten Lankhorst0806f4e2017-07-11 16:33:07 +020013226 if (ret) {
13227 i915_sw_fence_commit(&intel_state->commit_ready);
13228
Maarten Lankhorst0806f4e2017-07-11 16:33:07 +020013229 drm_atomic_helper_cleanup_planes(dev, state);
Maarten Lankhorst0806f4e2017-07-11 16:33:07 +020013230 return ret;
13231 }
Daniel Vetter94f05022016-06-14 18:01:00 +020013232 dev_priv->wm.distrust_bios_wm = false;
Ander Conselvan de Oliveira3c0fb582016-12-29 17:22:08 +020013233 intel_shared_dpll_swap_state(state);
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020013234 intel_atomic_track_fbs(state);
Daniel Vetter94f05022016-06-14 18:01:00 +020013235
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010013236 if (intel_state->modeset) {
Ville Syrjäläd305e062017-08-30 21:57:03 +030013237 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
13238 sizeof(intel_state->min_cdclk));
Ville Syrjälä53e9bf52017-10-24 12:52:14 +030013239 memcpy(dev_priv->min_voltage_level,
13240 intel_state->min_voltage_level,
13241 sizeof(intel_state->min_voltage_level));
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010013242 dev_priv->active_crtcs = intel_state->active_crtcs;
Ville Syrjäläbb0f4aa2017-01-20 20:21:59 +020013243 dev_priv->cdclk.logical = intel_state->cdclk.logical;
13244 dev_priv->cdclk.actual = intel_state->cdclk.actual;
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010013245 }
13246
Chris Wilson08536952016-10-14 13:18:18 +010013247 drm_atomic_state_get(state);
Daniel Vetter42b062b2017-08-08 10:08:27 +020013248 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
Chris Wilsonc004a902016-10-28 13:58:45 +010013249
13250 i915_sw_fence_commit(&intel_state->commit_ready);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020013251 if (nonblock && intel_state->modeset) {
13252 queue_work(dev_priv->modeset_wq, &state->commit_work);
13253 } else if (nonblock) {
Daniel Vetter42b062b2017-08-08 10:08:27 +020013254 queue_work(system_unbound_wq, &state->commit_work);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020013255 } else {
13256 if (intel_state->modeset)
13257 flush_workqueue(dev_priv->modeset_wq);
Daniel Vetter94f05022016-06-14 18:01:00 +020013258 intel_atomic_commit_tail(state);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020013259 }
Mika Kuoppala75714942015-12-16 09:26:48 +020013260
Maarten Lankhorst74c090b2015-07-13 16:30:30 +020013261 return 0;
Daniel Vetterf30da182013-04-11 20:22:50 +020013262}
13263
Chris Wilsonf6e5b162011-04-12 18:06:51 +010013264static const struct drm_crtc_funcs intel_crtc_funcs = {
Daniel Vetter3fab2f02017-04-03 10:32:57 +020013265 .gamma_set = drm_atomic_helper_legacy_gamma_set,
Maarten Lankhorst74c090b2015-07-13 16:30:30 +020013266 .set_config = drm_atomic_helper_set_config,
Chris Wilsonf6e5b162011-04-12 18:06:51 +010013267 .destroy = intel_crtc_destroy,
Maarten Lankhorst4c01ded2016-12-22 11:33:23 +010013268 .page_flip = drm_atomic_helper_page_flip,
Matt Roper13568372015-01-21 16:35:47 -080013269 .atomic_duplicate_state = intel_crtc_duplicate_state,
13270 .atomic_destroy_state = intel_crtc_destroy_state,
Tomeu Vizoso8c6b7092017-01-10 14:43:04 +010013271 .set_crc_source = intel_crtc_set_crc_source,
Mahesh Kumara8c20832018-07-13 19:29:38 +053013272 .verify_crc_source = intel_crtc_verify_crc_source,
Mahesh Kumar260bc552018-07-13 19:29:39 +053013273 .get_crc_sources = intel_crtc_get_crc_sources,
Chris Wilsonf6e5b162011-04-12 18:06:51 +010013274};
13275
Chris Wilson74d290f2017-08-17 13:37:06 +010013276struct wait_rps_boost {
13277 struct wait_queue_entry wait;
13278
13279 struct drm_crtc *crtc;
Chris Wilsone61e0f52018-02-21 09:56:36 +000013280 struct i915_request *request;
Chris Wilson74d290f2017-08-17 13:37:06 +010013281};
13282
13283static int do_rps_boost(struct wait_queue_entry *_wait,
13284 unsigned mode, int sync, void *key)
13285{
13286 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
Chris Wilsone61e0f52018-02-21 09:56:36 +000013287 struct i915_request *rq = wait->request;
Chris Wilson74d290f2017-08-17 13:37:06 +010013288
Chris Wilsone9af4ea2018-01-18 13:16:09 +000013289 /*
13290 * If we missed the vblank, but the request is already running it
13291 * is reasonable to assume that it will complete before the next
13292 * vblank without our intervention, so leave RPS alone.
13293 */
Chris Wilsone61e0f52018-02-21 09:56:36 +000013294 if (!i915_request_started(rq))
Chris Wilsone9af4ea2018-01-18 13:16:09 +000013295 gen6_rps_boost(rq, NULL);
Chris Wilsone61e0f52018-02-21 09:56:36 +000013296 i915_request_put(rq);
Chris Wilson74d290f2017-08-17 13:37:06 +010013297
13298 drm_crtc_vblank_put(wait->crtc);
13299
13300 list_del(&wait->wait.entry);
13301 kfree(wait);
13302 return 1;
13303}
13304
13305static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
13306 struct dma_fence *fence)
13307{
13308 struct wait_rps_boost *wait;
13309
13310 if (!dma_fence_is_i915(fence))
13311 return;
13312
13313 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
13314 return;
13315
13316 if (drm_crtc_vblank_get(crtc))
13317 return;
13318
13319 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
13320 if (!wait) {
13321 drm_crtc_vblank_put(crtc);
13322 return;
13323 }
13324
13325 wait->request = to_request(dma_fence_get(fence));
13326 wait->crtc = crtc;
13327
13328 wait->wait.func = do_rps_boost;
13329 wait->wait.flags = 0;
13330
13331 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
13332}
13333
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013334static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13335{
13336 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13337 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13338 struct drm_framebuffer *fb = plane_state->base.fb;
13339 struct i915_vma *vma;
13340
13341 if (plane->id == PLANE_CURSOR &&
José Roberto de Souzad53db442018-11-30 15:20:48 -080013342 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013343 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13344 const int align = intel_cursor_alignment(dev_priv);
Chris Wilson4a477652018-08-17 09:24:05 +010013345 int err;
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013346
Chris Wilson4a477652018-08-17 09:24:05 +010013347 err = i915_gem_object_attach_phys(obj, align);
13348 if (err)
13349 return err;
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013350 }
13351
13352 vma = intel_pin_and_fence_fb_obj(fb,
Ville Syrjäläf5929c52018-09-07 18:24:06 +030013353 &plane_state->view,
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013354 intel_plane_uses_fence(plane_state),
13355 &plane_state->flags);
13356 if (IS_ERR(vma))
13357 return PTR_ERR(vma);
13358
13359 plane_state->vma = vma;
13360
13361 return 0;
13362}
13363
13364static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13365{
13366 struct i915_vma *vma;
13367
13368 vma = fetch_and_zero(&old_plane_state->vma);
13369 if (vma)
13370 intel_unpin_fb_vma(vma, old_plane_state->flags);
13371}
13372
Chris Wilsonb7268c52018-04-18 19:40:52 +010013373static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
13374{
13375 struct i915_sched_attr attr = {
13376 .priority = I915_PRIORITY_DISPLAY,
13377 };
13378
13379 i915_gem_object_wait_priority(obj, 0, &attr);
13380}
13381
Matt Roper6beb8c232014-12-01 15:40:14 -080013382/**
13383 * intel_prepare_plane_fb - Prepare fb for usage on plane
13384 * @plane: drm plane to prepare for
Chris Wilsonc38c1452018-02-14 13:49:22 +000013385 * @new_state: the plane state being prepared
Matt Roper6beb8c232014-12-01 15:40:14 -080013386 *
13387 * Prepares a framebuffer for usage on a display plane. Generally this
13388 * involves pinning the underlying object and updating the frontbuffer tracking
13389 * bits. Some older platforms need special physical address handling for
13390 * cursor planes.
13391 *
Maarten Lankhorstf9356752015-08-18 13:40:05 +020013392 * Must be called with struct_mutex held.
13393 *
Matt Roper6beb8c232014-12-01 15:40:14 -080013394 * Returns 0 on success, negative error code on failure.
13395 */
13396int
13397intel_prepare_plane_fb(struct drm_plane *plane,
Chris Wilson18320402016-08-18 19:00:16 +010013398 struct drm_plane_state *new_state)
Matt Roper465c1202014-05-29 08:06:54 -070013399{
Chris Wilsonc004a902016-10-28 13:58:45 +010013400 struct intel_atomic_state *intel_state =
13401 to_intel_atomic_state(new_state->state);
Tvrtko Ursulinb7f05d42016-11-09 11:30:45 +000013402 struct drm_i915_private *dev_priv = to_i915(plane->dev);
Maarten Lankhorst844f9112015-09-02 10:42:40 +020013403 struct drm_framebuffer *fb = new_state->fb;
Matt Roper6beb8c232014-12-01 15:40:14 -080013404 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Maarten Lankhorst1ee49392015-09-23 13:27:08 +020013405 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
Chris Wilsonc004a902016-10-28 13:58:45 +010013406 int ret;
Matt Roper465c1202014-05-29 08:06:54 -070013407
Maarten Lankhorst5008e872015-08-18 13:40:05 +020013408 if (old_obj) {
13409 struct drm_crtc_state *crtc_state =
Maarten Lankhorst8b694492018-04-09 14:46:55 +020013410 drm_atomic_get_new_crtc_state(new_state->state,
13411 plane->state->crtc);
Maarten Lankhorst5008e872015-08-18 13:40:05 +020013412
13413 /* Big Hammer, we also need to ensure that any pending
13414 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13415 * current scanout is retired before unpinning the old
13416 * framebuffer. Note that we rely on userspace rendering
13417 * into the buffer attached to the pipe they are waiting
13418 * on. If not, userspace generates a GPU hang with IPEHR
13419 * point to the MI_WAIT_FOR_EVENT.
13420 *
13421 * This should only fail upon a hung GPU, in which case we
13422 * can safely continue.
13423 */
Chris Wilsonc004a902016-10-28 13:58:45 +010013424 if (needs_modeset(crtc_state)) {
13425 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13426 old_obj->resv, NULL,
13427 false, 0,
13428 GFP_KERNEL);
13429 if (ret < 0)
13430 return ret;
Chris Wilsonf4457ae2016-04-13 17:35:08 +010013431 }
Maarten Lankhorst5008e872015-08-18 13:40:05 +020013432 }
13433
Chris Wilsonc004a902016-10-28 13:58:45 +010013434 if (new_state->fence) { /* explicit fencing */
13435 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13436 new_state->fence,
13437 I915_FENCE_TIMEOUT,
13438 GFP_KERNEL);
13439 if (ret < 0)
13440 return ret;
13441 }
13442
Chris Wilsonc37efb92016-06-17 08:28:47 +010013443 if (!obj)
13444 return 0;
13445
Chris Wilson4d3088c2017-07-26 17:00:38 +010013446 ret = i915_gem_object_pin_pages(obj);
Chris Wilsonfd700752017-07-26 17:00:36 +010013447 if (ret)
13448 return ret;
13449
Chris Wilson4d3088c2017-07-26 17:00:38 +010013450 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13451 if (ret) {
13452 i915_gem_object_unpin_pages(obj);
13453 return ret;
13454 }
13455
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013456 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
Chris Wilsonfd700752017-07-26 17:00:36 +010013457
Chris Wilsonfd700752017-07-26 17:00:36 +010013458 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson4d3088c2017-07-26 17:00:38 +010013459 i915_gem_object_unpin_pages(obj);
Chris Wilsonfd700752017-07-26 17:00:36 +010013460 if (ret)
13461 return ret;
13462
Chris Wilsone2f34962018-10-01 15:47:54 +010013463 fb_obj_bump_render_priority(obj);
Dhinakaran Pandiyan07bcd992018-03-06 19:34:18 -080013464 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13465
Chris Wilsonc004a902016-10-28 13:58:45 +010013466 if (!new_state->fence) { /* implicit fencing */
Chris Wilson74d290f2017-08-17 13:37:06 +010013467 struct dma_fence *fence;
13468
Chris Wilsonc004a902016-10-28 13:58:45 +010013469 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13470 obj->resv, NULL,
13471 false, I915_FENCE_TIMEOUT,
13472 GFP_KERNEL);
13473 if (ret < 0)
13474 return ret;
Chris Wilson74d290f2017-08-17 13:37:06 +010013475
13476 fence = reservation_object_get_excl_rcu(obj->resv);
13477 if (fence) {
13478 add_rps_boost_after_vblank(new_state->crtc, fence);
13479 dma_fence_put(fence);
13480 }
13481 } else {
13482 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
Chris Wilsonc004a902016-10-28 13:58:45 +010013483 }
Daniel Vetter5a21b662016-05-24 17:13:53 +020013484
Chris Wilson60548c52018-07-31 14:26:29 +010013485 /*
13486 * We declare pageflips to be interactive and so merit a small bias
13487 * towards upclocking to deliver the frame on time. By only changing
13488 * the RPS thresholds to sample more regularly and aim for higher
13489 * clocks we can hopefully deliver low power workloads (like kodi)
13490 * that are not quite steady state without resorting to forcing
13491 * maximum clocks following a vblank miss (see do_rps_boost()).
13492 */
13493 if (!intel_state->rps_interactive) {
13494 intel_rps_mark_interactive(dev_priv, true);
13495 intel_state->rps_interactive = true;
13496 }
13497
Chris Wilsond07f0e52016-10-28 13:58:44 +010013498 return 0;
Matt Roper6beb8c232014-12-01 15:40:14 -080013499}
13500
Matt Roper38f3ce32014-12-02 07:45:25 -080013501/**
13502 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13503 * @plane: drm plane to clean up for
Chris Wilsonc38c1452018-02-14 13:49:22 +000013504 * @old_state: the state from the previous modeset
Matt Roper38f3ce32014-12-02 07:45:25 -080013505 *
13506 * Cleans up a framebuffer that has just been removed from a plane.
Maarten Lankhorstf9356752015-08-18 13:40:05 +020013507 *
13508 * Must be called with struct_mutex held.
Matt Roper38f3ce32014-12-02 07:45:25 -080013509 */
13510void
13511intel_cleanup_plane_fb(struct drm_plane *plane,
Chris Wilson18320402016-08-18 19:00:16 +010013512 struct drm_plane_state *old_state)
Matt Roper38f3ce32014-12-02 07:45:25 -080013513{
Chris Wilson60548c52018-07-31 14:26:29 +010013514 struct intel_atomic_state *intel_state =
13515 to_intel_atomic_state(old_state->state);
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013516 struct drm_i915_private *dev_priv = to_i915(plane->dev);
Matt Roper38f3ce32014-12-02 07:45:25 -080013517
Chris Wilson60548c52018-07-31 14:26:29 +010013518 if (intel_state->rps_interactive) {
13519 intel_rps_mark_interactive(dev_priv, false);
13520 intel_state->rps_interactive = false;
13521 }
13522
Chris Wilsonbe1e3412017-01-16 15:21:27 +000013523 /* Should only be called after a successful intel_prepare_plane_fb()! */
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013524 mutex_lock(&dev_priv->drm.struct_mutex);
13525 intel_plane_unpin_fb(to_intel_plane_state(old_state));
13526 mutex_unlock(&dev_priv->drm.struct_mutex);
Matt Roper465c1202014-05-29 08:06:54 -070013527}
13528
Chandra Konduru6156a452015-04-27 13:48:39 -070013529int
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +030013530skl_max_scale(const struct intel_crtc_state *crtc_state,
13531 u32 pixel_format)
Chandra Konduru6156a452015-04-27 13:48:39 -070013532{
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +030013533 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13534 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Chandra Konduru77224cd2018-04-09 09:11:13 +053013535 int max_scale, mult;
13536 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
Chandra Konduru6156a452015-04-27 13:48:39 -070013537
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +030013538 if (!crtc_state->base.enable)
Chandra Konduru6156a452015-04-27 13:48:39 -070013539 return DRM_PLANE_HELPER_NO_SCALING;
13540
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013541 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13542 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13543
Rodrigo Vivi43037c82017-10-03 15:31:42 -070013544 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013545 max_dotclk *= 2;
13546
13547 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
Chandra Konduru6156a452015-04-27 13:48:39 -070013548 return DRM_PLANE_HELPER_NO_SCALING;
13549
13550 /*
13551 * skl max scale is lower of:
13552 * close to 3 but not 3, -1 is for that purpose
13553 * or
13554 * cdclk/crtc_clock
13555 */
Chandra Konduru77224cd2018-04-09 09:11:13 +053013556 mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13557 tmpclk1 = (1 << 16) * mult - 1;
13558 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13559 max_scale = min(tmpclk1, tmpclk2);
Chandra Konduru6156a452015-04-27 13:48:39 -070013560
13561 return max_scale;
13562}
13563
Daniel Vetter5a21b662016-05-24 17:13:53 +020013564static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13565 struct drm_crtc_state *old_crtc_state)
13566{
13567 struct drm_device *dev = crtc->dev;
Lyude62e0fb82016-08-22 12:50:08 -040013568 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013569 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Maarten Lankhorstccf010f2016-11-08 13:55:32 +010013570 struct intel_crtc_state *old_intel_cstate =
Daniel Vetter5a21b662016-05-24 17:13:53 +020013571 to_intel_crtc_state(old_crtc_state);
Maarten Lankhorstccf010f2016-11-08 13:55:32 +010013572 struct intel_atomic_state *old_intel_state =
13573 to_intel_atomic_state(old_crtc_state->state);
Ville Syrjäläd3a8fb32017-08-23 18:22:21 +030013574 struct intel_crtc_state *intel_cstate =
13575 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
13576 bool modeset = needs_modeset(&intel_cstate->base);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013577
Maarten Lankhorst567f0792017-02-28 15:28:47 +010013578 if (!modeset &&
13579 (intel_cstate->base.color_mgmt_changed ||
13580 intel_cstate->update_pipe)) {
Matt Roper302da0c2018-12-10 13:54:15 -080013581 intel_color_set_csc(intel_cstate);
13582 intel_color_load_luts(intel_cstate);
Maarten Lankhorst567f0792017-02-28 15:28:47 +010013583 }
13584
Daniel Vetter5a21b662016-05-24 17:13:53 +020013585 /* Perform vblank evasion around commit operation */
Ville Syrjäläd3a8fb32017-08-23 18:22:21 +030013586 intel_pipe_update_start(intel_cstate);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013587
13588 if (modeset)
Maarten Lankhorste62929b2016-11-08 13:55:33 +010013589 goto out;
Daniel Vetter5a21b662016-05-24 17:13:53 +020013590
Maarten Lankhorstccf010f2016-11-08 13:55:32 +010013591 if (intel_cstate->update_pipe)
Ville Syrjälä1a15b772017-08-23 18:22:25 +030013592 intel_update_pipe_config(old_intel_cstate, intel_cstate);
Maarten Lankhorstccf010f2016-11-08 13:55:32 +010013593 else if (INTEL_GEN(dev_priv) >= 9)
Maarten Lankhorst15cbe5d2018-10-04 11:45:56 +020013594 skl_detach_scalers(intel_cstate);
Lyude62e0fb82016-08-22 12:50:08 -040013595
Maarten Lankhorste62929b2016-11-08 13:55:33 +010013596out:
Maarten Lankhorstccf010f2016-11-08 13:55:32 +010013597 if (dev_priv->display.atomic_update_watermarks)
13598 dev_priv->display.atomic_update_watermarks(old_intel_state,
13599 intel_cstate);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013600}
13601
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +020013602void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13603 struct intel_crtc_state *crtc_state)
13604{
13605 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13606
Lucas De Marchicf819ef2018-12-12 10:10:43 -080013607 if (!IS_GEN(dev_priv, 2))
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +020013608 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13609
13610 if (crtc_state->has_pch_encoder) {
13611 enum pipe pch_transcoder =
13612 intel_crtc_pch_transcoder(crtc);
13613
13614 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13615 }
13616}
13617
Daniel Vetter5a21b662016-05-24 17:13:53 +020013618static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13619 struct drm_crtc_state *old_crtc_state)
13620{
13621 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ville Syrjäläd3a8fb32017-08-23 18:22:21 +030013622 struct intel_atomic_state *old_intel_state =
13623 to_intel_atomic_state(old_crtc_state->state);
13624 struct intel_crtc_state *new_crtc_state =
13625 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013626
Ville Syrjäläd3a8fb32017-08-23 18:22:21 +030013627 intel_pipe_update_end(new_crtc_state);
Maarten Lankhorst33a49862017-11-13 15:40:43 +010013628
13629 if (new_crtc_state->update_pipe &&
13630 !needs_modeset(&new_crtc_state->base) &&
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +020013631 old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
13632 intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013633}
13634
Matt Ropercf4c7c12014-12-04 10:27:42 -080013635/**
Matt Roper4a3b8762014-12-23 10:41:51 -080013636 * intel_plane_destroy - destroy a plane
13637 * @plane: plane to destroy
Matt Ropercf4c7c12014-12-04 10:27:42 -080013638 *
Matt Roper4a3b8762014-12-23 10:41:51 -080013639 * Common destruction function for all types of planes (primary, cursor,
13640 * sprite).
Matt Ropercf4c7c12014-12-04 10:27:42 -080013641 */
Matt Roper4a3b8762014-12-23 10:41:51 -080013642void intel_plane_destroy(struct drm_plane *plane)
Matt Roper465c1202014-05-29 08:06:54 -070013643{
Matt Roper465c1202014-05-29 08:06:54 -070013644 drm_plane_cleanup(plane);
Ville Syrjälä69ae5612016-05-27 20:59:22 +030013645 kfree(to_intel_plane(plane));
Matt Roper465c1202014-05-29 08:06:54 -070013646}
13647
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013648static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13649 u32 format, u64 modifier)
Ben Widawsky714244e2017-08-01 09:58:16 -070013650{
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013651 switch (modifier) {
13652 case DRM_FORMAT_MOD_LINEAR:
13653 case I915_FORMAT_MOD_X_TILED:
13654 break;
13655 default:
13656 return false;
13657 }
13658
Ben Widawsky714244e2017-08-01 09:58:16 -070013659 switch (format) {
13660 case DRM_FORMAT_C8:
13661 case DRM_FORMAT_RGB565:
13662 case DRM_FORMAT_XRGB1555:
13663 case DRM_FORMAT_XRGB8888:
13664 return modifier == DRM_FORMAT_MOD_LINEAR ||
13665 modifier == I915_FORMAT_MOD_X_TILED;
13666 default:
13667 return false;
13668 }
13669}
13670
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013671static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13672 u32 format, u64 modifier)
Ben Widawsky714244e2017-08-01 09:58:16 -070013673{
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013674 switch (modifier) {
13675 case DRM_FORMAT_MOD_LINEAR:
13676 case I915_FORMAT_MOD_X_TILED:
13677 break;
13678 default:
13679 return false;
13680 }
13681
Ben Widawsky714244e2017-08-01 09:58:16 -070013682 switch (format) {
13683 case DRM_FORMAT_C8:
13684 case DRM_FORMAT_RGB565:
13685 case DRM_FORMAT_XRGB8888:
13686 case DRM_FORMAT_XBGR8888:
13687 case DRM_FORMAT_XRGB2101010:
13688 case DRM_FORMAT_XBGR2101010:
13689 return modifier == DRM_FORMAT_MOD_LINEAR ||
13690 modifier == I915_FORMAT_MOD_X_TILED;
13691 default:
13692 return false;
13693 }
13694}
13695
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013696static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13697 u32 format, u64 modifier)
Ben Widawsky714244e2017-08-01 09:58:16 -070013698{
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013699 return modifier == DRM_FORMAT_MOD_LINEAR &&
13700 format == DRM_FORMAT_ARGB8888;
Ben Widawsky714244e2017-08-01 09:58:16 -070013701}
13702
Ville Syrjälä679bfe82018-10-05 15:58:07 +030013703static const struct drm_plane_funcs i965_plane_funcs = {
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013704 .update_plane = drm_atomic_helper_update_plane,
13705 .disable_plane = drm_atomic_helper_disable_plane,
13706 .destroy = intel_plane_destroy,
13707 .atomic_get_property = intel_plane_atomic_get_property,
13708 .atomic_set_property = intel_plane_atomic_set_property,
13709 .atomic_duplicate_state = intel_plane_duplicate_state,
13710 .atomic_destroy_state = intel_plane_destroy_state,
13711 .format_mod_supported = i965_plane_format_mod_supported,
13712};
13713
Ville Syrjälä679bfe82018-10-05 15:58:07 +030013714static const struct drm_plane_funcs i8xx_plane_funcs = {
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013715 .update_plane = drm_atomic_helper_update_plane,
13716 .disable_plane = drm_atomic_helper_disable_plane,
13717 .destroy = intel_plane_destroy,
13718 .atomic_get_property = intel_plane_atomic_get_property,
13719 .atomic_set_property = intel_plane_atomic_set_property,
13720 .atomic_duplicate_state = intel_plane_duplicate_state,
13721 .atomic_destroy_state = intel_plane_destroy_state,
13722 .format_mod_supported = i8xx_plane_format_mod_supported,
Matt Roper465c1202014-05-29 08:06:54 -070013723};
13724
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013725static int
13726intel_legacy_cursor_update(struct drm_plane *plane,
13727 struct drm_crtc *crtc,
13728 struct drm_framebuffer *fb,
13729 int crtc_x, int crtc_y,
13730 unsigned int crtc_w, unsigned int crtc_h,
13731 uint32_t src_x, uint32_t src_y,
Daniel Vetter34a2ab52017-03-22 22:50:41 +010013732 uint32_t src_w, uint32_t src_h,
13733 struct drm_modeset_acquire_ctx *ctx)
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013734{
13735 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
13736 int ret;
13737 struct drm_plane_state *old_plane_state, *new_plane_state;
13738 struct intel_plane *intel_plane = to_intel_plane(plane);
13739 struct drm_framebuffer *old_fb;
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013740 struct intel_crtc_state *crtc_state =
13741 to_intel_crtc_state(crtc->state);
13742 struct intel_crtc_state *new_crtc_state;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013743
13744 /*
13745 * When crtc is inactive or there is a modeset pending,
13746 * wait for it to complete in the slowpath
13747 */
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013748 if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
13749 crtc_state->update_pipe)
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013750 goto slow;
13751
13752 old_plane_state = plane->state;
Maarten Lankhorst669c9212017-09-04 12:48:38 +020013753 /*
13754 * Don't do an async update if there is an outstanding commit modifying
13755 * the plane. This prevents our async update's changes from getting
13756 * overridden by a previous synchronous update's state.
13757 */
13758 if (old_plane_state->commit &&
13759 !try_wait_for_completion(&old_plane_state->commit->hw_done))
13760 goto slow;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013761
13762 /*
13763 * If any parameters change that may affect watermarks,
13764 * take the slowpath. Only changing fb or position should be
13765 * in the fastpath.
13766 */
13767 if (old_plane_state->crtc != crtc ||
13768 old_plane_state->src_w != src_w ||
13769 old_plane_state->src_h != src_h ||
13770 old_plane_state->crtc_w != crtc_w ||
13771 old_plane_state->crtc_h != crtc_h ||
Ville Syrjäläa5509ab2017-02-17 17:01:59 +020013772 !old_plane_state->fb != !fb)
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013773 goto slow;
13774
13775 new_plane_state = intel_plane_duplicate_state(plane);
13776 if (!new_plane_state)
13777 return -ENOMEM;
13778
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013779 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
13780 if (!new_crtc_state) {
13781 ret = -ENOMEM;
13782 goto out_free;
13783 }
13784
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013785 drm_atomic_set_fb_for_plane(new_plane_state, fb);
13786
13787 new_plane_state->src_x = src_x;
13788 new_plane_state->src_y = src_y;
13789 new_plane_state->src_w = src_w;
13790 new_plane_state->src_h = src_h;
13791 new_plane_state->crtc_x = crtc_x;
13792 new_plane_state->crtc_y = crtc_y;
13793 new_plane_state->crtc_w = crtc_w;
13794 new_plane_state->crtc_h = crtc_h;
13795
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013796 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
13797 to_intel_plane_state(old_plane_state),
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013798 to_intel_plane_state(new_plane_state));
13799 if (ret)
13800 goto out_free;
13801
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013802 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13803 if (ret)
13804 goto out_free;
13805
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013806 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
13807 if (ret)
13808 goto out_unlock;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013809
Dhinakaran Pandiyana694e222018-03-06 19:34:19 -080013810 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013811
Dhinakaran Pandiyan07bcd992018-03-06 19:34:18 -080013812 old_fb = old_plane_state->fb;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013813 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
13814 intel_plane->frontbuffer_bit);
13815
13816 /* Swap plane state */
Maarten Lankhorst669c9212017-09-04 12:48:38 +020013817 plane->state = new_plane_state;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013818
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013819 /*
13820 * We cannot swap crtc_state as it may be in use by an atomic commit or
13821 * page flip that's running simultaneously. If we swap crtc_state and
13822 * destroy the old state, we will cause a use-after-free there.
13823 *
13824 * Only update active_planes, which is needed for our internal
13825 * bookkeeping. Either value will do the right thing when updating
13826 * planes atomically. If the cursor was part of the atomic update then
13827 * we would have taken the slowpath.
13828 */
13829 crtc_state->active_planes = new_crtc_state->active_planes;
13830
Ville Syrjälä72259532017-03-02 19:15:05 +020013831 if (plane->state->visible) {
13832 trace_intel_update_plane(plane, to_intel_crtc(crtc));
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013833 intel_plane->update_plane(intel_plane, crtc_state,
Ville Syrjäläa5509ab2017-02-17 17:01:59 +020013834 to_intel_plane_state(plane->state));
Ville Syrjälä72259532017-03-02 19:15:05 +020013835 } else {
13836 trace_intel_disable_plane(plane, to_intel_crtc(crtc));
Ville Syrjälä0dd14be2018-11-14 23:07:20 +020013837 intel_plane->disable_plane(intel_plane, crtc_state);
Ville Syrjälä72259532017-03-02 19:15:05 +020013838 }
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013839
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013840 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013841
13842out_unlock:
13843 mutex_unlock(&dev_priv->drm.struct_mutex);
13844out_free:
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013845 if (new_crtc_state)
13846 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
Maarten Lankhorst669c9212017-09-04 12:48:38 +020013847 if (ret)
13848 intel_plane_destroy_state(plane, new_plane_state);
13849 else
13850 intel_plane_destroy_state(plane, old_plane_state);
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013851 return ret;
13852
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013853slow:
13854 return drm_atomic_helper_update_plane(plane, crtc, fb,
13855 crtc_x, crtc_y, crtc_w, crtc_h,
Daniel Vetter34a2ab52017-03-22 22:50:41 +010013856 src_x, src_y, src_w, src_h, ctx);
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013857}
13858
/* Plane vfuncs for cursor planes: .update_plane goes through the
 * intel_legacy_cursor_update() fast path; everything else uses the
 * common atomic helpers/state handling.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
13869
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013870static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13871 enum i9xx_plane_id i9xx_plane)
13872{
13873 if (!HAS_FBC(dev_priv))
13874 return false;
13875
13876 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13877 return i9xx_plane == PLANE_A; /* tied to pipe A */
13878 else if (IS_IVYBRIDGE(dev_priv))
13879 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13880 i9xx_plane == PLANE_C;
13881 else if (INTEL_GEN(dev_priv) >= 4)
13882 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13883 else
13884 return i9xx_plane == PLANE_A;
13885}
13886
Ville Syrjäläb079bd172016-10-25 18:58:02 +030013887static struct intel_plane *
Ville Syrjälä580503c2016-10-31 22:37:00 +020013888intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
Matt Roper465c1202014-05-29 08:06:54 -070013889{
Ville Syrjälä881440a2018-10-05 15:58:17 +030013890 struct intel_plane *plane;
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013891 const struct drm_plane_funcs *plane_funcs;
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013892 unsigned int supported_rotations;
Ville Syrjälädeb19682018-10-05 15:58:08 +030013893 unsigned int possible_crtcs;
Ville Syrjälä881440a2018-10-05 15:58:17 +030013894 const u64 *modifiers;
13895 const u32 *formats;
13896 int num_formats;
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013897 int ret;
Matt Roper465c1202014-05-29 08:06:54 -070013898
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013899 if (INTEL_GEN(dev_priv) >= 9)
13900 return skl_universal_plane_create(dev_priv, pipe,
13901 PLANE_PRIMARY);
13902
Ville Syrjälä881440a2018-10-05 15:58:17 +030013903 plane = intel_plane_alloc();
13904 if (IS_ERR(plane))
13905 return plane;
Matt Roperea2c67b2014-12-23 10:41:52 -080013906
Ville Syrjälä881440a2018-10-05 15:58:17 +030013907 plane->pipe = pipe;
Ville Syrjäläe3c566d2016-11-08 16:47:11 +020013908 /*
13909 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
13910 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
13911 */
13912 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
Ville Syrjälä881440a2018-10-05 15:58:17 +030013913 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
Ville Syrjäläe3c566d2016-11-08 16:47:11 +020013914 else
Ville Syrjälä881440a2018-10-05 15:58:17 +030013915 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
13916 plane->id = PLANE_PRIMARY;
13917 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013918
Ville Syrjälä881440a2018-10-05 15:58:17 +030013919 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
13920 if (plane->has_fbc) {
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013921 struct intel_fbc *fbc = &dev_priv->fbc;
13922
Ville Syrjälä881440a2018-10-05 15:58:17 +030013923 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013924 }
13925
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013926 if (INTEL_GEN(dev_priv) >= 4) {
Ville Syrjälä881440a2018-10-05 15:58:17 +030013927 formats = i965_primary_formats;
Damien Lespiau568db4f2015-05-12 16:13:18 +010013928 num_formats = ARRAY_SIZE(i965_primary_formats);
Ben Widawsky714244e2017-08-01 09:58:16 -070013929 modifiers = i9xx_format_modifiers;
Maarten Lankhorsta8d201a2016-01-07 11:54:11 +010013930
Ville Syrjälä881440a2018-10-05 15:58:17 +030013931 plane->max_stride = i9xx_plane_max_stride;
13932 plane->update_plane = i9xx_update_plane;
13933 plane->disable_plane = i9xx_disable_plane;
13934 plane->get_hw_state = i9xx_plane_get_hw_state;
13935 plane->check_plane = i9xx_plane_check;
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013936
13937 plane_funcs = &i965_plane_funcs;
Damien Lespiau6c0fd452015-05-19 12:29:16 +010013938 } else {
Ville Syrjälä881440a2018-10-05 15:58:17 +030013939 formats = i8xx_primary_formats;
Damien Lespiau6c0fd452015-05-19 12:29:16 +010013940 num_formats = ARRAY_SIZE(i8xx_primary_formats);
Ben Widawsky714244e2017-08-01 09:58:16 -070013941 modifiers = i9xx_format_modifiers;
Maarten Lankhorsta8d201a2016-01-07 11:54:11 +010013942
Ville Syrjälä881440a2018-10-05 15:58:17 +030013943 plane->max_stride = i9xx_plane_max_stride;
13944 plane->update_plane = i9xx_update_plane;
13945 plane->disable_plane = i9xx_disable_plane;
13946 plane->get_hw_state = i9xx_plane_get_hw_state;
13947 plane->check_plane = i9xx_plane_check;
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013948
13949 plane_funcs = &i8xx_plane_funcs;
Matt Roper465c1202014-05-29 08:06:54 -070013950 }
13951
Ville Syrjälädeb19682018-10-05 15:58:08 +030013952 possible_crtcs = BIT(pipe);
13953
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013954 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
Ville Syrjälä881440a2018-10-05 15:58:17 +030013955 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
Ville Syrjälädeb19682018-10-05 15:58:08 +030013956 possible_crtcs, plane_funcs,
Ville Syrjälä881440a2018-10-05 15:58:17 +030013957 formats, num_formats, modifiers,
Ville Syrjälä38573dc2016-05-27 20:59:23 +030013958 DRM_PLANE_TYPE_PRIMARY,
13959 "primary %c", pipe_name(pipe));
13960 else
Ville Syrjälä881440a2018-10-05 15:58:17 +030013961 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
Ville Syrjälädeb19682018-10-05 15:58:08 +030013962 possible_crtcs, plane_funcs,
Ville Syrjälä881440a2018-10-05 15:58:17 +030013963 formats, num_formats, modifiers,
Ville Syrjälä38573dc2016-05-27 20:59:23 +030013964 DRM_PLANE_TYPE_PRIMARY,
Ville Syrjäläed150302017-11-17 21:19:10 +020013965 "plane %c",
Ville Syrjälä881440a2018-10-05 15:58:17 +030013966 plane_name(plane->i9xx_plane));
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013967 if (ret)
13968 goto fail;
Sonika Jindal48404c12014-08-22 14:06:04 +053013969
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013970 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
Ville Syrjälä4ea7be22016-11-14 18:54:00 +020013971 supported_rotations =
Robert Fossc2c446a2017-05-19 16:50:17 -040013972 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
13973 DRM_MODE_REFLECT_X;
Dave Airlie5481e272016-10-25 16:36:13 +100013974 } else if (INTEL_GEN(dev_priv) >= 4) {
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013975 supported_rotations =
Robert Fossc2c446a2017-05-19 16:50:17 -040013976 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013977 } else {
Robert Fossc2c446a2017-05-19 16:50:17 -040013978 supported_rotations = DRM_MODE_ROTATE_0;
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013979 }
13980
Dave Airlie5481e272016-10-25 16:36:13 +100013981 if (INTEL_GEN(dev_priv) >= 4)
Ville Syrjälä881440a2018-10-05 15:58:17 +030013982 drm_plane_create_rotation_property(&plane->base,
Robert Fossc2c446a2017-05-19 16:50:17 -040013983 DRM_MODE_ROTATE_0,
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013984 supported_rotations);
Sonika Jindal48404c12014-08-22 14:06:04 +053013985
Ville Syrjälä881440a2018-10-05 15:58:17 +030013986 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
Matt Roperea2c67b2014-12-23 10:41:52 -080013987
Ville Syrjälä881440a2018-10-05 15:58:17 +030013988 return plane;
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013989
13990fail:
Ville Syrjälä881440a2018-10-05 15:58:17 +030013991 intel_plane_free(plane);
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013992
Ville Syrjäläb079bd172016-10-25 18:58:02 +030013993 return ERR_PTR(ret);
Matt Roper465c1202014-05-29 08:06:54 -070013994}
13995
Ville Syrjäläb079bd172016-10-25 18:58:02 +030013996static struct intel_plane *
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030013997intel_cursor_plane_create(struct drm_i915_private *dev_priv,
13998 enum pipe pipe)
Matt Roper3d7d6512014-06-10 08:28:13 -070013999{
Ville Syrjälädeb19682018-10-05 15:58:08 +030014000 unsigned int possible_crtcs;
Ville Syrjäläc539b572018-10-05 15:58:14 +030014001 struct intel_plane *cursor;
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000014002 int ret;
Matt Roper3d7d6512014-06-10 08:28:13 -070014003
Ville Syrjäläc539b572018-10-05 15:58:14 +030014004 cursor = intel_plane_alloc();
14005 if (IS_ERR(cursor))
14006 return cursor;
Matt Roperea2c67b2014-12-23 10:41:52 -080014007
Matt Roper3d7d6512014-06-10 08:28:13 -070014008 cursor->pipe = pipe;
Ville Syrjäläed150302017-11-17 21:19:10 +020014009 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
Ville Syrjäläb14e5842016-11-22 18:01:56 +020014010 cursor->id = PLANE_CURSOR;
Ville Syrjäläc19e1122018-01-23 20:33:43 +020014011 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030014012
14013 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
Ville Syrjäläddd57132018-09-07 18:24:02 +030014014 cursor->max_stride = i845_cursor_max_stride;
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030014015 cursor->update_plane = i845_update_cursor;
14016 cursor->disable_plane = i845_disable_cursor;
Ville Syrjälä51f5a0962017-11-17 21:19:08 +020014017 cursor->get_hw_state = i845_cursor_get_hw_state;
Ville Syrjälä659056f2017-03-27 21:55:39 +030014018 cursor->check_plane = i845_check_cursor;
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030014019 } else {
Ville Syrjäläddd57132018-09-07 18:24:02 +030014020 cursor->max_stride = i9xx_cursor_max_stride;
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030014021 cursor->update_plane = i9xx_update_cursor;
14022 cursor->disable_plane = i9xx_disable_cursor;
Ville Syrjälä51f5a0962017-11-17 21:19:08 +020014023 cursor->get_hw_state = i9xx_cursor_get_hw_state;
Ville Syrjälä659056f2017-03-27 21:55:39 +030014024 cursor->check_plane = i9xx_check_cursor;
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030014025 }
Matt Roper3d7d6512014-06-10 08:28:13 -070014026
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +030014027 cursor->cursor.base = ~0;
14028 cursor->cursor.cntl = ~0;
Ville Syrjälä024faac2017-03-27 21:55:42 +030014029
14030 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14031 cursor->cursor.size = ~0;
Matt Roper3d7d6512014-06-10 08:28:13 -070014032
Ville Syrjälädeb19682018-10-05 15:58:08 +030014033 possible_crtcs = BIT(pipe);
14034
Ville Syrjälä580503c2016-10-31 22:37:00 +020014035 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
Ville Syrjälädeb19682018-10-05 15:58:08 +030014036 possible_crtcs, &intel_cursor_plane_funcs,
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000014037 intel_cursor_formats,
14038 ARRAY_SIZE(intel_cursor_formats),
Ben Widawsky714244e2017-08-01 09:58:16 -070014039 cursor_format_modifiers,
14040 DRM_PLANE_TYPE_CURSOR,
Ville Syrjälä38573dc2016-05-27 20:59:23 +030014041 "cursor %c", pipe_name(pipe));
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000014042 if (ret)
14043 goto fail;
Ville Syrjälä4398ad42014-10-23 07:41:34 -070014044
Dave Airlie5481e272016-10-25 16:36:13 +100014045 if (INTEL_GEN(dev_priv) >= 4)
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030014046 drm_plane_create_rotation_property(&cursor->base,
Robert Fossc2c446a2017-05-19 16:50:17 -040014047 DRM_MODE_ROTATE_0,
14048 DRM_MODE_ROTATE_0 |
14049 DRM_MODE_ROTATE_180);
Ville Syrjälä4398ad42014-10-23 07:41:34 -070014050
Matt Roperea2c67b2014-12-23 10:41:52 -080014051 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14052
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014053 return cursor;
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000014054
14055fail:
Ville Syrjäläc539b572018-10-05 15:58:14 +030014056 intel_plane_free(cursor);
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000014057
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014058 return ERR_PTR(ret);
Matt Roper3d7d6512014-06-10 08:28:13 -070014059}
14060
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053014061static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14062 struct intel_crtc_state *crtc_state)
Chandra Konduru549e2bf2015-04-07 15:28:38 -070014063{
Ville Syrjälä65edccc2016-10-31 22:37:01 +020014064 struct intel_crtc_scaler_state *scaler_state =
14065 &crtc_state->scaler_state;
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053014066 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Chandra Konduru549e2bf2015-04-07 15:28:38 -070014067 int i;
Chandra Konduru549e2bf2015-04-07 15:28:38 -070014068
Jani Nikula02584042018-12-31 16:56:41 +020014069 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053014070 if (!crtc->num_scalers)
14071 return;
14072
Ville Syrjälä65edccc2016-10-31 22:37:01 +020014073 for (i = 0; i < crtc->num_scalers; i++) {
14074 struct intel_scaler *scaler = &scaler_state->scalers[i];
14075
14076 scaler->in_use = 0;
Maarten Lankhorst0aaf29b2018-09-21 16:44:37 +020014077 scaler->mode = 0;
Chandra Konduru549e2bf2015-04-07 15:28:38 -070014078 }
14079
14080 scaler_state->scaler_id = -1;
14081}
14082
Ville Syrjälä5ab0d852016-10-31 22:37:11 +020014083static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
Jesse Barnes79e53942008-11-07 14:24:08 -080014084{
14085 struct intel_crtc *intel_crtc;
Ander Conselvan de Oliveiraf5de6e02015-01-15 14:55:26 +020014086 struct intel_crtc_state *crtc_state = NULL;
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014087 struct intel_plane *primary = NULL;
14088 struct intel_plane *cursor = NULL;
Ville Syrjäläa81d6fa2016-10-25 18:58:01 +030014089 int sprite, ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080014090
Daniel Vetter955382f2013-09-19 14:05:45 +020014091 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014092 if (!intel_crtc)
14093 return -ENOMEM;
Jesse Barnes79e53942008-11-07 14:24:08 -080014094
Ander Conselvan de Oliveiraf5de6e02015-01-15 14:55:26 +020014095 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014096 if (!crtc_state) {
14097 ret = -ENOMEM;
Ander Conselvan de Oliveiraf5de6e02015-01-15 14:55:26 +020014098 goto fail;
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014099 }
Ander Conselvan de Oliveira550acef2015-04-21 17:13:24 +030014100 intel_crtc->config = crtc_state;
14101 intel_crtc->base.state = &crtc_state->base;
Matt Roper07878242015-02-25 11:43:26 -080014102 crtc_state->base.crtc = &intel_crtc->base;
Ander Conselvan de Oliveiraf5de6e02015-01-15 14:55:26 +020014103
Ville Syrjälä580503c2016-10-31 22:37:00 +020014104 primary = intel_primary_plane_create(dev_priv, pipe);
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014105 if (IS_ERR(primary)) {
14106 ret = PTR_ERR(primary);
Matt Roper3d7d6512014-06-10 08:28:13 -070014107 goto fail;
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014108 }
Ville Syrjäläd97d7b42016-11-22 18:01:57 +020014109 intel_crtc->plane_ids_mask |= BIT(primary->id);
Matt Roper3d7d6512014-06-10 08:28:13 -070014110
Ville Syrjäläa81d6fa2016-10-25 18:58:01 +030014111 for_each_sprite(dev_priv, pipe, sprite) {
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014112 struct intel_plane *plane;
14113
Ville Syrjälä580503c2016-10-31 22:37:00 +020014114 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
Ville Syrjäläd2b2cbc2016-11-07 22:20:56 +020014115 if (IS_ERR(plane)) {
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014116 ret = PTR_ERR(plane);
14117 goto fail;
14118 }
Ville Syrjäläd97d7b42016-11-22 18:01:57 +020014119 intel_crtc->plane_ids_mask |= BIT(plane->id);
Ville Syrjäläa81d6fa2016-10-25 18:58:01 +030014120 }
14121
Ville Syrjälä580503c2016-10-31 22:37:00 +020014122 cursor = intel_cursor_plane_create(dev_priv, pipe);
Ville Syrjäläd2b2cbc2016-11-07 22:20:56 +020014123 if (IS_ERR(cursor)) {
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014124 ret = PTR_ERR(cursor);
Matt Roper3d7d6512014-06-10 08:28:13 -070014125 goto fail;
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014126 }
Ville Syrjäläd97d7b42016-11-22 18:01:57 +020014127 intel_crtc->plane_ids_mask |= BIT(cursor->id);
Matt Roper3d7d6512014-06-10 08:28:13 -070014128
Ville Syrjälä5ab0d852016-10-31 22:37:11 +020014129 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014130 &primary->base, &cursor->base,
14131 &intel_crtc_funcs,
Ville Syrjälä4d5d72b72016-05-27 20:59:21 +030014132 "pipe %c", pipe_name(pipe));
Matt Roper3d7d6512014-06-10 08:28:13 -070014133 if (ret)
14134 goto fail;
Jesse Barnes79e53942008-11-07 14:24:08 -080014135
Jesse Barnes80824002009-09-10 15:28:06 -070014136 intel_crtc->pipe = pipe;
Jesse Barnes80824002009-09-10 15:28:06 -070014137
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053014138 /* initialize shared scalers */
14139 intel_crtc_init_scalers(intel_crtc, crtc_state);
14140
Ville Syrjälä1947fd12018-03-05 19:41:22 +020014141 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
14142 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
14143 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
14144
14145 if (INTEL_GEN(dev_priv) < 9) {
14146 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
14147
14148 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14149 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
14150 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
14151 }
Jesse Barnes22fd0fa2009-12-02 13:42:53 -080014152
Jesse Barnes79e53942008-11-07 14:24:08 -080014153 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
Daniel Vetter87b6b102014-05-15 15:33:46 +020014154
Matt Roper302da0c2018-12-10 13:54:15 -080014155 intel_color_init(intel_crtc);
Lionel Landwerlin8563b1e2016-03-16 10:57:14 +000014156
Daniel Vetter87b6b102014-05-15 15:33:46 +020014157 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014158
14159 return 0;
Matt Roper3d7d6512014-06-10 08:28:13 -070014160
14161fail:
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014162 /*
14163 * drm_mode_config_cleanup() will free up any
14164 * crtcs/planes already initialized.
14165 */
Ander Conselvan de Oliveiraf5de6e02015-01-15 14:55:26 +020014166 kfree(crtc_state);
Matt Roper3d7d6512014-06-10 08:28:13 -070014167 kfree(intel_crtc);
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014168
14169 return ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080014170}
14171
Ville Syrjälä6a20fe72018-02-07 18:48:41 +020014172int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14173 struct drm_file *file)
Carl Worth08d7b3d2009-04-29 14:43:54 -070014174{
Carl Worth08d7b3d2009-04-29 14:43:54 -070014175 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
Rob Clark7707e652014-07-17 23:30:04 -040014176 struct drm_crtc *drmmode_crtc;
Daniel Vetterc05422d2009-08-11 16:05:30 +020014177 struct intel_crtc *crtc;
Carl Worth08d7b3d2009-04-29 14:43:54 -070014178
Keith Packard418da172017-03-14 23:25:07 -070014179 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
Chris Wilson71240ed2016-06-24 14:00:24 +010014180 if (!drmmode_crtc)
Ville Syrjälä3f2c2052013-10-17 13:35:03 +030014181 return -ENOENT;
Carl Worth08d7b3d2009-04-29 14:43:54 -070014182
Rob Clark7707e652014-07-17 23:30:04 -040014183 crtc = to_intel_crtc(drmmode_crtc);
Daniel Vetterc05422d2009-08-11 16:05:30 +020014184 pipe_from_crtc_id->pipe = crtc->pipe;
Carl Worth08d7b3d2009-04-29 14:43:54 -070014185
Daniel Vetterc05422d2009-08-11 16:05:30 +020014186 return 0;
Carl Worth08d7b3d2009-04-29 14:43:54 -070014187}
14188
Daniel Vetter66a92782012-07-12 20:08:18 +020014189static int intel_encoder_clones(struct intel_encoder *encoder)
Jesse Barnes79e53942008-11-07 14:24:08 -080014190{
Daniel Vetter66a92782012-07-12 20:08:18 +020014191 struct drm_device *dev = encoder->base.dev;
14192 struct intel_encoder *source_encoder;
Jesse Barnes79e53942008-11-07 14:24:08 -080014193 int index_mask = 0;
Jesse Barnes79e53942008-11-07 14:24:08 -080014194 int entry = 0;
14195
Damien Lespiaub2784e12014-08-05 11:29:37 +010014196 for_each_intel_encoder(dev, source_encoder) {
Ville Syrjäläbc079e82014-03-03 16:15:28 +020014197 if (encoders_cloneable(encoder, source_encoder))
Daniel Vetter66a92782012-07-12 20:08:18 +020014198 index_mask |= (1 << entry);
14199
Jesse Barnes79e53942008-11-07 14:24:08 -080014200 entry++;
14201 }
Chris Wilson4ef69c72010-09-09 15:14:28 +010014202
Jesse Barnes79e53942008-11-07 14:24:08 -080014203 return index_mask;
14204}
14205
Ville Syrjälä646d5772016-10-31 22:37:14 +020014206static bool has_edp_a(struct drm_i915_private *dev_priv)
Chris Wilson4d302442010-12-14 19:21:29 +000014207{
Ville Syrjälä646d5772016-10-31 22:37:14 +020014208 if (!IS_MOBILE(dev_priv))
Chris Wilson4d302442010-12-14 19:21:29 +000014209 return false;
14210
14211 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14212 return false;
14213
Lucas De Marchicf819ef2018-12-12 10:10:43 -080014214 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
Chris Wilson4d302442010-12-14 19:21:29 +000014215 return false;
14216
14217 return true;
14218}
14219
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014220static bool intel_crt_present(struct drm_i915_private *dev_priv)
Jesse Barnes84b4e042014-06-25 08:24:29 -070014221{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014222 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau884497e2013-12-03 13:56:23 +000014223 return false;
14224
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +010014225 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
Jesse Barnes84b4e042014-06-25 08:24:29 -070014226 return false;
14227
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014228 if (IS_CHERRYVIEW(dev_priv))
Jesse Barnes84b4e042014-06-25 08:24:29 -070014229 return false;
14230
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010014231 if (HAS_PCH_LPT_H(dev_priv) &&
14232 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
Ville Syrjälä65e472e2015-12-01 23:28:55 +020014233 return false;
14234
Ville Syrjälä70ac54d2015-12-01 23:29:56 +020014235 /* DDI E can't be used if DDI A requires 4 lanes */
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010014236 if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
Ville Syrjälä70ac54d2015-12-01 23:29:56 +020014237 return false;
14238
Ville Syrjäläe4abb732015-12-01 23:31:33 +020014239 if (!dev_priv->vbt.int_crt_support)
Jesse Barnes84b4e042014-06-25 08:24:29 -070014240 return false;
14241
14242 return true;
14243}
14244
Imre Deak8090ba82016-08-10 14:07:33 +030014245void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14246{
14247 int pps_num;
14248 int pps_idx;
14249
14250 if (HAS_DDI(dev_priv))
14251 return;
14252 /*
14253 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14254 * everywhere where registers can be write protected.
14255 */
14256 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14257 pps_num = 2;
14258 else
14259 pps_num = 1;
14260
14261 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14262 u32 val = I915_READ(PP_CONTROL(pps_idx));
14263
14264 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14265 I915_WRITE(PP_CONTROL(pps_idx), val);
14266 }
14267}
14268
Imre Deak44cb7342016-08-10 14:07:29 +030014269static void intel_pps_init(struct drm_i915_private *dev_priv)
14270{
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +020014271 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
Imre Deak44cb7342016-08-10 14:07:29 +030014272 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14273 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14274 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14275 else
14276 dev_priv->pps_mmio_base = PPS_BASE;
Imre Deak8090ba82016-08-10 14:07:33 +030014277
14278 intel_pps_unlock_regs_wa(dev_priv);
Imre Deak44cb7342016-08-10 14:07:29 +030014279}
14280
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014281static void intel_setup_outputs(struct drm_i915_private *dev_priv)
Jesse Barnes79e53942008-11-07 14:24:08 -080014282{
Chris Wilson4ef69c72010-09-09 15:14:28 +010014283 struct intel_encoder *encoder;
Adam Jacksoncb0953d2010-07-16 14:46:29 -040014284 bool dpd_is_edp = false;
Jesse Barnes79e53942008-11-07 14:24:08 -080014285
Imre Deak44cb7342016-08-10 14:07:29 +030014286 intel_pps_init(dev_priv);
14287
José Roberto de Souzae1bf0942018-11-30 15:20:47 -080014288 if (!HAS_DISPLAY(dev_priv))
Chris Wilsonfc0c5a92018-08-15 21:12:07 +010014289 return;
14290
Imre Deak97a824e12016-06-21 11:51:47 +030014291 /*
14292 * intel_edp_init_connector() depends on this completing first, to
14293 * prevent the registeration of both eDP and LVDS and the incorrect
14294 * sharing of the PPS.
14295 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014296 intel_lvds_init(dev_priv);
Jesse Barnes79e53942008-11-07 14:24:08 -080014297
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014298 if (intel_crt_present(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014299 intel_crt_init(dev_priv);
Adam Jacksoncb0953d2010-07-16 14:46:29 -040014300
Paulo Zanoni00c92d92018-05-21 17:25:47 -070014301 if (IS_ICELAKE(dev_priv)) {
14302 intel_ddi_init(dev_priv, PORT_A);
14303 intel_ddi_init(dev_priv, PORT_B);
14304 intel_ddi_init(dev_priv, PORT_C);
14305 intel_ddi_init(dev_priv, PORT_D);
14306 intel_ddi_init(dev_priv, PORT_E);
14307 intel_ddi_init(dev_priv, PORT_F);
Madhav Chauhanbf4d57f2018-10-30 13:56:23 +020014308 icl_dsi_init(dev_priv);
Paulo Zanoni00c92d92018-05-21 17:25:47 -070014309 } else if (IS_GEN9_LP(dev_priv)) {
Vandana Kannanc776eb22014-08-19 12:05:01 +053014310 /*
14311 * FIXME: Broxton doesn't support port detection via the
14312 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14313 * detect the ports.
14314 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014315 intel_ddi_init(dev_priv, PORT_A);
14316 intel_ddi_init(dev_priv, PORT_B);
14317 intel_ddi_init(dev_priv, PORT_C);
Shashank Sharmac6c794a2016-03-22 12:01:50 +020014318
Jani Nikulae5186342018-07-05 16:25:08 +030014319 vlv_dsi_init(dev_priv);
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010014320 } else if (HAS_DDI(dev_priv)) {
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014321 int found;
14322
Jesse Barnesde31fac2015-03-06 15:53:32 -080014323 /*
14324 * Haswell uses DDI functions to detect digital outputs.
14325 * On SKL pre-D0 the strap isn't connected, so we assume
14326 * it's there.
14327 */
Ville Syrjälä77179402015-09-18 20:03:35 +030014328 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
Jesse Barnesde31fac2015-03-06 15:53:32 -080014329 /* WaIgnoreDDIAStrap: skl */
Rodrigo Vivib976dc52017-01-23 10:32:37 -080014330 if (found || IS_GEN9_BC(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014331 intel_ddi_init(dev_priv, PORT_A);
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014332
Rodrigo Vivi9787e832018-01-29 15:22:22 -080014333 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014334 * register */
14335 found = I915_READ(SFUSE_STRAP);
14336
14337 if (found & SFUSE_STRAP_DDIB_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014338 intel_ddi_init(dev_priv, PORT_B);
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014339 if (found & SFUSE_STRAP_DDIC_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014340 intel_ddi_init(dev_priv, PORT_C);
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014341 if (found & SFUSE_STRAP_DDID_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014342 intel_ddi_init(dev_priv, PORT_D);
Rodrigo Vivi9787e832018-01-29 15:22:22 -080014343 if (found & SFUSE_STRAP_DDIF_DETECTED)
14344 intel_ddi_init(dev_priv, PORT_F);
Rodrigo Vivi2800e4c2015-08-07 17:35:21 -070014345 /*
14346 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14347 */
Rodrigo Vivib976dc52017-01-23 10:32:37 -080014348 if (IS_GEN9_BC(dev_priv) &&
Rodrigo Vivi2800e4c2015-08-07 17:35:21 -070014349 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14350 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14351 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014352 intel_ddi_init(dev_priv, PORT_E);
Rodrigo Vivi2800e4c2015-08-07 17:35:21 -070014353
Tvrtko Ursulin6e266952016-10-13 11:02:53 +010014354 } else if (HAS_PCH_SPLIT(dev_priv)) {
Adam Jacksoncb0953d2010-07-16 14:46:29 -040014355 int found;
Jani Nikula7b91bf72017-08-18 12:30:19 +030014356 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
Daniel Vetter270b3042012-10-27 15:52:05 +020014357
Ville Syrjälä646d5772016-10-31 22:37:14 +020014358 if (has_edp_a(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014359 intel_dp_init(dev_priv, DP_A, PORT_A);
Adam Jacksoncb0953d2010-07-16 14:46:29 -040014360
Paulo Zanonidc0fa712013-02-19 16:21:46 -030014361 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
Zhao Yakui461ed3c2010-03-30 15:11:33 +080014362 /* PCH SDVOB multiplex with HDMIB */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014363 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080014364 if (!found)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014365 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +080014366 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014367 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080014368 }
14369
Paulo Zanonidc0fa712013-02-19 16:21:46 -030014370 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014371 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080014372
Paulo Zanonidc0fa712013-02-19 16:21:46 -030014373 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014374 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080014375
Zhenyu Wang5eb08b62009-07-24 01:00:31 +080014376 if (I915_READ(PCH_DP_C) & DP_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014377 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +080014378
Daniel Vetter270b3042012-10-27 15:52:05 +020014379 if (I915_READ(PCH_DP_D) & DP_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014380 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014381 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä22f350422016-06-03 12:17:43 +030014382 bool has_edp, has_port;
Chris Wilson457c52d2016-06-01 08:27:50 +010014383
Ville Syrjäläe17ac6d2014-10-09 19:37:15 +030014384 /*
14385 * The DP_DETECTED bit is the latched state of the DDC
14386 * SDA pin at boot. However since eDP doesn't require DDC
14387 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14388 * eDP ports may have been muxed to an alternate function.
14389 * Thus we can't rely on the DP_DETECTED bit alone to detect
14390 * eDP ports. Consult the VBT as well as DP_DETECTED to
14391 * detect eDP ports.
Ville Syrjälä22f350422016-06-03 12:17:43 +030014392 *
14393 * Sadly the straps seem to be missing sometimes even for HDMI
14394 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
14395 * and VBT for the presence of the port. Additionally we can't
14396 * trust the port type the VBT declares as we've seen at least
14397 * HDMI ports that the VBT claim are DP or eDP.
Ville Syrjäläe17ac6d2014-10-09 19:37:15 +030014398 */
Jani Nikula7b91bf72017-08-18 12:30:19 +030014399 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014400 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14401 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014402 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014403 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014404 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
Artem Bityutskiy585a94b2013-10-16 18:10:41 +030014405
Jani Nikula7b91bf72017-08-18 12:30:19 +030014406 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014407 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14408 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014409 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014410 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014411 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
Gajanan Bhat19c03922012-09-27 19:13:07 +053014412
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014413 if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä22f350422016-06-03 12:17:43 +030014414 /*
14415 * eDP not supported on port D,
14416 * so no need to worry about it
14417 */
14418 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14419 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014420 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014421 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014422 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
Ville Syrjälä9418c1f2014-04-09 13:28:56 +030014423 }
14424
Jani Nikulae5186342018-07-05 16:25:08 +030014425 vlv_dsi_init(dev_priv);
Lucas De Marchicf819ef2018-12-12 10:10:43 -080014426 } else if (!IS_GEN(dev_priv, 2) && !IS_PINEVIEW(dev_priv)) {
Ma Ling27185ae2009-08-24 13:50:23 +080014427 bool found = false;
Eric Anholt7d573822009-01-02 13:33:00 -080014428
Paulo Zanonie2debe92013-02-18 19:00:27 -030014429 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014430 DRM_DEBUG_KMS("probing SDVOB\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014431 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014432 if (!found && IS_G4X(dev_priv)) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014433 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014434 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014435 }
Ma Ling27185ae2009-08-24 13:50:23 +080014436
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014437 if (!found && IS_G4X(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014438 intel_dp_init(dev_priv, DP_B, PORT_B);
Eric Anholt725e30a2009-01-22 13:01:02 -080014439 }
Kristian Høgsberg13520b02009-03-13 15:42:14 -040014440
14441 /* Before G4X SDVOC doesn't have its own detect register */
Kristian Høgsberg13520b02009-03-13 15:42:14 -040014442
Paulo Zanonie2debe92013-02-18 19:00:27 -030014443 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014444 DRM_DEBUG_KMS("probing SDVOC\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014445 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014446 }
Ma Ling27185ae2009-08-24 13:50:23 +080014447
Paulo Zanonie2debe92013-02-18 19:00:27 -030014448 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
Ma Ling27185ae2009-08-24 13:50:23 +080014449
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014450 if (IS_G4X(dev_priv)) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014451 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014452 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014453 }
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014454 if (IS_G4X(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014455 intel_dp_init(dev_priv, DP_C, PORT_C);
Eric Anholt725e30a2009-01-22 13:01:02 -080014456 }
Ma Ling27185ae2009-08-24 13:50:23 +080014457
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014458 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014459 intel_dp_init(dev_priv, DP_D, PORT_D);
Lucas De Marchicf819ef2018-12-12 10:10:43 -080014460 } else if (IS_GEN(dev_priv, 2))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014461 intel_dvo_init(dev_priv);
Jesse Barnes79e53942008-11-07 14:24:08 -080014462
Tvrtko Ursulin56b857a2016-11-07 09:29:20 +000014463 if (SUPPORTS_TV(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014464 intel_tv_init(dev_priv);
Jesse Barnes79e53942008-11-07 14:24:08 -080014465
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014466 intel_psr_init(dev_priv);
Rodrigo Vivi7c8f8a72014-06-13 05:10:03 -070014467
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014468 for_each_intel_encoder(&dev_priv->drm, encoder) {
Chris Wilson4ef69c72010-09-09 15:14:28 +010014469 encoder->base.possible_crtcs = encoder->crtc_mask;
14470 encoder->base.possible_clones =
Daniel Vetter66a92782012-07-12 20:08:18 +020014471 intel_encoder_clones(encoder);
Jesse Barnes79e53942008-11-07 14:24:08 -080014472 }
Chris Wilson47356eb2011-01-11 17:06:04 +000014473
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014474 intel_init_pch_refclk(dev_priv);
Daniel Vetter270b3042012-10-27 15:52:05 +020014475
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014476 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
Jesse Barnes79e53942008-11-07 14:24:08 -080014477}
14478
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	/* Unregister the fb from the DRM core before touching the object. */
	drm_framebuffer_cleanup(fb);

	/*
	 * Drop this fb's count on the backing object under the object lock;
	 * warn if the count was already zero (unbalanced accounting).
	 */
	i915_gem_object_lock(obj);
	WARN_ON(!obj->framebuffer_references--);
	i915_gem_object_unlock(obj);

	/* Release the reference the framebuffer held on the object. */
	i915_gem_object_put(obj);

	/* Free the wrapper last; fb is obtained from intel_fb via container. */
	kfree(intel_fb);
}
14494
14495static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
Chris Wilson05394f32010-11-08 19:18:58 +000014496 struct drm_file *file,
Jesse Barnes79e53942008-11-07 14:24:08 -080014497 unsigned int *handle)
14498{
Daniel Stonea5ff7a42018-05-18 15:30:07 +010014499 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Jesse Barnes79e53942008-11-07 14:24:08 -080014500
Chris Wilsoncc917ab2015-10-13 14:22:26 +010014501 if (obj->userptr.mm) {
14502 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14503 return -EINVAL;
14504 }
14505
Chris Wilson05394f32010-11-08 19:18:58 +000014506 return drm_gem_handle_create(file, &obj->base, handle);
Jesse Barnes79e53942008-11-07 14:24:08 -080014507}
14508
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014509static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14510 struct drm_file *file,
14511 unsigned flags, unsigned color,
14512 struct drm_clip_rect *clips,
14513 unsigned num_clips)
14514{
Chris Wilson5a97bcc2017-02-22 11:40:46 +000014515 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014516
Chris Wilson5a97bcc2017-02-22 11:40:46 +000014517 i915_gem_object_flush_if_display(obj);
Chris Wilsond59b21e2017-02-22 11:40:49 +000014518 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014519
14520 return 0;
14521}
14522
/* Framebuffer vtable handed to the DRM core for i915 framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14528
Damien Lespiaub3218032015-02-27 11:15:18 +000014529static
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014530u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
Dhinakaran Pandiyan4c8d3512018-10-26 12:53:42 -070014531 u32 pixel_format, u64 fb_modifier)
Damien Lespiaub3218032015-02-27 11:15:18 +000014532{
Ville Syrjälä645d91f2018-09-07 18:24:03 +030014533 struct intel_crtc *crtc;
14534 struct intel_plane *plane;
Damien Lespiaub3218032015-02-27 11:15:18 +000014535
Ville Syrjälä645d91f2018-09-07 18:24:03 +030014536 /*
14537 * We assume the primary plane for pipe A has
14538 * the highest stride limits of them all.
14539 */
14540 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14541 plane = to_intel_plane(crtc->base.primary);
Ville Syrjäläac484962016-01-20 21:05:26 +020014542
Ville Syrjälä645d91f2018-09-07 18:24:03 +030014543 return plane->max_stride(plane, pixel_format, fb_modifier,
14544 DRM_MODE_ROTATE_0);
Damien Lespiaub3218032015-02-27 11:15:18 +000014545}
14546
/*
 * Validate @mode_cmd against the object's tiling/stride and the hardware's
 * format, pitch and alignment limits, then fill in @intel_fb and register
 * it with the DRM core. Returns 0 on success or a negative error code; on
 * failure the framebuffer_references bump taken below is undone at err:.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 pitch_limit;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	/*
	 * Take a framebuffer reference and snapshot the object's tiling and
	 * fence stride under the object lock.
	 */
	i915_gem_object_lock(obj);
	obj->framebuffer_references++;
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* The format/modifier combo must be supported by at least one plane. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	/* Reject pitches above the per-format/modifier hardware limit. */
	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
					   mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], pitch_limit);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* NV12 has minimum-size and 4-pixel alignment requirements. */
	if (fb->format->format == DRM_FORMAT_NV12 &&
	    (fb->width < SKL_MIN_YUV_420_SRC_W ||
	     fb->height < SKL_MIN_YUV_420_SRC_H ||
	     (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
		DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
		goto err;
	}

	/* Per-plane checks: shared handle and stride alignment. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All colour planes must come from the same GEM object. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);

		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
		    is_ccs_modifier(fb->modifier))
			stride_alignment *= 4;

		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Undo the framebuffer_references bump taken at the top. */
	i915_gem_object_lock(obj);
	obj->framebuffer_references--;
	i915_gem_object_unlock(obj);
	return ret;
}
14689
Jesse Barnes79e53942008-11-07 14:24:08 -080014690static struct drm_framebuffer *
14691intel_user_framebuffer_create(struct drm_device *dev,
14692 struct drm_file *filp,
Ville Syrjälä1eb83452015-11-11 19:11:29 +020014693 const struct drm_mode_fb_cmd2 *user_mode_cmd)
Jesse Barnes79e53942008-11-07 14:24:08 -080014694{
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014695 struct drm_framebuffer *fb;
Chris Wilson05394f32010-11-08 19:18:58 +000014696 struct drm_i915_gem_object *obj;
Ville Syrjälä76dc3762015-11-11 19:11:28 +020014697 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
Jesse Barnes79e53942008-11-07 14:24:08 -080014698
Chris Wilson03ac0642016-07-20 13:31:51 +010014699 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14700 if (!obj)
Chris Wilsoncce13ff2010-08-08 13:36:38 +010014701 return ERR_PTR(-ENOENT);
Jesse Barnes79e53942008-11-07 14:24:08 -080014702
Chris Wilson24dbf512017-02-15 10:59:18 +000014703 fb = intel_framebuffer_create(obj, &mode_cmd);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014704 if (IS_ERR(fb))
Chris Wilsonf0cd5182016-10-28 13:58:43 +010014705 i915_gem_object_put(obj);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014706
14707 return fb;
Jesse Barnes79e53942008-11-07 14:24:08 -080014708}
14709
/* .atomic_state_free hook: release the state and our embedded sw fence. */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	/* Release the core drm_atomic_state resources first. */
	drm_atomic_state_default_release(state);

	/* Then tear down the sw fence embedded in our wrapper state. */
	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}
14720
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014721static enum drm_mode_status
14722intel_mode_valid(struct drm_device *dev,
14723 const struct drm_display_mode *mode)
14724{
Ville Syrjäläad77c532018-06-15 20:44:05 +030014725 struct drm_i915_private *dev_priv = to_i915(dev);
14726 int hdisplay_max, htotal_max;
14727 int vdisplay_max, vtotal_max;
14728
Ville Syrjäläe4dd27a2018-05-24 15:54:03 +030014729 /*
14730 * Can't reject DBLSCAN here because Xorg ddxen can add piles
14731 * of DBLSCAN modes to the output's mode list when they detect
14732 * the scaling mode property on the connector. And they don't
14733 * ask the kernel to validate those modes in any way until
14734 * modeset time at which point the client gets a protocol error.
14735 * So in order to not upset those clients we silently ignore the
14736 * DBLSCAN flag on such connectors. For other connectors we will
14737 * reject modes with the DBLSCAN flag in encoder->compute_config().
14738 * And we always reject DBLSCAN modes in connector->mode_valid()
14739 * as we never want such modes on the connector's mode list.
14740 */
14741
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014742 if (mode->vscan > 1)
14743 return MODE_NO_VSCAN;
14744
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014745 if (mode->flags & DRM_MODE_FLAG_HSKEW)
14746 return MODE_H_ILLEGAL;
14747
14748 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14749 DRM_MODE_FLAG_NCSYNC |
14750 DRM_MODE_FLAG_PCSYNC))
14751 return MODE_HSYNC;
14752
14753 if (mode->flags & (DRM_MODE_FLAG_BCAST |
14754 DRM_MODE_FLAG_PIXMUX |
14755 DRM_MODE_FLAG_CLKDIV2))
14756 return MODE_BAD;
14757
Ville Syrjäläad77c532018-06-15 20:44:05 +030014758 if (INTEL_GEN(dev_priv) >= 9 ||
14759 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14760 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
14761 vdisplay_max = 4096;
14762 htotal_max = 8192;
14763 vtotal_max = 8192;
14764 } else if (INTEL_GEN(dev_priv) >= 3) {
14765 hdisplay_max = 4096;
14766 vdisplay_max = 4096;
14767 htotal_max = 8192;
14768 vtotal_max = 8192;
14769 } else {
14770 hdisplay_max = 2048;
14771 vdisplay_max = 2048;
14772 htotal_max = 4096;
14773 vtotal_max = 4096;
14774 }
14775
14776 if (mode->hdisplay > hdisplay_max ||
14777 mode->hsync_start > htotal_max ||
14778 mode->hsync_end > htotal_max ||
14779 mode->htotal > htotal_max)
14780 return MODE_H_ILLEGAL;
14781
14782 if (mode->vdisplay > vdisplay_max ||
14783 mode->vsync_start > vtotal_max ||
14784 mode->vsync_end > vtotal_max ||
14785 mode->vtotal > vtotal_max)
14786 return MODE_V_ILLEGAL;
14787
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014788 return MODE_OK;
14789}
14790
/* Mode-config callbacks registered with the DRM core for this driver. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14802
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform implementations of the display vfuncs
 * (pipe config readout, plane config readout, crtc clock computation,
 * crtc enable/disable, FDI link training and crtc update ordering)
 * based on the device's generation and feature bits. Branch order
 * matters: more specific/newer platforms are matched first.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		/* gen9+: haswell-style crtc hooks, skylake plane readout. */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* Pre-gen9 DDI platforms: haswell crtc hooks, i9xx plane readout. */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* PCH-split platforms without DDI: ironlake hooks. */
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		/* Remaining gen3/gen4 platforms. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 fallback. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training is only assigned on platforms that have FDI. */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;
}
14895
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* Read-modify-write SR01 via the sequencer index/data ports,
	 * setting bit 5 (presumably the screen-off bit of the VGA spec). */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	/* Brief settle delay before disabling the VGA display plane. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	/* Posting read to make sure the write has reached the hardware. */
	POSTING_READ(vga_reg);
}
14914
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Read back the current cdclk state from the hardware and log it. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
	/* Seed both logical and actual cdclk state from the hw readout. */
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
14923
Matt Roperd93c0372015-12-03 11:37:41 -080014924/*
14925 * Calculate what we think the watermarks should be for the state we've read
14926 * out of the hardware and then immediately program those watermarks so that
14927 * we ensure the hardware settings match our internal state.
14928 *
14929 * We can calculate what we think WM's should be by creating a duplicate of the
14930 * current state (which was constructed during hardware readout) and running it
14931 * through the atomic check code to calculate new watermark values in the
14932 * state object.
14933 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Standard modeset-lock deadlock dance: back off and retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	/* Duplicate the state that hardware readout just constructed. */
	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	/* Run the duplicated state through atomic check to compute WMs. */
	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Mirror the computed WMs into the committed crtc state too. */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15010
Chris Wilson58ecd9d2017-11-05 13:49:05 +000015011static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15012{
Lucas De Marchicf819ef2018-12-12 10:10:43 -080015013 if (IS_GEN(dev_priv, 5)) {
Chris Wilson58ecd9d2017-11-05 13:49:05 +000015014 u32 fdi_pll_clk =
15015 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15016
15017 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
Lucas De Marchicf819ef2018-12-12 10:10:43 -080015018 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
Chris Wilson58ecd9d2017-11-05 13:49:05 +000015019 dev_priv->fdi_pll_freq = 270000;
15020 } else {
15021 return;
15022 }
15023
15024 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15025}
15026
/*
 * Commit the state read out from hardware once at probe time so that all
 * derived plane/crtc software state is computed before the first userspace
 * modeset. Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	drm_for_each_crtc(crtc, dev) {
		/* Pulls the crtc into the atomic state, taking its lock. */
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		/* Deadlock: wipe the built-up state, back off and retry. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
15082
/*
 * One-time display/modeset initialization at driver load: sets up DRM
 * mode_config limits, creates crtcs, reads out the BIOS-programmed
 * hardware state, inherits the BIOS framebuffer, sanitizes watermarks
 * and performs the initial atomic commit.
 * Returns 0 on success or a negative error code from crtc creation.
 */
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	/* Ordered wq so modeset work items run strictly in queue order. */
	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	/* Deferred freeing of atomic states, see the free_work worker. */
	init_llist_head(&dev_priv->atomic_helper.free_list);
	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(dev_priv);

	intel_fbc_init(dev_priv);

	intel_init_pm(dev_priv);

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* maximum framebuffer dimensions */
	if (IS_GEN(dev_priv, 2)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN(dev_priv, 3)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Per-platform hardware cursor size limits. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN(dev_priv, 2)) {
		dev->mode_config.cursor_width = 64;
		dev->mode_config.cursor_height = 64;
	} else {
		dev->mode_config.cursor_width = 256;
		dev->mode_config.cursor_height = 256;
	}

	dev->mode_config.fb_base = ggtt->gmadr.start;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		ret = intel_crtc_init(dev_priv, pipe);
		if (ret) {
			/* Tears down any crtcs created so far. */
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(dev_priv);

	intel_update_czclk(dev_priv);
	intel_modeset_init_hw(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev_priv);

	/* Just disable it once at startup */
	i915_disable_vga(dev_priv);
	intel_setup_outputs(dev_priv);

	/* Read out the BIOS-programmed hardware state under all locks. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		sanitize_watermarks(dev);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		DRM_DEBUG_KMS("Initial commit in probe failed.\n");

	return 0;
}
Jesse Barnesd5bb0812011-01-05 12:01:26 -080015230
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 mode.
 * Used by the quirk that requires both pipes to be kept running.
 * The DPLL/timing register write sequence below is order-sensitive;
 * do not reorder the writes.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check that the divider set yields the expected dotclock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve only the DVO 2x bit from whatever the DPLL held before. */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Hardcoded 640x480@60 timings (values are size-1 per hw convention). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe is actually scanning out before returning. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15300
/*
 * Counterpart of i830_enable_pipe(): shut down a pipe that was
 * force-enabled by the quirk. All planes and cursors must already be
 * off (asserted via the WARN_ONs) before the pipe itself is disabled.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* Disabling the pipe with planes still enabled would hang the hw. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Let the pipe drain before turning off its clock source. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15322
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015323static void
15324intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15325{
15326 struct intel_crtc *crtc;
Daniel Vetterfa555832012-10-10 23:14:00 +020015327
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015328 if (INTEL_GEN(dev_priv) >= 4)
15329 return;
Daniel Vetterfa555832012-10-10 23:14:00 +020015330
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015331 for_each_intel_crtc(&dev_priv->drm, crtc) {
15332 struct intel_plane *plane =
15333 to_intel_plane(crtc->base.primary);
Ville Syrjälä62358aa2018-10-03 17:50:17 +030015334 struct intel_crtc *plane_crtc;
15335 enum pipe pipe;
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015336
Ville Syrjälä62358aa2018-10-03 17:50:17 +030015337 if (!plane->get_hw_state(plane, &pipe))
15338 continue;
15339
15340 if (pipe == crtc->pipe)
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015341 continue;
15342
Ville Syrjälä7a4a2a42018-10-03 17:50:52 +030015343 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
15344 plane->base.base.id, plane->base.name);
Ville Syrjälä62358aa2018-10-03 17:50:17 +030015345
15346 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15347 intel_plane_disable_noatomic(plane_crtc, plane);
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015348 }
Daniel Vetterfa555832012-10-10 23:14:00 +020015349}
15350
Ville Syrjälä02e93c32015-08-26 19:39:19 +030015351static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15352{
15353 struct drm_device *dev = crtc->base.dev;
15354 struct intel_encoder *encoder;
15355
15356 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15357 return true;
15358
15359 return false;
15360}
15361
Maarten Lankhorst496b0fc2016-08-23 16:18:07 +020015362static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15363{
15364 struct drm_device *dev = encoder->base.dev;
15365 struct intel_connector *connector;
15366
15367 for_each_connector_on_encoder(dev, &encoder->base, connector)
15368 return connector;
15369
15370 return NULL;
15371}
15372
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015373static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
Ville Syrjäläecf837d92017-10-10 15:55:56 +030015374 enum pipe pch_transcoder)
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015375{
15376 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
Ville Syrjäläecf837d92017-10-10 15:55:56 +030015377 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015378}
15379
/*
 * Bring a crtc read out from the BIOS into a state the driver can work
 * with: clear debug frame-start delays, turn off non-primary planes,
 * disable the pipe if nothing drives it, and initialize FIFO underrun
 * reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (crtc_state->base.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15443
/*
 * Fix up an encoder whose software state is inconsistent with the
 * hardware (typically after resume register restore): an encoder that
 * claims active connectors but has no active pipe is manually disabled
 * and its connector clamped off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			/* disable/post_disable hooks are optional per encoder. */
			if (encoder->disable)
				encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
15492
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015493void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
Krzysztof Mazur0fde9012012-12-19 11:03:41 +010015494{
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010015495 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
Krzysztof Mazur0fde9012012-12-19 11:03:41 +010015496
Imre Deak04098752014-02-18 00:02:16 +020015497 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15498 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015499 i915_disable_vga(dev_priv);
Imre Deak04098752014-02-18 00:02:16 +020015500 }
15501}
15502
/*
 * Power-well-safe wrapper around i915_redisable_vga_power_on(): only
 * touches the VGA plane if the VGA power well is already enabled,
 * taking a reference for the duration of the access.
 */
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev_priv);

	/* Drop the reference taken by get_if_enabled above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15519
/* FIXME read out full plane state for all planes */
/*
 * Read plane enable state from hardware: record each plane's visibility
 * on the pipe the hardware says it is scanning out on, then fix up the
 * per-crtc active-planes bookkeeping to match.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* Default for disabled planes, which report no pipe. */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* Attribute visibility to the pipe the hw actually uses. */
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Sync derived active-plane masks with the readout above. */
		fixup_active_planes(crtc_state);
	}
}
15552
Daniel Vetter30e984d2013-06-05 13:34:17 +020015553static void intel_modeset_readout_hw_state(struct drm_device *dev)
Daniel Vetter24929352012-07-02 20:28:59 +020015554{
Chris Wilsonfac5e232016-07-04 11:34:36 +010015555 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter24929352012-07-02 20:28:59 +020015556 enum pipe pipe;
Daniel Vetter24929352012-07-02 20:28:59 +020015557 struct intel_crtc *crtc;
15558 struct intel_encoder *encoder;
15559 struct intel_connector *connector;
Daniel Vetterf9e905c2017-03-01 10:52:25 +010015560 struct drm_connector_list_iter conn_iter;
Daniel Vetter53589012013-06-05 13:34:16 +020015561 int i;
Daniel Vetter24929352012-07-02 20:28:59 +020015562
Maarten Lankhorst565602d2015-12-10 12:33:57 +010015563 dev_priv->active_crtcs = 0;
15564
Damien Lespiaud3fcc802014-05-13 23:32:22 +010015565 for_each_intel_crtc(dev, crtc) {
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015566 struct intel_crtc_state *crtc_state =
15567 to_intel_crtc_state(crtc->base.state);
Daniel Vetter3b117c82013-04-17 20:15:07 +020015568
Daniel Vetterec2dc6a2016-05-09 16:34:09 +020015569 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
Maarten Lankhorst565602d2015-12-10 12:33:57 +010015570 memset(crtc_state, 0, sizeof(*crtc_state));
15571 crtc_state->base.crtc = &crtc->base;
Daniel Vetter24929352012-07-02 20:28:59 +020015572
Maarten Lankhorst565602d2015-12-10 12:33:57 +010015573 crtc_state->base.active = crtc_state->base.enable =
15574 dev_priv->display.get_pipe_config(crtc, crtc_state);
15575
15576 crtc->base.enabled = crtc_state->base.enable;
15577 crtc->active = crtc_state->base.active;
15578
Ville Syrjäläaca1ebf2016-12-20 17:39:02 +020015579 if (crtc_state->base.active)
Maarten Lankhorst565602d2015-12-10 12:33:57 +010015580 dev_priv->active_crtcs |= 1 << crtc->pipe;
15581
Ville Syrjälä78108b72016-05-27 20:59:19 +030015582 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15583 crtc->base.base.id, crtc->base.name,
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015584 enableddisabled(crtc_state->base.active));
Daniel Vetter24929352012-07-02 20:28:59 +020015585 }
15586
Ville Syrjälä62358aa2018-10-03 17:50:17 +030015587 readout_plane_state(dev_priv);
15588
Daniel Vetter53589012013-06-05 13:34:16 +020015589 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15590 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15591
Lucas De Marchiee1398b2018-03-20 15:06:33 -070015592 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
15593 &pll->state.hw_state);
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020015594 pll->state.crtc_mask = 0;
Damien Lespiaud3fcc802014-05-13 23:32:22 +010015595 for_each_intel_crtc(dev, crtc) {
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015596 struct intel_crtc_state *crtc_state =
15597 to_intel_crtc_state(crtc->base.state);
15598
15599 if (crtc_state->base.active &&
15600 crtc_state->shared_dpll == pll)
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020015601 pll->state.crtc_mask |= 1 << crtc->pipe;
Daniel Vetter53589012013-06-05 13:34:16 +020015602 }
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020015603 pll->active_mask = pll->state.crtc_mask;
Daniel Vetter53589012013-06-05 13:34:16 +020015604
Ander Conselvan de Oliveira1e6f2dd2014-10-29 11:32:31 +020015605 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
Lucas De Marchi72f775f2018-03-20 15:06:34 -070015606 pll->info->name, pll->state.crtc_mask, pll->on);
Daniel Vetter53589012013-06-05 13:34:16 +020015607 }
15608
Damien Lespiaub2784e12014-08-05 11:29:37 +010015609 for_each_intel_encoder(dev, encoder) {
Daniel Vetter24929352012-07-02 20:28:59 +020015610 pipe = 0;
15611
15612 if (encoder->get_hw_state(encoder, &pipe)) {
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015613 struct intel_crtc_state *crtc_state;
15614
Ville Syrjälä98187832016-10-31 22:37:10 +020015615 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015616 crtc_state = to_intel_crtc_state(crtc->base.state);
Ville Syrjäläe2af48c2016-10-31 22:37:05 +020015617
Jesse Barnes045ac3b2013-05-14 17:08:26 -070015618 encoder->base.crtc = &crtc->base;
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015619 encoder->get_config(encoder, crtc_state);
Daniel Vetter24929352012-07-02 20:28:59 +020015620 } else {
15621 encoder->base.crtc = NULL;
15622 }
15623
Damien Lespiau6f2bcce2013-10-16 12:29:54 +010015624 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +000015625 encoder->base.base.id, encoder->base.name,
15626 enableddisabled(encoder->base.crtc),
Damien Lespiau6f2bcce2013-10-16 12:29:54 +010015627 pipe_name(pipe));
Daniel Vetter24929352012-07-02 20:28:59 +020015628 }
15629
Daniel Vetterf9e905c2017-03-01 10:52:25 +010015630 drm_connector_list_iter_begin(dev, &conn_iter);
15631 for_each_intel_connector_iter(connector, &conn_iter) {
Daniel Vetter24929352012-07-02 20:28:59 +020015632 if (connector->get_hw_state(connector)) {
15633 connector->base.dpms = DRM_MODE_DPMS_ON;
Maarten Lankhorst2aa974c2016-01-06 14:53:25 +010015634
15635 encoder = connector->encoder;
15636 connector->base.encoder = &encoder->base;
15637
15638 if (encoder->base.crtc &&
15639 encoder->base.crtc->state->active) {
15640 /*
15641 * This has to be done during hardware readout
15642 * because anything calling .crtc_disable may
15643 * rely on the connector_mask being accurate.
15644 */
15645 encoder->base.crtc->state->connector_mask |=
Ville Syrjälä40560e22018-06-26 22:47:11 +030015646 drm_connector_mask(&connector->base);
Maarten Lankhorste87a52b2016-01-28 15:04:58 +010015647 encoder->base.crtc->state->encoder_mask |=
Ville Syrjälä40560e22018-06-26 22:47:11 +030015648 drm_encoder_mask(&encoder->base);
Maarten Lankhorst2aa974c2016-01-06 14:53:25 +010015649 }
15650
Daniel Vetter24929352012-07-02 20:28:59 +020015651 } else {
15652 connector->base.dpms = DRM_MODE_DPMS_OFF;
15653 connector->base.encoder = NULL;
15654 }
15655 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +000015656 connector->base.base.id, connector->base.name,
15657 enableddisabled(connector->base.encoder));
Daniel Vetter24929352012-07-02 20:28:59 +020015658 }
Daniel Vetterf9e905c2017-03-01 10:52:25 +010015659 drm_connector_list_iter_end(&conn_iter);
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015660
15661 for_each_intel_crtc(dev, crtc) {
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015662 struct intel_crtc_state *crtc_state =
15663 to_intel_crtc_state(crtc->base.state);
Ville Syrjäläd305e062017-08-30 21:57:03 +030015664 int min_cdclk = 0;
Ville Syrjäläaca1ebf2016-12-20 17:39:02 +020015665
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015666 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015667 if (crtc_state->base.active) {
15668 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
Ville Syrjäläbd4cd032018-04-26 19:30:15 +030015669 crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
15670 crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015671 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015672 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15673
15674 /*
15675 * The initial mode needs to be set in order to keep
15676 * the atomic core happy. It wants a valid mode if the
15677 * crtc's enabled, so we do the above call.
15678 *
Daniel Vetter7800fb62016-12-19 09:24:23 +010015679 * But we don't set all the derived state fully, hence
15680 * set a flag to indicate that a full recalculation is
15681 * needed on the next commit.
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015682 */
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015683 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
Ville Syrjälä9eca68322015-09-10 18:59:10 +030015684
Ville Syrjäläa7d1b3f2017-01-26 21:50:31 +020015685 intel_crtc_compute_pixel_rate(crtc_state);
15686
Ville Syrjälä9c61de42017-07-10 22:33:47 +030015687 if (dev_priv->display.modeset_calc_cdclk) {
Ville Syrjäläd305e062017-08-30 21:57:03 +030015688 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
Ville Syrjälä9c61de42017-07-10 22:33:47 +030015689 if (WARN_ON(min_cdclk < 0))
15690 min_cdclk = 0;
15691 }
Ville Syrjäläaca1ebf2016-12-20 17:39:02 +020015692
Daniel Vetter5caa0fe2017-05-09 16:03:29 +020015693 drm_calc_timestamping_constants(&crtc->base,
15694 &crtc_state->base.adjusted_mode);
Maarten Lankhorstf2bdd112018-10-11 12:04:52 +020015695 update_scanline_offset(crtc_state);
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015696 }
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020015697
Ville Syrjäläd305e062017-08-30 21:57:03 +030015698 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
Ville Syrjälä53e9bf52017-10-24 12:52:14 +030015699 dev_priv->min_voltage_level[crtc->pipe] =
15700 crtc_state->min_voltage_level;
Ville Syrjäläaca1ebf2016-12-20 17:39:02 +020015701
Ville Syrjäläa8cd6da2016-12-22 16:04:41 +020015702 intel_pipe_config_sanity_check(dev_priv, crtc_state);
Ville Syrjälä7f4c6282015-09-10 18:59:07 +030015703 }
Daniel Vetter30e984d2013-06-05 13:34:17 +020015704}
15705
/*
 * Take the power domain references each active encoder needs, as
 * reported by the encoder's ->get_power_domains() hook. Called during
 * HW state takeover, after the encoder/crtc state has been read out.
 */
static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		u64 get_domains;
		enum intel_display_power_domain domain;
		struct intel_crtc_state *crtc_state;

		/* Only encoders providing the hook need extra domains. */
		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these require any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		get_domains = encoder->get_power_domains(encoder, crtc_state);
		for_each_power_domain(domain, get_domains)
			intel_display_power_get(dev_priv, domain);
	}
}
15732
/*
 * Apply display workarounds (WAs) that must be in place early, before
 * the HW state readout and sanitization are performed.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
15749
Ville Syrjälä3aefb672018-11-08 16:36:35 +020015750static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
15751 enum port port, i915_reg_t hdmi_reg)
15752{
15753 u32 val = I915_READ(hdmi_reg);
15754
15755 if (val & SDVO_ENABLE ||
15756 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
15757 return;
15758
15759 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
15760 port_name(port));
15761
15762 val &= ~SDVO_PIPE_SEL_MASK;
15763 val |= SDVO_PIPE_SEL(PIPE_A);
15764
15765 I915_WRITE(hdmi_reg, val);
15766}
15767
15768static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
15769 enum port port, i915_reg_t dp_reg)
15770{
15771 u32 val = I915_READ(dp_reg);
15772
15773 if (val & DP_PORT_EN ||
15774 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
15775 return;
15776
15777 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
15778 port_name(port));
15779
15780 val &= ~DP_PIPE_SEL_MASK;
15781 val |= DP_PIPE_SEL(PIPE_A);
15782
15783 I915_WRITE(dp_reg, val);
15784}
15785
/*
 * Sanitize the transcoder select bits of all disabled PCH DP and
 * HDMI/SDVO ports (IBX only; see caller).
 */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
15808
/*
 * Scan out the current hw modeset state, and sanitize it to the current
 * state: inconsistent BIOS setup is fixed up and on-but-unused resources
 * (e.g. shared DPLLs with an empty active_mask) are turned off.
 *
 * Ordering here matters; see the inline comments.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	int i;

	/* Hold all display power until the takeover is complete. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Early workarounds must be in place before reading out HW state. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		drm_crtc_vblank_reset(&crtc->base);

		if (crtc->base.state->active)
			drm_crtc_vblank_on(&crtc->base);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc_state,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is on but no longer used by a crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and, where supported, sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * Acquire the crtc power domains for the current state. The WARN
	 * fires if anything is left over to release, which shouldn't
	 * happen after a clean readout.
	 */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	intel_fbc_init_pipe_state(dev_priv);
}
Ville Syrjälä7d0bc1e2013-09-16 17:38:33 +030015896
/*
 * Re-commit the atomic state stashed in dev_priv->modeset_restore_state,
 * if any. Takes all modeset locks using the standard acquire-context
 * deadlock/backoff dance.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Consume the saved state; it is committed and freed below. */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry until all locks are held or a real error is returned. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	/* Drop our reference on the saved state (if there was one). */
	if (state)
		drm_atomic_state_put(state);
}
15930
/*
 * Cancel, synchronously, all per-connector work that the hotplug
 * machinery may have queued, so none of it runs during teardown.
 */
static void intel_hpd_poll_fini(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		/* HDCP work only exists on connectors with an hdcp shim. */
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
15948
/*
 * Tear down all modeset state and resources. The teardown order below
 * is significant; see the inline comments.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Make sure no queued modeset work is still in flight. */
	flush_workqueue(dev_priv->modeset_wq);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_overlay_cleanup(dev_priv);

	intel_teardown_gmbus(dev_priv);

	/* Nothing may queue to modeset_wq past this point. */
	destroy_workqueue(dev_priv->modeset_wq);

	intel_fbc_cleanup_cfb(dev_priv);
}
15991
Dave Airlie28d52042009-09-21 14:33:58 +100015992/*
15993 * set vga decode state - true == enable VGA decode
15994 */
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000015995int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
Dave Airlie28d52042009-09-21 14:33:58 +100015996{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000015997 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
Dave Airlie28d52042009-09-21 14:33:58 +100015998 u16 gmch_ctrl;
15999
Chris Wilson75fa0412014-02-07 18:37:02 -020016000 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16001 DRM_ERROR("failed to read control word\n");
16002 return -EIO;
16003 }
16004
Chris Wilsonc0cc8a52014-02-07 18:37:03 -020016005 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16006 return 0;
16007
Dave Airlie28d52042009-09-21 14:33:58 +100016008 if (state)
16009 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16010 else
16011 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
Chris Wilson75fa0412014-02-07 18:37:02 -020016012
16013 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16014 DRM_ERROR("failed to write control word\n");
16015 return -EIO;
16016 }
16017
Dave Airlie28d52042009-09-21 14:33:58 +100016018 return 0;
16019}
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016020
Chris Wilson98a2f412016-10-12 10:05:18 +010016021#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16022
/*
 * Snapshot of display controller register state, taken by
 * intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_CTL2; captured on hsw/bdw only */

	int num_transcoders;	/* number of valid entries in transcoder[] */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers read only when true */
		u32 source;
		u32 stat;		/* GMCH platforms only */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;		/* gen <= 3 only */
		u32 pos;		/* gen <= 3 only */
		u32 addr;		/* gen <= 7, not hsw */
		u32 surface;		/* gen >= 4 only */
		u32 tile_offset;	/* gen >= 4 only */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;	/* registers read only when true */
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
16066
16067struct intel_display_error_state *
Chris Wilsonc0336662016-05-06 15:40:21 +010016068intel_display_capture_error_state(struct drm_i915_private *dev_priv)
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016069{
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016070 struct intel_display_error_state *error;
Chris Wilson63b66e52013-08-08 15:12:06 +020016071 int transcoders[] = {
16072 TRANSCODER_A,
16073 TRANSCODER_B,
16074 TRANSCODER_C,
16075 TRANSCODER_EDP,
16076 };
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016077 int i;
16078
José Roberto de Souzae1bf0942018-11-30 15:20:47 -080016079 if (!HAS_DISPLAY(dev_priv))
Chris Wilson63b66e52013-08-08 15:12:06 +020016080 return NULL;
16081
Paulo Zanoni9d1cb912013-11-01 13:32:08 -020016082 error = kzalloc(sizeof(*error), GFP_ATOMIC);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016083 if (error == NULL)
16084 return NULL;
16085
Chris Wilsonc0336662016-05-06 15:40:21 +010016086 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Imre Deak75e39682018-08-06 12:58:39 +030016087 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
Paulo Zanoniff57f1b2013-05-03 12:15:37 -030016088
Damien Lespiau055e3932014-08-18 13:49:10 +010016089 for_each_pipe(dev_priv, i) {
Imre Deakddf9c532013-11-27 22:02:02 +020016090 error->pipe[i].power_domain_on =
Daniel Vetterf458ebb2014-09-30 10:56:39 +020016091 __intel_display_power_is_enabled(dev_priv,
16092 POWER_DOMAIN_PIPE(i));
Imre Deakddf9c532013-11-27 22:02:02 +020016093 if (!error->pipe[i].power_domain_on)
Paulo Zanoni9d1cb912013-11-01 13:32:08 -020016094 continue;
16095
Ville Syrjälä5efb3e22014-04-09 13:28:53 +030016096 error->cursor[i].control = I915_READ(CURCNTR(i));
16097 error->cursor[i].position = I915_READ(CURPOS(i));
16098 error->cursor[i].base = I915_READ(CURBASE(i));
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016099
16100 error->plane[i].control = I915_READ(DSPCNTR(i));
16101 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
Chris Wilsonc0336662016-05-06 15:40:21 +010016102 if (INTEL_GEN(dev_priv) <= 3) {
Paulo Zanoni51889b32013-03-06 20:03:13 -030016103 error->plane[i].size = I915_READ(DSPSIZE(i));
Paulo Zanoni80ca3782013-03-22 14:20:57 -030016104 error->plane[i].pos = I915_READ(DSPPOS(i));
16105 }
Chris Wilsonc0336662016-05-06 15:40:21 +010016106 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
Paulo Zanonica291362013-03-06 20:03:14 -030016107 error->plane[i].addr = I915_READ(DSPADDR(i));
Chris Wilsonc0336662016-05-06 15:40:21 +010016108 if (INTEL_GEN(dev_priv) >= 4) {
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016109 error->plane[i].surface = I915_READ(DSPSURF(i));
16110 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16111 }
16112
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016113 error->pipe[i].source = I915_READ(PIPESRC(i));
Imre Deakf301b1e2014-04-18 15:55:04 +030016114
Chris Wilsonc0336662016-05-06 15:40:21 +010016115 if (HAS_GMCH_DISPLAY(dev_priv))
Imre Deakf301b1e2014-04-18 15:55:04 +030016116 error->pipe[i].stat = I915_READ(PIPESTAT(i));
Chris Wilson63b66e52013-08-08 15:12:06 +020016117 }
16118
Jani Nikula4d1de972016-03-18 17:05:42 +020016119 /* Note: this does not include DSI transcoders. */
Chris Wilsonc0336662016-05-06 15:40:21 +010016120 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +030016121 if (HAS_DDI(dev_priv))
Chris Wilson63b66e52013-08-08 15:12:06 +020016122 error->num_transcoders++; /* Account for eDP. */
16123
16124 for (i = 0; i < error->num_transcoders; i++) {
16125 enum transcoder cpu_transcoder = transcoders[i];
16126
Imre Deakddf9c532013-11-27 22:02:02 +020016127 error->transcoder[i].power_domain_on =
Daniel Vetterf458ebb2014-09-30 10:56:39 +020016128 __intel_display_power_is_enabled(dev_priv,
Paulo Zanoni38cc1da2013-12-20 15:09:41 -020016129 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
Imre Deakddf9c532013-11-27 22:02:02 +020016130 if (!error->transcoder[i].power_domain_on)
Paulo Zanoni9d1cb912013-11-01 13:32:08 -020016131 continue;
16132
Chris Wilson63b66e52013-08-08 15:12:06 +020016133 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16134
16135 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16136 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16137 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16138 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16139 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16140 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16141 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016142 }
16143
16144 return error;
16145}
16146
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Dump a display error state previously captured by
 * intel_display_capture_error_state() into the error state buffer.
 * @error may be NULL, in which case nothing is printed.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	/* Only captured on hsw/bdw; mirror the capture-side condition. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Per-gen fields; conditions match the capture side. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
Chris Wilson98a2f412016-10-12 10:05:18 +010016204
16205#endif