blob: a02ce5a47f44aba3fde5543d40b6d7dd274f6414 [file] [log] [blame]
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */
26
Jesse Barnesc1c7af62009-09-10 15:28:03 -070027#include <linux/module.h>
28#include <linux/input.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080029#include <linux/i2c.h>
Shaohua Li7662c8b2009-06-26 11:23:55 +080030#include <linux/kernel.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Jesse Barnes9cce37f2010-08-13 15:11:26 -070032#include <linux/vgaarb.h>
Wu Fengguange0dac652011-09-05 14:25:34 +080033#include <drm/drm_edid.h>
David Howells760285e2012-10-02 18:01:07 +010034#include <drm/i915_drm.h>
Xi Ruoyao319c1d42015-03-12 20:16:32 +080035#include <drm/drm_atomic.h>
Matt Roperc196e1d2015-01-21 16:35:48 -080036#include <drm/drm_atomic_helper.h>
David Howells760285e2012-10-02 18:01:07 +010037#include <drm/drm_dp_helper.h>
38#include <drm/drm_crtc_helper.h>
Matt Roper465c1202014-05-29 08:06:54 -070039#include <drm/drm_plane_helper.h>
40#include <drm/drm_rect.h>
Daniel Vetter72fdb402018-09-05 15:57:11 +020041#include <drm/drm_atomic_uapi.h>
Lu Baoludaedaa32018-11-12 14:40:08 +080042#include <linux/intel-iommu.h>
Alex Goinsfd8e0582015-11-25 18:43:38 -080043#include <linux/reservation.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080044
Chris Wilson9f588922019-01-16 15:33:04 +000045#include "intel_drv.h"
46#include "intel_dsi.h"
47#include "intel_frontbuffer.h"
48
49#include "i915_drv.h"
50#include "i915_gem_clflush.h"
51#include "i915_reset.h"
52#include "i915_trace.h"
53
Matt Roper465c1202014-05-29 08:06:54 -070054/* Primary plane formats for gen <= 3 */
Jani Nikulaba3f4d02019-01-18 14:01:23 +020055static const u32 i8xx_primary_formats[] = {
Damien Lespiau67fe7dc2015-05-15 19:06:00 +010056 DRM_FORMAT_C8,
57 DRM_FORMAT_RGB565,
Matt Roper465c1202014-05-29 08:06:54 -070058 DRM_FORMAT_XRGB1555,
Damien Lespiau67fe7dc2015-05-15 19:06:00 +010059 DRM_FORMAT_XRGB8888,
Matt Roper465c1202014-05-29 08:06:54 -070060};
61
62/* Primary plane formats for gen >= 4 */
Jani Nikulaba3f4d02019-01-18 14:01:23 +020063static const u32 i965_primary_formats[] = {
Damien Lespiau67fe7dc2015-05-15 19:06:00 +010064 DRM_FORMAT_C8,
65 DRM_FORMAT_RGB565,
66 DRM_FORMAT_XRGB8888,
Matt Roper465c1202014-05-29 08:06:54 -070067 DRM_FORMAT_XBGR8888,
Damien Lespiau6c0fd452015-05-19 12:29:16 +010068 DRM_FORMAT_XRGB2101010,
69 DRM_FORMAT_XBGR2101010,
70};
71
Jani Nikulaba3f4d02019-01-18 14:01:23 +020072static const u64 i9xx_format_modifiers[] = {
Ben Widawsky714244e2017-08-01 09:58:16 -070073 I915_FORMAT_MOD_X_TILED,
74 DRM_FORMAT_MOD_LINEAR,
75 DRM_FORMAT_MOD_INVALID
76};
77
Matt Roper3d7d6512014-06-10 08:28:13 -070078/* Cursor formats */
Jani Nikulaba3f4d02019-01-18 14:01:23 +020079static const u32 intel_cursor_formats[] = {
Matt Roper3d7d6512014-06-10 08:28:13 -070080 DRM_FORMAT_ARGB8888,
81};
82
Jani Nikulaba3f4d02019-01-18 14:01:23 +020083static const u64 cursor_format_modifiers[] = {
Ben Widawsky714244e2017-08-01 09:58:16 -070084 DRM_FORMAT_MOD_LINEAR,
85 DRM_FORMAT_MOD_INVALID
86};
87
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
Damien Lespiaue7457a92013-08-08 22:28:59 +0100119
/*
 * Valid ranges for the DPLL divisors on a given platform/output combination.
 * Each named member bounds the corresponding field of struct dpll; p2 also
 * carries the dot-clock threshold used to pick between its slow/fast values.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
Jesse Barnes79e53942008-11-07 14:24:08 -0800130
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300131/* returns HPLL frequency in kHz */
Ville Syrjälä49cd97a2017-02-07 20:33:45 +0200132int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300133{
134 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
135
136 /* Obtain SKU information */
137 mutex_lock(&dev_priv->sb_lock);
138 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
139 CCK_FUSE_HPLL_FREQ_MASK;
140 mutex_unlock(&dev_priv->sb_lock);
141
142 return vco_freq[hpll_freq] * 1000;
143}
144
Ville Syrjäläc30fec62016-03-04 21:43:02 +0200145int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
146 const char *name, u32 reg, int ref_freq)
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300147{
148 u32 val;
149 int divider;
150
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300151 mutex_lock(&dev_priv->sb_lock);
152 val = vlv_cck_read(dev_priv, reg);
153 mutex_unlock(&dev_priv->sb_lock);
154
155 divider = val & CCK_FREQUENCY_VALUES;
156
157 WARN((val & CCK_FREQUENCY_STATUS) !=
158 (divider << CCK_FREQUENCY_STATUS_SHIFT),
159 "%s change in progress\n", name);
160
Ville Syrjäläc30fec62016-03-04 21:43:02 +0200161 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
162}
163
Ville Syrjälä7ff89ca2017-02-07 20:33:05 +0200164int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
165 const char *name, u32 reg)
Ville Syrjäläc30fec62016-03-04 21:43:02 +0200166{
167 if (dev_priv->hpll_freq == 0)
Ville Syrjälä49cd97a2017-02-07 20:33:45 +0200168 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
Ville Syrjäläc30fec62016-03-04 21:43:02 +0200169
170 return vlv_get_cck_clock(dev_priv, name, reg,
171 dev_priv->hpll_freq);
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300172}
173
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300174static void intel_update_czclk(struct drm_i915_private *dev_priv)
175{
Wayne Boyer666a4532015-12-09 12:29:35 -0800176 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300177 return;
178
179 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
180 CCK_CZ_CLOCK_CONTROL);
181
182 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
183}
184
Chris Wilson021357a2010-09-07 20:54:59 +0100185static inline u32 /* units of 100MHz */
Ville Syrjälä21a727b2016-02-17 21:41:10 +0200186intel_fdi_link_freq(struct drm_i915_private *dev_priv,
187 const struct intel_crtc_state *pipe_config)
Chris Wilson021357a2010-09-07 20:54:59 +0100188{
Ville Syrjälä21a727b2016-02-17 21:41:10 +0200189 if (HAS_DDI(dev_priv))
190 return pipe_config->port_clock; /* SPLL */
Ville Syrjäläe3b247d2016-02-17 21:41:09 +0200191 else
Chris Wilson58ecd9d2017-11-05 13:49:05 +0000192 return dev_priv->fdi_pll_freq;
Chris Wilson021357a2010-09-07 20:54:59 +0100193}
194
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300195static const struct intel_limit intel_limits_i8xx_dac = {
Akshay Joshi0206e352011-08-16 15:34:10 -0400196 .dot = { .min = 25000, .max = 350000 },
Ville Syrjälä9c333712013-12-09 18:54:17 +0200197 .vco = { .min = 908000, .max = 1512000 },
Ville Syrjälä91dbe5f2013-12-09 18:54:14 +0200198 .n = { .min = 2, .max = 16 },
Akshay Joshi0206e352011-08-16 15:34:10 -0400199 .m = { .min = 96, .max = 140 },
200 .m1 = { .min = 18, .max = 26 },
201 .m2 = { .min = 6, .max = 16 },
202 .p = { .min = 4, .max = 128 },
203 .p1 = { .min = 2, .max = 33 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700204 .p2 = { .dot_limit = 165000,
205 .p2_slow = 4, .p2_fast = 2 },
Keith Packarde4b36692009-06-05 19:22:17 -0700206};
207
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300208static const struct intel_limit intel_limits_i8xx_dvo = {
Daniel Vetter5d536e22013-07-06 12:52:06 +0200209 .dot = { .min = 25000, .max = 350000 },
Ville Syrjälä9c333712013-12-09 18:54:17 +0200210 .vco = { .min = 908000, .max = 1512000 },
Ville Syrjälä91dbe5f2013-12-09 18:54:14 +0200211 .n = { .min = 2, .max = 16 },
Daniel Vetter5d536e22013-07-06 12:52:06 +0200212 .m = { .min = 96, .max = 140 },
213 .m1 = { .min = 18, .max = 26 },
214 .m2 = { .min = 6, .max = 16 },
215 .p = { .min = 4, .max = 128 },
216 .p1 = { .min = 2, .max = 33 },
217 .p2 = { .dot_limit = 165000,
218 .p2_slow = 4, .p2_fast = 4 },
219};
220
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300221static const struct intel_limit intel_limits_i8xx_lvds = {
Akshay Joshi0206e352011-08-16 15:34:10 -0400222 .dot = { .min = 25000, .max = 350000 },
Ville Syrjälä9c333712013-12-09 18:54:17 +0200223 .vco = { .min = 908000, .max = 1512000 },
Ville Syrjälä91dbe5f2013-12-09 18:54:14 +0200224 .n = { .min = 2, .max = 16 },
Akshay Joshi0206e352011-08-16 15:34:10 -0400225 .m = { .min = 96, .max = 140 },
226 .m1 = { .min = 18, .max = 26 },
227 .m2 = { .min = 6, .max = 16 },
228 .p = { .min = 4, .max = 128 },
229 .p1 = { .min = 1, .max = 6 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700230 .p2 = { .dot_limit = 165000,
231 .p2_slow = 14, .p2_fast = 7 },
Keith Packarde4b36692009-06-05 19:22:17 -0700232};
Eric Anholt273e27c2011-03-30 13:01:10 -0700233
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300234static const struct intel_limit intel_limits_i9xx_sdvo = {
Akshay Joshi0206e352011-08-16 15:34:10 -0400235 .dot = { .min = 20000, .max = 400000 },
236 .vco = { .min = 1400000, .max = 2800000 },
237 .n = { .min = 1, .max = 6 },
238 .m = { .min = 70, .max = 120 },
Patrik Jakobsson4f7dfb62013-02-13 22:20:22 +0100239 .m1 = { .min = 8, .max = 18 },
240 .m2 = { .min = 3, .max = 7 },
Akshay Joshi0206e352011-08-16 15:34:10 -0400241 .p = { .min = 5, .max = 80 },
242 .p1 = { .min = 1, .max = 8 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700243 .p2 = { .dot_limit = 200000,
244 .p2_slow = 10, .p2_fast = 5 },
Keith Packarde4b36692009-06-05 19:22:17 -0700245};
246
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300247static const struct intel_limit intel_limits_i9xx_lvds = {
Akshay Joshi0206e352011-08-16 15:34:10 -0400248 .dot = { .min = 20000, .max = 400000 },
249 .vco = { .min = 1400000, .max = 2800000 },
250 .n = { .min = 1, .max = 6 },
251 .m = { .min = 70, .max = 120 },
Patrik Jakobsson53a7d2d2013-02-13 22:20:21 +0100252 .m1 = { .min = 8, .max = 18 },
253 .m2 = { .min = 3, .max = 7 },
Akshay Joshi0206e352011-08-16 15:34:10 -0400254 .p = { .min = 7, .max = 98 },
255 .p1 = { .min = 1, .max = 8 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700256 .p2 = { .dot_limit = 112000,
257 .p2_slow = 14, .p2_fast = 7 },
Keith Packarde4b36692009-06-05 19:22:17 -0700258};
259
Eric Anholt273e27c2011-03-30 13:01:10 -0700260
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300261static const struct intel_limit intel_limits_g4x_sdvo = {
Eric Anholt273e27c2011-03-30 13:01:10 -0700262 .dot = { .min = 25000, .max = 270000 },
263 .vco = { .min = 1750000, .max = 3500000},
264 .n = { .min = 1, .max = 4 },
265 .m = { .min = 104, .max = 138 },
266 .m1 = { .min = 17, .max = 23 },
267 .m2 = { .min = 5, .max = 11 },
268 .p = { .min = 10, .max = 30 },
269 .p1 = { .min = 1, .max = 3},
270 .p2 = { .dot_limit = 270000,
271 .p2_slow = 10,
272 .p2_fast = 10
Ma Ling044c7c42009-03-18 20:13:23 +0800273 },
Keith Packarde4b36692009-06-05 19:22:17 -0700274};
275
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300276static const struct intel_limit intel_limits_g4x_hdmi = {
Eric Anholt273e27c2011-03-30 13:01:10 -0700277 .dot = { .min = 22000, .max = 400000 },
278 .vco = { .min = 1750000, .max = 3500000},
279 .n = { .min = 1, .max = 4 },
280 .m = { .min = 104, .max = 138 },
281 .m1 = { .min = 16, .max = 23 },
282 .m2 = { .min = 5, .max = 11 },
283 .p = { .min = 5, .max = 80 },
284 .p1 = { .min = 1, .max = 8},
285 .p2 = { .dot_limit = 165000,
286 .p2_slow = 10, .p2_fast = 5 },
Keith Packarde4b36692009-06-05 19:22:17 -0700287};
288
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300289static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
Eric Anholt273e27c2011-03-30 13:01:10 -0700290 .dot = { .min = 20000, .max = 115000 },
291 .vco = { .min = 1750000, .max = 3500000 },
292 .n = { .min = 1, .max = 3 },
293 .m = { .min = 104, .max = 138 },
294 .m1 = { .min = 17, .max = 23 },
295 .m2 = { .min = 5, .max = 11 },
296 .p = { .min = 28, .max = 112 },
297 .p1 = { .min = 2, .max = 8 },
298 .p2 = { .dot_limit = 0,
299 .p2_slow = 14, .p2_fast = 14
Ma Ling044c7c42009-03-18 20:13:23 +0800300 },
Keith Packarde4b36692009-06-05 19:22:17 -0700301};
302
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300303static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
Eric Anholt273e27c2011-03-30 13:01:10 -0700304 .dot = { .min = 80000, .max = 224000 },
305 .vco = { .min = 1750000, .max = 3500000 },
306 .n = { .min = 1, .max = 3 },
307 .m = { .min = 104, .max = 138 },
308 .m1 = { .min = 17, .max = 23 },
309 .m2 = { .min = 5, .max = 11 },
310 .p = { .min = 14, .max = 42 },
311 .p1 = { .min = 2, .max = 6 },
312 .p2 = { .dot_limit = 0,
313 .p2_slow = 7, .p2_fast = 7
Ma Ling044c7c42009-03-18 20:13:23 +0800314 },
Keith Packarde4b36692009-06-05 19:22:17 -0700315};
316
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300317static const struct intel_limit intel_limits_pineview_sdvo = {
Akshay Joshi0206e352011-08-16 15:34:10 -0400318 .dot = { .min = 20000, .max = 400000},
319 .vco = { .min = 1700000, .max = 3500000 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700320 /* Pineview's Ncounter is a ring counter */
Akshay Joshi0206e352011-08-16 15:34:10 -0400321 .n = { .min = 3, .max = 6 },
322 .m = { .min = 2, .max = 256 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700323 /* Pineview only has one combined m divider, which we treat as m2. */
Akshay Joshi0206e352011-08-16 15:34:10 -0400324 .m1 = { .min = 0, .max = 0 },
325 .m2 = { .min = 0, .max = 254 },
326 .p = { .min = 5, .max = 80 },
327 .p1 = { .min = 1, .max = 8 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700328 .p2 = { .dot_limit = 200000,
329 .p2_slow = 10, .p2_fast = 5 },
Keith Packarde4b36692009-06-05 19:22:17 -0700330};
331
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300332static const struct intel_limit intel_limits_pineview_lvds = {
Akshay Joshi0206e352011-08-16 15:34:10 -0400333 .dot = { .min = 20000, .max = 400000 },
334 .vco = { .min = 1700000, .max = 3500000 },
335 .n = { .min = 3, .max = 6 },
336 .m = { .min = 2, .max = 256 },
337 .m1 = { .min = 0, .max = 0 },
338 .m2 = { .min = 0, .max = 254 },
339 .p = { .min = 7, .max = 112 },
340 .p1 = { .min = 1, .max = 8 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700341 .p2 = { .dot_limit = 112000,
342 .p2_slow = 14, .p2_fast = 14 },
Keith Packarde4b36692009-06-05 19:22:17 -0700343};
344
Eric Anholt273e27c2011-03-30 13:01:10 -0700345/* Ironlake / Sandybridge
346 *
347 * We calculate clock using (register_value + 2) for N/M1/M2, so here
348 * the range value for them is (actual_value - 2).
349 */
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300350static const struct intel_limit intel_limits_ironlake_dac = {
Eric Anholt273e27c2011-03-30 13:01:10 -0700351 .dot = { .min = 25000, .max = 350000 },
352 .vco = { .min = 1760000, .max = 3510000 },
353 .n = { .min = 1, .max = 5 },
354 .m = { .min = 79, .max = 127 },
355 .m1 = { .min = 12, .max = 22 },
356 .m2 = { .min = 5, .max = 9 },
357 .p = { .min = 5, .max = 80 },
358 .p1 = { .min = 1, .max = 8 },
359 .p2 = { .dot_limit = 225000,
360 .p2_slow = 10, .p2_fast = 5 },
Keith Packarde4b36692009-06-05 19:22:17 -0700361};
362
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300363static const struct intel_limit intel_limits_ironlake_single_lvds = {
Eric Anholt273e27c2011-03-30 13:01:10 -0700364 .dot = { .min = 25000, .max = 350000 },
365 .vco = { .min = 1760000, .max = 3510000 },
366 .n = { .min = 1, .max = 3 },
367 .m = { .min = 79, .max = 118 },
368 .m1 = { .min = 12, .max = 22 },
369 .m2 = { .min = 5, .max = 9 },
370 .p = { .min = 28, .max = 112 },
371 .p1 = { .min = 2, .max = 8 },
372 .p2 = { .dot_limit = 225000,
373 .p2_slow = 14, .p2_fast = 14 },
Zhenyu Wangb91ad0e2010-02-05 09:14:17 +0800374};
375
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300376static const struct intel_limit intel_limits_ironlake_dual_lvds = {
Eric Anholt273e27c2011-03-30 13:01:10 -0700377 .dot = { .min = 25000, .max = 350000 },
378 .vco = { .min = 1760000, .max = 3510000 },
379 .n = { .min = 1, .max = 3 },
380 .m = { .min = 79, .max = 127 },
381 .m1 = { .min = 12, .max = 22 },
382 .m2 = { .min = 5, .max = 9 },
383 .p = { .min = 14, .max = 56 },
384 .p1 = { .min = 2, .max = 8 },
385 .p2 = { .dot_limit = 225000,
386 .p2_slow = 7, .p2_fast = 7 },
Zhenyu Wangb91ad0e2010-02-05 09:14:17 +0800387};
388
Eric Anholt273e27c2011-03-30 13:01:10 -0700389/* LVDS 100mhz refclk limits. */
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300390static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
Eric Anholt273e27c2011-03-30 13:01:10 -0700391 .dot = { .min = 25000, .max = 350000 },
392 .vco = { .min = 1760000, .max = 3510000 },
393 .n = { .min = 1, .max = 2 },
394 .m = { .min = 79, .max = 126 },
395 .m1 = { .min = 12, .max = 22 },
396 .m2 = { .min = 5, .max = 9 },
397 .p = { .min = 28, .max = 112 },
Akshay Joshi0206e352011-08-16 15:34:10 -0400398 .p1 = { .min = 2, .max = 8 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700399 .p2 = { .dot_limit = 225000,
400 .p2_slow = 14, .p2_fast = 14 },
Zhenyu Wangb91ad0e2010-02-05 09:14:17 +0800401};
402
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300403static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
Eric Anholt273e27c2011-03-30 13:01:10 -0700404 .dot = { .min = 25000, .max = 350000 },
405 .vco = { .min = 1760000, .max = 3510000 },
406 .n = { .min = 1, .max = 3 },
407 .m = { .min = 79, .max = 126 },
408 .m1 = { .min = 12, .max = 22 },
409 .m2 = { .min = 5, .max = 9 },
410 .p = { .min = 14, .max = 42 },
Akshay Joshi0206e352011-08-16 15:34:10 -0400411 .p1 = { .min = 2, .max = 6 },
Eric Anholt273e27c2011-03-30 13:01:10 -0700412 .p2 = { .dot_limit = 225000,
413 .p2_slow = 7, .p2_fast = 7 },
Zhao Yakui45476682009-12-31 16:06:04 +0800414};
415
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300416static const struct intel_limit intel_limits_vlv = {
Ville Syrjäläf01b7962013-09-27 16:55:49 +0300417 /*
418 * These are the data rate limits (measured in fast clocks)
419 * since those are the strictest limits we have. The fast
420 * clock and actual rate limits are more relaxed, so checking
421 * them would make no difference.
422 */
423 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
Daniel Vetter75e53982013-04-18 21:10:43 +0200424 .vco = { .min = 4000000, .max = 6000000 },
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700425 .n = { .min = 1, .max = 7 },
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700426 .m1 = { .min = 2, .max = 3 },
427 .m2 = { .min = 11, .max = 156 },
Ville Syrjäläb99ab662013-09-24 21:26:26 +0300428 .p1 = { .min = 2, .max = 3 },
Ville Syrjälä5fdc9c492013-09-24 21:26:29 +0300429 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700430};
431
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300432static const struct intel_limit intel_limits_chv = {
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300433 /*
434 * These are the data rate limits (measured in fast clocks)
435 * since those are the strictest limits we have. The fast
436 * clock and actual rate limits are more relaxed, so checking
437 * them would make no difference.
438 */
439 .dot = { .min = 25000 * 5, .max = 540000 * 5},
Ville Syrjälä17fe1022015-02-26 21:01:52 +0200440 .vco = { .min = 4800000, .max = 6480000 },
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300441 .n = { .min = 1, .max = 1 },
442 .m1 = { .min = 2, .max = 2 },
443 .m2 = { .min = 24 << 22, .max = 175 << 22 },
444 .p1 = { .min = 2, .max = 4 },
445 .p2 = { .p2_slow = 1, .p2_fast = 14 },
446};
447
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300448static const struct intel_limit intel_limits_bxt = {
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200449 /* FIXME: find real dot limits */
450 .dot = { .min = 0, .max = INT_MAX },
Vandana Kannane6292552015-07-01 17:02:57 +0530451 .vco = { .min = 4800000, .max = 6700000 },
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200452 .n = { .min = 1, .max = 1 },
453 .m1 = { .min = 2, .max = 2 },
454 /* FIXME: find real m2 limits */
455 .m2 = { .min = 2 << 22, .max = 255 << 22 },
456 .p1 = { .min = 2, .max = 4 },
457 .p2 = { .p2_slow = 1, .p2_fast = 20 },
458};
459
Vidya Srinivasc4a4efa2018-04-09 09:11:09 +0530460static void
461skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
462{
Vidya Srinivasc4a4efa2018-04-09 09:11:09 +0530463 if (enable)
464 I915_WRITE(CLKGATE_DIS_PSL(pipe),
465 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
466 else
467 I915_WRITE(CLKGATE_DIS_PSL(pipe),
468 I915_READ(CLKGATE_DIS_PSL(pipe)) &
469 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
470}
471
Ander Conselvan de Oliveiracdba9542015-06-01 12:49:51 +0200472static bool
Maarten Lankhorst24f28452017-11-22 19:39:01 +0100473needs_modeset(const struct drm_crtc_state *state)
Ander Conselvan de Oliveiracdba9542015-06-01 12:49:51 +0200474{
Maarten Lankhorstfc596662015-07-21 13:28:57 +0200475 return drm_atomic_crtc_needs_modeset(state);
Ander Conselvan de Oliveiracdba9542015-06-01 12:49:51 +0200476}
477
Imre Deakdccbea32015-06-22 23:35:51 +0300478/*
479 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
480 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
481 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
482 * The helpers' return value is the rate of the clock that is fed to the
483 * display engine's pipe which can be the above fast dot clock rate or a
484 * divided-down version of it.
485 */
Adam Jacksonf2b115e2009-12-03 17:14:42 -0500486/* m1 is reserved as 0 in Pineview, n is a ring counter */
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300487static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
Jesse Barnes79e53942008-11-07 14:24:08 -0800488{
Shaohua Li21778322009-02-23 15:19:16 +0800489 clock->m = clock->m2 + 2;
490 clock->p = clock->p1 * clock->p2;
Ville Syrjäläed5ca772013-12-02 19:00:45 +0200491 if (WARN_ON(clock->n == 0 || clock->p == 0))
Imre Deakdccbea32015-06-22 23:35:51 +0300492 return 0;
Ville Syrjäläfb03ac02013-10-14 14:50:30 +0300493 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
494 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
Imre Deakdccbea32015-06-22 23:35:51 +0300495
496 return clock->dot;
Shaohua Li21778322009-02-23 15:19:16 +0800497}
498
Jani Nikulaba3f4d02019-01-18 14:01:23 +0200499static u32 i9xx_dpll_compute_m(struct dpll *dpll)
Daniel Vetter7429e9d2013-04-20 17:19:46 +0200500{
501 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
502}
503
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300504static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
Shaohua Li21778322009-02-23 15:19:16 +0800505{
Daniel Vetter7429e9d2013-04-20 17:19:46 +0200506 clock->m = i9xx_dpll_compute_m(clock);
Jesse Barnes79e53942008-11-07 14:24:08 -0800507 clock->p = clock->p1 * clock->p2;
Ville Syrjäläed5ca772013-12-02 19:00:45 +0200508 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
Imre Deakdccbea32015-06-22 23:35:51 +0300509 return 0;
Ville Syrjäläfb03ac02013-10-14 14:50:30 +0300510 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
511 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
Imre Deakdccbea32015-06-22 23:35:51 +0300512
513 return clock->dot;
Jesse Barnes79e53942008-11-07 14:24:08 -0800514}
515
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300516static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
Imre Deak589eca62015-06-22 23:35:50 +0300517{
518 clock->m = clock->m1 * clock->m2;
519 clock->p = clock->p1 * clock->p2;
520 if (WARN_ON(clock->n == 0 || clock->p == 0))
Imre Deakdccbea32015-06-22 23:35:51 +0300521 return 0;
Imre Deak589eca62015-06-22 23:35:50 +0300522 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
523 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
Imre Deakdccbea32015-06-22 23:35:51 +0300524
525 return clock->dot / 5;
Imre Deak589eca62015-06-22 23:35:50 +0300526}
527
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300528int chv_calc_dpll_params(int refclk, struct dpll *clock)
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300529{
530 clock->m = clock->m1 * clock->m2;
531 clock->p = clock->p1 * clock->p2;
532 if (WARN_ON(clock->n == 0 || clock->p == 0))
Imre Deakdccbea32015-06-22 23:35:51 +0300533 return 0;
Jani Nikulaba3f4d02019-01-18 14:01:23 +0200534 clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
535 clock->n << 22);
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300536 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
Imre Deakdccbea32015-06-22 23:35:51 +0300537
538 return clock->dot / 5;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300539}
540
/* Bail out of the enclosing validity check with false; the do/while(0)
 * wrapper makes the macro safe to use as a single statement. */
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

543/*
Jesse Barnes79e53942008-11-07 14:24:08 -0800544 * Returns whether the given set of divisors are valid for a given refclk with
545 * the given connectors.
546 */
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100547static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300548 const struct intel_limit *limit,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300549 const struct dpll *clock)
Jesse Barnes79e53942008-11-07 14:24:08 -0800550{
Ville Syrjäläf01b7962013-09-27 16:55:49 +0300551 if (clock->n < limit->n.min || limit->n.max < clock->n)
552 INTELPllInvalid("n out of range\n");
Jesse Barnes79e53942008-11-07 14:24:08 -0800553 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
Akshay Joshi0206e352011-08-16 15:34:10 -0400554 INTELPllInvalid("p1 out of range\n");
Jesse Barnes79e53942008-11-07 14:24:08 -0800555 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
Akshay Joshi0206e352011-08-16 15:34:10 -0400556 INTELPllInvalid("m2 out of range\n");
Jesse Barnes79e53942008-11-07 14:24:08 -0800557 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
Akshay Joshi0206e352011-08-16 15:34:10 -0400558 INTELPllInvalid("m1 out of range\n");
Ville Syrjäläf01b7962013-09-27 16:55:49 +0300559
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100560 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +0200561 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
Ville Syrjäläf01b7962013-09-27 16:55:49 +0300562 if (clock->m1 <= clock->m2)
563 INTELPllInvalid("m1 <= m2\n");
564
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100565 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +0200566 !IS_GEN9_LP(dev_priv)) {
Ville Syrjäläf01b7962013-09-27 16:55:49 +0300567 if (clock->p < limit->p.min || limit->p.max < clock->p)
568 INTELPllInvalid("p out of range\n");
569 if (clock->m < limit->m.min || limit->m.max < clock->m)
570 INTELPllInvalid("m out of range\n");
571 }
572
Jesse Barnes79e53942008-11-07 14:24:08 -0800573 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
Akshay Joshi0206e352011-08-16 15:34:10 -0400574 INTELPllInvalid("vco out of range\n");
Jesse Barnes79e53942008-11-07 14:24:08 -0800575 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
576 * connector, etc., rather than just a single range.
577 */
578 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
Akshay Joshi0206e352011-08-16 15:34:10 -0400579 INTELPllInvalid("dot out of range\n");
Jesse Barnes79e53942008-11-07 14:24:08 -0800580
581 return true;
582}
583
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300584static int
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300585i9xx_select_p2_div(const struct intel_limit *limit,
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300586 const struct intel_crtc_state *crtc_state,
587 int target)
Jesse Barnes79e53942008-11-07 14:24:08 -0800588{
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300589 struct drm_device *dev = crtc_state->base.crtc->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -0800590
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +0300591 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Jesse Barnes79e53942008-11-07 14:24:08 -0800592 /*
Daniel Vettera210b022012-11-26 17:22:08 +0100593 * For LVDS just rely on its current settings for dual-channel.
594 * We haven't figured out how to reliably set up different
595 * single/dual channel state, if we even can.
Jesse Barnes79e53942008-11-07 14:24:08 -0800596 */
Daniel Vetter1974cad2012-11-26 17:22:09 +0100597 if (intel_is_dual_link_lvds(dev))
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300598 return limit->p2.p2_fast;
Jesse Barnes79e53942008-11-07 14:24:08 -0800599 else
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300600 return limit->p2.p2_slow;
Jesse Barnes79e53942008-11-07 14:24:08 -0800601 } else {
602 if (target < limit->p2.dot_limit)
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300603 return limit->p2.p2_slow;
Jesse Barnes79e53942008-11-07 14:24:08 -0800604 else
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300605 return limit->p2.p2_fast;
Jesse Barnes79e53942008-11-07 14:24:08 -0800606 }
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300607}
608
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* Best error so far; seeded with target so any valid hit improves it. */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	/* P2 is fixed up front from the output type and target dot clock. */
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustively walk the m1/m2/n/p1 divisor space within the limits. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m2 must stay strictly below m1 on these platforms. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					/* Derive p, m, vco and dot from the divisors. */
					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking: P divider must match. */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* Keep the candidate closest to the target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff at least one valid candidate beat the initial seed. */
	return (err != target);
}
666
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* Best error so far; seeded with target so any valid hit improves it. */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	/* P2 is fixed up front from the output type and target dot clock. */
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Exhaustive search like i9xx_find_best_dpll(), but with Pineview's
	 * dpll parameter calculation and without the m2 < m1 restriction.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* LVDS downclocking: P divider must match. */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* Keep the candidate closest to the target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff at least one valid candidate beat the initial seed. */
	return (err != target);
}
722
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/*
	 * Unlike the i9xx/pnv variants, accept any candidate within this
	 * relative error budget; match_clock is not used here.
	 * approximately equals target * 0.00585
	 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	/* P2 is fixed up front from the output type and target dot clock. */
	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Never consider a bigger n than the
						 * best hit's - keeps preferring small n.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
Ma Lingd4906092009-03-18 20:13:27 +0800781
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Returns true if it is, and
 * stores the calculated error in @error_ppm.
 */
786static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300787 const struct dpll *calculated_clock,
788 const struct dpll *best_clock,
Imre Deakd5dd62b2015-03-17 11:40:03 +0200789 unsigned int best_error_ppm,
790 unsigned int *error_ppm)
791{
Imre Deak9ca3ba02015-03-17 11:40:05 +0200792 /*
793 * For CHV ignore the error and consider only the P value.
794 * Prefer a bigger P value based on HW requirements.
795 */
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +0100796 if (IS_CHERRYVIEW(to_i915(dev))) {
Imre Deak9ca3ba02015-03-17 11:40:05 +0200797 *error_ppm = 0;
798
799 return calculated_clock->p > best_clock->p;
800 }
801
Imre Deak24be4e42015-03-17 11:40:04 +0200802 if (WARN_ON_ONCE(!target_freq))
803 return false;
804
Imre Deakd5dd62b2015-03-17 11:40:03 +0200805 *error_ppm = div_u64(1000000ULL *
806 abs(target_freq - calculated_clock->dot),
807 target_freq);
808 /*
809 * Prefer a better P value over a better (smaller) error if the error
810 * is small. Ensure this preference for future configurations too by
811 * setting the error to 0.
812 */
813 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
814 *error_ppm = 0;
815
816 return true;
817 }
818
819 return *error_ppm + 10 < best_error_ppm;
820}
821
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	/* Best error (ppm) so far; note match_clock is not used here. */
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* Solve m2 for the target given the other divisors. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					/* Accept only if better than the best hit so far. */
					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
Keith Packarda4fc5ed2009-04-07 16:16:42 -0700881
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	u64 m2;
	/* match_clock is not used by the CHV variant. */
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on hardware doc, the n always set to 1, and m1 always
	 * set to 2. If requires to support 200Mhz refclk, we need to
	 * revisit this because n may not 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			/* Solve m2 with 22 fractional bits (note the << 22). */
			m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			/* Reject values that would overflow clock.m2. */
			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			/* On CHV this compares only the P values, see vlv_PLL_is_optimal(). */
			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
944
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200945bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300946 struct dpll *best_clock)
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200947{
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200948 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300949 const struct intel_limit *limit = &intel_limits_bxt;
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200950
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200951 return chv_find_best_dpll(limit, crtc_state,
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200952 target_clock, refclk, NULL, best_clock);
953}
954
Ville Syrjälä525b9312016-10-31 22:37:02 +0200955bool intel_crtc_active(struct intel_crtc *crtc)
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300956{
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300957 /* Be paranoid as we can arrive here with only partial
958 * state retrieved from the hardware during setup.
959 *
Damien Lespiau241bfc32013-09-25 16:45:37 +0100960 * We can ditch the adjusted_mode.crtc_clock check as soon
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300961 * as Haswell has gained clock readout/fastboot support.
962 *
Ville Syrjäläcd30fbc2018-05-25 21:50:40 +0300963 * We can ditch the crtc->primary->state->fb check as soon as we can
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300964 * properly reconstruct framebuffers.
Matt Roperc3d1f432015-03-09 10:19:23 -0700965 *
966 * FIXME: The intel_crtc->active here should be switched to
967 * crtc->state->active once we have proper CRTC states wired up
968 * for atomic.
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300969 */
Ville Syrjälä525b9312016-10-31 22:37:02 +0200970 return crtc->active && crtc->base.primary->state->fb &&
971 crtc->config->base.adjusted_mode.crtc_clock;
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300972}
973
Paulo Zanonia5c961d2012-10-24 15:59:34 -0200974enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
975 enum pipe pipe)
976{
Ville Syrjälä98187832016-10-31 22:37:10 +0200977 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Paulo Zanonia5c961d2012-10-24 15:59:34 -0200978
Ville Syrjäläe2af48c2016-10-31 22:37:05 +0200979 return crtc->config->cpu_transcoder;
Paulo Zanonia5c961d2012-10-24 15:59:34 -0200980}
981
Ville Syrjälä8fedd642017-11-29 17:37:30 +0200982static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
983 enum pipe pipe)
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300984{
Ville Syrjäläf0f59a02015-11-18 15:33:26 +0200985 i915_reg_t reg = PIPEDSL(pipe);
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300986 u32 line1, line2;
987 u32 line_mask;
988
Lucas De Marchicf819ef2018-12-12 10:10:43 -0800989 if (IS_GEN(dev_priv, 2))
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300990 line_mask = DSL_LINEMASK_GEN2;
991 else
992 line_mask = DSL_LINEMASK_GEN3;
993
994 line1 = I915_READ(reg) & line_mask;
Daniel Vetter6adfb1e2015-07-07 09:10:40 +0200995 msleep(5);
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300996 line2 = I915_READ(reg) & line_mask;
997
Ville Syrjälä8fedd642017-11-29 17:37:30 +0200998 return line1 != line2;
999}
1000
/* Wait (up to 100ms) until the pipe scanline is/is not moving, per @state. */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1011
/* Wait until the pipe's scanline counter has stopped advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1016
/* Wait until the pipe's scanline counter has started advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1021
/* Wait for a just-disabled pipe to actually shut down. */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* No pipe-state bit before gen4; infer it from the scanline. */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1041
Jesse Barnesb24e7172011-01-04 15:09:30 -08001042/* Only for pre-ILK configs */
Daniel Vetter55607e82013-06-16 21:42:39 +02001043void assert_pll(struct drm_i915_private *dev_priv,
1044 enum pipe pipe, bool state)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001045{
Jesse Barnesb24e7172011-01-04 15:09:30 -08001046 u32 val;
1047 bool cur_state;
1048
Ville Syrjälä649636e2015-09-22 19:50:01 +03001049 val = I915_READ(DPLL(pipe));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001050 cur_state = !!(val & DPLL_VCO_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001051 I915_STATE_WARN(cur_state != state,
Jesse Barnesb24e7172011-01-04 15:09:30 -08001052 "PLL state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001053 onoff(state), onoff(cur_state));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001054}
Jesse Barnesb24e7172011-01-04 15:09:30 -08001055
/* XXX: the dsi pll is shared between MIPI DSI ports */
/* Check that the DSI PLL VCO enable bit matches @state. */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* CCK sideband reads are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
Jani Nikula23538ef2013-08-27 15:12:22 +03001071
/* Check that the FDI TX side for @pipe matches @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1093
1094static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1095 enum pipe pipe, bool state)
1096{
Jesse Barnes040484a2011-01-03 12:14:26 -08001097 u32 val;
1098 bool cur_state;
1099
Ville Syrjälä649636e2015-09-22 19:50:01 +03001100 val = I915_READ(FDI_RX_CTL(pipe));
Paulo Zanonid63fa0d2012-11-20 13:27:35 -02001101 cur_state = !!(val & FDI_RX_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001102 I915_STATE_WARN(cur_state != state,
Jesse Barnes040484a2011-01-03 12:14:26 -08001103 "FDI RX state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001104 onoff(state), onoff(cur_state));
Jesse Barnes040484a2011-01-03 12:14:26 -08001105}
1106#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1107#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1108
/* Check that the FDI TX PLL is enabled, where that is software-visible. */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1125
/* Check that the FDI RX PLL enable bit for @pipe matches @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1138
/*
 * Check that the panel power sequencer registers driving @pipe's panel
 * are not write-locked (or that the panel isn't powered on via them).
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are not handled here. */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* Figure out which port the PPS is driving, then its pipe. */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH-split hardware: only LVDS can own the PPS. */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* "Unlocked" means panel off, or the unlock pattern is written. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1195
/* Check that @pipe's enable state matches @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/*
	 * Only touch PIPECONF if the transcoder's power domain is already
	 * up; otherwise treat the pipe as disabled.
	 */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1224
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001225static void assert_plane(struct intel_plane *plane, bool state)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001226{
Ville Syrjäläeade6c82018-01-30 22:38:03 +02001227 enum pipe pipe;
1228 bool cur_state;
1229
1230 cur_state = plane->get_hw_state(plane, &pipe);
Jesse Barnesb24e7172011-01-04 15:09:30 -08001231
Rob Clarke2c719b2014-12-15 13:56:32 -05001232 I915_STATE_WARN(cur_state != state,
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001233 "%s assertion failure (expected %s, current %s)\n",
1234 plane->base.name, onoff(state), onoff(cur_state));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001235}
1236
/* Convenience wrappers around assert_plane() for the two common checks. */
#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
Chris Wilson931872f2012-01-16 23:01:13 +00001239
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001240static void assert_planes_disabled(struct intel_crtc *crtc)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001241{
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001242 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1243 struct intel_plane *plane;
Jesse Barnesb24e7172011-01-04 15:09:30 -08001244
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001245 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1246 assert_plane_disabled(plane);
Jesse Barnes19332d72013-03-28 09:55:38 -07001247}
1248
/*
 * Assert that vblank interrupts are currently disabled for @crtc.
 *
 * drm_crtc_vblank_get() returning 0 means vblanks were successfully
 * enabled, i.e. they were NOT off as asserted — warn in that case and
 * drop the reference we just acquired to restore the refcount.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1254
Ander Conselvan de Oliveira7abd4b32016-03-08 17:46:15 +02001255void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1256 enum pipe pipe)
Jesse Barnes92f25842011-01-04 15:09:34 -08001257{
Jesse Barnes92f25842011-01-04 15:09:34 -08001258 u32 val;
1259 bool enabled;
1260
Ville Syrjälä649636e2015-09-22 19:50:01 +03001261 val = I915_READ(PCH_TRANSCONF(pipe));
Jesse Barnes92f25842011-01-04 15:09:34 -08001262 enabled = !!(val & TRANS_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001263 I915_STATE_WARN(enabled,
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001264 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1265 pipe_name(pipe));
Jesse Barnes92f25842011-01-04 15:09:34 -08001266}
1267
Jesse Barnes291906f2011-02-02 12:28:03 -08001268static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001269 enum pipe pipe, enum port port,
1270 i915_reg_t dp_reg)
Jesse Barnes291906f2011-02-02 12:28:03 -08001271{
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001272 enum pipe port_pipe;
1273 bool state;
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001274
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001275 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1276
1277 I915_STATE_WARN(state && port_pipe == pipe,
1278 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1279 port_name(port), pipe_name(pipe));
1280
1281 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1282 "IBX PCH DP %c still using transcoder B\n",
1283 port_name(port));
Jesse Barnes291906f2011-02-02 12:28:03 -08001284}
1285
1286static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
Ville Syrjälä76203462018-05-14 20:24:21 +03001287 enum pipe pipe, enum port port,
1288 i915_reg_t hdmi_reg)
Jesse Barnes291906f2011-02-02 12:28:03 -08001289{
Ville Syrjälä76203462018-05-14 20:24:21 +03001290 enum pipe port_pipe;
1291 bool state;
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001292
Ville Syrjälä76203462018-05-14 20:24:21 +03001293 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1294
1295 I915_STATE_WARN(state && port_pipe == pipe,
1296 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1297 port_name(port), pipe_name(pipe));
1298
1299 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1300 "IBX PCH HDMI %c still using transcoder B\n",
1301 port_name(port));
Jesse Barnes291906f2011-02-02 12:28:03 -08001302}
1303
/*
 * Assert that no PCH port (DP B/C/D, VGA/CRT, LVDS, HDMI B/C/D) is
 * currently routed to the PCH transcoder for @pipe. Called before the
 * PCH transcoder itself is disabled.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1328
/*
 * Low-level VLV DPLL enable: program the DPLL control register and wait
 * for the lock indication. Callers (vlv_enable_pll()) are responsible
 * for the pipe-disabled / panel-unlocked assertions.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	/* give the PLL time to spin up before polling for lock */
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1346
/*
 * Enable the VLV DPLL for @crtc. The pipe must be disabled and the
 * panel power registers unlocked first. The PLL proper is only spun up
 * when the state actually has the VCO enabled; DPLL_MD is written
 * unconditionally to program the pixel multiplier.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1364
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03001365
/*
 * Low-level CHV DPLL enable: re-enable the 10-bit (dclkp) clock to the
 * display controller via sideband DPIO, then enable the PLL and wait
 * for lock. The sideband mutex guards the DPIO access only; the
 * ordering (dclkp before PLL enable, with a >100ns gap) is a hardware
 * requirement.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1397
/*
 * Enable the CHV DPLL for @crtc. Pipe must be disabled and panel power
 * registers unlocked. For pipes B/C the DPLL_MD value has to be routed
 * through DPLLBMD via the CBR4 chicken bits (WaPixelRepeatModeFixForC0),
 * since those pipes' own DPLLCMD register is non-functional.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* cache the value the hw can't read back for pipe B/C */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1434
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001435static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03001436{
1437 struct intel_crtc *crtc;
1438 int count = 0;
1439
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001440 for_each_intel_crtc(&dev_priv->drm, crtc) {
Maarten Lankhorst3538b9d2015-06-01 12:50:10 +02001441 count += crtc->base.state->active &&
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03001442 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1443 }
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03001444
1445 return count;
1446}
1447
/*
 * Enable the gen2-4 style DPLL for @crtc. The pipe must be disabled,
 * and on mobile parts (except 830) the panel power registers must be
 * unlocked. The write sequence (VGA-mode clear, value write, stabilize
 * delay, repeated writes) follows hardware requirements — do not
 * reorder.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* gen4+ has a dedicated register for the pixel multiplier */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1507
/*
 * Disable the gen2-4 style DPLL for the crtc in @crtc_state. Drops the
 * shared DVO 2x clock when this was the last DVO user. On 830 the PLLs
 * (like the pipes) are left running permanently.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1534
/*
 * Disable the VLV DPLL for @pipe, leaving only the reference clock and
 * VGA-mode-disable bits set. The pipe must already be disabled.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* keep the CRI clock running on pipes B/C even with the PLL off */
	val = DPLL_INTEGRATED_REF_CLK_VLV |
	      DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1550
/*
 * Disable the CHV DPLL for @pipe: turn the PLL off (keeping reference
 * clocks running), then gate the 10-bit (dclkp) clock to the display
 * controller via sideband DPIO. The pipe must already be disabled.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* keep the CRI clock running on pipes B/C even with the PLL off */
	val = DPLL_SSC_REF_CLK_CHV |
	      DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1576
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001577void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03001578 struct intel_digital_port *dport,
1579 unsigned int expected_mask)
Jesse Barnes89b667f2013-04-18 14:51:36 -07001580{
1581 u32 port_mask;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001582 i915_reg_t dpll_reg;
Jesse Barnes89b667f2013-04-18 14:51:36 -07001583
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02001584 switch (dport->base.port) {
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001585 case PORT_B:
Jesse Barnes89b667f2013-04-18 14:51:36 -07001586 port_mask = DPLL_PORTB_READY_MASK;
Chon Ming Lee00fc31b2014-04-09 13:28:15 +03001587 dpll_reg = DPLL(0);
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001588 break;
1589 case PORT_C:
Jesse Barnes89b667f2013-04-18 14:51:36 -07001590 port_mask = DPLL_PORTC_READY_MASK;
Chon Ming Lee00fc31b2014-04-09 13:28:15 +03001591 dpll_reg = DPLL(0);
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03001592 expected_mask <<= 4;
Chon Ming Lee00fc31b2014-04-09 13:28:15 +03001593 break;
1594 case PORT_D:
1595 port_mask = DPLL_PORTD_READY_MASK;
1596 dpll_reg = DPIO_PHY_STATUS;
Chon Ming Leee4607fc2013-11-06 14:36:35 +08001597 break;
1598 default:
1599 BUG();
1600 }
Jesse Barnes89b667f2013-04-18 14:51:36 -07001601
Chris Wilson370004d2016-06-30 15:32:56 +01001602 if (intel_wait_for_register(dev_priv,
1603 dpll_reg, port_mask, expected_mask,
1604 1000))
Ville Syrjälä9b6de0a2015-04-10 18:21:31 +03001605 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02001606 port_name(dport->base.port),
1607 I915_READ(dpll_reg) & port_mask, expected_mask);
Jesse Barnes89b667f2013-04-18 14:51:36 -07001608}
1609
Maarten Lankhorst7efd90f2018-10-04 11:45:55 +02001610static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
Jesse Barnes040484a2011-01-03 12:14:26 -08001611{
Maarten Lankhorst7efd90f2018-10-04 11:45:55 +02001612 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1613 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1614 enum pipe pipe = crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001615 i915_reg_t reg;
Jani Nikulaba3f4d02019-01-18 14:01:23 +02001616 u32 val, pipeconf_val;
Jesse Barnes040484a2011-01-03 12:14:26 -08001617
Jesse Barnes040484a2011-01-03 12:14:26 -08001618 /* Make sure PCH DPLL is enabled */
Maarten Lankhorst7efd90f2018-10-04 11:45:55 +02001619 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
Jesse Barnes040484a2011-01-03 12:14:26 -08001620
1621 /* FDI must be feeding us bits for PCH ports */
1622 assert_fdi_tx_enabled(dev_priv, pipe);
1623 assert_fdi_rx_enabled(dev_priv, pipe);
1624
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01001625 if (HAS_PCH_CPT(dev_priv)) {
Daniel Vetter23670b322012-11-01 09:15:30 +01001626 /* Workaround: Set the timing override bit before enabling the
1627 * pch transcoder. */
1628 reg = TRANS_CHICKEN2(pipe);
1629 val = I915_READ(reg);
1630 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1631 I915_WRITE(reg, val);
Eugeni Dodonov59c859d2012-05-09 15:37:19 -03001632 }
Daniel Vetter23670b322012-11-01 09:15:30 +01001633
Daniel Vetterab9412b2013-05-03 11:49:46 +02001634 reg = PCH_TRANSCONF(pipe);
Jesse Barnes040484a2011-01-03 12:14:26 -08001635 val = I915_READ(reg);
Paulo Zanoni5f7f7262012-02-03 17:47:15 -02001636 pipeconf_val = I915_READ(PIPECONF(pipe));
Jesse Barnese9bcff52011-06-24 12:19:20 -07001637
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001638 if (HAS_PCH_IBX(dev_priv)) {
Jesse Barnese9bcff52011-06-24 12:19:20 -07001639 /*
Ville Syrjäläc5de7c62015-05-05 17:06:22 +03001640 * Make the BPC in transcoder be consistent with
1641 * that in pipeconf reg. For HDMI we must use 8bpc
1642 * here for both 8bpc and 12bpc.
Jesse Barnese9bcff52011-06-24 12:19:20 -07001643 */
Daniel Vetterdfd07d72012-12-17 11:21:38 +01001644 val &= ~PIPECONF_BPC_MASK;
Maarten Lankhorst7efd90f2018-10-04 11:45:55 +02001645 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
Ville Syrjäläc5de7c62015-05-05 17:06:22 +03001646 val |= PIPECONF_8BPC;
1647 else
1648 val |= pipeconf_val & PIPECONF_BPC_MASK;
Jesse Barnese9bcff52011-06-24 12:19:20 -07001649 }
Paulo Zanoni5f7f7262012-02-03 17:47:15 -02001650
1651 val &= ~TRANS_INTERLACE_MASK;
1652 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001653 if (HAS_PCH_IBX(dev_priv) &&
Maarten Lankhorst7efd90f2018-10-04 11:45:55 +02001654 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
Paulo Zanoni7c26e5c2012-02-14 17:07:09 -02001655 val |= TRANS_LEGACY_INTERLACED_ILK;
1656 else
1657 val |= TRANS_INTERLACED;
Paulo Zanoni5f7f7262012-02-03 17:47:15 -02001658 else
1659 val |= TRANS_PROGRESSIVE;
1660
Jesse Barnes040484a2011-01-03 12:14:26 -08001661 I915_WRITE(reg, val | TRANS_ENABLE);
Chris Wilson650fbd82016-06-30 15:32:57 +01001662 if (intel_wait_for_register(dev_priv,
1663 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1664 100))
Ville Syrjälä4bb6f1f2013-04-17 17:48:50 +03001665 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
Jesse Barnes040484a2011-01-03 12:14:26 -08001666}
1667
/*
 * Enable the single LPT PCH transcoder, fed from @cpu_transcoder.
 * On LPT FDI RX is hardwired to pipe A. The timing-override chicken
 * bit must be set before enabling, and the interlace mode is copied
 * from the CPU transcoder's PIPECONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1699
/*
 * Disable the PCH transcoder for @pipe. FDI must already be disabled
 * and no PCH port may still be routed to this transcoder (asserted
 * below). On CPT the timing-override chicken bit set at enable time is
 * cleared again afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1731
/*
 * Disable the single LPT PCH transcoder, polling for it to report
 * disabled, then clear the timing-override chicken bit set at enable.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1750
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001751enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
Ville Syrjälä65f21302016-10-14 20:02:53 +03001752{
1753 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1754
Ville Syrjälä65f21302016-10-14 20:02:53 +03001755 if (HAS_PCH_LPT(dev_priv))
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001756 return PIPE_A;
Ville Syrjälä65f21302016-10-14 20:02:53 +03001757 else
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001758 return crtc->pipe;
Ville Syrjälä65f21302016-10-14 20:02:53 +03001759}
1760
/*
 * Enable the pipe for the crtc in @new_crtc_state. All planes must be
 * disabled, and the clock source (DSI PLL / pipe PLL on GMCH, FDI PLLs
 * for PCH encoders) must already be running — asserted below. On 830
 * the pipe is permanently enabled, so an already-set PIPECONF_ENABLE is
 * tolerated only there.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (dev_priv->drm.max_vblank_count == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1816
/*
 * Disable the pipe described by @old_crtc_state. All planes on the pipe
 * must already be disabled (asserted below). Except on i830, where the
 * pipe is kept running, this waits for the pipe to actually turn off
 * before returning.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* Already disabled - nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for the pipe to stop if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1854
/* Size in bytes of one GTT tile: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1859
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001860static unsigned int
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001861intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001862{
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001863 struct drm_i915_private *dev_priv = to_i915(fb->dev);
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001864 unsigned int cpp = fb->format->cpp[color_plane];
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001865
1866 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07001867 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001868 return cpp;
1869 case I915_FORMAT_MOD_X_TILED:
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001870 if (IS_GEN(dev_priv, 2))
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001871 return 128;
1872 else
1873 return 512;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001874 case I915_FORMAT_MOD_Y_TILED_CCS:
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001875 if (color_plane == 1)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001876 return 128;
1877 /* fall through */
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001878 case I915_FORMAT_MOD_Y_TILED:
Lucas De Marchicf819ef2018-12-12 10:10:43 -08001879 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001880 return 128;
1881 else
1882 return 512;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001883 case I915_FORMAT_MOD_Yf_TILED_CCS:
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001884 if (color_plane == 1)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001885 return 128;
1886 /* fall through */
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001887 case I915_FORMAT_MOD_Yf_TILED:
1888 switch (cpp) {
1889 case 1:
1890 return 64;
1891 case 2:
1892 case 4:
1893 return 128;
1894 case 8:
1895 case 16:
1896 return 256;
1897 default:
1898 MISSING_CASE(cpp);
1899 return cpp;
1900 }
1901 break;
1902 default:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001903 MISSING_CASE(fb->modifier);
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001904 return cpp;
1905 }
1906}
1907
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001908static unsigned int
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001909intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
Jesse Barnesa57ce0b2014-02-07 12:10:35 -08001910{
Ben Widawsky2f075562017-03-24 14:29:48 -07001911 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
Ville Syrjälä832be822016-01-12 21:08:33 +02001912 return 1;
1913 else
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001914 return intel_tile_size(to_i915(fb->dev)) /
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001915 intel_tile_width_bytes(fb, color_plane);
Tvrtko Ursulin6761dd32015-03-23 11:10:32 +00001916}
1917
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001918/* Return the tile dimensions in pixel units */
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001919static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001920 unsigned int *tile_width,
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001921 unsigned int *tile_height)
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001922{
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001923 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1924 unsigned int cpp = fb->format->cpp[color_plane];
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001925
1926 *tile_width = tile_width_bytes / cpp;
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001927 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001928}
1929
/* Pad @height up to a whole number of tile rows for the given color plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1938
Ville Syrjälä1663b9d2016-02-15 22:54:45 +02001939unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1940{
1941 unsigned int size = 0;
1942 int i;
1943
1944 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1945 size += rot_info->plane[i].width * rot_info->plane[i].height;
1946
1947 return size;
1948}
1949
Daniel Vetter75c82a52015-10-14 16:51:04 +02001950static void
Ville Syrjälä3465c582016-02-15 22:54:43 +02001951intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1952 const struct drm_framebuffer *fb,
1953 unsigned int rotation)
Tvrtko Ursulinf64b98c2015-03-23 11:10:35 +00001954{
Chris Wilson7b92c042017-01-14 00:28:26 +00001955 view->type = I915_GGTT_VIEW_NORMAL;
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03001956 if (drm_rotation_90_or_270(rotation)) {
Chris Wilson7b92c042017-01-14 00:28:26 +00001957 view->type = I915_GGTT_VIEW_ROTATED;
Chris Wilson8bab11932017-01-14 00:28:25 +00001958 view->rotated = to_intel_framebuffer(fb)->rot_info;
Ville Syrjälä2d7a2152016-02-15 22:54:47 +02001959 }
1960}
1961
/* Required base address alignment for the cursor plane, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
1973
/*
 * Required surface base alignment for linear framebuffers. Note the
 * gen9+ check must come before the generic gen4+ one.
 */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
1986
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001987static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001988 int color_plane)
Ville Syrjälä603525d2016-01-12 21:08:37 +02001989{
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001990 struct drm_i915_private *dev_priv = to_i915(fb->dev);
1991
Ville Syrjäläb90c1ee2017-03-07 21:42:07 +02001992 /* AUX_DIST needs only 4K alignment */
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001993 if (color_plane == 1)
Ville Syrjäläb90c1ee2017-03-07 21:42:07 +02001994 return 4096;
1995
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001996 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07001997 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjälä603525d2016-01-12 21:08:37 +02001998 return intel_linear_alignment(dev_priv);
1999 case I915_FORMAT_MOD_X_TILED:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002000 if (INTEL_GEN(dev_priv) >= 9)
Ville Syrjälä603525d2016-01-12 21:08:37 +02002001 return 256 * 1024;
2002 return 0;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002003 case I915_FORMAT_MOD_Y_TILED_CCS:
2004 case I915_FORMAT_MOD_Yf_TILED_CCS:
Ville Syrjälä603525d2016-01-12 21:08:37 +02002005 case I915_FORMAT_MOD_Y_TILED:
2006 case I915_FORMAT_MOD_Yf_TILED:
2007 return 1 * 1024 * 1024;
2008 default:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002009 MISSING_CASE(fb->modifier);
Ville Syrjälä603525d2016-01-12 21:08:37 +02002010 return 0;
2011 }
2012}
2013
Ville Syrjäläf7a02ad2018-02-21 20:48:07 +02002014static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2015{
2016 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2017 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2018
Ville Syrjälä32febd92018-02-21 18:02:33 +02002019 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
Ville Syrjäläf7a02ad2018-02-21 20:48:07 +02002020}
2021
/*
 * Pin @fb's backing object into the GGTT for scanout using @view and,
 * when @uses_fence allows it and the vma is map-and-fenceable, install
 * a fence register for tiled scan-out (recorded as PLANE_HAS_FENCE in
 * @out_flags). Caller must hold struct_mutex.
 *
 * Returns the pinned vma with an extra reference taken, or an ERR_PTR
 * on failure.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(dev_priv);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Fence is mandatory pre-gen4: undo the pin and fail. */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference for the caller; dropped in intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv, wakeref);
	return vma;
}
2113
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if @flags says
 * one was installed), unpin from the display plane, and drop the vma
 * reference taken at pin time. Caller must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
2123
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002124static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002125 unsigned int rotation)
2126{
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002127 if (drm_rotation_90_or_270(rotation))
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002128 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002129 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002130 return fb->pitches[color_plane];
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002131}
2132
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002133/*
Ville Syrjälä6687c902015-09-15 13:16:41 +03002134 * Convert the x/y offsets into a linear offset.
2135 * Only valid with 0/180 degree rotation, which is fine since linear
2136 * offset is only used with linear buffers on pre-hsw and tiled buffers
2137 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2138 */
2139u32 intel_fb_xy_to_linear(int x, int y,
Ville Syrjälä29490562016-01-20 18:02:50 +02002140 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002141 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002142{
Ville Syrjälä29490562016-01-20 18:02:50 +02002143 const struct drm_framebuffer *fb = state->base.fb;
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002144 unsigned int cpp = fb->format->cpp[color_plane];
2145 unsigned int pitch = state->color_plane[color_plane].stride;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002146
2147 return y * pitch + x * cpp;
2148}
2149
2150/*
2151 * Add the x/y offsets derived from fb->offsets[] to the user
2152 * specified plane src x/y offsets. The resulting x/y offsets
2153 * specify the start of scanout from the beginning of the gtt mapping.
2154 */
2155void intel_add_fb_offsets(int *x, int *y,
Ville Syrjälä29490562016-01-20 18:02:50 +02002156 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002157 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002158
2159{
Ville Syrjälä29490562016-01-20 18:02:50 +02002160 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2161 unsigned int rotation = state->base.rotation;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002162
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002163 if (drm_rotation_90_or_270(rotation)) {
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002164 *x += intel_fb->rotated[color_plane].x;
2165 *y += intel_fb->rotated[color_plane].y;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002166 } else {
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002167 *x += intel_fb->normal[color_plane].x;
2168 *y += intel_fb->normal[color_plane].y;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002169 }
2170}
2171
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002172static u32 intel_adjust_tile_offset(int *x, int *y,
2173 unsigned int tile_width,
2174 unsigned int tile_height,
2175 unsigned int tile_size,
2176 unsigned int pitch_tiles,
2177 u32 old_offset,
2178 u32 new_offset)
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002179{
Ville Syrjäläb9b24032016-02-08 18:28:00 +02002180 unsigned int pitch_pixels = pitch_tiles * tile_width;
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002181 unsigned int tiles;
2182
2183 WARN_ON(old_offset & (tile_size - 1));
2184 WARN_ON(new_offset & (tile_size - 1));
2185 WARN_ON(new_offset > old_offset);
2186
2187 tiles = (old_offset - new_offset) / tile_size;
2188
2189 *y += tiles / pitch_tiles * tile_height;
2190 *x += tiles % pitch_tiles * tile_width;
2191
Ville Syrjäläb9b24032016-02-08 18:28:00 +02002192 /* minimize x in case it got needlessly big */
2193 *y += *x / pitch_pixels * tile_height;
2194 *x %= pitch_pixels;
2195
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002196 return new_offset;
2197}
2198
/*
 * Whether the given color plane of a surface has a linear layout.
 * Only the modifier matters today; @color_plane is accepted for
 * symmetry with the other per-plane helpers.
 */
static bool is_surface_linear(u64 modifier, int color_plane)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}
2203
/*
 * Convert a surface offset change (old_offset -> new_offset) into an
 * adjustment of the x/y coordinates, for either tiled or linear
 * layouts. Returns new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* In the rotated view the pitch is in tile rows. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold the byte difference directly into x/y. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2242
2243/*
Ville Syrjälä303ba692017-08-24 22:10:49 +03002244 * Adjust the tile offset by moving the difference into
2245 * the x/y offsets.
2246 */
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002247static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2248 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002249 int color_plane,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002250 u32 old_offset, u32 new_offset)
Ville Syrjälä303ba692017-08-24 22:10:49 +03002251{
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002252 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002253 state->base.rotation,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002254 state->color_plane[color_plane].stride,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002255 old_offset, new_offset);
Ville Syrjälä303ba692017-08-24 22:10:49 +03002256}
2257
2258/*
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002259 * Computes the aligned offset to the base tile and adjusts
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002260 * x, y. bytes per pixel is assumed to be a power-of-two.
2261 *
2262 * In the 90/270 rotated case, x and y are assumed
2263 * to be already rotated to match the rotated GTT view, and
2264 * pitch is the tile_height aligned framebuffer height.
Ville Syrjälä6687c902015-09-15 13:16:41 +03002265 *
2266 * This function is used when computing the derived information
2267 * under intel_framebuffer, so using any of that information
2268 * here is not allowed. Anything under drm_framebuffer can be
2269 * used. This is why the user has to pass in the pitch since it
2270 * is specified in the rotated orientation.
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002271 */
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002272static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2273 int *x, int *y,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002274 const struct drm_framebuffer *fb,
2275 int color_plane,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002276 unsigned int pitch,
2277 unsigned int rotation,
2278 u32 alignment)
Daniel Vetterc2c75132012-07-05 12:17:30 +02002279{
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002280 unsigned int cpp = fb->format->cpp[color_plane];
Ville Syrjälä6687c902015-09-15 13:16:41 +03002281 u32 offset, offset_aligned;
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002282
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002283 if (alignment)
2284 alignment--;
2285
Dhinakaran Pandiyan2a11b1b2018-10-26 12:38:04 -07002286 if (!is_surface_linear(fb->modifier, color_plane)) {
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002287 unsigned int tile_size, tile_width, tile_height;
2288 unsigned int tile_rows, tiles, pitch_tiles;
Daniel Vetterc2c75132012-07-05 12:17:30 +02002289
Ville Syrjäläd8433102016-01-12 21:08:35 +02002290 tile_size = intel_tile_size(dev_priv);
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002291 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002292
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002293 if (drm_rotation_90_or_270(rotation)) {
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002294 pitch_tiles = pitch / tile_height;
2295 swap(tile_width, tile_height);
2296 } else {
2297 pitch_tiles = pitch / (tile_width * cpp);
2298 }
Daniel Vetterc2c75132012-07-05 12:17:30 +02002299
Ville Syrjäläd8433102016-01-12 21:08:35 +02002300 tile_rows = *y / tile_height;
2301 *y %= tile_height;
Chris Wilsonbc752862013-02-21 20:04:31 +00002302
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002303 tiles = *x / tile_width;
2304 *x %= tile_width;
Ville Syrjäläd8433102016-01-12 21:08:35 +02002305
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002306 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2307 offset_aligned = offset & ~alignment;
Chris Wilsonbc752862013-02-21 20:04:31 +00002308
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002309 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2310 tile_size, pitch_tiles,
2311 offset, offset_aligned);
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002312 } else {
Chris Wilsonbc752862013-02-21 20:04:31 +00002313 offset = *y * pitch + *x * cpp;
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002314 offset_aligned = offset & ~alignment;
2315
Ville Syrjälä4e9a86b2015-06-11 16:31:14 +03002316 *y = (offset & alignment) / pitch;
2317 *x = ((offset & alignment) - *y * pitch) / cpp;
Chris Wilsonbc752862013-02-21 20:04:31 +00002318 }
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002319
2320 return offset_aligned;
Daniel Vetterc2c75132012-07-05 12:17:30 +02002321}
2322
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002323static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2324 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002325 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002326{
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03002327 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2328 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
Ville Syrjälä29490562016-01-20 18:02:50 +02002329 const struct drm_framebuffer *fb = state->base.fb;
2330 unsigned int rotation = state->base.rotation;
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002331 int pitch = state->color_plane[color_plane].stride;
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03002332 u32 alignment;
2333
2334 if (intel_plane->id == PLANE_CURSOR)
2335 alignment = intel_cursor_alignment(dev_priv);
2336 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002337 alignment = intel_surf_alignment(fb, color_plane);
Ville Syrjälä6687c902015-09-15 13:16:41 +03002338
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002339 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002340 pitch, rotation, alignment);
Ville Syrjälä6687c902015-09-15 13:16:41 +03002341}
2342
/*
 * Convert the fb->offset[] into x/y offsets.
 *
 * Returns 0 on success, -EINVAL if a tiled fb's offset is not tile
 * aligned, or -ERANGE if height * pitch + offset would overflow u32.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;

	/* Tiled surfaces require a tile-aligned base offset. */
	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
	    fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
		DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
			      fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
			      fb->offsets[color_plane], fb->pitches[color_plane],
			      color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Fold the byte offset into x/y starting from the (0,0) origin. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2380
Jani Nikulaba3f4d02019-01-18 14:01:23 +02002381static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
Ville Syrjälä72618eb2016-02-04 20:38:20 +02002382{
2383 switch (fb_modifier) {
2384 case I915_FORMAT_MOD_X_TILED:
2385 return I915_TILING_X;
2386 case I915_FORMAT_MOD_Y_TILED:
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002387 case I915_FORMAT_MOD_Y_TILED_CCS:
Ville Syrjälä72618eb2016-02-04 20:38:20 +02002388 return I915_TILING_Y;
2389 default:
2390 return I915_TILING_NONE;
2391 }
2392}
2393
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 * the cache-line pairs. The compression state of the cache-line pair
 * is specified by 2 bits in the CCS. Each CCS cache-line represents
 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 * cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
/*
 * Format descriptions for CCS framebuffers: plane 0 is the 32bpp main
 * surface, plane 1 the 1 byte-per-8x16-pixels CCS (hence hsub=8, vsub=16).
 */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2414
2415static const struct drm_format_info *
2416lookup_format_info(const struct drm_format_info formats[],
2417 int num_formats, u32 format)
2418{
2419 int i;
2420
2421 for (i = 0; i < num_formats; i++) {
2422 if (formats[i].format == format)
2423 return &formats[i];
2424 }
2425
2426 return NULL;
2427}
2428
2429static const struct drm_format_info *
2430intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2431{
2432 switch (cmd->modifier[0]) {
2433 case I915_FORMAT_MOD_Y_TILED_CCS:
2434 case I915_FORMAT_MOD_Yf_TILED_CCS:
2435 return lookup_format_info(ccs_formats,
2436 ARRAY_SIZE(ccs_formats),
2437 cmd->pixel_format);
2438 default:
2439 return NULL;
2440 }
2441}
2442
Dhinakaran Pandiyan63eaf9a2018-08-22 12:38:27 -07002443bool is_ccs_modifier(u64 modifier)
2444{
2445 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2446 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2447}
2448
/*
 * Validate the fb layout and precompute, for each color plane, the x/y
 * coordinates of its first pixel in both the normal and the rotated
 * (270°) GTT views, plus the rotation_info needed to construct the
 * rotated GTT mapping. Also verifies the backing object is big enough.
 *
 * Returns 0 on success or a negative errno (bad offsets, mismatched CCS
 * coordinates, fb larger than the bo).
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;	/* running tile count in the rotated view */
	unsigned int max_size = 0;	/* highest tile index used by any plane */
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Turn the plane's byte offset into x/y coordinates. */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* Plane 1 of a CCS fb is the compression control surface. */
		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			/* Express CCS tile dims in main-surface pixels. */
			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Tile-aligned starting offset of this plane, in tiles. */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		if (!is_surface_linear(fb->modifier, i)) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* The fb must fit entirely within the backing object. */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2613
Damien Lespiaub35d63f2015-01-20 12:51:50 +00002614static int i9xx_format_to_fourcc(int format)
Jesse Barnes46f297f2014-03-07 08:57:48 -08002615{
2616 switch (format) {
2617 case DISPPLANE_8BPP:
2618 return DRM_FORMAT_C8;
2619 case DISPPLANE_BGRX555:
2620 return DRM_FORMAT_XRGB1555;
2621 case DISPPLANE_BGRX565:
2622 return DRM_FORMAT_RGB565;
2623 default:
2624 case DISPPLANE_BGRX888:
2625 return DRM_FORMAT_XRGB8888;
2626 case DISPPLANE_RGBX888:
2627 return DRM_FORMAT_XBGR8888;
2628 case DISPPLANE_BGRX101010:
2629 return DRM_FORMAT_XRGB2101010;
2630 case DISPPLANE_RGBX101010:
2631 return DRM_FORMAT_XBGR2101010;
2632 }
2633}
2634
Mahesh Kumarddf34312018-04-09 09:11:03 +05302635int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00002636{
2637 switch (format) {
2638 case PLANE_CTL_FORMAT_RGB_565:
2639 return DRM_FORMAT_RGB565;
Mahesh Kumarf34a2912018-04-09 09:11:02 +05302640 case PLANE_CTL_FORMAT_NV12:
2641 return DRM_FORMAT_NV12;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00002642 default:
2643 case PLANE_CTL_FORMAT_XRGB_8888:
2644 if (rgb_order) {
2645 if (alpha)
2646 return DRM_FORMAT_ABGR8888;
2647 else
2648 return DRM_FORMAT_XBGR8888;
2649 } else {
2650 if (alpha)
2651 return DRM_FORMAT_ARGB8888;
2652 else
2653 return DRM_FORMAT_XRGB8888;
2654 }
2655 case PLANE_CTL_FORMAT_XRGB_2101010:
2656 if (rgb_order)
2657 return DRM_FORMAT_XBGR2101010;
2658 else
2659 return DRM_FORMAT_XRGB2101010;
2660 }
2661}
2662
Damien Lespiau5724dbd2015-01-20 12:51:52 +00002663static bool
Daniel Vetterf6936e22015-03-26 12:17:05 +01002664intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2665 struct intel_initial_plane_config *plane_config)
Jesse Barnes46f297f2014-03-07 08:57:48 -08002666{
2667 struct drm_device *dev = crtc->base.dev;
Paulo Zanoni3badb492015-09-23 12:52:23 -03002668 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes46f297f2014-03-07 08:57:48 -08002669 struct drm_i915_gem_object *obj = NULL;
2670 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
Damien Lespiau2d140302015-02-05 17:22:18 +00002671 struct drm_framebuffer *fb = &plane_config->fb->base;
Daniel Vetterf37b5c22015-02-10 23:12:27 +01002672 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2673 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2674 PAGE_SIZE);
2675
2676 size_aligned -= base_aligned;
Jesse Barnes46f297f2014-03-07 08:57:48 -08002677
Chris Wilsonff2652e2014-03-10 08:07:02 +00002678 if (plane_config->size == 0)
2679 return false;
2680
Paulo Zanoni3badb492015-09-23 12:52:23 -03002681 /* If the FB is too big, just don't use it since fbdev is not very
2682 * important and we should probably use that space with FBC or other
2683 * features. */
Matthew Auldb1ace602017-12-11 15:18:21 +00002684 if (size_aligned * 2 > dev_priv->stolen_usable_size)
Paulo Zanoni3badb492015-09-23 12:52:23 -03002685 return false;
2686
Imre Deak914a4fd2018-10-16 19:00:11 +03002687 switch (fb->modifier) {
2688 case DRM_FORMAT_MOD_LINEAR:
2689 case I915_FORMAT_MOD_X_TILED:
2690 case I915_FORMAT_MOD_Y_TILED:
2691 break;
2692 default:
2693 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
2694 fb->modifier);
2695 return false;
2696 }
2697
Tvrtko Ursulin12c83d92016-02-11 10:27:29 +00002698 mutex_lock(&dev->struct_mutex);
Tvrtko Ursulin187685c2016-12-01 14:16:36 +00002699 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
Daniel Vetterf37b5c22015-02-10 23:12:27 +01002700 base_aligned,
2701 base_aligned,
2702 size_aligned);
Chris Wilson24dbf512017-02-15 10:59:18 +00002703 mutex_unlock(&dev->struct_mutex);
2704 if (!obj)
Jesse Barnes484b41d2014-03-07 08:57:55 -08002705 return false;
Jesse Barnes46f297f2014-03-07 08:57:48 -08002706
Imre Deak914a4fd2018-10-16 19:00:11 +03002707 switch (plane_config->tiling) {
2708 case I915_TILING_NONE:
2709 break;
2710 case I915_TILING_X:
2711 case I915_TILING_Y:
2712 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
2713 break;
2714 default:
2715 MISSING_CASE(plane_config->tiling);
2716 return false;
2717 }
Jesse Barnes46f297f2014-03-07 08:57:48 -08002718
Ville Syrjälä438b74a2016-12-14 23:32:55 +02002719 mode_cmd.pixel_format = fb->format->format;
Damien Lespiau6bf129d2015-02-05 17:22:16 +00002720 mode_cmd.width = fb->width;
2721 mode_cmd.height = fb->height;
2722 mode_cmd.pitches[0] = fb->pitches[0];
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002723 mode_cmd.modifier[0] = fb->modifier;
Daniel Vetter18c52472015-02-10 17:16:09 +00002724 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
Jesse Barnes46f297f2014-03-07 08:57:48 -08002725
Chris Wilson24dbf512017-02-15 10:59:18 +00002726 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
Jesse Barnes46f297f2014-03-07 08:57:48 -08002727 DRM_DEBUG_KMS("intel fb init failed\n");
2728 goto out_unref_obj;
2729 }
Tvrtko Ursulin12c83d92016-02-11 10:27:29 +00002730
Jesse Barnes484b41d2014-03-07 08:57:55 -08002731
Daniel Vetterf6936e22015-03-26 12:17:05 +01002732 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
Jesse Barnes484b41d2014-03-07 08:57:55 -08002733 return true;
Jesse Barnes46f297f2014-03-07 08:57:48 -08002734
2735out_unref_obj:
Chris Wilsonf8c417c2016-07-20 13:31:53 +01002736 i915_gem_object_put(obj);
Jesse Barnes484b41d2014-03-07 08:57:55 -08002737 return false;
2738}
2739
Damien Lespiau5724dbd2015-01-20 12:51:52 +00002740static void
Ville Syrjäläe9728bd2017-03-02 19:14:51 +02002741intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2742 struct intel_plane_state *plane_state,
2743 bool visible)
2744{
2745 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2746
2747 plane_state->base.visible = visible;
2748
Ville Syrjälä62358aa2018-10-03 17:50:17 +03002749 if (visible)
Ville Syrjälä40560e22018-06-26 22:47:11 +03002750 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
Ville Syrjälä62358aa2018-10-03 17:50:17 +03002751 else
Ville Syrjälä40560e22018-06-26 22:47:11 +03002752 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
Ville Syrjäläe9728bd2017-03-02 19:14:51 +02002753}
2754
/*
 * Rebuild crtc_state->active_planes from crtc_state->base.plane_mask.
 */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->base.plane_mask)
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
2771
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * updating the software state (visibility, plane_mask, active_planes)
 * to match. Used during driver takeover / state sanitization.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	/* Update the software tracking before touching the hardware. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);

	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	trace_intel_disable_plane(&plane->base, crtc);
	plane->disable_plane(plane, crtc_state);
}
2793
/*
 * Locate (or create) an fb for the plane the BIOS left enabled on
 * @intel_crtc and wire it into the plane state, pinning it so the boot
 * image survives until the first real modeset. If no fb can be
 * reconstructed or shared, the plane is disabled instead.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	/* Preferred path: wrap the BIOS fb memory in a fresh fb. */
	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address => same BIOS fb; share it. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->base.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	/* Pin the fb so the boot image stays resident. */
	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	obj = intel_fb_obj(fb);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	/* Full-fb src/dst rectangles (src in 16.16 fixed point). */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
2902
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002903static int skl_max_plane_width(const struct drm_framebuffer *fb,
2904 int color_plane,
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002905 unsigned int rotation)
2906{
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002907 int cpp = fb->format->cpp[color_plane];
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002908
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002909 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07002910 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002911 case I915_FORMAT_MOD_X_TILED:
2912 switch (cpp) {
2913 case 8:
2914 return 4096;
2915 case 4:
2916 case 2:
2917 case 1:
2918 return 8192;
2919 default:
2920 MISSING_CASE(cpp);
2921 break;
2922 }
2923 break;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002924 case I915_FORMAT_MOD_Y_TILED_CCS:
2925 case I915_FORMAT_MOD_Yf_TILED_CCS:
2926 /* FIXME AUX plane? */
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002927 case I915_FORMAT_MOD_Y_TILED:
2928 case I915_FORMAT_MOD_Yf_TILED:
2929 switch (cpp) {
2930 case 8:
2931 return 2048;
2932 case 4:
2933 return 4096;
2934 case 2:
2935 case 1:
2936 return 8192;
2937 default:
2938 MISSING_CASE(cpp);
2939 break;
2940 }
2941 break;
2942 default:
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002943 MISSING_CASE(fb->modifier);
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002944 }
2945
2946 return 2048;
2947}
2948
/*
 * Walk the CCS (AUX) surface offset backwards, one alignment step at a
 * time, trying to reach x/y offsets matching the main surface's
 * (main_x, main_y) at offset @main_offset. Returns true (and commits
 * the new AUX offset/x/y into color_plane[1]) if a match was found,
 * false otherwise.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	/* AUX offset must stay >= main offset; stop once we'd pass it. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Step the AUX offset down by one alignment unit,
		 * converting x/y to AUX-plane units and back. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
2986
/*
 * Compute and validate the main (color plane 0) surface offset and x/y
 * for a SKL+ plane, honoring the max plane size, the surface alignment,
 * the X-tile stride restriction, and (for CCS) the requirement that
 * main and AUX intra-tile x/y offsets match.
 *
 * Returns 0 on success, -EINVAL if no acceptable offset exists.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;	/* src is 16.16 fixed point */
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Keep lowering the offset until the row fits the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	return 0;
}
3063
/*
 * Compute the AUX (CbCr, color plane 1) surface offset and x/y for an
 * NV12 plane and validate its size against the hardware limits.
 *
 * The >> 17 combines the 16.16 fixed-point conversion (>> 16) with the
 * divide-by-2 for the 2x2 subsampled chroma plane.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3092
/*
 * Set up color_plane[1] (the CCS AUX surface) for a framebuffer using a
 * render compression modifier. The AUX surface is subsampled by the
 * format's hsub x vsub factors; the sub-unit remainder of the source
 * position is folded back into the final x/y so no precision is lost.
 *
 * Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x = plane_state->base.src.x1 >> 16; /* 16.16 fp -> integer */
	int src_y = plane_state->base.src.y1 >> 16;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	/* position in AUX surface units */
	int x = src_x / hsub;
	int y = src_y / vsub;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	plane_state->color_plane[1].offset = offset;
	/* re-apply the intra-unit remainder dropped by the division above */
	plane_state->color_plane[1].x = x * hsub + src_x % hsub;
	plane_state->color_plane[1].y = y * vsub + src_y % vsub;

	return 0;
}
3113
/*
 * Compute the final surface layout (view, strides, per-plane offsets and
 * x/y positions) for a SKL+ plane. Order matters throughout: strides must
 * be known before the stride check, the src rectangle is rotated in place
 * to match a rotated GTT view, and the AUX surface must be set up before
 * the main surface since the latter depends on it.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
	plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* Fully clipped planes need no surface setup */
	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_MODE_ROTATE_270);

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (fb->format->format == DRM_FORMAT_NV12) {
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/* no AUX surface: mark plane 1 with a sentinel offset */
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
3161
Ville Syrjäläddd57132018-09-07 18:24:02 +03003162unsigned int
3163i9xx_plane_max_stride(struct intel_plane *plane,
3164 u32 pixel_format, u64 modifier,
3165 unsigned int rotation)
3166{
3167 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3168
3169 if (!HAS_GMCH_DISPLAY(dev_priv)) {
3170 return 32*1024;
3171 } else if (INTEL_GEN(dev_priv) >= 4) {
3172 if (modifier == I915_FORMAT_MOD_X_TILED)
3173 return 16*1024;
3174 else
3175 return 32*1024;
3176 } else if (INTEL_GEN(dev_priv) >= 3) {
3177 if (modifier == I915_FORMAT_MOD_X_TILED)
3178 return 8*1024;
3179 else
3180 return 16*1024;
3181 } else {
3182 if (plane->i9xx_plane == PLANE_C)
3183 return 4*1024;
3184 else
3185 return 8*1024;
3186 }
3187}
3188
/*
 * Build the DSPCNTR control register value for a pre-SKL primary plane
 * from the crtc and plane state: enable + gamma, per-platform workaround
 * bits, pipe selection (gen < 5), pixel format, tiling and rotation.
 *
 * Returns the register value, or 0 if the fb format is not handled.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

	/* Trickle feed is disabled on these platforms */
	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Only gen < 5 planes select their pipe via DSPCNTR */
	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	/* Map the DRM fourcc onto the hardware pixel format bits */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
Ville Syrjäläde1aa622013-06-07 10:47:01 +03003250
/*
 * Compute the GTT view, stride, surface offset and x/y scanout position
 * for a pre-SKL primary plane. For rotated/mirrored scanout (except on
 * HSW/BDW, where the hardware handles it) the x/y position is moved to
 * the opposite corner of the source rectangle.
 *
 * Returns 0 on success or a negative error code from the stride check.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x = plane_state->base.src.x1 >> 16; /* 16.16 fp -> integer */
	int src_y = plane_state->base.src.y1 >> 16;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* gen < 4 has no surface offset register; everything is in x/y */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/* point x/y at the last pixel for rotated/mirrored scanout */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3296
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +03003297static int
3298i9xx_plane_check(struct intel_crtc_state *crtc_state,
3299 struct intel_plane_state *plane_state)
3300{
3301 int ret;
3302
Ville Syrjälä25721f82018-09-07 18:24:12 +03003303 ret = chv_plane_check_rotation(plane_state);
3304 if (ret)
3305 return ret;
3306
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +03003307 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3308 &crtc_state->base,
3309 DRM_PLANE_HELPER_NO_SCALING,
3310 DRM_PLANE_HELPER_NO_SCALING,
3311 false, true);
3312 if (ret)
3313 return ret;
3314
3315 if (!plane_state->base.visible)
3316 return 0;
3317
3318 ret = intel_plane_check_src_coordinates(plane_state);
3319 if (ret)
3320 return ret;
3321
3322 ret = i9xx_check_plane_surface(plane_state);
3323 if (ret)
3324 return ret;
3325
3326 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3327
3328 return 0;
3329}
3330
/*
 * Program a pre-SKL primary plane from the precomputed plane state.
 * All register writes happen under the uncore lock with the raw
 * I915_WRITE_FW accessors so the whole update is a single uninterrupted
 * burst; the surface/address register is written last because it arms
 * the update (see comment below).
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	unsigned long irqflags;
	u32 dspaddr_offset;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/*
	 * gen4+ programs an aligned surface offset and expresses the
	 * remainder via DSPTILEOFF/DSPLINOFF; older gens fold the whole
	 * x/y position into the address.
	 */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own pos/size/alpha regs */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3395
/*
 * Disable a pre-SKL primary plane: clear the control register, then
 * write the surface/address register (DSPSURF on gen4+, DSPADDR on
 * older gens), mirroring the write ordering used by i9xx_update_plane().
 * Raw I915_WRITE_FW accessors are used under the uncore lock.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3413
/*
 * Read the plane's enable state back from hardware. On success *pipe is
 * set to the pipe the plane is attached to: fixed on gen5+, read from
 * the DSPCNTR pipe-select bits on older gens. Returns false (without
 * touching *pipe) when the power domain is off, otherwise the plane's
 * enable bit.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	/* balance the conditional power get above */
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3448
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003449static u32
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003450intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
Damien Lespiaub3218032015-02-27 11:15:18 +00003451{
Ben Widawsky2f075562017-03-24 14:29:48 -07003452 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
Ville Syrjälä7b49f942016-01-12 21:08:32 +02003453 return 64;
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003454 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003455 return intel_tile_width_bytes(fb, color_plane);
Damien Lespiaub3218032015-02-27 11:15:18 +00003456}
3457
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003458static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3459{
3460 struct drm_device *dev = intel_crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01003461 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003462
3463 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3464 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3465 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003466}
3467
Chandra Kondurua1b22782015-04-07 15:28:45 -07003468/*
3469 * This function detaches (aka. unbinds) unused scalers in hardware
3470 */
Maarten Lankhorst15cbe5d2018-10-04 11:45:56 +02003471static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
Chandra Kondurua1b22782015-04-07 15:28:45 -07003472{
Maarten Lankhorst15cbe5d2018-10-04 11:45:56 +02003473 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3474 const struct intel_crtc_scaler_state *scaler_state =
3475 &crtc_state->scaler_state;
Chandra Kondurua1b22782015-04-07 15:28:45 -07003476 int i;
3477
Chandra Kondurua1b22782015-04-07 15:28:45 -07003478 /* loop through and disable scalers that aren't in use */
3479 for (i = 0; i < intel_crtc->num_scalers; i++) {
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003480 if (!scaler_state->scalers[i].in_use)
3481 skl_detach_scaler(intel_crtc, i);
Chandra Kondurua1b22782015-04-07 15:28:45 -07003482 }
3483}
3484
Ville Syrjäläb3cf5c02018-09-25 22:37:08 +03003485static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3486 int color_plane, unsigned int rotation)
3487{
3488 /*
3489 * The stride is either expressed as a multiple of 64 bytes chunks for
3490 * linear buffers or in number of tiles for tiled buffers.
3491 */
3492 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3493 return 64;
3494 else if (drm_rotation_90_or_270(rotation))
3495 return intel_tile_height(fb, color_plane);
3496 else
3497 return intel_tile_width_bytes(fb, color_plane);
3498}
3499
Ville Syrjälädf79cf42018-09-11 18:01:39 +03003500u32 skl_plane_stride(const struct intel_plane_state *plane_state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003501 int color_plane)
Ville Syrjäläd2196772016-01-28 18:33:11 +02003502{
Ville Syrjälädf79cf42018-09-11 18:01:39 +03003503 const struct drm_framebuffer *fb = plane_state->base.fb;
3504 unsigned int rotation = plane_state->base.rotation;
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003505 u32 stride = plane_state->color_plane[color_plane].stride;
Ville Syrjälä1b500532017-03-07 21:42:08 +02003506
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003507 if (color_plane >= fb->format->num_planes)
Ville Syrjälä1b500532017-03-07 21:42:08 +02003508 return 0;
3509
Ville Syrjäläb3cf5c02018-09-25 22:37:08 +03003510 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
Ville Syrjäläd2196772016-01-28 18:33:11 +02003511}
3512
/*
 * Map a DRM fourcc pixel format onto the SKL+ PLANE_CTL format (and
 * component ordering) bits. Returns 0 for formats not handled here.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3546
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003547static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
James Ausmus4036c782017-11-13 10:11:28 -08003548{
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003549 if (!plane_state->base.fb->format->has_alpha)
3550 return PLANE_CTL_ALPHA_DISABLE;
3551
3552 switch (plane_state->base.pixel_blend_mode) {
3553 case DRM_MODE_BLEND_PIXEL_NONE:
3554 return PLANE_CTL_ALPHA_DISABLE;
3555 case DRM_MODE_BLEND_PREMULTI:
James Ausmus4036c782017-11-13 10:11:28 -08003556 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003557 case DRM_MODE_BLEND_COVERAGE:
3558 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
James Ausmus4036c782017-11-13 10:11:28 -08003559 default:
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003560 MISSING_CASE(plane_state->base.pixel_blend_mode);
James Ausmus4036c782017-11-13 10:11:28 -08003561 return PLANE_CTL_ALPHA_DISABLE;
3562 }
3563}
3564
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003565static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
James Ausmus4036c782017-11-13 10:11:28 -08003566{
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003567 if (!plane_state->base.fb->format->has_alpha)
3568 return PLANE_COLOR_ALPHA_DISABLE;
3569
3570 switch (plane_state->base.pixel_blend_mode) {
3571 case DRM_MODE_BLEND_PIXEL_NONE:
3572 return PLANE_COLOR_ALPHA_DISABLE;
3573 case DRM_MODE_BLEND_PREMULTI:
James Ausmus4036c782017-11-13 10:11:28 -08003574 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003575 case DRM_MODE_BLEND_COVERAGE:
3576 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
James Ausmus4036c782017-11-13 10:11:28 -08003577 default:
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003578 MISSING_CASE(plane_state->base.pixel_blend_mode);
James Ausmus4036c782017-11-13 10:11:28 -08003579 return PLANE_COLOR_ALPHA_DISABLE;
3580 }
3581}
3582
/*
 * Map a framebuffer modifier onto the SKL+ PLANE_CTL tiling bits; the
 * CCS modifiers additionally enable render decompression. Linear and
 * unknown modifiers return 0 (unknown ones also warn via MISSING_CASE).
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
3604
/*
 * Map a DRM rotation (one of the DRM_MODE_ROTATE_* values) onto the
 * SKL+ PLANE_CTL rotation bits. Note the deliberate 90/270 swap below.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
3626
3627static u32 cnl_plane_ctl_flip(unsigned int reflect)
3628{
3629 switch (reflect) {
3630 case 0:
3631 break;
3632 case DRM_MODE_REFLECT_X:
3633 return PLANE_CTL_FLIP_HORIZONTAL;
3634 case DRM_MODE_REFLECT_Y:
3635 default:
3636 MISSING_CASE(reflect);
Chandra Konduru6156a452015-04-27 13:48:39 -07003637 }
3638
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003639 return 0;
Chandra Konduru6156a452015-04-27 13:48:39 -07003640}
3641
/*
 * Build the PLANE_CTL register value for a SKL+ plane: format, tiling,
 * rotation/flip and color keying. On pre-GLK hardware alpha, gamma and
 * pipe CSC are also configured here; on GLK+ those bits live in
 * PLANE_COLOR_CTL (see glk_plane_color_ctl()).
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* pre-GLK: alpha/gamma/CSC are controlled from PLANE_CTL */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* horizontal flip is only supported on CNL+ */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
3683
/*
 * Build the PLANE_COLOR_CTL register value for a GLK+ plane: gamma and
 * pipe CSC enables (moved elsewhere on gen11+), alpha blending, and the
 * YUV->RGB conversion setup. ICL HDR planes use the dedicated input CSC
 * instead of the fixed-function YUV conversion modes.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	u32 plane_color_ctl = 0;

	if (INTEL_GEN(dev_priv) < 11) {
		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
	}
	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR planes: use the programmable input CSC instead */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
3714
Maarten Lankhorst73974892016-08-05 23:28:27 +03003715static int
3716__intel_display_resume(struct drm_device *dev,
Maarten Lankhorst581e49f2017-01-16 10:37:38 +01003717 struct drm_atomic_state *state,
3718 struct drm_modeset_acquire_ctx *ctx)
Maarten Lankhorst73974892016-08-05 23:28:27 +03003719{
3720 struct drm_crtc_state *crtc_state;
3721 struct drm_crtc *crtc;
3722 int i, ret;
3723
Ville Syrjäläaecd36b2017-06-01 17:36:13 +03003724 intel_modeset_setup_hw_state(dev, ctx);
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +00003725 i915_redisable_vga(to_i915(dev));
Maarten Lankhorst73974892016-08-05 23:28:27 +03003726
3727 if (!state)
3728 return 0;
3729
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01003730 /*
3731 * We've duplicated the state, pointers to the old state are invalid.
3732 *
3733 * Don't attempt to use the old state until we commit the duplicated state.
3734 */
3735 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
Maarten Lankhorst73974892016-08-05 23:28:27 +03003736 /*
3737 * Force recalculation even if we restore
3738 * current state. With fast modeset this may not result
3739 * in a modeset when the state is compatible.
3740 */
3741 crtc_state->mode_changed = true;
3742 }
3743
3744 /* ignore any reset values/BIOS leftovers in the WM registers */
Ville Syrjälä602ae832017-03-02 19:15:02 +02003745 if (!HAS_GMCH_DISPLAY(to_i915(dev)))
3746 to_intel_atomic_state(state)->skip_intermediate_wm = true;
Maarten Lankhorst73974892016-08-05 23:28:27 +03003747
Maarten Lankhorst581e49f2017-01-16 10:37:38 +01003748 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
Maarten Lankhorst73974892016-08-05 23:28:27 +03003749
3750 WARN_ON(ret == -EDEADLK);
3751 return ret;
3752}
3753
Ville Syrjälä4ac2ba22016-08-05 23:28:29 +03003754static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3755{
Chris Wilson55277e12019-01-03 11:21:04 +00003756 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
3757 intel_has_gpu_reset(dev_priv));
Ville Syrjälä4ac2ba22016-08-05 23:28:29 +03003758}
3759
/*
 * Prepare the display side for an impending GPU reset.
 *
 * When the reset will clobber the display (or the
 * force_reset_modeset_test modparam is set), take all modeset locks,
 * duplicate the current atomic state into
 * dev_priv->modeset_restore_state and disable all CRTCs.  The modeset
 * locks and the I915_RESET_MODESET flag are deliberately left
 * held/set on return; intel_finish_reset() releases them and restores
 * the saved state.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	/* A stuck fb pin would deadlock the modeset below; wedge instead. */
	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until it no longer deadlocks. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Stash the duplicated state for intel_finish_reset() to restore. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3815
/*
 * Counterpart to intel_prepare_reset(): restore the display state saved
 * there and drop the modeset locks it acquired.
 *
 * If the reset actually clobbered the display, the display hardware is
 * fully re-initialized (interrupts, PPS workaround, clock gating, HPD)
 * before the saved atomic state is committed; otherwise only the state
 * restore is performed.  Finally the I915_RESET_MODESET flag set by
 * intel_prepare_reset() is cleared.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	/* Take ownership of the saved state (clears the stash). */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Drop the locks taken in intel_prepare_reset(). */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3869
/*
 * Apply pipe source size and panel-fitter updates for a fastset
 * (no full modeset) transition from old_crtc_state to new_crtc_state.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* Program the source size as (width-1) << 16 | (height-1). */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK-style pfit: enable, or disable if it was on before. */
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}
}
3905
/*
 * Switch the FDI TX/RX link from a training pattern to the normal
 * (enhanced-framing) idle pattern once link training has completed.
 * The TX and RX sides use different bit layouts on IVB and CPT PCH
 * respectively, hence the platform checks.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3946
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Sequence: unmask the RX lock interrupt bits, enable TX/RX with
 * training pattern 1, poll FDI_RX_IIR (up to 5 reads) for bit lock,
 * then switch both sides to pattern 2 and poll for symbol lock.
 * Failures are logged but not returned to the caller.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock (training pattern 1 result). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write the bit back to clear the sticky status. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock (training pattern 2 result). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4040
/*
 * FDI TX voltage-swing / pre-emphasis settings tried in order by the
 * SNB and IVB link-training loops below (index advances on each retry).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4047
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Like the ILK variant, but each training pattern is retried across the
 * four vswing/pre-emphasis levels in snb_b_fdi_train_param[], polling
 * FDI_RX_IIR up to 5 times per level.  CPT PCH uses different pattern
 * bit layouts than the CPU side, hence the HAS_PCH_CPT() checks.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Pattern 1: walk the vswing table until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on lock. */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Pattern 2: walk the vswing table until symbol lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4180
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Auto-train is not used here: each vswing/pre-emphasis level from
 * snb_b_fdi_train_param[] is tried twice (j counts 0..2*table_size-1,
 * level = j/2).  On each attempt TX/RX are disabled, re-enabled with
 * pattern 1, polled for bit lock, then switched to pattern 2 and
 * polled for symbol lock; symbol lock jumps straight to train_done.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock; a second read covers a late latch. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock; success ends the whole training. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4300
/*
 * Enable the FDI PLLs for the given CRTC: first the PCH-side RX PLL
 * (programming lane count and BPC from the current PIPECONF), then
 * switch the RX clock from Rawclk to PCDclk, and finally make sure the
 * CPU-side TX PLL is on.  The udelay()s after each step are the
 * required warm-up times.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Copy the pipe's BPC setting into the FDI RX control field. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4337
Daniel Vetter88cefb62012-08-12 19:27:14 +02004338static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4339{
4340 struct drm_device *dev = intel_crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004341 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter88cefb62012-08-12 19:27:14 +02004342 int pipe = intel_crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004343 i915_reg_t reg;
4344 u32 temp;
Daniel Vetter88cefb62012-08-12 19:27:14 +02004345
4346 /* Switch from PCDclk to Rawclk */
4347 reg = FDI_RX_CTL(pipe);
4348 temp = I915_READ(reg);
4349 I915_WRITE(reg, temp & ~FDI_PCDCLK);
4350
4351 /* Disable CPU FDI TX PLL */
4352 reg = FDI_TX_CTL(pipe);
4353 temp = I915_READ(reg);
4354 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4355
4356 POSTING_READ(reg);
4357 udelay(100);
4358
4359 reg = FDI_RX_CTL(pipe);
4360 temp = I915_READ(reg);
4361 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4362
4363 /* Wait for the clocks to turn off. */
4364 POSTING_READ(reg);
4365 udelay(100);
4366}
4367
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004368static void ironlake_fdi_disable(struct drm_crtc *crtc)
4369{
4370 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004371 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004372 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4373 int pipe = intel_crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004374 i915_reg_t reg;
4375 u32 temp;
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004376
4377 /* disable CPU FDI tx and PCH FDI rx */
4378 reg = FDI_TX_CTL(pipe);
4379 temp = I915_READ(reg);
4380 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4381 POSTING_READ(reg);
4382
4383 reg = FDI_RX_CTL(pipe);
4384 temp = I915_READ(reg);
4385 temp &= ~(0x7 << 16);
Daniel Vetterdfd07d72012-12-17 11:21:38 +01004386 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004387 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4388
4389 POSTING_READ(reg);
4390 udelay(100);
4391
4392 /* Ironlake workaround, disable clock pointer after downing FDI */
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004393 if (HAS_PCH_IBX(dev_priv))
Jesse Barnes6f06ce12011-01-04 15:09:38 -08004394 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004395
4396 /* still set train pattern 1 */
4397 reg = FDI_TX_CTL(pipe);
4398 temp = I915_READ(reg);
4399 temp &= ~FDI_LINK_TRAIN_NONE;
4400 temp |= FDI_LINK_TRAIN_PATTERN_1;
4401 I915_WRITE(reg, temp);
4402
4403 reg = FDI_RX_CTL(pipe);
4404 temp = I915_READ(reg);
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004405 if (HAS_PCH_CPT(dev_priv)) {
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004406 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4407 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4408 } else {
4409 temp &= ~FDI_LINK_TRAIN_NONE;
4410 temp |= FDI_LINK_TRAIN_PATTERN_1;
4411 }
4412 /* BPC in FDI rx is consistent with that in PIPECONF */
4413 temp &= ~(0x07 << 16);
Daniel Vetterdfd07d72012-12-17 11:21:38 +01004414 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
Jesse Barnes0fc932b2011-01-04 15:09:37 -08004415 I915_WRITE(reg, temp);
4416
4417 POSTING_READ(reg);
4418 udelay(100);
4419}
4420
Chris Wilson49d73912016-11-29 09:50:08 +00004421bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
Chris Wilson5dce5b932014-01-20 10:17:36 +00004422{
Daniel Vetterfa058872017-07-20 19:57:52 +02004423 struct drm_crtc *crtc;
4424 bool cleanup_done;
Chris Wilson5dce5b932014-01-20 10:17:36 +00004425
Daniel Vetterfa058872017-07-20 19:57:52 +02004426 drm_for_each_crtc(crtc, &dev_priv->drm) {
4427 struct drm_crtc_commit *commit;
4428 spin_lock(&crtc->commit_lock);
4429 commit = list_first_entry_or_null(&crtc->commit_list,
4430 struct drm_crtc_commit, commit_entry);
4431 cleanup_done = commit ?
4432 try_wait_for_completion(&commit->cleanup_done) : true;
4433 spin_unlock(&crtc->commit_lock);
4434
4435 if (cleanup_done)
Chris Wilson5dce5b932014-01-20 10:17:36 +00004436 continue;
4437
Daniel Vetterfa058872017-07-20 19:57:52 +02004438 drm_crtc_wait_one_vblank(crtc);
Chris Wilson5dce5b932014-01-20 10:17:36 +00004439
4440 return true;
4441 }
4442
4443 return false;
4444}
4445
Maarten Lankhorstb7076542016-08-23 16:18:08 +02004446void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
Ville Syrjälä060f02d2015-12-04 22:21:34 +02004447{
4448 u32 temp;
4449
4450 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4451
4452 mutex_lock(&dev_priv->sb_lock);
4453
4454 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4455 temp |= SBI_SSCCTL_DISABLE;
4456 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4457
4458 mutex_unlock(&dev_priv->sb_lock);
4459}
4460
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004461/* Program iCLKIP clock to the desired frequency */
Maarten Lankhorstc5b36fa2018-10-11 12:04:55 +02004462static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004463{
Maarten Lankhorstc5b36fa2018-10-11 12:04:55 +02004464 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ander Conselvan de Oliveira0dcdc382017-03-02 14:58:52 +02004465 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Maarten Lankhorstc5b36fa2018-10-11 12:04:55 +02004466 int clock = crtc_state->base.adjusted_mode.crtc_clock;
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004467 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4468 u32 temp;
4469
Ville Syrjälä060f02d2015-12-04 22:21:34 +02004470 lpt_disable_iclkip(dev_priv);
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004471
Ville Syrjälä64b46a02016-02-17 21:41:11 +02004472 /* The iCLK virtual clock root frequency is in MHz,
4473 * but the adjusted_mode->crtc_clock in in KHz. To get the
4474 * divisors, it is necessary to divide one by another, so we
4475 * convert the virtual clock precision to KHz here for higher
4476 * precision.
4477 */
4478 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004479 u32 iclk_virtual_root_freq = 172800 * 1000;
4480 u32 iclk_pi_range = 64;
Ville Syrjälä64b46a02016-02-17 21:41:11 +02004481 u32 desired_divisor;
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004482
Ville Syrjälä64b46a02016-02-17 21:41:11 +02004483 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4484 clock << auxdiv);
4485 divsel = (desired_divisor / iclk_pi_range) - 2;
4486 phaseinc = desired_divisor % iclk_pi_range;
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004487
Ville Syrjälä64b46a02016-02-17 21:41:11 +02004488 /*
4489 * Near 20MHz is a corner case which is
4490 * out of range for the 7-bit divisor
4491 */
4492 if (divsel <= 0x7f)
4493 break;
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004494 }
4495
4496 /* This should not happen with any sane values */
4497 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4498 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4499 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4500 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4501
4502 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
Ville Syrjälä12d7cee2013-09-04 18:25:19 +03004503 clock,
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004504 auxdiv,
4505 divsel,
4506 phasedir,
4507 phaseinc);
4508
Ville Syrjälä060f02d2015-12-04 22:21:34 +02004509 mutex_lock(&dev_priv->sb_lock);
4510
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004511 /* Program SSCDIVINTPHASE6 */
Paulo Zanoni988d6ee2012-12-01 12:04:24 -02004512 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004513 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4514 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4515 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4516 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4517 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4518 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
Paulo Zanoni988d6ee2012-12-01 12:04:24 -02004519 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004520
4521 /* Program SSCAUXDIV */
Paulo Zanoni988d6ee2012-12-01 12:04:24 -02004522 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004523 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4524 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
Paulo Zanoni988d6ee2012-12-01 12:04:24 -02004525 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004526
4527 /* Enable modulator and associated divider */
Paulo Zanoni988d6ee2012-12-01 12:04:24 -02004528 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004529 temp &= ~SBI_SSCCTL_DISABLE;
Paulo Zanoni988d6ee2012-12-01 12:04:24 -02004530 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004531
Ville Syrjälä060f02d2015-12-04 22:21:34 +02004532 mutex_unlock(&dev_priv->sb_lock);
4533
Eugeni Dodonove615efe2012-05-09 15:37:26 -03004534 /* Wait for initialization time */
4535 udelay(24);
4536
4537 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
4538}
4539
Ville Syrjälä8802e5b2016-02-17 21:41:12 +02004540int lpt_get_iclkip(struct drm_i915_private *dev_priv)
4541{
4542 u32 divsel, phaseinc, auxdiv;
4543 u32 iclk_virtual_root_freq = 172800 * 1000;
4544 u32 iclk_pi_range = 64;
4545 u32 desired_divisor;
4546 u32 temp;
4547
4548 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
4549 return 0;
4550
4551 mutex_lock(&dev_priv->sb_lock);
4552
4553 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4554 if (temp & SBI_SSCCTL_DISABLE) {
4555 mutex_unlock(&dev_priv->sb_lock);
4556 return 0;
4557 }
4558
4559 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4560 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
4561 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
4562 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
4563 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
4564
4565 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4566 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
4567 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
4568
4569 mutex_unlock(&dev_priv->sb_lock);
4570
4571 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
4572
4573 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4574 desired_divisor << auxdiv);
4575}
4576
Maarten Lankhorst5e1cdf52018-10-04 11:45:58 +02004577static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
Daniel Vetter275f01b22013-05-03 11:49:47 +02004578 enum pipe pch_transcoder)
4579{
Maarten Lankhorst5e1cdf52018-10-04 11:45:58 +02004580 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4581 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4582 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
Daniel Vetter275f01b22013-05-03 11:49:47 +02004583
4584 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4585 I915_READ(HTOTAL(cpu_transcoder)));
4586 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4587 I915_READ(HBLANK(cpu_transcoder)));
4588 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4589 I915_READ(HSYNC(cpu_transcoder)));
4590
4591 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4592 I915_READ(VTOTAL(cpu_transcoder)));
4593 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4594 I915_READ(VBLANK(cpu_transcoder)));
4595 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4596 I915_READ(VSYNC(cpu_transcoder)));
4597 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4598 I915_READ(VSYNCSHIFT(cpu_transcoder)));
4599}
4600
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004601static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004602{
Jani Nikulaba3f4d02019-01-18 14:01:23 +02004603 u32 temp;
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004604
4605 temp = I915_READ(SOUTH_CHICKEN1);
Ander Conselvan de Oliveira003632d2015-03-11 13:35:43 +02004606 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004607 return;
4608
4609 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4610 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4611
Ander Conselvan de Oliveira003632d2015-03-11 13:35:43 +02004612 temp &= ~FDI_BC_BIFURCATION_SELECT;
4613 if (enable)
4614 temp |= FDI_BC_BIFURCATION_SELECT;
4615
4616 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004617 I915_WRITE(SOUTH_CHICKEN1, temp);
4618 POSTING_READ(SOUTH_CHICKEN1);
4619}
4620
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004621static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004622{
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004623 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4624 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004625
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004626 switch (crtc->pipe) {
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004627 case PIPE_A:
4628 break;
4629 case PIPE_B:
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004630 if (crtc_state->fdi_lanes > 2)
4631 cpt_set_fdi_bc_bifurcation(dev_priv, false);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004632 else
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004633 cpt_set_fdi_bc_bifurcation(dev_priv, true);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004634
4635 break;
4636 case PIPE_C:
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004637 cpt_set_fdi_bc_bifurcation(dev_priv, true);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004638
4639 break;
4640 default:
4641 BUG();
4642 }
4643}
4644
Ville Syrjäläf606bc62018-05-18 18:29:25 +03004645/*
4646 * Finds the encoder associated with the given CRTC. This can only be
4647 * used when we know that the CRTC isn't feeding multiple encoders!
4648 */
4649static struct intel_encoder *
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03004650intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4651 const struct intel_crtc_state *crtc_state)
Ville Syrjäläf606bc62018-05-18 18:29:25 +03004652{
4653 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ville Syrjäläf606bc62018-05-18 18:29:25 +03004654 const struct drm_connector_state *connector_state;
4655 const struct drm_connector *connector;
4656 struct intel_encoder *encoder = NULL;
4657 int num_encoders = 0;
4658 int i;
4659
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03004660 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
Ville Syrjäläf606bc62018-05-18 18:29:25 +03004661 if (connector_state->crtc != &crtc->base)
4662 continue;
4663
4664 encoder = to_intel_encoder(connector_state->best_encoder);
4665 num_encoders++;
4666 }
4667
4668 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4669 num_encoders, pipe_name(crtc->pipe));
4670
4671 return encoder;
4672}
4673
Jesse Barnesf67a5592011-01-05 10:31:48 -08004674/*
4675 * Enable PCH resources required for PCH ports:
4676 * - PCH PLLs
4677 * - FDI training & RX/TX
4678 * - update transcoder timings
4679 * - DP transcoding bits
4680 * - transcoder
4681 */
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03004682static void ironlake_pch_enable(const struct intel_atomic_state *state,
4683 const struct intel_crtc_state *crtc_state)
Jesse Barnes79e53942008-11-07 14:24:08 -08004684{
Ander Conselvan de Oliveira2ce42272017-03-02 14:58:53 +02004685 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ander Conselvan de Oliveira4cbe4b22017-03-02 14:58:51 +02004686 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004687 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira4cbe4b22017-03-02 14:58:51 +02004688 int pipe = crtc->pipe;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004689 u32 temp;
Jesse Barnes6be4a602010-09-10 10:26:01 -07004690
Daniel Vetterab9412b2013-05-03 11:49:46 +02004691 assert_pch_transcoder_disabled(dev_priv, pipe);
Chris Wilsone7e164d2012-05-11 09:21:25 +01004692
Tvrtko Ursulinfd6b8f42016-10-14 10:13:06 +01004693 if (IS_IVYBRIDGE(dev_priv))
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004694 ivybridge_update_fdi_bc_bifurcation(crtc_state);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004695
Daniel Vettercd986ab2012-10-26 10:58:12 +02004696 /* Write the TU size bits before fdi link training, so that error
4697 * detection works. */
4698 I915_WRITE(FDI_RX_TUSIZE1(pipe),
4699 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4700
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004701 /* For PCH output, training FDI link */
Ander Conselvan de Oliveiradc4a1092017-03-02 14:58:54 +02004702 dev_priv->display.fdi_link_train(crtc, crtc_state);
Jesse Barnes6be4a602010-09-10 10:26:01 -07004703
Daniel Vetter3ad8a202013-06-05 13:34:32 +02004704 /* We need to program the right clock selection before writing the pixel
4705 * mutliplier into the DPLL. */
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004706 if (HAS_PCH_CPT(dev_priv)) {
Jesse Barnesee7b9f92012-04-20 17:11:53 +01004707 u32 sel;
Jesse Barnes4b645f12011-10-12 09:51:31 -07004708
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004709 temp = I915_READ(PCH_DPLL_SEL);
Daniel Vetter11887392013-06-05 13:34:09 +02004710 temp |= TRANS_DPLL_ENABLE(pipe);
4711 sel = TRANS_DPLLB_SEL(pipe);
Ander Conselvan de Oliveira2ce42272017-03-02 14:58:53 +02004712 if (crtc_state->shared_dpll ==
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02004713 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
Jesse Barnesee7b9f92012-04-20 17:11:53 +01004714 temp |= sel;
4715 else
4716 temp &= ~sel;
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004717 I915_WRITE(PCH_DPLL_SEL, temp);
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004718 }
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004719
Daniel Vetter3ad8a202013-06-05 13:34:32 +02004720 /* XXX: pch pll's can be enabled any time before we enable the PCH
4721 * transcoder, and we actually should do this to not upset any PCH
4722 * transcoder that already use the clock when we share it.
4723 *
4724 * Note that enable_shared_dpll tries to do the right thing, but
4725 * get_shared_dpll unconditionally resets the pll - we need that to have
4726 * the right LVDS enable sequence. */
Maarten Lankhorst65c307f2018-10-05 11:52:44 +02004727 intel_enable_shared_dpll(crtc_state);
Daniel Vetter3ad8a202013-06-05 13:34:32 +02004728
Jesse Barnesd9b6cb52011-01-04 15:09:35 -08004729 /* set transcoder timing, panel must allow it */
4730 assert_panel_unlocked(dev_priv, pipe);
Maarten Lankhorst5e1cdf52018-10-04 11:45:58 +02004731 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004732
Paulo Zanoni303b81e2012-10-31 18:12:23 -02004733 intel_fdi_normal_train(crtc);
Zhenyu Wang5e84e1a2010-10-28 16:38:08 +08004734
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004735 /* For PCH DP, enable TRANS_DP_CTL */
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01004736 if (HAS_PCH_CPT(dev_priv) &&
Ander Conselvan de Oliveira2ce42272017-03-02 14:58:53 +02004737 intel_crtc_has_dp_encoder(crtc_state)) {
Ville Syrjälä9c4edae2015-10-29 21:25:51 +02004738 const struct drm_display_mode *adjusted_mode =
Ander Conselvan de Oliveira2ce42272017-03-02 14:58:53 +02004739 &crtc_state->base.adjusted_mode;
Daniel Vetterdfd07d72012-12-17 11:21:38 +01004740 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004741 i915_reg_t reg = TRANS_DP_CTL(pipe);
Ville Syrjäläf67dc6d2018-05-18 18:29:26 +03004742 enum port port;
4743
Chris Wilson5eddb702010-09-11 13:48:45 +01004744 temp = I915_READ(reg);
4745 temp &= ~(TRANS_DP_PORT_SEL_MASK |
Eric Anholt220cad32010-11-18 09:32:58 +08004746 TRANS_DP_SYNC_MASK |
4747 TRANS_DP_BPC_MASK);
Ville Syrjäläe3ef4472015-05-05 17:17:31 +03004748 temp |= TRANS_DP_OUTPUT_ENABLE;
Jesse Barnes9325c9f2011-06-24 12:19:21 -07004749 temp |= bpc << 9; /* same format but at 11:9 */
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004750
Ville Syrjälä9c4edae2015-10-29 21:25:51 +02004751 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
Chris Wilson5eddb702010-09-11 13:48:45 +01004752 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
Ville Syrjälä9c4edae2015-10-29 21:25:51 +02004753 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
Chris Wilson5eddb702010-09-11 13:48:45 +01004754 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004755
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03004756 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
Ville Syrjäläf67dc6d2018-05-18 18:29:26 +03004757 WARN_ON(port < PORT_B || port > PORT_D);
4758 temp |= TRANS_DP_PORT_SEL(port);
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004759
Chris Wilson5eddb702010-09-11 13:48:45 +01004760 I915_WRITE(reg, temp);
Jesse Barnesc98e9dc2010-09-10 10:57:18 -07004761 }
4762
Maarten Lankhorst7efd90f2018-10-04 11:45:55 +02004763 ironlake_enable_pch_transcoder(crtc_state);
Jesse Barnesf67a5592011-01-05 10:31:48 -08004764}
4765
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03004766static void lpt_pch_enable(const struct intel_atomic_state *state,
4767 const struct intel_crtc_state *crtc_state)
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004768{
Ander Conselvan de Oliveira2ce42272017-03-02 14:58:53 +02004769 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ander Conselvan de Oliveira0dcdc382017-03-02 14:58:52 +02004770 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ander Conselvan de Oliveira2ce42272017-03-02 14:58:53 +02004771 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004772
Matthias Kaehlckea2196032017-07-17 11:14:03 -07004773 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004774
Maarten Lankhorstc5b36fa2018-10-11 12:04:55 +02004775 lpt_program_iclkip(crtc_state);
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004776
Paulo Zanoni0540e482012-10-31 18:12:40 -02004777 /* Set transcoder timing. */
Maarten Lankhorst5e1cdf52018-10-04 11:45:58 +02004778 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
Paulo Zanoni1507e5b2012-10-31 18:12:22 -02004779
Paulo Zanoni937bb612012-10-31 18:12:47 -02004780 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
Jesse Barnesf67a5592011-01-05 10:31:48 -08004781}
4782
Daniel Vettera1520312013-05-03 11:49:50 +02004783static void cpt_verify_modeset(struct drm_device *dev, int pipe)
Jesse Barnesd4270e52011-10-11 10:43:02 -07004784{
Chris Wilsonfac5e232016-07-04 11:34:36 +01004785 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02004786 i915_reg_t dslreg = PIPEDSL(pipe);
Jesse Barnesd4270e52011-10-11 10:43:02 -07004787 u32 temp;
4788
4789 temp = I915_READ(dslreg);
4790 udelay(500);
4791 if (wait_for(I915_READ(dslreg) != temp, 5)) {
Jesse Barnesd4270e52011-10-11 10:43:02 -07004792 if (wait_for(I915_READ(dslreg) != temp, 5))
Ville Syrjälä84f44ce2013-04-17 17:48:49 +03004793 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
Jesse Barnesd4270e52011-10-11 10:43:02 -07004794 }
4795}
4796
Ville Syrjälä0a599522018-05-21 21:56:13 +03004797/*
4798 * The hardware phase 0.0 refers to the center of the pixel.
4799 * We want to start from the top/left edge which is phase
4800 * -0.5. That matches how the hardware calculates the scaling
4801 * factors (from top-left of the first pixel to bottom-right
4802 * of the last pixel, as opposed to the pixel centers).
4803 *
4804 * For 4:2:0 subsampled chroma planes we obviously have to
4805 * adjust that so that the chroma sample position lands in
4806 * the right spot.
4807 *
4808 * Note that for packed YCbCr 4:2:2 formats there is no way to
4809 * control chroma siting. The hardware simply replicates the
4810 * chroma samples for both of the luma samples, and thus we don't
4811 * actually get the expected MPEG2 chroma siting convention :(
4812 * The same behaviour is observed on pre-SKL platforms as well.
Ville Syrjäläe7a278a2018-10-29 20:18:20 +02004813 *
4814 * Theory behind the formula (note that we ignore sub-pixel
4815 * source coordinates):
4816 * s = source sample position
4817 * d = destination sample position
4818 *
4819 * Downscaling 4:1:
4820 * -0.5
4821 * | 0.0
4822 * | | 1.5 (initial phase)
4823 * | | |
4824 * v v v
4825 * | s | s | s | s |
4826 * | d |
4827 *
4828 * Upscaling 1:4:
4829 * -0.5
4830 * | -0.375 (initial phase)
4831 * | | 0.0
4832 * | | |
4833 * v v v
4834 * | s |
4835 * | d | d | d | d |
Ville Syrjälä0a599522018-05-21 21:56:13 +03004836 */
Ville Syrjäläe7a278a2018-10-29 20:18:20 +02004837u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
Ville Syrjälä0a599522018-05-21 21:56:13 +03004838{
4839 int phase = -0x8000;
4840 u16 trip = 0;
4841
4842 if (chroma_cosited)
4843 phase += (sub - 1) * 0x8000 / sub;
4844
Ville Syrjäläe7a278a2018-10-29 20:18:20 +02004845 phase += scale / (2 * sub);
4846
4847 /*
4848 * Hardware initial phase limited to [-0.5:1.5].
4849 * Since the max hardware scale factor is 3.0, we
4850 * should never actually excdeed 1.0 here.
4851 */
4852 WARN_ON(phase < -0x8000 || phase > 0x18000);
4853
Ville Syrjälä0a599522018-05-21 21:56:13 +03004854 if (phase < 0)
4855 phase = 0x10000 + phase;
4856 else
4857 trip = PS_PHASE_TRIP;
4858
4859 return ((phase >> 2) & PS_PHASE_MASK) | trip;
4860}
4861
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004862static int
4863skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
Ville Syrjäläd96a7d22017-03-31 21:00:54 +03004864 unsigned int scaler_user, int *scaler_id,
Chandra Konduru77224cd2018-04-09 09:11:13 +05304865 int src_w, int src_h, int dst_w, int dst_h,
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004866 const struct drm_format_info *format, bool need_scaler)
Chandra Kondurua1b22782015-04-07 15:28:45 -07004867{
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004868 struct intel_crtc_scaler_state *scaler_state =
4869 &crtc_state->scaler_state;
4870 struct intel_crtc *intel_crtc =
4871 to_intel_crtc(crtc_state->base.crtc);
Mahesh Kumar7f58cbb2017-06-30 17:41:00 +05304872 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
4873 const struct drm_display_mode *adjusted_mode =
4874 &crtc_state->base.adjusted_mode;
Chandra Konduru6156a452015-04-27 13:48:39 -07004875
Ville Syrjäläd96a7d22017-03-31 21:00:54 +03004876 /*
4877 * Src coordinates are already rotated by 270 degrees for
4878 * the 90/270 degree plane rotation cases (to match the
4879 * GTT mapping), hence no need to account for rotation here.
4880 */
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004881 if (src_w != dst_w || src_h != dst_h)
4882 need_scaler = true;
Shashank Sharmae5c05932017-07-21 20:55:05 +05304883
Chandra Kondurua1b22782015-04-07 15:28:45 -07004884 /*
Mahesh Kumar7f58cbb2017-06-30 17:41:00 +05304885 * Scaling/fitting not supported in IF-ID mode in GEN9+
4886 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
4887 * Once NV12 is enabled, handle it here while allocating scaler
4888 * for NV12.
4889 */
4890 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004891 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
Mahesh Kumar7f58cbb2017-06-30 17:41:00 +05304892 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
4893 return -EINVAL;
4894 }
4895
4896 /*
Chandra Kondurua1b22782015-04-07 15:28:45 -07004897 * if plane is being disabled or scaler is no more required or force detach
4898 * - free scaler binded to this plane/crtc
4899 * - in order to do this, update crtc->scaler_usage
4900 *
4901 * Here scaler state in crtc_state is set free so that
4902 * scaler can be assigned to other user. Actual register
4903 * update to free the scaler is done in plane/panel-fit programming.
4904 * For this purpose crtc/plane_state->scaler_id isn't reset here.
4905 */
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004906 if (force_detach || !need_scaler) {
Chandra Kondurua1b22782015-04-07 15:28:45 -07004907 if (*scaler_id >= 0) {
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004908 scaler_state->scaler_users &= ~(1 << scaler_user);
Chandra Kondurua1b22782015-04-07 15:28:45 -07004909 scaler_state->scalers[*scaler_id].in_use = 0;
4910
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004911 DRM_DEBUG_KMS("scaler_user index %u.%u: "
4912 "Staged freeing scaler id %d scaler_users = 0x%x\n",
4913 intel_crtc->pipe, scaler_user, *scaler_id,
Chandra Kondurua1b22782015-04-07 15:28:45 -07004914 scaler_state->scaler_users);
4915 *scaler_id = -1;
4916 }
4917 return 0;
4918 }
4919
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004920 if (format && format->format == DRM_FORMAT_NV12 &&
Maarten Lankhorst5d794282018-05-12 03:03:14 +05304921 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
Chandra Konduru77224cd2018-04-09 09:11:13 +05304922 DRM_DEBUG_KMS("NV12: src dimensions not met\n");
4923 return -EINVAL;
4924 }
4925
Chandra Kondurua1b22782015-04-07 15:28:45 -07004926 /* range checks */
4927 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
Nabendu Maiti323301a2018-03-23 10:24:18 -07004928 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
Lucas De Marchicf819ef2018-12-12 10:10:43 -08004929 (IS_GEN(dev_priv, 11) &&
Nabendu Maiti323301a2018-03-23 10:24:18 -07004930 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
4931 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
Lucas De Marchicf819ef2018-12-12 10:10:43 -08004932 (!IS_GEN(dev_priv, 11) &&
Nabendu Maiti323301a2018-03-23 10:24:18 -07004933 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4934 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004935 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
Chandra Kondurua1b22782015-04-07 15:28:45 -07004936 "size is out of scaler range\n",
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004937 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
Chandra Kondurua1b22782015-04-07 15:28:45 -07004938 return -EINVAL;
4939 }
4940
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004941 /* mark this plane as a scaler user in crtc_state */
4942 scaler_state->scaler_users |= (1 << scaler_user);
4943 DRM_DEBUG_KMS("scaler_user index %u.%u: "
4944 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4945 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4946 scaler_state->scaler_users);
4947
4948 return 0;
4949}
4950
4951/**
4952 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4953 *
4954 * @state: crtc's scaler state
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004955 *
4956 * Return
4957 * 0 - scaler_usage updated successfully
4958 * error - requested scaling cannot be supported or other error condition
4959 */
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02004960int skl_update_scaler_crtc(struct intel_crtc_state *state)
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004961{
Ville Syrjälä7c5f93b2015-09-08 13:40:49 +03004962 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004963 bool need_scaler = false;
4964
4965 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4966 need_scaler = true;
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004967
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02004968 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
Chandra Konduru77224cd2018-04-09 09:11:13 +05304969 &state->scaler_state.scaler_id,
4970 state->pipe_src_w, state->pipe_src_h,
4971 adjusted_mode->crtc_hdisplay,
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004972 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004973}
4974
4975/**
4976 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
Chris Wilsonc38c1452018-02-14 13:49:22 +00004977 * @crtc_state: crtc's scaler state
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004978 * @plane_state: atomic plane state to update
4979 *
4980 * Return
4981 * 0 - scaler_usage updated successfully
4982 * error - requested scaling cannot be supported or other error condition
4983 */
Maarten Lankhorstda20eab2015-06-15 12:33:44 +02004984static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4985 struct intel_plane_state *plane_state)
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004986{
Maarten Lankhorstda20eab2015-06-15 12:33:44 +02004987 struct intel_plane *intel_plane =
4988 to_intel_plane(plane_state->base.plane);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004989 struct drm_framebuffer *fb = plane_state->base.fb;
4990 int ret;
Ville Syrjälä936e71e2016-07-26 19:06:59 +03004991 bool force_detach = !fb || !plane_state->base.visible;
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02004992 bool need_scaler = false;
4993
4994 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
4995 if (!icl_is_hdr_plane(intel_plane) &&
4996 fb && fb->format->format == DRM_FORMAT_NV12)
4997 need_scaler = true;
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004998
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004999 ret = skl_update_scaler(crtc_state, force_detach,
5000 drm_plane_index(&intel_plane->base),
5001 &plane_state->scaler_id,
Ville Syrjälä936e71e2016-07-26 19:06:59 +03005002 drm_rect_width(&plane_state->base.src) >> 16,
5003 drm_rect_height(&plane_state->base.src) >> 16,
5004 drm_rect_width(&plane_state->base.dst),
Chandra Konduru77224cd2018-04-09 09:11:13 +05305005 drm_rect_height(&plane_state->base.dst),
Maarten Lankhorstb1554e22018-10-18 13:51:31 +02005006 fb ? fb->format : NULL, need_scaler);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02005007
5008 if (ret || plane_state->scaler_id < 0)
5009 return ret;
5010
Chandra Kondurua1b22782015-04-07 15:28:45 -07005011 /* check colorkey */
Ville Syrjälä6ec5bd32018-02-02 22:42:31 +02005012 if (plane_state->ckey.flags) {
Ville Syrjälä72660ce2016-05-27 20:59:20 +03005013 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5014 intel_plane->base.base.id,
5015 intel_plane->base.name);
Chandra Kondurua1b22782015-04-07 15:28:45 -07005016 return -EINVAL;
5017 }
5018
5019 /* Check src format */
Ville Syrjälä438b74a2016-12-14 23:32:55 +02005020 switch (fb->format->format) {
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02005021 case DRM_FORMAT_RGB565:
5022 case DRM_FORMAT_XBGR8888:
5023 case DRM_FORMAT_XRGB8888:
5024 case DRM_FORMAT_ABGR8888:
5025 case DRM_FORMAT_ARGB8888:
5026 case DRM_FORMAT_XRGB2101010:
5027 case DRM_FORMAT_XBGR2101010:
5028 case DRM_FORMAT_YUYV:
5029 case DRM_FORMAT_YVYU:
5030 case DRM_FORMAT_UYVY:
5031 case DRM_FORMAT_VYUY:
Chandra Konduru77224cd2018-04-09 09:11:13 +05305032 case DRM_FORMAT_NV12:
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02005033 break;
5034 default:
Ville Syrjälä72660ce2016-05-27 20:59:20 +03005035 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5036 intel_plane->base.base.id, intel_plane->base.name,
Ville Syrjälä438b74a2016-12-14 23:32:55 +02005037 fb->base.id, fb->format->format);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02005038 return -EINVAL;
Chandra Kondurua1b22782015-04-07 15:28:45 -07005039 }
5040
Chandra Kondurua1b22782015-04-07 15:28:45 -07005041 return 0;
5042}
5043
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02005044static void skylake_scaler_disable(struct intel_crtc *crtc)
5045{
5046 int i;
5047
5048 for (i = 0; i < crtc->num_scalers; i++)
5049 skl_detach_scaler(crtc, i);
5050}
5051
/*
 * Program the SKL+ panel fitter using a pipe scaler, based on the
 * pch_pfit state computed at atomic check time. No-op when the panel
 * fitter is not enabled in @crtc_state.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* A scaler must have been reserved for the pipe at check time. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in bits 31:16 and height in 15:0. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* 16.16 fixed-point scale factors from pipe source to pfit window. */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		/*
		 * NOTE(review): the phase registers use raw I915_WRITE_FW while
		 * the surrounding writes use I915_WRITE - confirm this mix of
		 * locked/unlocked mmio accessors is intentional.
		 */
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
5088
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005089static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
Jesse Barnesb074cec2013-04-25 12:55:02 -07005090{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005091 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5092 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesb074cec2013-04-25 12:55:02 -07005093 int pipe = crtc->pipe;
5094
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005095 if (crtc_state->pch_pfit.enabled) {
Jesse Barnesb074cec2013-04-25 12:55:02 -07005096 /* Force use of hard-coded filter coefficients
5097 * as some pre-programmed values are broken,
5098 * e.g. x201.
5099 */
Tvrtko Ursulinfd6b8f42016-10-14 10:13:06 +01005100 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
Jesse Barnesb074cec2013-04-25 12:55:02 -07005101 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5102 PF_PIPE_SEL_IVB(pipe));
5103 else
5104 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005105 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5106 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
Jesse Barnes040484a2011-01-03 12:14:26 -08005107 }
Jesse Barnesf67a5592011-01-05 10:31:48 -08005108}
5109
/*
 * Enable IPS (Intermediate Pixel Storage) for @crtc_state.
 *
 * On BDW the enable goes through the pcode mailbox under pcu_lock; on
 * other platforms (HSW) IPS_CTL is written directly and polled until the
 * enable bit latches. No-op when IPS is not enabled in the state.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		mutex_unlock(&dev_priv->pcu_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
5149
/*
 * Disable IPS for @crtc_state, then wait one vblank so the plane can be
 * safely disabled afterwards.
 *
 * On BDW the disable goes through the pcode mailbox under pcu_lock and
 * IPS_CTL is polled until the enable bit clears; on other platforms
 * (HSW) IPS_CTL is cleared directly. No-op when IPS is not enabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->pcu_lock);
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5180
Maarten Lankhorst7cac9452015-04-21 17:12:55 +03005181static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005182{
Maarten Lankhorst7cac9452015-04-21 17:12:55 +03005183 if (intel_crtc->overlay) {
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005184 struct drm_device *dev = intel_crtc->base.dev;
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005185
5186 mutex_lock(&dev->struct_mutex);
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005187 (void) intel_overlay_switch_off(intel_crtc->overlay);
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005188 mutex_unlock(&dev->struct_mutex);
5189 }
5190
5191 /* Let userspace switch the overlay on again. In most cases userspace
5192 * has to recompute where to put it anyway.
5193 */
5194}
5195
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005196/**
5197 * intel_post_enable_primary - Perform operations after enabling primary plane
5198 * @crtc: the CRTC whose primary plane was just enabled
Chris Wilsonc38c1452018-02-14 13:49:22 +00005199 * @new_crtc_state: the enabling state
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005200 *
5201 * Performs potentially sleeping operations that must be done after the primary
5202 * plane is enabled, such as updating FBC and IPS. Note that this may be
5203 * called due to an explicit primary plane update, or due to an implicit
5204 * re-enable that is caused when a sprite plane is updated to no longer
5205 * completely hide the primary plane.
5206 */
5207static void
Maarten Lankhorst199ea382017-11-10 12:35:00 +01005208intel_post_enable_primary(struct drm_crtc *crtc,
5209 const struct intel_crtc_state *new_crtc_state)
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005210{
5211 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005212 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005213 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5214 int pipe = intel_crtc->pipe;
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005215
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005216 /*
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005217 * Gen2 reports pipe underruns whenever all planes are disabled.
5218 * So don't enable underrun reporting before at least some planes
5219 * are enabled.
5220 * FIXME: Need to fix the logic to work when we turn off all planes
5221 * but leave the pipe running.
Daniel Vetterf99d7062014-06-19 16:01:59 +02005222 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08005223 if (IS_GEN(dev_priv, 2))
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005224 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5225
Ville Syrjäläaca7b682015-10-30 19:22:21 +02005226 /* Underruns don't always raise interrupts, so check manually. */
5227 intel_check_cpu_fifo_underruns(dev_priv);
5228 intel_check_pch_fifo_underruns(dev_priv);
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005229}
5230
Ville Syrjälä2622a082016-03-09 19:07:26 +02005231/* FIXME get rid of this and use pre_plane_update */
5232static void
5233intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5234{
5235 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005236 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä2622a082016-03-09 19:07:26 +02005237 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5238 int pipe = intel_crtc->pipe;
5239
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005240 /*
5241 * Gen2 reports pipe underruns whenever all planes are disabled.
5242 * So disable underrun reporting before all the planes get disabled.
5243 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08005244 if (IS_GEN(dev_priv, 2))
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005245 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5246
5247 hsw_disable_ips(to_intel_crtc_state(crtc->state));
Ville Syrjälä2622a082016-03-09 19:07:26 +02005248
5249 /*
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005250 * Vblank time updates from the shadow to live plane control register
5251 * are blocked if the memory self-refresh mode is active at that
5252 * moment. So to make sure the plane gets truly disabled, disable
5253 * first the self-refresh mode. The self-refresh enable bit in turn
5254 * will be checked/applied by the HW only at the next frame start
5255 * event which is after the vblank start event, so we need to have a
5256 * wait-for-vblank between disabling the plane and the pipe.
5257 */
Ville Syrjälä11a85d62016-11-28 19:37:12 +02005258 if (HAS_GMCH_DISPLAY(dev_priv) &&
5259 intel_set_memory_cxsr(dev_priv, false))
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02005260 intel_wait_for_vblank(dev_priv, pipe);
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005261}
5262
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005263static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5264 const struct intel_crtc_state *new_crtc_state)
5265{
5266 if (!old_crtc_state->ips_enabled)
5267 return false;
5268
5269 if (needs_modeset(&new_crtc_state->base))
5270 return true;
5271
5272 return !new_crtc_state->ips_enabled;
5273}
5274
5275static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5276 const struct intel_crtc_state *new_crtc_state)
5277{
5278 if (!new_crtc_state->ips_enabled)
5279 return false;
5280
5281 if (needs_modeset(&new_crtc_state->base))
5282 return true;
5283
5284 /*
5285 * We can't read out IPS on broadwell, assume the worst and
5286 * forcibly enable IPS on the first fastset.
5287 */
5288 if (new_crtc_state->update_pipe &&
5289 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5290 return true;
5291
5292 return !old_crtc_state->ips_enabled;
5293}
5294
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305295static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5296 const struct intel_crtc_state *crtc_state)
5297{
5298 if (!crtc_state->nv12_planes)
5299 return false;
5300
Rodrigo Vivi1347d3c2018-10-31 09:28:45 -07005301 /* WA Display #0827: Gen9:all */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08005302 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305303 return true;
5304
5305 return false;
5306}
5307
/*
 * Post-commit work for a crtc's plane update: frontbuffer flip notification,
 * optimal watermarks, IPS re-enable, FBC, and undoing the NV12 workaround.
 * Runs after the vblank that latched the new plane state.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	/* The new crtc state committed by this update. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	/* Only act on the primary plane if it was part of this update. */
	if (old_primary_state) {
		struct drm_plane_state *new_primary_state =
			drm_atomic_get_new_plane_state(old_state, primary);

		intel_fbc_post_update(crtc);

		/* Primary became visible (modeset or was hidden before). */
		if (new_primary_state->visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}

	/* Display WA 827 */
	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
	    !needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, false);
	}
}
5347
/*
 * Pre-commit work for a crtc's plane update: IPS disable, underrun
 * reporting, the NV12 workaround, CxSR/LP-watermark vblank waits, and
 * the 'intermediate' watermark programming. The ordering of the vblank
 * waits here is deliberate - see the inline comments.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
		hsw_disable_ips(old_crtc_state);

	/* Only act on the primary plane if it was part of this update. */
	if (old_primary_state) {
		struct intel_plane_state *new_primary_state =
			intel_atomic_get_new_plane_state(old_intel_state,
							 to_intel_plane(primary));

		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So disable underrun reporting before all the planes get disabled.
		 */
		if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
		    (modeset || !new_primary_state->base.visible))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	}

	/* Display WA 827 */
	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
	    needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, true);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
	    old_crtc_state->base.active)
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done. No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks. For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
5437
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005438static void intel_crtc_disable_planes(struct intel_atomic_state *state,
5439 struct intel_crtc *crtc)
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005440{
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005441 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5442 const struct intel_crtc_state *new_crtc_state =
5443 intel_atomic_get_new_crtc_state(state, crtc);
5444 unsigned int update_mask = new_crtc_state->update_planes;
5445 const struct intel_plane_state *old_plane_state;
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005446 struct intel_plane *plane;
5447 unsigned fb_bits = 0;
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005448 int i;
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005449
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005450 intel_crtc_dpms_overlay_disable(crtc);
Maarten Lankhorst27321ae2015-04-21 17:12:52 +03005451
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005452 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
5453 if (crtc->pipe != plane->pipe ||
5454 !(update_mask & BIT(plane->id)))
5455 continue;
Ville Syrjäläf98551a2014-05-22 17:48:06 +03005456
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005457 plane->disable_plane(plane, new_crtc_state);
5458
5459 if (old_plane_state->base.visible)
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005460 fb_bits |= plane->frontbuffer_bit;
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005461 }
5462
Ville Syrjälä0dd14be2018-11-14 23:07:20 +02005463 intel_frontbuffer_flip(dev_priv, fb_bits);
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005464}
5465
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005466static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005467 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005468 struct drm_atomic_state *old_state)
5469{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005470 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005471 struct drm_connector *conn;
5472 int i;
5473
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005474 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005475 struct intel_encoder *encoder =
5476 to_intel_encoder(conn_state->best_encoder);
5477
5478 if (conn_state->crtc != crtc)
5479 continue;
5480
5481 if (encoder->pre_pll_enable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005482 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005483 }
5484}
5485
5486static void intel_encoders_pre_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005487 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005488 struct drm_atomic_state *old_state)
5489{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005490 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005491 struct drm_connector *conn;
5492 int i;
5493
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005494 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005495 struct intel_encoder *encoder =
5496 to_intel_encoder(conn_state->best_encoder);
5497
5498 if (conn_state->crtc != crtc)
5499 continue;
5500
5501 if (encoder->pre_enable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005502 encoder->pre_enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005503 }
5504}
5505
5506static void intel_encoders_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005507 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005508 struct drm_atomic_state *old_state)
5509{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005510 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005511 struct drm_connector *conn;
5512 int i;
5513
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005514 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005515 struct intel_encoder *encoder =
5516 to_intel_encoder(conn_state->best_encoder);
5517
5518 if (conn_state->crtc != crtc)
5519 continue;
5520
Jani Nikulac84c6fe2018-10-16 15:41:34 +03005521 if (encoder->enable)
5522 encoder->enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005523 intel_opregion_notify_encoder(encoder, true);
5524 }
5525}
5526
5527static void intel_encoders_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005528 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005529 struct drm_atomic_state *old_state)
5530{
5531 struct drm_connector_state *old_conn_state;
5532 struct drm_connector *conn;
5533 int i;
5534
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005535 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005536 struct intel_encoder *encoder =
5537 to_intel_encoder(old_conn_state->best_encoder);
5538
5539 if (old_conn_state->crtc != crtc)
5540 continue;
5541
5542 intel_opregion_notify_encoder(encoder, false);
Jani Nikulac84c6fe2018-10-16 15:41:34 +03005543 if (encoder->disable)
5544 encoder->disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005545 }
5546}
5547
5548static void intel_encoders_post_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005549 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005550 struct drm_atomic_state *old_state)
5551{
5552 struct drm_connector_state *old_conn_state;
5553 struct drm_connector *conn;
5554 int i;
5555
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005556 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005557 struct intel_encoder *encoder =
5558 to_intel_encoder(old_conn_state->best_encoder);
5559
5560 if (old_conn_state->crtc != crtc)
5561 continue;
5562
5563 if (encoder->post_disable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005564 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005565 }
5566}
5567
5568static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005569 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005570 struct drm_atomic_state *old_state)
5571{
5572 struct drm_connector_state *old_conn_state;
5573 struct drm_connector *conn;
5574 int i;
5575
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005576 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005577 struct intel_encoder *encoder =
5578 to_intel_encoder(old_conn_state->best_encoder);
5579
5580 if (old_conn_state->crtc != crtc)
5581 continue;
5582
5583 if (encoder->post_pll_disable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005584 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005585 }
5586}
5587
Hans de Goede608ed4a2018-12-20 14:21:18 +01005588static void intel_encoders_update_pipe(struct drm_crtc *crtc,
5589 struct intel_crtc_state *crtc_state,
5590 struct drm_atomic_state *old_state)
5591{
5592 struct drm_connector_state *conn_state;
5593 struct drm_connector *conn;
5594 int i;
5595
5596 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5597 struct intel_encoder *encoder =
5598 to_intel_encoder(conn_state->best_encoder);
5599
5600 if (conn_state->crtc != crtc)
5601 continue;
5602
5603 if (encoder->update_pipe)
5604 encoder->update_pipe(encoder, crtc_state, conn_state);
5605 }
5606}
5607
/*
 * Full modeset enable sequence for ILK-style (PCH based) platforms.
 * The statement order below IS the hardware enable sequence — DPLL
 * prepare, pipe timings/config, encoder pre_enable, FDI PLL, panel
 * fitter, LUT, watermarks, pipe enable, PCH enable, vblank on, encoder
 * enable. Reorder with extreme care.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* Enabling an already-active CRTC would be a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	/* PCH ports run off a shared DPLL; ready it before pipe setup. */
	if (pipe_config->has_pch_encoder)
		intel_prepare_shared_dpll(pipe_config);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* FDI link M/N values live in the CPU transcoder registers. */
	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (pipe_config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(pipe_config);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
	intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		ironlake_pch_enable(old_intel_state, pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (pipe_config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Re-arm the underrun reporting that was suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5701
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005702/* IPS only exists on ULT machines and is tied to pipe A. */
5703static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5704{
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01005705 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005706}
5707
Imre Deaked69cd42017-10-02 10:55:57 +03005708static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5709 enum pipe pipe, bool apply)
5710{
5711 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5712 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5713
5714 if (apply)
5715 val |= mask;
5716 else
5717 val &= ~mask;
5718
5719 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5720}
5721
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005722static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5723{
5724 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5725 enum pipe pipe = crtc->pipe;
Jani Nikulaba3f4d02019-01-18 14:01:23 +02005726 u32 val;
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005727
Rodrigo Vivi443d5e32018-10-04 08:18:14 -07005728 val = MBUS_DBOX_A_CREDIT(2);
5729 val |= MBUS_DBOX_BW_CREDIT(1);
5730 val |= MBUS_DBOX_B_CREDIT(8);
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005731
5732 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5733}
5734
/*
 * Full modeset enable sequence for HSW+ (DDI based) platforms,
 * including SKL+, GLK/CNL and ICL variants. As with the ILK path, the
 * statement order below is the hardware enable sequence and must not
 * be reordered casually; several workaround brackets (PSL clock
 * gating, HSW workaround pipe) depend on it.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;
	u32 pipe_chicken;

	/* Enabling an already-active CRTC would be a driver bug. */
	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (pipe_config->shared_dpll)
		intel_enable_shared_dpll(pipe_config);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	/* BXT/GLK DSI transcoders have their own timing programming. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(pipe_config);

	intel_set_pipe_src_size(pipe_config);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   pipe_config->pixel_multiplier - 1);
	}

	/* FDI link M/N values live in the CPU transcoder registers. */
	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(pipe_config);

	haswell_set_pipemisc(pipe_config);

	intel_color_set_csc(pipe_config);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 pipe_config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(pipe_config);
	else
		ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(pipe_config);

	/*
	 * Display WA #1153: enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 *
	 * NOTE(review): this reads via I915_READ but writes via
	 * I915_WRITE_FW — confirm the unbalanced forcewake handling is
	 * intentional here.
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
		if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
			I915_WRITE_FW(PIPE_CHICKEN(pipe),
				      pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
	}

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		lpt_pch_enable(old_intel_state, pipe_config);

	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	/* Close the Display WA #1180 bracket opened above. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
5852
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005853static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005854{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005855 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5856 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5857 enum pipe pipe = crtc->pipe;
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005858
5859 /* To avoid upsetting the power well on haswell only disable the pfit if
5860 * it's in use. The hw state code will make sure we get this right. */
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005861 if (old_crtc_state->pch_pfit.enabled) {
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005862 I915_WRITE(PF_CTL(pipe), 0);
5863 I915_WRITE(PF_WIN_POS(pipe), 0);
5864 I915_WRITE(PF_WIN_SZ(pipe), 0);
5865 }
5866}
5867
/*
 * Full modeset disable sequence for ILK-style (PCH based) platforms:
 * encoder disable, vblank off, pipe off, pfit off, FDI off, encoder
 * post_disable, then PCH transcoder/DPLL-select/FDI-PLL teardown.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (old_crtc_state->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		/* CPT needs extra cleanup of the PCH-side routing. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm the underrun reporting that was suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5926
/*
 * Full modeset disable sequence for HSW+ (DDI based) platforms:
 * encoder disable, vblank off, pipe off, MST payload teardown, DDI
 * transcoder off, DSC off, scaler/pfit off, then the encoder
 * post_disable/post_pll_disable hooks.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	intel_dsc_disable(old_crtc_state);

	/* SKL+ uses pipe scalers; older DDI platforms use the ILK pfit. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
}
5961
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005962static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
Jesse Barnes2dd24552013-04-25 12:55:01 -07005963{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005964 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5965 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnes2dd24552013-04-25 12:55:01 -07005966
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005967 if (!crtc_state->gmch_pfit.control)
Jesse Barnes2dd24552013-04-25 12:55:01 -07005968 return;
5969
Daniel Vetterc0b03412013-05-28 12:05:54 +02005970 /*
5971 * The panel fitter should only be adjusted whilst the pipe is disabled,
5972 * according to register description and PRM.
5973 */
Jesse Barnes2dd24552013-04-25 12:55:01 -07005974 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5975 assert_pipe_disabled(dev_priv, crtc->pipe);
5976
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005977 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
5978 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
Daniel Vetter5a80c452013-04-25 22:52:18 +02005979
5980 /* Border color in case we don't scale up to the full screen. Black by
5981 * default, change to something else for debugging. */
5982 I915_WRITE(BCLRPAT(crtc->pipe), 0);
Jesse Barnes2dd24552013-04-25 12:55:01 -07005983}
5984
Mahesh Kumar176597a2018-10-04 14:20:43 +05305985bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
5986{
5987 if (port == PORT_NONE)
5988 return false;
5989
5990 if (IS_ICELAKE(dev_priv))
5991 return port <= PORT_B;
5992
5993 return false;
5994}
5995
Paulo Zanoniac213c12018-05-21 17:25:37 -07005996bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
5997{
5998 if (IS_ICELAKE(dev_priv))
5999 return port >= PORT_C && port <= PORT_F;
6000
6001 return false;
6002}
6003
6004enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6005{
6006 if (!intel_port_is_tc(dev_priv, port))
6007 return PORT_TC_NONE;
6008
6009 return port - PORT_C;
6010}
6011
/*
 * Map a DDI port to the display power domain covering its lanes.
 * An unknown port is flagged via MISSING_CASE and conservatively
 * mapped to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
6032
/*
 * Map a digital port's AUX channel to the corresponding AUX power
 * domain. An unknown channel is flagged via MISSING_CASE and
 * conservatively mapped to POWER_DOMAIN_AUX_A.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
6054
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006055static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6056 struct intel_crtc_state *crtc_state)
Imre Deak319be8a2014-03-04 19:22:57 +02006057{
6058 struct drm_device *dev = crtc->dev;
Maarten Lankhorst37255d82016-12-15 15:29:43 +01006059 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006060 struct drm_encoder *encoder;
Imre Deak319be8a2014-03-04 19:22:57 +02006061 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6062 enum pipe pipe = intel_crtc->pipe;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006063 u64 mask;
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006064 enum transcoder transcoder = crtc_state->cpu_transcoder;
Imre Deak77d22dc2014-03-05 16:20:52 +02006065
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006066 if (!crtc_state->base.active)
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006067 return 0;
6068
Imre Deak17bd6e62018-01-09 14:20:40 +02006069 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6070 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006071 if (crtc_state->pch_pfit.enabled ||
6072 crtc_state->pch_pfit.force_thru)
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006073 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
Imre Deak77d22dc2014-03-05 16:20:52 +02006074
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006075 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6076 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6077
Ander Conselvan de Oliveira79f255a2017-02-22 08:34:27 +02006078 mask |= BIT_ULL(intel_encoder->power_domain);
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006079 }
Imre Deak319be8a2014-03-04 19:22:57 +02006080
Maarten Lankhorst37255d82016-12-15 15:29:43 +01006081 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
Imre Deak17bd6e62018-01-09 14:20:40 +02006082 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
Maarten Lankhorst37255d82016-12-15 15:29:43 +01006083
Maarten Lankhorst15e7ec22016-03-14 09:27:54 +01006084 if (crtc_state->shared_dpll)
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006085 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
Maarten Lankhorst15e7ec22016-03-14 09:27:54 +01006086
Imre Deak77d22dc2014-03-05 16:20:52 +02006087 return mask;
6088}
6089
Ander Conselvan de Oliveirad2d15012017-02-13 16:57:33 +02006090static u64
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006091modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6092 struct intel_crtc_state *crtc_state)
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006093{
Chris Wilsonfac5e232016-07-04 11:34:36 +01006094 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006095 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6096 enum intel_display_power_domain domain;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006097 u64 domains, new_domains, old_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006098
6099 old_domains = intel_crtc->enabled_power_domains;
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006100 intel_crtc->enabled_power_domains = new_domains =
6101 get_crtc_power_domains(crtc, crtc_state);
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006102
Daniel Vetter5a21b662016-05-24 17:13:53 +02006103 domains = new_domains & ~old_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006104
6105 for_each_power_domain(domain, domains)
6106 intel_display_power_get(dev_priv, domain);
6107
Daniel Vetter5a21b662016-05-24 17:13:53 +02006108 return old_domains & ~new_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006109}
6110
/*
 * Release the power domain references in @domains, typically the stale
 * mask returned by modeset_get_crtc_power_domains().
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	/* Drop one reference per domain bit set in the mask. */
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}
6119
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006120static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6121 struct drm_atomic_state *old_state)
Jesse Barnes89b667f2013-04-18 14:51:36 -07006122{
Ville Syrjäläff32c542017-03-02 19:14:57 +02006123 struct intel_atomic_state *old_intel_state =
6124 to_intel_atomic_state(old_state);
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006125 struct drm_crtc *crtc = pipe_config->base.crtc;
Jesse Barnes89b667f2013-04-18 14:51:36 -07006126 struct drm_device *dev = crtc->dev;
Daniel Vettera72e4c92014-09-30 10:56:47 +02006127 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006128 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006129 int pipe = intel_crtc->pipe;
Jesse Barnes89b667f2013-04-18 14:51:36 -07006130
Maarten Lankhorst53d9f4e2015-06-01 12:49:52 +02006131 if (WARN_ON(intel_crtc->active))
Jesse Barnes89b667f2013-04-18 14:51:36 -07006132 return;
6133
Maarten Lankhorst6f405632018-10-04 11:46:04 +02006134 if (intel_crtc_has_dp_encoder(pipe_config))
Maarten Lankhorst4c354752018-10-11 12:04:49 +02006135 intel_dp_set_m_n(pipe_config, M1_N1);
Daniel Vetter5b18e572014-04-24 23:55:06 +02006136
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02006137 intel_set_pipe_timings(pipe_config);
6138 intel_set_pipe_src_size(pipe_config);
Daniel Vetter5b18e572014-04-24 23:55:06 +02006139
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01006140 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
Ville Syrjäläc14b0482014-10-16 20:52:34 +03006141 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6142 I915_WRITE(CHV_CANVAS(pipe), 0);
6143 }
6144
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02006145 i9xx_set_pipeconf(pipe_config);
Daniel Vetter5b18e572014-04-24 23:55:06 +02006146
Matt Roper302da0c2018-12-10 13:54:15 -08006147 intel_color_set_csc(pipe_config);
P Raviraj Sitaramc59d2da2018-09-10 19:57:14 +05306148
Jesse Barnes89b667f2013-04-18 14:51:36 -07006149 intel_crtc->active = true;
Jesse Barnes89b667f2013-04-18 14:51:36 -07006150
Daniel Vettera72e4c92014-09-30 10:56:47 +02006151 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
Ville Syrjälä4a3436e2014-05-16 19:40:25 +03006152
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006153 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006154
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01006155 if (IS_CHERRYVIEW(dev_priv)) {
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006156 chv_prepare_pll(intel_crtc, pipe_config);
6157 chv_enable_pll(intel_crtc, pipe_config);
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006158 } else {
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006159 vlv_prepare_pll(intel_crtc, pipe_config);
6160 vlv_enable_pll(intel_crtc, pipe_config);
Chon Ming Lee9d556c92014-05-02 14:27:47 +03006161 }
Jesse Barnes89b667f2013-04-18 14:51:36 -07006162
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006163 intel_encoders_pre_enable(crtc, pipe_config, old_state);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006164
Maarten Lankhorstb2562712018-10-04 11:45:53 +02006165 i9xx_pfit_enable(pipe_config);
Jesse Barnes2dd24552013-04-25 12:55:01 -07006166
Matt Roper302da0c2018-12-10 13:54:15 -08006167 intel_color_load_luts(pipe_config);
Ville Syrjälä63cbb072013-06-04 13:48:59 +03006168
Ville Syrjäläff32c542017-03-02 19:14:57 +02006169 dev_priv->display.initial_watermarks(old_intel_state,
6170 pipe_config);
Ville Syrjälä4972f702017-11-29 17:37:32 +02006171 intel_enable_pipe(pipe_config);
Daniel Vetterbe6a6f82014-04-15 18:41:22 +02006172
Ville Syrjälä4b3a9522014-08-14 22:04:37 +03006173 assert_vblank_disabled(crtc);
6174 drm_crtc_vblank_on(crtc);
6175
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006176 intel_encoders_enable(crtc, pipe_config, old_state);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006177}
6178
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006179static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006180{
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006181 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6182 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006183
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006184 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6185 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006186}
6187
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006188static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6189 struct drm_atomic_state *old_state)
Zhenyu Wang2c072452009-06-05 15:38:42 +08006190{
Ville Syrjälä04548cb2017-04-21 21:14:29 +03006191 struct intel_atomic_state *old_intel_state =
6192 to_intel_atomic_state(old_state);
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006193 struct drm_crtc *crtc = pipe_config->base.crtc;
Zhenyu Wang2c072452009-06-05 15:38:42 +08006194 struct drm_device *dev = crtc->dev;
Daniel Vettera72e4c92014-09-30 10:56:47 +02006195 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes79e53942008-11-07 14:24:08 -08006196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006197 enum pipe pipe = intel_crtc->pipe;
Jesse Barnes79e53942008-11-07 14:24:08 -08006198
Maarten Lankhorst53d9f4e2015-06-01 12:49:52 +02006199 if (WARN_ON(intel_crtc->active))
Chris Wilsonf7abfe82010-09-13 14:19:16 +01006200 return;
6201
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006202 i9xx_set_pll_dividers(pipe_config);
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006203
Maarten Lankhorst6f405632018-10-04 11:46:04 +02006204 if (intel_crtc_has_dp_encoder(pipe_config))
Maarten Lankhorst4c354752018-10-11 12:04:49 +02006205 intel_dp_set_m_n(pipe_config, M1_N1);
Daniel Vetter5b18e572014-04-24 23:55:06 +02006206
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02006207 intel_set_pipe_timings(pipe_config);
6208 intel_set_pipe_src_size(pipe_config);
Daniel Vetter5b18e572014-04-24 23:55:06 +02006209
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02006210 i9xx_set_pipeconf(pipe_config);
Daniel Vetter5b18e572014-04-24 23:55:06 +02006211
Chris Wilsonf7abfe82010-09-13 14:19:16 +01006212 intel_crtc->active = true;
Chris Wilson6b383a72010-09-13 13:54:26 +01006213
Lucas De Marchicf819ef2018-12-12 10:10:43 -08006214 if (!IS_GEN(dev_priv, 2))
Daniel Vettera72e4c92014-09-30 10:56:47 +02006215 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
Ville Syrjälä4a3436e2014-05-16 19:40:25 +03006216
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006217 intel_encoders_pre_enable(crtc, pipe_config, old_state);
Mika Kuoppala9d6d9f12013-02-08 16:35:38 +02006218
Ville Syrjälä939994d2017-09-13 17:08:56 +03006219 i9xx_enable_pll(intel_crtc, pipe_config);
Daniel Vetterf6736a12013-06-05 13:34:30 +02006220
Maarten Lankhorstb2562712018-10-04 11:45:53 +02006221 i9xx_pfit_enable(pipe_config);
Jesse Barnes2dd24552013-04-25 12:55:01 -07006222
Matt Roper302da0c2018-12-10 13:54:15 -08006223 intel_color_load_luts(pipe_config);
Ville Syrjälä63cbb072013-06-04 13:48:59 +03006224
Ville Syrjälä04548cb2017-04-21 21:14:29 +03006225 if (dev_priv->display.initial_watermarks != NULL)
6226 dev_priv->display.initial_watermarks(old_intel_state,
Maarten Lankhorst6f405632018-10-04 11:46:04 +02006227 pipe_config);
Ville Syrjälä04548cb2017-04-21 21:14:29 +03006228 else
6229 intel_update_watermarks(intel_crtc);
Ville Syrjälä4972f702017-11-29 17:37:32 +02006230 intel_enable_pipe(pipe_config);
Daniel Vetterbe6a6f82014-04-15 18:41:22 +02006231
Ville Syrjälä4b3a9522014-08-14 22:04:37 +03006232 assert_vblank_disabled(crtc);
6233 drm_crtc_vblank_on(crtc);
6234
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006235 intel_encoders_enable(crtc, pipe_config, old_state);
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006236}
6237
Maarten Lankhorstb2562712018-10-04 11:45:53 +02006238static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
Daniel Vetter87476d62013-04-11 16:29:06 +02006239{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02006240 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6241 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetter328d8e82013-05-08 10:36:31 +02006242
Maarten Lankhorstb2562712018-10-04 11:45:53 +02006243 if (!old_crtc_state->gmch_pfit.control)
Daniel Vetter328d8e82013-05-08 10:36:31 +02006244 return;
Daniel Vetter87476d62013-04-11 16:29:06 +02006245
6246 assert_pipe_disabled(dev_priv, crtc->pipe);
6247
Chris Wilson43031782018-09-13 14:16:26 +01006248 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6249 I915_READ(PFIT_CONTROL));
Daniel Vetter328d8e82013-05-08 10:36:31 +02006250 I915_WRITE(PFIT_CONTROL, 0);
Daniel Vetter87476d62013-04-11 16:29:06 +02006251}
6252
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006253static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6254 struct drm_atomic_state *old_state)
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006255{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006256 struct drm_crtc *crtc = old_crtc_state->base.crtc;
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006257 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01006258 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006259 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6260 int pipe = intel_crtc->pipe;
Daniel Vetteref9c3ae2012-06-29 22:40:09 +02006261
Ville Syrjälä6304cd92014-04-25 13:30:12 +03006262 /*
6263 * On gen2 planes are double buffered but the pipe isn't, so we must
6264 * wait for planes to fully turn off before disabling the pipe.
6265 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08006266 if (IS_GEN(dev_priv, 2))
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02006267 intel_wait_for_vblank(dev_priv, pipe);
Ville Syrjälä6304cd92014-04-25 13:30:12 +03006268
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006269 intel_encoders_disable(crtc, old_crtc_state, old_state);
Ville Syrjälä4b3a9522014-08-14 22:04:37 +03006270
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01006271 drm_crtc_vblank_off(crtc);
6272 assert_vblank_disabled(crtc);
6273
Ville Syrjälä4972f702017-11-29 17:37:32 +02006274 intel_disable_pipe(old_crtc_state);
Mika Kuoppala24a1f162013-02-08 16:35:37 +02006275
Maarten Lankhorstb2562712018-10-04 11:45:53 +02006276 i9xx_pfit_disable(old_crtc_state);
Mika Kuoppala24a1f162013-02-08 16:35:37 +02006277
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006278 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
Jesse Barnes89b667f2013-04-18 14:51:36 -07006279
Maarten Lankhorst6f405632018-10-04 11:46:04 +02006280 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01006281 if (IS_CHERRYVIEW(dev_priv))
Chon Ming Lee076ed3b2014-04-09 13:28:17 +03006282 chv_disable_pll(dev_priv, pipe);
Tvrtko Ursulin11a914c2016-10-13 11:03:08 +01006283 else if (IS_VALLEYVIEW(dev_priv))
Chon Ming Lee076ed3b2014-04-09 13:28:17 +03006284 vlv_disable_pll(dev_priv, pipe);
6285 else
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006286 i9xx_disable_pll(old_crtc_state);
Chon Ming Lee076ed3b2014-04-09 13:28:17 +03006287 }
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006288
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02006289 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
Ville Syrjäläd6db9952015-07-08 23:45:49 +03006290
Lucas De Marchicf819ef2018-12-12 10:10:43 -08006291 if (!IS_GEN(dev_priv, 2))
Daniel Vettera72e4c92014-09-30 10:56:47 +02006292 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
Ville Syrjäläff32c542017-03-02 19:14:57 +02006293
6294 if (!dev_priv->display.initial_watermarks)
6295 intel_update_watermarks(intel_crtc);
Ville Syrjälä2ee0da12017-06-01 17:36:16 +03006296
6297 /* clock the pipe down to 640x480@60 to potentially save power */
6298 if (IS_I830(dev_priv))
6299 i830_enable_pipe(dev_priv, pipe);
Jesse Barnes0b8765c62010-09-10 10:31:34 -07006300}
6301
Ville Syrjäläda1d0e22017-06-01 17:36:14 +03006302static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6303 struct drm_modeset_acquire_ctx *ctx)
Jesse Barnesee7b9f92012-04-20 17:11:53 +01006304{
Maarten Lankhorst842e0302016-03-02 15:48:01 +01006305 struct intel_encoder *encoder;
Daniel Vetter0e572fe2014-04-24 23:55:42 +02006306 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Maarten Lankhorstb17d48e2015-06-12 11:15:39 +02006307 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Daniel Vetter0e572fe2014-04-24 23:55:42 +02006308 enum intel_display_power_domain domain;
Ville Syrjäläb1e01592017-11-17 21:19:09 +02006309 struct intel_plane *plane;
Ander Conselvan de Oliveirad2d15012017-02-13 16:57:33 +02006310 u64 domains;
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006311 struct drm_atomic_state *state;
6312 struct intel_crtc_state *crtc_state;
6313 int ret;
Daniel Vetter976f8a22012-07-08 22:34:21 +02006314
Maarten Lankhorstb17d48e2015-06-12 11:15:39 +02006315 if (!intel_crtc->active)
6316 return;
Daniel Vetter0e572fe2014-04-24 23:55:42 +02006317
Ville Syrjäläb1e01592017-11-17 21:19:09 +02006318 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6319 const struct intel_plane_state *plane_state =
6320 to_intel_plane_state(plane->base.state);
Maarten Lankhorst54a419612015-11-23 10:25:28 +01006321
Ville Syrjäläb1e01592017-11-17 21:19:09 +02006322 if (plane_state->base.visible)
6323 intel_plane_disable_noatomic(intel_crtc, plane);
Maarten Lankhorsta5392052015-06-15 12:33:52 +02006324 }
6325
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006326 state = drm_atomic_state_alloc(crtc->dev);
Ander Conselvan de Oliveira31bb2ef2017-01-20 16:28:45 +02006327 if (!state) {
6328 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6329 crtc->base.id, crtc->name);
6330 return;
6331 }
6332
Ville Syrjäläda1d0e22017-06-01 17:36:14 +03006333 state->acquire_ctx = ctx;
Maarten Lankhorst4a806552016-08-09 17:04:01 +02006334
6335 /* Everything's already locked, -EDEADLK can't happen. */
6336 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6337 ret = drm_atomic_add_affected_connectors(state, crtc);
6338
6339 WARN_ON(IS_ERR(crtc_state) || ret);
6340
6341 dev_priv->display.crtc_disable(crtc_state, state);
6342
Chris Wilson08536952016-10-14 13:18:18 +01006343 drm_atomic_state_put(state);
Maarten Lankhorst842e0302016-03-02 15:48:01 +01006344
Ville Syrjälä78108b72016-05-27 20:59:19 +03006345 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6346 crtc->base.id, crtc->name);
Maarten Lankhorst842e0302016-03-02 15:48:01 +01006347
6348 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6349 crtc->state->active = false;
Matt Roper37d90782015-09-24 15:53:06 -07006350 intel_crtc->active = false;
Maarten Lankhorst842e0302016-03-02 15:48:01 +01006351 crtc->enabled = false;
6352 crtc->state->connector_mask = 0;
6353 crtc->state->encoder_mask = 0;
6354
6355 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6356 encoder->base.crtc = NULL;
6357
Paulo Zanoni58f9c0b2016-01-19 11:35:51 -02006358 intel_fbc_disable(intel_crtc);
Ville Syrjälä432081b2016-10-31 22:37:03 +02006359 intel_update_watermarks(intel_crtc);
Maarten Lankhorst65c307f2018-10-05 11:52:44 +02006360 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
Daniel Vetter0e572fe2014-04-24 23:55:42 +02006361
Maarten Lankhorstb17d48e2015-06-12 11:15:39 +02006362 domains = intel_crtc->enabled_power_domains;
6363 for_each_power_domain(domain, domains)
Chris Wilson0e6e0be2019-01-14 14:21:24 +00006364 intel_display_power_put_unchecked(dev_priv, domain);
Maarten Lankhorstb17d48e2015-06-12 11:15:39 +02006365 intel_crtc->enabled_power_domains = 0;
Maarten Lankhorst565602d2015-12-10 12:33:57 +01006366
6367 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
Ville Syrjäläd305e062017-08-30 21:57:03 +03006368 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
Ville Syrjälä53e9bf52017-10-24 12:52:14 +03006369 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
Maarten Lankhorstb17d48e2015-06-12 11:15:39 +02006370}
6371
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006372/*
6373 * turn all crtc's off, but do not adjust state
6374 * This has to be paired with a call to intel_modeset_setup_hw_state.
6375 */
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006376int intel_display_suspend(struct drm_device *dev)
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006377{
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006378 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006379 struct drm_atomic_state *state;
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006380 int ret;
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006381
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006382 state = drm_atomic_helper_suspend(dev);
6383 ret = PTR_ERR_OR_ZERO(state);
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006384 if (ret)
6385 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006386 else
6387 dev_priv->modeset_restore_state = state;
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006388 return ret;
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006389}
6390
Chris Wilsonea5b2132010-08-04 13:50:23 +01006391void intel_encoder_destroy(struct drm_encoder *encoder)
6392{
Chris Wilson4ef69c72010-09-09 15:14:28 +01006393 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
Chris Wilsonea5b2132010-08-04 13:50:23 +01006394
Chris Wilsonea5b2132010-08-04 13:50:23 +01006395 drm_encoder_cleanup(encoder);
6396 kfree(intel_encoder);
6397}
6398
Daniel Vetter0a91ca22012-07-02 21:54:27 +02006399/* Cross check the actual hw state with our own modeset state tracking (and it's
6400 * internal consistency). */
Maarten Lankhorst749d98b2017-05-11 10:28:43 +02006401static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6402 struct drm_connector_state *conn_state)
Daniel Vetter0a91ca22012-07-02 21:54:27 +02006403{
Maarten Lankhorst749d98b2017-05-11 10:28:43 +02006404 struct intel_connector *connector = to_intel_connector(conn_state->connector);
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +02006405
6406 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6407 connector->base.base.id,
6408 connector->base.name);
6409
Daniel Vetter0a91ca22012-07-02 21:54:27 +02006410 if (connector->get_hw_state(connector)) {
Maarten Lankhorste85376c2015-08-27 13:13:31 +02006411 struct intel_encoder *encoder = connector->encoder;
Daniel Vetter0a91ca22012-07-02 21:54:27 +02006412
Maarten Lankhorst749d98b2017-05-11 10:28:43 +02006413 I915_STATE_WARN(!crtc_state,
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +02006414 "connector enabled without attached crtc\n");
Daniel Vetter0a91ca22012-07-02 21:54:27 +02006415
Maarten Lankhorst749d98b2017-05-11 10:28:43 +02006416 if (!crtc_state)
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +02006417 return;
Daniel Vetter0a91ca22012-07-02 21:54:27 +02006418
Maarten Lankhorst749d98b2017-05-11 10:28:43 +02006419 I915_STATE_WARN(!crtc_state->active,
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +02006420 "connector is active, but attached crtc isn't\n");
Daniel Vetter0a91ca22012-07-02 21:54:27 +02006421
Maarten Lankhorste85376c2015-08-27 13:13:31 +02006422 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +02006423 return;
Daniel Vetter0a91ca22012-07-02 21:54:27 +02006424
Maarten Lankhorste85376c2015-08-27 13:13:31 +02006425 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +02006426 "atomic encoder doesn't match attached encoder\n");
Dave Airlie36cd7442014-05-02 13:44:18 +10006427
Maarten Lankhorste85376c2015-08-27 13:13:31 +02006428 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +02006429 "attached encoder crtc differs from connector crtc\n");
6430 } else {
Maarten Lankhorst749d98b2017-05-11 10:28:43 +02006431 I915_STATE_WARN(crtc_state && crtc_state->active,
Maarten Lankhorst4d688a22015-08-05 12:37:06 +02006432 "attached crtc is active, but connector isn't\n");
Maarten Lankhorst749d98b2017-05-11 10:28:43 +02006433 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +02006434 "best encoder set without crtc!\n");
Daniel Vetter0a91ca22012-07-02 21:54:27 +02006435 }
6436}
6437
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006438static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
Ville Syrjäläd272ddf2015-03-11 18:52:31 +02006439{
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006440 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6441 return crtc_state->fdi_lanes;
Ville Syrjäläd272ddf2015-03-11 18:52:31 +02006442
6443 return 0;
6444}
6445
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006446static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02006447 struct intel_crtc_state *pipe_config)
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006448{
Tvrtko Ursulin86527442016-10-13 11:03:00 +01006449 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006450 struct drm_atomic_state *state = pipe_config->base.state;
6451 struct intel_crtc *other_crtc;
6452 struct intel_crtc_state *other_crtc_state;
6453
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006454 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6455 pipe_name(pipe), pipe_config->fdi_lanes);
6456 if (pipe_config->fdi_lanes > 4) {
6457 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6458 pipe_name(pipe), pipe_config->fdi_lanes);
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006459 return -EINVAL;
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006460 }
6461
Tvrtko Ursulin86527442016-10-13 11:03:00 +01006462 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006463 if (pipe_config->fdi_lanes > 2) {
6464 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6465 pipe_config->fdi_lanes);
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006466 return -EINVAL;
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006467 } else {
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006468 return 0;
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006469 }
6470 }
6471
Tvrtko Ursulinb7f05d42016-11-09 11:30:45 +00006472 if (INTEL_INFO(dev_priv)->num_pipes == 2)
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006473 return 0;
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006474
6475 /* Ivybridge 3 pipe is really complicated */
6476 switch (pipe) {
6477 case PIPE_A:
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006478 return 0;
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006479 case PIPE_B:
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006480 if (pipe_config->fdi_lanes <= 2)
6481 return 0;
6482
Ville Syrjäläb91eb5c2016-10-31 22:37:09 +02006483 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006484 other_crtc_state =
6485 intel_atomic_get_crtc_state(state, other_crtc);
6486 if (IS_ERR(other_crtc_state))
6487 return PTR_ERR(other_crtc_state);
6488
6489 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006490 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6491 pipe_name(pipe), pipe_config->fdi_lanes);
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006492 return -EINVAL;
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006493 }
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006494 return 0;
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006495 case PIPE_C:
Ville Syrjälä251cc672015-03-11 18:52:30 +02006496 if (pipe_config->fdi_lanes > 2) {
6497 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6498 pipe_name(pipe), pipe_config->fdi_lanes);
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006499 return -EINVAL;
Ville Syrjälä251cc672015-03-11 18:52:30 +02006500 }
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006501
Ville Syrjäläb91eb5c2016-10-31 22:37:09 +02006502 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006503 other_crtc_state =
6504 intel_atomic_get_crtc_state(state, other_crtc);
6505 if (IS_ERR(other_crtc_state))
6506 return PTR_ERR(other_crtc_state);
6507
6508 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006509 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006510 return -EINVAL;
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006511 }
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006512 return 0;
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006513 default:
6514 BUG();
6515 }
6516}
6517
Daniel Vettere29c22c2013-02-21 00:00:16 +01006518#define RETRY 1
6519static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02006520 struct intel_crtc_state *pipe_config)
Daniel Vetter877d48d2013-04-19 11:24:43 +02006521{
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006522 struct drm_device *dev = intel_crtc->base.dev;
Ville Syrjälä7c5f93b2015-09-08 13:40:49 +03006523 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006524 int lane, link_bw, fdi_dotclock, ret;
6525 bool needs_recompute = false;
Daniel Vetter877d48d2013-04-19 11:24:43 +02006526
Daniel Vettere29c22c2013-02-21 00:00:16 +01006527retry:
Daniel Vetter877d48d2013-04-19 11:24:43 +02006528 /* FDI is a binary signal running at ~2.7GHz, encoding
6529 * each output octet as 10 bits. The actual frequency
6530 * is stored as a divider into a 100MHz clock, and the
6531 * mode pixel clock is stored in units of 1KHz.
6532 * Hence the bw of each lane in terms of the mode signal
6533 * is:
6534 */
Ville Syrjälä21a727b2016-02-17 21:41:10 +02006535 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
Daniel Vetter877d48d2013-04-19 11:24:43 +02006536
Damien Lespiau241bfc32013-09-25 16:45:37 +01006537 fdi_dotclock = adjusted_mode->crtc_clock;
Daniel Vetter877d48d2013-04-19 11:24:43 +02006538
Daniel Vetter2bd89a02013-06-01 17:16:19 +02006539 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
Daniel Vetter877d48d2013-04-19 11:24:43 +02006540 pipe_config->pipe_bpp);
6541
6542 pipe_config->fdi_lanes = lane;
6543
Daniel Vetter2bd89a02013-06-01 17:16:19 +02006544 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006545 link_bw, &pipe_config->fdi_m_n, false);
Daniel Vetter1857e1d2013-04-29 19:34:16 +02006546
Ville Syrjäläe3b247d2016-02-17 21:41:09 +02006547 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
Ville Syrjälä8e2b4df2018-11-07 23:35:20 +02006548 if (ret == -EDEADLK)
6549 return ret;
6550
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006551 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
Daniel Vettere29c22c2013-02-21 00:00:16 +01006552 pipe_config->pipe_bpp -= 2*3;
6553 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6554 pipe_config->pipe_bpp);
6555 needs_recompute = true;
6556 pipe_config->bw_constrained = true;
6557
6558 goto retry;
6559 }
6560
6561 if (needs_recompute)
6562 return RETRY;
6563
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006564 return ret;
Daniel Vetter877d48d2013-04-19 11:24:43 +02006565}
6566
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006567bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006568{
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006569 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6570 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6571
6572 /* IPS only exists on ULT machines and is tied to pipe A. */
6573 if (!hsw_crtc_supports_ips(crtc))
Ville Syrjälä6e644622017-08-17 17:55:09 +03006574 return false;
6575
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006576 if (!i915_modparams.enable_ips)
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006577 return false;
6578
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006579 if (crtc_state->pipe_bpp > 24)
6580 return false;
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006581
6582 /*
Ville Syrjäläb432e5c2015-06-03 15:45:13 +03006583 * We compare against max which means we must take
6584 * the increased cdclk requirement into account when
6585 * calculating the new cdclk.
6586 *
6587 * Should measure whether using a lower cdclk w/o IPS
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006588 */
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006589 if (IS_BROADWELL(dev_priv) &&
6590 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6591 return false;
6592
6593 return true;
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006594}
6595
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006596static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
Paulo Zanoni42db64e2013-05-31 16:33:22 -03006597{
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006598 struct drm_i915_private *dev_priv =
6599 to_i915(crtc_state->base.crtc->dev);
6600 struct intel_atomic_state *intel_state =
6601 to_intel_atomic_state(crtc_state->base.state);
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006602
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006603 if (!hsw_crtc_state_ips_capable(crtc_state))
6604 return false;
6605
6606 if (crtc_state->ips_force_disable)
6607 return false;
6608
Maarten Lankhorstadbe5c52017-11-22 19:39:06 +01006609 /* IPS should be fine as long as at least one plane is enabled. */
6610 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006611 return false;
6612
6613 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6614 if (IS_BROADWELL(dev_priv) &&
6615 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6616 return false;
6617
6618 return true;
Paulo Zanoni42db64e2013-05-31 16:33:22 -03006619}
6620
Ville Syrjälä39acb4a2015-10-30 23:39:38 +02006621static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6622{
6623 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6624
6625 /* GDG double wide on either pipe, otherwise pipe A only */
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +00006626 return INTEL_GEN(dev_priv) < 4 &&
Ville Syrjälä39acb4a2015-10-30 23:39:38 +02006627 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6628}
6629
Jani Nikulaba3f4d02019-01-18 14:01:23 +02006630static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
Ville Syrjäläceb99322017-01-20 20:22:05 +02006631{
Jani Nikulaba3f4d02019-01-18 14:01:23 +02006632 u32 pixel_rate;
Ville Syrjäläceb99322017-01-20 20:22:05 +02006633
6634 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6635
6636 /*
6637 * We only use IF-ID interlacing. If we ever use
6638 * PF-ID we'll need to adjust the pixel_rate here.
6639 */
6640
6641 if (pipe_config->pch_pfit.enabled) {
Jani Nikulaba3f4d02019-01-18 14:01:23 +02006642 u64 pipe_w, pipe_h, pfit_w, pfit_h;
6643 u32 pfit_size = pipe_config->pch_pfit.size;
Ville Syrjäläceb99322017-01-20 20:22:05 +02006644
6645 pipe_w = pipe_config->pipe_src_w;
6646 pipe_h = pipe_config->pipe_src_h;
6647
6648 pfit_w = (pfit_size >> 16) & 0xFFFF;
6649 pfit_h = pfit_size & 0xFFFF;
6650 if (pipe_w < pfit_w)
6651 pipe_w = pfit_w;
6652 if (pipe_h < pfit_h)
6653 pipe_h = pfit_h;
6654
6655 if (WARN_ON(!pfit_w || !pfit_h))
6656 return pixel_rate;
6657
Jani Nikulaba3f4d02019-01-18 14:01:23 +02006658 pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
Ville Syrjäläceb99322017-01-20 20:22:05 +02006659 pfit_w * pfit_h);
6660 }
6661
6662 return pixel_rate;
6663}
6664
Ville Syrjäläa7d1b3f2017-01-26 21:50:31 +02006665static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6666{
6667 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6668
6669 if (HAS_GMCH_DISPLAY(dev_priv))
6670 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6671 crtc_state->pixel_rate =
6672 crtc_state->base.adjusted_mode.crtc_clock;
6673 else
6674 crtc_state->pixel_rate =
6675 ilk_pipe_pixel_rate(crtc_state);
6676}
6677
/*
 * Validate and finalize the mode-independent parts of a pipe configuration.
 *
 * Checks the requested dot clock against platform limits (enabling double
 * wide mode on pre-gen4 where possible), rejects configurations the
 * hardware cannot support, computes the pipe pixel rate, and defers to
 * FDI configuration when a PCH encoder is attached.
 *
 * Returns 0 on success or -EINVAL if the configuration is not achievable.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4: single wide pipe is limited to 90% of cdclk */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			/* Double wide restores the full dotclock limit */
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders additionally need the FDI link configured */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6752
Zhenyu Wang2c072452009-06-05 15:38:42 +08006753static void
Jani Nikulaba3f4d02019-01-18 14:01:23 +02006754intel_reduce_m_n_ratio(u32 *num, u32 *den)
Zhenyu Wang2c072452009-06-05 15:38:42 +08006755{
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006756 while (*num > DATA_LINK_M_N_MASK ||
6757 *den > DATA_LINK_M_N_MASK) {
Zhenyu Wang2c072452009-06-05 15:38:42 +08006758 *num >>= 1;
6759 *den >>= 1;
6760 }
6761}
6762
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006763static void compute_m_n(unsigned int m, unsigned int n,
Jani Nikulaba3f4d02019-01-18 14:01:23 +02006764 u32 *ret_m, u32 *ret_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006765 bool constant_n)
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006766{
Jani Nikula9a86cda2017-03-27 14:33:25 +03006767 /*
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006768 * Several DP dongles in particular seem to be fussy about
6769 * too large link M/N values. Give N value as 0x8000 that
6770 * should be acceptable by specific devices. 0x8000 is the
6771 * specified fixed N value for asynchronous clock mode,
6772 * which the devices expect also in synchronous clock mode.
Jani Nikula9a86cda2017-03-27 14:33:25 +03006773 */
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006774 if (constant_n)
6775 *ret_n = 0x8000;
6776 else
6777 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
Jani Nikula9a86cda2017-03-27 14:33:25 +03006778
Jani Nikulaba3f4d02019-01-18 14:01:23 +02006779 *ret_m = div_u64((u64)m * *ret_n, n);
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006780 intel_reduce_m_n_ratio(ret_m, ret_n);
6781}
6782
Daniel Vettere69d0bc2012-11-29 15:59:36 +01006783void
Manasi Navarea4a15772018-11-28 13:36:21 -08006784intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
Daniel Vettere69d0bc2012-11-29 15:59:36 +01006785 int pixel_clock, int link_clock,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006786 struct intel_link_m_n *m_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006787 bool constant_n)
Zhenyu Wang2c072452009-06-05 15:38:42 +08006788{
Daniel Vettere69d0bc2012-11-29 15:59:36 +01006789 m_n->tu = 64;
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006790
6791 compute_m_n(bits_per_pixel * pixel_clock,
6792 link_clock * nlanes * 8,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006793 &m_n->gmch_m, &m_n->gmch_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006794 constant_n);
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006795
6796 compute_m_n(pixel_clock, link_clock,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006797 &m_n->link_m, &m_n->link_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006798 constant_n);
Zhenyu Wang2c072452009-06-05 15:38:42 +08006799}
6800
Chris Wilsona7615032011-01-12 17:04:08 +00006801static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6802{
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00006803 if (i915_modparams.panel_use_ssc >= 0)
6804 return i915_modparams.panel_use_ssc != 0;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03006805 return dev_priv->vbt.lvds_use_ssc
Keith Packard435793d2011-07-12 14:56:22 -07006806 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
Chris Wilsona7615032011-01-12 17:04:08 +00006807}
6808
Jani Nikulaba3f4d02019-01-18 14:01:23 +02006809static u32 pnv_dpll_compute_fp(struct dpll *dpll)
Jesse Barnesc65d77d2011-12-15 12:30:36 -08006810{
Daniel Vetter7df00d72013-05-21 21:54:55 +02006811 return (1 << dpll->n) << 16 | dpll->m2;
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006812}
Daniel Vetterf47709a2013-03-28 10:42:02 +01006813
Jani Nikulaba3f4d02019-01-18 14:01:23 +02006814static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006815{
6816 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
Jesse Barnesc65d77d2011-12-15 12:30:36 -08006817}
6818
Daniel Vetterf47709a2013-03-28 10:42:02 +01006819static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006820 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03006821 struct dpll *reduced_clock)
Jesse Barnesa7516a02011-12-15 12:30:37 -08006822{
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02006823 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006824 u32 fp, fp2 = 0;
6825
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02006826 if (IS_PINEVIEW(dev_priv)) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006827 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006828 if (reduced_clock)
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006829 fp2 = pnv_dpll_compute_fp(reduced_clock);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006830 } else {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006831 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006832 if (reduced_clock)
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006833 fp2 = i9xx_dpll_compute_fp(reduced_clock);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006834 }
6835
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006836 crtc_state->dpll_hw_state.fp0 = fp;
Jesse Barnesa7516a02011-12-15 12:30:37 -08006837
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03006838 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
Rodrigo Viviab585de2015-03-24 12:40:09 -07006839 reduced_clock) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006840 crtc_state->dpll_hw_state.fp1 = fp2;
Jesse Barnesa7516a02011-12-15 12:30:37 -08006841 } else {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006842 crtc_state->dpll_hw_state.fp1 = fp;
Jesse Barnesa7516a02011-12-15 12:30:37 -08006843 }
6844}
6845
/*
 * Recalibrate the PLL B opamp via a fixed sequence of DPIO
 * read-modify-write accesses. The register values are magic numbers
 * from the platform programming sequence; do not reorder the writes.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* NOTE(review): top-byte value 0x8c presumably starts the recal - confirm against DPIO docs */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte of PLL DW9 again after the recal trigger */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* Restore the reference register top byte to 0xb0 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6874
/*
 * Write the data and link M/N ratios into the PCH transcoder
 * M1/N1 registers for this crtc's pipe. The TU size is packed into
 * the same register as the data M value.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
6887
Maarten Lankhorst4207c8b2018-10-15 11:40:23 +02006888static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
6889 enum transcoder transcoder)
6890{
6891 if (IS_HASWELL(dev_priv))
6892 return transcoder == TRANSCODER_EDP;
6893
6894 /*
6895 * Strictly speaking some registers are available before
6896 * gen7, but we only support DRRS on gen7+
6897 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08006898 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
Maarten Lankhorst4207c8b2018-10-15 11:40:23 +02006899}
6900
/*
 * Write the data and link M/N ratios into the CPU transcoder registers.
 * On gen5+ the per-transcoder M1/N1 registers are used (plus M2/N2 when
 * DRRS is supported and a second set is provided); older GMCH platforms
 * use the per-pipe G4X register layout.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
				   TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* Pre-gen5: per-pipe (G4X-style) M/N registers */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
6934
/*
 * Program the DP M/N values selected by @m_n into the transcoder.
 * M1_N1 programs both divider sets (where supported); M2_N2 programs
 * the downclocked dividers into the M1/N1 registers for platforms
 * without real M2/N2 support.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n, ignoring the
	 * dp_m_n/dp_m2_n2 selection made above - presumably PCH transcoders
	 * never need the M2_N2 switch; confirm intentional.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
6959
Daniel Vetter251ac862015-06-18 10:30:24 +02006960static void vlv_compute_dpll(struct intel_crtc *crtc,
6961 struct intel_crtc_state *pipe_config)
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006962{
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006963 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006964 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006965 if (crtc->pipe != PIPE_A)
6966 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006967
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006968 /* DPLL not used with DSI, but still need the rest set up */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03006969 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006970 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
6971 DPLL_EXT_BUFFER_ENABLE_VLV;
6972
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006973 pipe_config->dpll_hw_state.dpll_md =
6974 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6975}
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006976
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006977static void chv_compute_dpll(struct intel_crtc *crtc,
6978 struct intel_crtc_state *pipe_config)
6979{
6980 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006981 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006982 if (crtc->pipe != PIPE_A)
6983 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6984
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006985 /* DPLL not used with DSI, but still need the rest set up */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03006986 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006987 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
6988
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006989 pipe_config->dpll_hw_state.dpll_md =
6990 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006991}
6992
/*
 * Program the VLV DPLL dividers and analog settings through the DPIO
 * sideband before the PLL is enabled. The write sequence follows the
 * platform programming notes; do not reorder.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* DPIO sideband accesses below must be serialized */
	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers are written first, then calibration is enabled on top */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock setup; bit 24 is set for DP outputs */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
7091
/*
 * Program the CHV DPLL dividers, loop filter and lock detect settings
 * through the DPIO sideband before the PLL is enabled. The write
 * sequence follows the platform programming notes; do not reorder.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* CHV splits M2 into integer (top bits) and fractional parts */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/* DPIO sideband accesses below must be serialized */
	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter - coefficients are selected by VCO frequency band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count, matching the loop filter band above */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7196
Ville Syrjäläd288f652014-10-28 13:20:22 +02007197/**
7198 * vlv_force_pll_on - forcibly enable just the PLL
7199 * @dev_priv: i915 private structure
7200 * @pipe: pipe PLL to enable
7201 * @dpll: PLL configuration
7202 *
7203 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7204 * in cases where we need the PLL enabled even when @pipe is not going to
7205 * be enabled.
7206 */
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007207int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007208 const struct dpll *dpll)
Ville Syrjäläd288f652014-10-28 13:20:22 +02007209{
Ville Syrjäläb91eb5c2016-10-31 22:37:09 +02007210 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007211 struct intel_crtc_state *pipe_config;
7212
7213 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7214 if (!pipe_config)
7215 return -ENOMEM;
7216
7217 pipe_config->base.crtc = &crtc->base;
7218 pipe_config->pixel_multiplier = 1;
7219 pipe_config->dpll = *dpll;
Ville Syrjäläd288f652014-10-28 13:20:22 +02007220
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007221 if (IS_CHERRYVIEW(dev_priv)) {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007222 chv_compute_dpll(crtc, pipe_config);
7223 chv_prepare_pll(crtc, pipe_config);
7224 chv_enable_pll(crtc, pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007225 } else {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007226 vlv_compute_dpll(crtc, pipe_config);
7227 vlv_prepare_pll(crtc, pipe_config);
7228 vlv_enable_pll(crtc, pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007229 }
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007230
7231 kfree(pipe_config);
7232
7233 return 0;
Ville Syrjäläd288f652014-10-28 13:20:22 +02007234}
7235
7236/**
7237 * vlv_force_pll_off - forcibly disable just the PLL
7238 * @dev_priv: i915 private structure
7239 * @pipe: pipe PLL to disable
7240 *
7241 * Disable the PLL for @pipe. To be used in cases where we need
7242 * the PLL enabled even when @pipe is not going to be enabled.
7243 */
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007244void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
Ville Syrjäläd288f652014-10-28 13:20:22 +02007245{
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007246 if (IS_CHERRYVIEW(dev_priv))
7247 chv_disable_pll(dev_priv, pipe);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007248 else
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007249 vlv_disable_pll(dev_priv, pipe);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007250}
7251
Daniel Vetter251ac862015-06-18 10:30:24 +02007252static void i9xx_compute_dpll(struct intel_crtc *crtc,
7253 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007254 struct dpll *reduced_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007255{
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02007256 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007257 u32 dpll;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007258 struct dpll *clock = &crtc_state->dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007259
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007260 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
Vijay Purushothaman2a8f64c2012-09-27 19:13:06 +05307261
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007262 dpll = DPLL_VGA_MODE_DIS;
7263
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007264 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007265 dpll |= DPLLB_MODE_LVDS;
7266 else
7267 dpll |= DPLLB_MODE_DAC_SERIAL;
Daniel Vetter6cc5f342013-03-27 00:44:53 +01007268
Jani Nikula73f67aa2016-12-07 22:48:09 +02007269 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7270 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007271 dpll |= (crtc_state->pixel_multiplier - 1)
Daniel Vetter198a037f2013-04-19 11:14:37 +02007272 << SDVO_MULTIPLIER_SHIFT_HIRES;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007273 }
Daniel Vetter198a037f2013-04-19 11:14:37 +02007274
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03007275 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7276 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
Daniel Vetter4a33e482013-07-06 12:52:05 +02007277 dpll |= DPLL_SDVO_HIGH_SPEED;
Daniel Vetter198a037f2013-04-19 11:14:37 +02007278
Ville Syrjälä37a56502016-06-22 21:57:04 +03007279 if (intel_crtc_has_dp_encoder(crtc_state))
Daniel Vetter4a33e482013-07-06 12:52:05 +02007280 dpll |= DPLL_SDVO_HIGH_SPEED;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007281
7282 /* compute bitmask from p1 value */
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02007283 if (IS_PINEVIEW(dev_priv))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007284 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7285 else {
7286 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +01007287 if (IS_G4X(dev_priv) && reduced_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007288 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7289 }
7290 switch (clock->p2) {
7291 case 5:
7292 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7293 break;
7294 case 7:
7295 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7296 break;
7297 case 10:
7298 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7299 break;
7300 case 14:
7301 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7302 break;
7303 }
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02007304 if (INTEL_GEN(dev_priv) >= 4)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007305 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7306
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007307 if (crtc_state->sdvo_tv_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007308 dpll |= PLL_REF_INPUT_TVCLKINBC;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007309 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
Ander Conselvan de Oliveiraceb41002016-03-21 18:00:02 +02007310 intel_panel_use_ssc(dev_priv))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007311 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7312 else
7313 dpll |= PLL_REF_INPUT_DREFCLK;
7314
7315 dpll |= DPLL_VCO_ENABLE;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007316 crtc_state->dpll_hw_state.dpll = dpll;
Daniel Vetter8bcc2792013-06-05 13:34:28 +02007317
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02007318 if (INTEL_GEN(dev_priv) >= 4) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007319 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
Daniel Vetteref1b4602013-06-01 17:17:04 +02007320 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007321 crtc_state->dpll_hw_state.dpll_md = dpll_md;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007322 }
7323}
7324
Daniel Vetter251ac862015-06-18 10:30:24 +02007325static void i8xx_compute_dpll(struct intel_crtc *crtc,
7326 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007327 struct dpll *reduced_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007328{
Daniel Vetterf47709a2013-03-28 10:42:02 +01007329 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007330 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007331 u32 dpll;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007332 struct dpll *clock = &crtc_state->dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007333
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007334 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
Vijay Purushothaman2a8f64c2012-09-27 19:13:06 +05307335
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007336 dpll = DPLL_VGA_MODE_DIS;
7337
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007338 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007339 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7340 } else {
7341 if (clock->p1 == 2)
7342 dpll |= PLL_P1_DIVIDE_BY_TWO;
7343 else
7344 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7345 if (clock->p2 == 4)
7346 dpll |= PLL_P2_DIVIDE_BY_4;
7347 }
7348
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01007349 if (!IS_I830(dev_priv) &&
7350 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
Daniel Vetter4a33e482013-07-06 12:52:05 +02007351 dpll |= DPLL_DVO_2X_MODE;
7352
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007353 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
Ander Conselvan de Oliveiraceb41002016-03-21 18:00:02 +02007354 intel_panel_use_ssc(dev_priv))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007355 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7356 else
7357 dpll |= PLL_REF_INPUT_DREFCLK;
7358
7359 dpll |= DPLL_VCO_ENABLE;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007360 crtc_state->dpll_hw_state.dpll = dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007361}
7362
/*
 * Program the pipe/transcoder timing registers (H/VTOTAL, H/VBLANK,
 * H/VSYNC, VSYNCSHIFT) from the adjusted mode in @crtc_state.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* Wrap a negative shift back into the scanline. */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Each register packs (start value - 1) in the low 16 bits and
	 * (end/total value - 1) in the high 16 bits. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7424
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007425static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
Jani Nikulabc58be62016-03-18 17:05:39 +02007426{
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007427 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7428 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7429 enum pipe pipe = crtc->pipe;
Jani Nikulabc58be62016-03-18 17:05:39 +02007430
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007431 /* pipesrc controls the size that is scaled from, which should
7432 * always be the user's requested size.
7433 */
7434 I915_WRITE(PIPESRC(pipe),
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007435 ((crtc_state->pipe_src_w - 1) << 16) |
7436 (crtc_state->pipe_src_h - 1));
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007437}
7438
/*
 * Read back the transcoder timing registers into @pipe_config's adjusted
 * mode. Inverse of intel_set_pipe_timings(): the hardware stores each
 * value minus one, so 1 is added back to every field.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	/* Low 16 bits hold the start/display value, high 16 the end/total. */
	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		/* Undo the halfline adjustment that was applied when the
		 * interlaced timings were programmed. */
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}
}
7473
7474static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7475 struct intel_crtc_state *pipe_config)
7476{
7477 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007478 struct drm_i915_private *dev_priv = to_i915(dev);
Jani Nikulabc58be62016-03-18 17:05:39 +02007479 u32 tmp;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007480
7481 tmp = I915_READ(PIPESRC(crtc->pipe));
Ville Syrjälä37327ab2013-09-04 18:25:28 +03007482 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7483 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7484
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007485 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7486 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007487}
7488
Daniel Vetterf6a83282014-02-11 15:28:57 -08007489void intel_mode_from_pipe_config(struct drm_display_mode *mode,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007490 struct intel_crtc_state *pipe_config)
Jesse Barnesbabea612013-06-26 18:57:38 +03007491{
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007492 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7493 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7494 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7495 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
Jesse Barnesbabea612013-06-26 18:57:38 +03007496
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007497 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7498 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7499 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7500 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
Jesse Barnesbabea612013-06-26 18:57:38 +03007501
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007502 mode->flags = pipe_config->base.adjusted_mode.flags;
Maarten Lankhorstcd13f5a2015-07-14 14:12:02 +02007503 mode->type = DRM_MODE_TYPE_DRIVER;
Jesse Barnesbabea612013-06-26 18:57:38 +03007504
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007505 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
Maarten Lankhorstcd13f5a2015-07-14 14:12:02 +02007506
7507 mode->hsync = drm_mode_hsync(mode);
7508 mode->vrefresh = drm_mode_vrefresh(mode);
7509 drm_mode_set_name(mode);
Jesse Barnesbabea612013-06-26 18:57:38 +03007510}
7511
/*
 * Program the PIPECONF register for gen2-4/VLV/CHV pipes: double-wide
 * mode, bpc/dither controls, interlacing and color range.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Gen3 and earlier only support the field-indication flavour
		 * of interlacing; SDVO outputs need it on later gens too. */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	/* Limited (16-235) color range is only selectable on VLV/CHV here. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(crtc->pipe));
}
7567
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007568static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7569 struct intel_crtc_state *crtc_state)
7570{
7571 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007572 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007573 const struct intel_limit *limit;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007574 int refclk = 48000;
7575
7576 memset(&crtc_state->dpll_hw_state, 0,
7577 sizeof(crtc_state->dpll_hw_state));
7578
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007579 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007580 if (intel_panel_use_ssc(dev_priv)) {
7581 refclk = dev_priv->vbt.lvds_ssc_freq;
7582 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7583 }
7584
7585 limit = &intel_limits_i8xx_lvds;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007586 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007587 limit = &intel_limits_i8xx_dvo;
7588 } else {
7589 limit = &intel_limits_i8xx_dac;
7590 }
7591
7592 if (!crtc_state->clock_set &&
7593 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7594 refclk, NULL, &crtc_state->dpll)) {
7595 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7596 return -EINVAL;
7597 }
7598
7599 i8xx_compute_dpll(crtc, crtc_state, NULL);
7600
7601 return 0;
7602}
7603
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007604static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7605 struct intel_crtc_state *crtc_state)
7606{
7607 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007608 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007609 const struct intel_limit *limit;
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007610 int refclk = 96000;
7611
7612 memset(&crtc_state->dpll_hw_state, 0,
7613 sizeof(crtc_state->dpll_hw_state));
7614
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007615 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007616 if (intel_panel_use_ssc(dev_priv)) {
7617 refclk = dev_priv->vbt.lvds_ssc_freq;
7618 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7619 }
7620
7621 if (intel_is_dual_link_lvds(dev))
7622 limit = &intel_limits_g4x_dual_channel_lvds;
7623 else
7624 limit = &intel_limits_g4x_single_channel_lvds;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007625 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7626 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007627 limit = &intel_limits_g4x_hdmi;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007628 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007629 limit = &intel_limits_g4x_sdvo;
7630 } else {
7631 /* The option is for other outputs */
7632 limit = &intel_limits_i9xx_sdvo;
7633 }
7634
7635 if (!crtc_state->clock_set &&
7636 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7637 refclk, NULL, &crtc_state->dpll)) {
7638 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7639 return -EINVAL;
7640 }
7641
7642 i9xx_compute_dpll(crtc, crtc_state, NULL);
7643
7644 return 0;
7645}
7646
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007647static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7648 struct intel_crtc_state *crtc_state)
Jesse Barnes79e53942008-11-07 14:24:08 -08007649{
Ander Conselvan de Oliveirac7653192014-10-20 13:46:44 +03007650 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007651 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007652 const struct intel_limit *limit;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007653 int refclk = 96000;
Jesse Barnes79e53942008-11-07 14:24:08 -08007654
Ander Conselvan de Oliveiradd3cd742015-05-15 13:34:29 +03007655 memset(&crtc_state->dpll_hw_state, 0,
7656 sizeof(crtc_state->dpll_hw_state));
7657
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007658 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007659 if (intel_panel_use_ssc(dev_priv)) {
7660 refclk = dev_priv->vbt.lvds_ssc_freq;
7661 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7662 }
Jesse Barnes79e53942008-11-07 14:24:08 -08007663
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007664 limit = &intel_limits_pineview_lvds;
7665 } else {
7666 limit = &intel_limits_pineview_sdvo;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007667 }
Jani Nikulaf2335332013-09-13 11:03:09 +03007668
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007669 if (!crtc_state->clock_set &&
7670 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7671 refclk, NULL, &crtc_state->dpll)) {
7672 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7673 return -EINVAL;
7674 }
7675
7676 i9xx_compute_dpll(crtc, crtc_state, NULL);
7677
7678 return 0;
7679}
7680
7681static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7682 struct intel_crtc_state *crtc_state)
7683{
7684 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007685 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007686 const struct intel_limit *limit;
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007687 int refclk = 96000;
7688
7689 memset(&crtc_state->dpll_hw_state, 0,
7690 sizeof(crtc_state->dpll_hw_state));
7691
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007692 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007693 if (intel_panel_use_ssc(dev_priv)) {
7694 refclk = dev_priv->vbt.lvds_ssc_freq;
7695 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
Jani Nikulae9fd1c02013-08-27 15:12:23 +03007696 }
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007697
7698 limit = &intel_limits_i9xx_lvds;
7699 } else {
7700 limit = &intel_limits_i9xx_sdvo;
7701 }
7702
7703 if (!crtc_state->clock_set &&
7704 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7705 refclk, NULL, &crtc_state->dpll)) {
7706 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7707 return -EINVAL;
Daniel Vetterf47709a2013-03-28 10:42:02 +01007708 }
Eric Anholtf564048e2011-03-30 13:01:02 -07007709
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007710 i9xx_compute_dpll(crtc, crtc_state, NULL);
Eric Anholtf564048e2011-03-30 13:01:02 -07007711
Daniel Vetterc8f7a0d2014-04-24 23:55:04 +02007712 return 0;
Eric Anholtf564048e2011-03-30 13:01:02 -07007713}
7714
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007715static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7716 struct intel_crtc_state *crtc_state)
7717{
7718 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007719 const struct intel_limit *limit = &intel_limits_chv;
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007720
7721 memset(&crtc_state->dpll_hw_state, 0,
7722 sizeof(crtc_state->dpll_hw_state));
7723
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007724 if (!crtc_state->clock_set &&
7725 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7726 refclk, NULL, &crtc_state->dpll)) {
7727 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7728 return -EINVAL;
7729 }
7730
7731 chv_compute_dpll(crtc, crtc_state);
7732
7733 return 0;
7734}
7735
7736static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7737 struct intel_crtc_state *crtc_state)
7738{
7739 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007740 const struct intel_limit *limit = &intel_limits_vlv;
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007741
7742 memset(&crtc_state->dpll_hw_state, 0,
7743 sizeof(crtc_state->dpll_hw_state));
7744
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007745 if (!crtc_state->clock_set &&
7746 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7747 refclk, NULL, &crtc_state->dpll)) {
7748 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7749 return -EINVAL;
7750 }
7751
7752 vlv_compute_dpll(crtc, crtc_state);
7753
7754 return 0;
7755}
7756
/*
 * Read back the panel fitter state for gen2-4 into @pipe_config, but only
 * if the fitter is enabled and actually attached to this CRTC's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	/* No panel fitter on i830, nor on gen2/3 desktop parts. */
	if (INTEL_GEN(dev_priv) <= 3 &&
	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* Before gen4 the fitter is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
7783
Jesse Barnesacbec812013-09-20 11:29:32 -07007784static void vlv_crtc_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007785 struct intel_crtc_state *pipe_config)
Jesse Barnesacbec812013-09-20 11:29:32 -07007786{
7787 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007788 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesacbec812013-09-20 11:29:32 -07007789 int pipe = pipe_config->cpu_transcoder;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007790 struct dpll clock;
Jesse Barnesacbec812013-09-20 11:29:32 -07007791 u32 mdiv;
Chris Wilson662c6ec2013-09-25 14:24:01 -07007792 int refclk = 100000;
Jesse Barnesacbec812013-09-20 11:29:32 -07007793
Ville Syrjäläb5219732016-03-15 16:40:01 +02007794 /* In case of DSI, DPLL will not be used */
7795 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
Shobhit Kumarf573de52014-07-30 20:32:37 +05307796 return;
7797
Ville Syrjäläa5805162015-05-26 20:42:30 +03007798 mutex_lock(&dev_priv->sb_lock);
Chon Ming Leeab3c7592013-11-07 10:43:30 +08007799 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
Ville Syrjäläa5805162015-05-26 20:42:30 +03007800 mutex_unlock(&dev_priv->sb_lock);
Jesse Barnesacbec812013-09-20 11:29:32 -07007801
7802 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
7803 clock.m2 = mdiv & DPIO_M2DIV_MASK;
7804 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
7805 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
7806 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
7807
Imre Deakdccbea32015-06-22 23:35:51 +03007808 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
Jesse Barnesacbec812013-09-20 11:29:32 -07007809}
7810
Damien Lespiau5724dbd2015-01-20 12:51:52 +00007811static void
7812i9xx_get_initial_plane_config(struct intel_crtc *crtc,
7813 struct intel_initial_plane_config *plane_config)
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007814{
7815 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007816 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä282e83e2017-11-17 21:19:12 +02007817 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
7818 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
Ville Syrjäläeade6c82018-01-30 22:38:03 +02007819 enum pipe pipe;
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007820 u32 val, base, offset;
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007821 int fourcc, pixel_format;
Tvrtko Ursulin6761dd32015-03-23 11:10:32 +00007822 unsigned int aligned_height;
Damien Lespiaub113d5e2015-01-20 12:51:46 +00007823 struct drm_framebuffer *fb;
Damien Lespiau1b842c82015-01-21 13:50:54 +00007824 struct intel_framebuffer *intel_fb;
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007825
Ville Syrjäläeade6c82018-01-30 22:38:03 +02007826 if (!plane->get_hw_state(plane, &pipe))
Damien Lespiau42a7b082015-02-05 19:35:13 +00007827 return;
7828
Ville Syrjäläeade6c82018-01-30 22:38:03 +02007829 WARN_ON(pipe != crtc->pipe);
7830
Damien Lespiaud9806c92015-01-21 14:07:19 +00007831 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
Damien Lespiau1b842c82015-01-21 13:50:54 +00007832 if (!intel_fb) {
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007833 DRM_DEBUG_KMS("failed to alloc fb\n");
7834 return;
7835 }
7836
Damien Lespiau1b842c82015-01-21 13:50:54 +00007837 fb = &intel_fb->base;
7838
Ville Syrjäläd2e9f5f2016-11-18 21:52:53 +02007839 fb->dev = dev;
7840
Ville Syrjälä2924b8c2017-11-17 21:19:16 +02007841 val = I915_READ(DSPCNTR(i9xx_plane));
7842
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00007843 if (INTEL_GEN(dev_priv) >= 4) {
Daniel Vetter18c52472015-02-10 17:16:09 +00007844 if (val & DISPPLANE_TILED) {
Damien Lespiau49af4492015-01-20 12:51:44 +00007845 plane_config->tiling = I915_TILING_X;
Ville Syrjäläbae781b2016-11-16 13:33:16 +02007846 fb->modifier = I915_FORMAT_MOD_X_TILED;
Daniel Vetter18c52472015-02-10 17:16:09 +00007847 }
Ville Syrjäläf43348a2018-11-20 15:54:50 +02007848
7849 if (val & DISPPLANE_ROTATE_180)
7850 plane_config->rotation = DRM_MODE_ROTATE_180;
Daniel Vetter18c52472015-02-10 17:16:09 +00007851 }
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007852
Ville Syrjäläf43348a2018-11-20 15:54:50 +02007853 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
7854 val & DISPPLANE_MIRROR)
7855 plane_config->rotation |= DRM_MODE_REFLECT_X;
7856
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007857 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
Damien Lespiaub35d63f2015-01-20 12:51:50 +00007858 fourcc = i9xx_format_to_fourcc(pixel_format);
Ville Syrjälä2f3f4762016-11-18 21:52:57 +02007859 fb->format = drm_format_info(fourcc);
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007860
Ville Syrjälä81894b22017-11-17 21:19:13 +02007861 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
7862 offset = I915_READ(DSPOFFSET(i9xx_plane));
7863 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
7864 } else if (INTEL_GEN(dev_priv) >= 4) {
Damien Lespiau49af4492015-01-20 12:51:44 +00007865 if (plane_config->tiling)
Ville Syrjälä282e83e2017-11-17 21:19:12 +02007866 offset = I915_READ(DSPTILEOFF(i9xx_plane));
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007867 else
Ville Syrjälä282e83e2017-11-17 21:19:12 +02007868 offset = I915_READ(DSPLINOFF(i9xx_plane));
7869 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007870 } else {
Ville Syrjälä282e83e2017-11-17 21:19:12 +02007871 base = I915_READ(DSPADDR(i9xx_plane));
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007872 }
7873 plane_config->base = base;
7874
7875 val = I915_READ(PIPESRC(pipe));
Damien Lespiaub113d5e2015-01-20 12:51:46 +00007876 fb->width = ((val >> 16) & 0xfff) + 1;
7877 fb->height = ((val >> 0) & 0xfff) + 1;
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007878
Ville Syrjälä282e83e2017-11-17 21:19:12 +02007879 val = I915_READ(DSPSTRIDE(i9xx_plane));
Damien Lespiaub113d5e2015-01-20 12:51:46 +00007880 fb->pitches[0] = val & 0xffffffc0;
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007881
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02007882 aligned_height = intel_fb_align_height(fb, 0, fb->height);
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007883
Daniel Vetterf37b5c22015-02-10 23:12:27 +01007884 plane_config->size = fb->pitches[0] * aligned_height;
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007885
Ville Syrjälä282e83e2017-11-17 21:19:12 +02007886 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
7887 crtc->base.name, plane->base.name, fb->width, fb->height,
Ville Syrjälä272725c2016-12-14 23:32:20 +02007888 fb->format->cpp[0] * 8, base, fb->pitches[0],
Damien Lespiau2844a922015-01-20 12:51:48 +00007889 plane_config->size);
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007890
Damien Lespiau2d140302015-02-05 17:22:18 +00007891 plane_config->fb = intel_fb;
Jesse Barnes1ad292b2014-03-07 08:57:49 -08007892}
7893
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007894static void chv_crtc_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007895 struct intel_crtc_state *pipe_config)
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007896{
7897 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007898 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007899 int pipe = pipe_config->cpu_transcoder;
7900 enum dpio_channel port = vlv_pipe_to_channel(pipe);
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007901 struct dpll clock;
Imre Deak0d7b6b12015-07-02 14:29:58 +03007902 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007903 int refclk = 100000;
7904
Ville Syrjäläb5219732016-03-15 16:40:01 +02007905 /* In case of DSI, DPLL will not be used */
7906 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7907 return;
7908
Ville Syrjäläa5805162015-05-26 20:42:30 +03007909 mutex_lock(&dev_priv->sb_lock);
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007910 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
7911 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
7912 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
7913 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
Imre Deak0d7b6b12015-07-02 14:29:58 +03007914 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
Ville Syrjäläa5805162015-05-26 20:42:30 +03007915 mutex_unlock(&dev_priv->sb_lock);
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007916
7917 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
Imre Deak0d7b6b12015-07-02 14:29:58 +03007918 clock.m2 = (pll_dw0 & 0xff) << 22;
7919 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
7920 clock.m2 |= pll_dw2 & 0x3fffff;
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007921 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
7922 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
7923 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
7924
Imre Deakdccbea32015-06-22 23:35:51 +03007925 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
Ville Syrjälä70b23a92014-04-09 13:28:22 +03007926}
7927
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05307928static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
7929 struct intel_crtc_state *pipe_config)
7930{
7931 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7932 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
7933
Shashank Sharma668b6c12018-10-12 11:53:14 +05307934 pipe_config->lspcon_downsampling = false;
7935
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05307936 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
7937 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
7938
7939 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
7940 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
7941 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
7942
7943 if (ycbcr420_enabled) {
7944 /* We support 4:2:0 in full blend mode only */
7945 if (!blend)
7946 output = INTEL_OUTPUT_FORMAT_INVALID;
7947 else if (!(IS_GEMINILAKE(dev_priv) ||
7948 INTEL_GEN(dev_priv) >= 10))
7949 output = INTEL_OUTPUT_FORMAT_INVALID;
7950 else
7951 output = INTEL_OUTPUT_FORMAT_YCBCR420;
Shashank Sharma8c79f842018-10-12 11:53:09 +05307952 } else {
Shashank Sharma668b6c12018-10-12 11:53:14 +05307953 /*
7954 * Currently there is no interface defined to
7955 * check user preference between RGB/YCBCR444
7956 * or YCBCR420. So the only possible case for
7957 * YCBCR444 usage is driving YCBCR420 output
7958 * with LSPCON, when pipe is configured for
7959 * YCBCR444 output and LSPCON takes care of
7960 * downsampling it.
7961 */
7962 pipe_config->lspcon_downsampling = true;
Shashank Sharma8c79f842018-10-12 11:53:09 +05307963 output = INTEL_OUTPUT_FORMAT_YCBCR444;
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05307964 }
7965 }
7966 }
7967
7968 pipe_config->output_format = output;
7969}
7970
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01007971static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007972 struct intel_crtc_state *pipe_config)
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01007973{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00007974 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Imre Deak17290502016-02-12 18:55:11 +02007975 enum intel_display_power_domain power_domain;
Chris Wilson0e6e0be2019-01-14 14:21:24 +00007976 intel_wakeref_t wakeref;
Jani Nikulaba3f4d02019-01-18 14:01:23 +02007977 u32 tmp;
Imre Deak17290502016-02-12 18:55:11 +02007978 bool ret;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01007979
Imre Deak17290502016-02-12 18:55:11 +02007980 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
Chris Wilson0e6e0be2019-01-14 14:21:24 +00007981 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
7982 if (!wakeref)
Imre Deakb5482bd2014-03-05 16:20:55 +02007983 return false;
7984
Shashank Sharmad9facae2018-10-12 11:53:07 +05307985 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
Daniel Vettere143a212013-07-04 12:01:15 +02007986 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02007987 pipe_config->shared_dpll = NULL;
Daniel Vettereccb1402013-05-22 00:50:22 +02007988
Imre Deak17290502016-02-12 18:55:11 +02007989 ret = false;
7990
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01007991 tmp = I915_READ(PIPECONF(crtc->pipe));
7992 if (!(tmp & PIPECONF_ENABLE))
Imre Deak17290502016-02-12 18:55:11 +02007993 goto out;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01007994
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +01007995 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7996 IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä42571ae2013-09-06 23:29:00 +03007997 switch (tmp & PIPECONF_BPC_MASK) {
7998 case PIPECONF_6BPC:
7999 pipe_config->pipe_bpp = 18;
8000 break;
8001 case PIPECONF_8BPC:
8002 pipe_config->pipe_bpp = 24;
8003 break;
8004 case PIPECONF_10BPC:
8005 pipe_config->pipe_bpp = 30;
8006 break;
8007 default:
8008 break;
8009 }
8010 }
8011
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01008012 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
Wayne Boyer666a4532015-12-09 12:29:35 -08008013 (tmp & PIPECONF_COLOR_RANGE_SELECT))
Daniel Vetterb5a9fa02014-04-24 23:54:49 +02008014 pipe_config->limited_color_range = true;
8015
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00008016 if (INTEL_GEN(dev_priv) < 4)
Ville Syrjälä282740f2013-09-04 18:30:03 +03008017 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8018
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02008019 intel_get_pipe_timings(crtc, pipe_config);
Jani Nikulabc58be62016-03-18 17:05:39 +02008020 intel_get_pipe_src_size(crtc, pipe_config);
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02008021
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02008022 i9xx_get_pfit_config(crtc, pipe_config);
8023
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00008024 if (INTEL_GEN(dev_priv) >= 4) {
Ville Syrjäläc2317752016-03-15 16:39:56 +02008025 /* No way to read it out on pipes B and C */
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01008026 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
Ville Syrjäläc2317752016-03-15 16:39:56 +02008027 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8028 else
8029 tmp = I915_READ(DPLL_MD(crtc->pipe));
Daniel Vetter6c49f242013-06-06 12:45:25 +02008030 pipe_config->pixel_multiplier =
8031 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8032 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
Daniel Vetter8bcc2792013-06-05 13:34:28 +02008033 pipe_config->dpll_hw_state.dpll_md = tmp;
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01008034 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
Jani Nikula73f67aa2016-12-07 22:48:09 +02008035 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
Daniel Vetter6c49f242013-06-06 12:45:25 +02008036 tmp = I915_READ(DPLL(crtc->pipe));
8037 pipe_config->pixel_multiplier =
8038 ((tmp & SDVO_MULTIPLIER_MASK)
8039 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8040 } else {
8041 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8042 * port and will be fixed up in the encoder->get_config
8043 * function. */
8044 pipe_config->pixel_multiplier = 1;
8045 }
Daniel Vetter8bcc2792013-06-05 13:34:28 +02008046 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01008047 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03008048 /*
8049 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8050 * on 830. Filter it out here so that we don't
8051 * report errors due to that.
8052 */
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01008053 if (IS_I830(dev_priv))
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03008054 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8055
Daniel Vetter8bcc2792013-06-05 13:34:28 +02008056 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8057 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
Ville Syrjälä165e9012013-06-26 17:44:15 +03008058 } else {
8059 /* Mask out read-only status bits. */
8060 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8061 DPLL_PORTC_READY_MASK |
8062 DPLL_PORTB_READY_MASK);
Daniel Vetter8bcc2792013-06-05 13:34:28 +02008063 }
Daniel Vetter6c49f242013-06-06 12:45:25 +02008064
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +01008065 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjälä70b23a92014-04-09 13:28:22 +03008066 chv_crtc_clock_get(crtc, pipe_config);
Tvrtko Ursulin11a914c2016-10-13 11:03:08 +01008067 else if (IS_VALLEYVIEW(dev_priv))
Jesse Barnesacbec812013-09-20 11:29:32 -07008068 vlv_crtc_clock_get(crtc, pipe_config);
8069 else
8070 i9xx_crtc_clock_get(crtc, pipe_config);
Ville Syrjälä18442d02013-09-13 16:00:08 +03008071
Ville Syrjälä0f646142015-08-26 19:39:18 +03008072 /*
8073 * Normally the dotclock is filled in by the encoder .get_config()
8074 * but in case the pipe is enabled w/o any ports we need a sane
8075 * default.
8076 */
8077 pipe_config->base.adjusted_mode.crtc_clock =
8078 pipe_config->port_clock / pipe_config->pixel_multiplier;
8079
Imre Deak17290502016-02-12 18:55:11 +02008080 ret = true;
8081
8082out:
Chris Wilson0e6e0be2019-01-14 14:21:24 +00008083 intel_display_power_put(dev_priv, power_domain, wakeref);
Imre Deak17290502016-02-12 18:55:11 +02008084
8085 return ret;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01008086}
8087
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008088static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
Jesse Barnes13d83a62011-08-03 12:59:20 -07008089{
Jesse Barnes13d83a62011-08-03 12:59:20 -07008090 struct intel_encoder *encoder;
Lyude1c1a24d2016-06-14 11:04:09 -04008091 int i;
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008092 u32 val, final;
Jesse Barnes13d83a62011-08-03 12:59:20 -07008093 bool has_lvds = false;
Keith Packard199e5d72011-09-22 12:01:57 -07008094 bool has_cpu_edp = false;
Keith Packard199e5d72011-09-22 12:01:57 -07008095 bool has_panel = false;
Keith Packard99eb6a02011-09-26 14:29:12 -07008096 bool has_ck505 = false;
8097 bool can_ssc = false;
Lyude1c1a24d2016-06-14 11:04:09 -04008098 bool using_ssc_source = false;
Jesse Barnes13d83a62011-08-03 12:59:20 -07008099
8100 /* We need to take the global config into account */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008101 for_each_intel_encoder(&dev_priv->drm, encoder) {
Keith Packard199e5d72011-09-22 12:01:57 -07008102 switch (encoder->type) {
8103 case INTEL_OUTPUT_LVDS:
8104 has_panel = true;
8105 has_lvds = true;
8106 break;
8107 case INTEL_OUTPUT_EDP:
8108 has_panel = true;
Ville Syrjälä8f4f2792017-11-09 17:24:34 +02008109 if (encoder->port == PORT_A)
Keith Packard199e5d72011-09-22 12:01:57 -07008110 has_cpu_edp = true;
8111 break;
Paulo Zanoni6847d71b2014-10-27 17:47:52 -02008112 default:
8113 break;
Jesse Barnes13d83a62011-08-03 12:59:20 -07008114 }
8115 }
8116
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01008117 if (HAS_PCH_IBX(dev_priv)) {
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03008118 has_ck505 = dev_priv->vbt.display_clock_mode;
Keith Packard99eb6a02011-09-26 14:29:12 -07008119 can_ssc = has_ck505;
8120 } else {
8121 has_ck505 = false;
8122 can_ssc = true;
8123 }
8124
Lyude1c1a24d2016-06-14 11:04:09 -04008125 /* Check if any DPLLs are using the SSC source */
8126 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8127 u32 temp = I915_READ(PCH_DPLL(i));
8128
8129 if (!(temp & DPLL_VCO_ENABLE))
8130 continue;
8131
8132 if ((temp & PLL_REF_INPUT_MASK) ==
8133 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8134 using_ssc_source = true;
8135 break;
8136 }
8137 }
8138
8139 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8140 has_panel, has_lvds, has_ck505, using_ssc_source);
Jesse Barnes13d83a62011-08-03 12:59:20 -07008141
8142 /* Ironlake: try to setup display ref clock before DPLL
8143 * enabling. This is only under driver's control after
8144 * PCH B stepping, previous chipset stepping should be
8145 * ignoring this setting.
8146 */
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008147 val = I915_READ(PCH_DREF_CONTROL);
Jesse Barnes13d83a62011-08-03 12:59:20 -07008148
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008149 /* As we must carefully and slowly disable/enable each source in turn,
8150 * compute the final state we want first and check if we need to
8151 * make any changes at all.
8152 */
8153 final = val;
8154 final &= ~DREF_NONSPREAD_SOURCE_MASK;
Keith Packard99eb6a02011-09-26 14:29:12 -07008155 if (has_ck505)
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008156 final |= DREF_NONSPREAD_CK505_ENABLE;
Keith Packard99eb6a02011-09-26 14:29:12 -07008157 else
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008158 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8159
Daniel Vetter8c07eb62016-06-09 18:39:07 +02008160 final &= ~DREF_SSC_SOURCE_MASK;
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008161 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
Daniel Vetter8c07eb62016-06-09 18:39:07 +02008162 final &= ~DREF_SSC1_ENABLE;
Jesse Barnes13d83a62011-08-03 12:59:20 -07008163
Keith Packard199e5d72011-09-22 12:01:57 -07008164 if (has_panel) {
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008165 final |= DREF_SSC_SOURCE_ENABLE;
8166
8167 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8168 final |= DREF_SSC1_ENABLE;
8169
8170 if (has_cpu_edp) {
8171 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8172 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8173 else
8174 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8175 } else
8176 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
Lyude1c1a24d2016-06-14 11:04:09 -04008177 } else if (using_ssc_source) {
8178 final |= DREF_SSC_SOURCE_ENABLE;
8179 final |= DREF_SSC1_ENABLE;
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008180 }
8181
8182 if (final == val)
8183 return;
8184
8185 /* Always enable nonspread source */
8186 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8187
8188 if (has_ck505)
8189 val |= DREF_NONSPREAD_CK505_ENABLE;
8190 else
8191 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8192
8193 if (has_panel) {
8194 val &= ~DREF_SSC_SOURCE_MASK;
8195 val |= DREF_SSC_SOURCE_ENABLE;
Jesse Barnes13d83a62011-08-03 12:59:20 -07008196
Keith Packard199e5d72011-09-22 12:01:57 -07008197 /* SSC must be turned on before enabling the CPU output */
Keith Packard99eb6a02011-09-26 14:29:12 -07008198 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
Keith Packard199e5d72011-09-22 12:01:57 -07008199 DRM_DEBUG_KMS("Using SSC on panel\n");
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008200 val |= DREF_SSC1_ENABLE;
Daniel Vettere77166b2012-03-30 22:14:05 +02008201 } else
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008202 val &= ~DREF_SSC1_ENABLE;
Keith Packard199e5d72011-09-22 12:01:57 -07008203
8204 /* Get SSC going before enabling the outputs */
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008205 I915_WRITE(PCH_DREF_CONTROL, val);
Keith Packard199e5d72011-09-22 12:01:57 -07008206 POSTING_READ(PCH_DREF_CONTROL);
8207 udelay(200);
8208
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008209 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
Jesse Barnes13d83a62011-08-03 12:59:20 -07008210
8211 /* Enable CPU source on CPU attached eDP */
Keith Packard199e5d72011-09-22 12:01:57 -07008212 if (has_cpu_edp) {
Keith Packard99eb6a02011-09-26 14:29:12 -07008213 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
Keith Packard199e5d72011-09-22 12:01:57 -07008214 DRM_DEBUG_KMS("Using SSC on eDP\n");
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008215 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
Robin Schroereba905b2014-05-18 02:24:50 +02008216 } else
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008217 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
Keith Packard199e5d72011-09-22 12:01:57 -07008218 } else
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008219 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
Keith Packard199e5d72011-09-22 12:01:57 -07008220
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008221 I915_WRITE(PCH_DREF_CONTROL, val);
Keith Packard199e5d72011-09-22 12:01:57 -07008222 POSTING_READ(PCH_DREF_CONTROL);
8223 udelay(200);
8224 } else {
Lyude1c1a24d2016-06-14 11:04:09 -04008225 DRM_DEBUG_KMS("Disabling CPU source output\n");
Keith Packard199e5d72011-09-22 12:01:57 -07008226
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008227 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
Keith Packard199e5d72011-09-22 12:01:57 -07008228
8229 /* Turn off CPU output */
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008230 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
Keith Packard199e5d72011-09-22 12:01:57 -07008231
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008232 I915_WRITE(PCH_DREF_CONTROL, val);
Keith Packard199e5d72011-09-22 12:01:57 -07008233 POSTING_READ(PCH_DREF_CONTROL);
8234 udelay(200);
8235
Lyude1c1a24d2016-06-14 11:04:09 -04008236 if (!using_ssc_source) {
8237 DRM_DEBUG_KMS("Disabling SSC source\n");
Keith Packard199e5d72011-09-22 12:01:57 -07008238
Lyude1c1a24d2016-06-14 11:04:09 -04008239 /* Turn off the SSC source */
8240 val &= ~DREF_SSC_SOURCE_MASK;
8241 val |= DREF_SSC_SOURCE_DISABLE;
Keith Packard199e5d72011-09-22 12:01:57 -07008242
Lyude1c1a24d2016-06-14 11:04:09 -04008243 /* Turn off SSC1 */
8244 val &= ~DREF_SSC1_ENABLE;
8245
8246 I915_WRITE(PCH_DREF_CONTROL, val);
8247 POSTING_READ(PCH_DREF_CONTROL);
8248 udelay(200);
8249 }
Jesse Barnes13d83a62011-08-03 12:59:20 -07008250 }
Chris Wilson74cfd7a2013-03-26 16:33:04 -07008251
8252 BUG_ON(val != final);
Jesse Barnes13d83a62011-08-03 12:59:20 -07008253}
8254
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008255static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
Paulo Zanonidde86e22012-12-01 12:04:25 -02008256{
Jani Nikulaba3f4d02019-01-18 14:01:23 +02008257 u32 tmp;
Paulo Zanonidde86e22012-12-01 12:04:25 -02008258
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008259 tmp = I915_READ(SOUTH_CHICKEN2);
8260 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8261 I915_WRITE(SOUTH_CHICKEN2, tmp);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008262
Imre Deakcf3598c2016-06-28 13:37:31 +03008263 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8264 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008265 DRM_ERROR("FDI mPHY reset assert timeout\n");
Paulo Zanonidde86e22012-12-01 12:04:25 -02008266
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008267 tmp = I915_READ(SOUTH_CHICKEN2);
8268 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8269 I915_WRITE(SOUTH_CHICKEN2, tmp);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008270
Imre Deakcf3598c2016-06-28 13:37:31 +03008271 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8272 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
Paulo Zanoni0ff066a2013-07-12 14:19:36 -03008273 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008274}
8275
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY over the IOSF sideband (SBI_MPHY). The register
 * addresses and values below are magic numbers mandated by the
 * WaMPhyProgramming:hsw workaround; the 0x20xx/0x21xx registers are
 * written in pairs — presumably one per FDI link (confirm against the
 * workaround documentation).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8350
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008351/* Implements 3 different sequences from BSpec chapter "Display iCLK
8352 * Programming" based on the parameters passed:
8353 * - Sequence to enable CLKOUT_DP
8354 * - Sequence to enable CLKOUT_DP without spread
8355 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8356 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008357static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
8358 bool with_spread, bool with_fdi)
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008359{
Jani Nikulaba3f4d02019-01-18 14:01:23 +02008360 u32 reg, tmp;
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008361
8362 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8363 with_spread = true;
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01008364 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
8365 with_fdi, "LP PCH doesn't have FDI\n"))
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008366 with_fdi = false;
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008367
Ville Syrjäläa5805162015-05-26 20:42:30 +03008368 mutex_lock(&dev_priv->sb_lock);
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008369
8370 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8371 tmp &= ~SBI_SSCCTL_DISABLE;
8372 tmp |= SBI_SSCCTL_PATHALT;
8373 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8374
8375 udelay(24);
8376
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008377 if (with_spread) {
8378 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8379 tmp &= ~SBI_SSCCTL_PATHALT;
8380 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
Paulo Zanonif31f2d52013-07-18 18:51:11 -03008381
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008382 if (with_fdi) {
8383 lpt_reset_fdi_mphy(dev_priv);
8384 lpt_program_fdi_mphy(dev_priv);
8385 }
8386 }
Paulo Zanonidde86e22012-12-01 12:04:25 -02008387
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01008388 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
Paulo Zanoni2fa86a12013-07-23 11:19:24 -03008389 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8390 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8391 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
Daniel Vetterc00db242013-01-22 15:33:27 +01008392
Ville Syrjäläa5805162015-05-26 20:42:30 +03008393 mutex_unlock(&dev_priv->sb_lock);
Paulo Zanonidde86e22012-12-01 12:04:25 -02008394}
8395
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* First disable the CLKOUT_DP output buffer (register differs
	 * between LP and non-LP PCH variants). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* Then gate the SSC block, but only if it is currently enabled:
	 * select the alternate (bypass) path first, wait for the switch
	 * to settle, and finally disable the block. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8421
/*
 * Map a clock bend step count (-50..50, multiples of 5) to an index into
 * sscdivintphase[]: BEND_IDX(-50) == 0 ... BEND_IDX(50) == 20.
 */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * Divider/phase values written to the low 16 bits of SBI_SSCDIVINTPHASE
 * by lpt_bend_clkout_dp(), indexed by BEND_IDX(steps). Adjacent half
 * steps share an entry; the in-between bend is achieved via dithering
 * (see lpt_bend_clkout_dp()).
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
8447
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	/* Only multiples of 5 are representable by the table. */
	if (WARN_ON(steps % 5 != 0))
		return;

	/* Guard against out-of-range steps indexing past the table. */
	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* Odd multiples of 5 dither between the two adjacent table
	 * entries; 0xAAAAAAAB is the dither pattern used for that
	 * (presumably per BSpec — not documented here). */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Program the divider/phase into the low 16 bits, preserving
	 * the upper half of the register. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX
8482
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008483static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008484{
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008485 struct intel_encoder *encoder;
8486 bool has_vga = false;
8487
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008488 for_each_intel_encoder(&dev_priv->drm, encoder) {
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008489 switch (encoder->type) {
8490 case INTEL_OUTPUT_ANALOG:
8491 has_vga = true;
8492 break;
Paulo Zanoni6847d71b2014-10-27 17:47:52 -02008493 default:
8494 break;
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008495 }
8496 }
8497
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008498 if (has_vga) {
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008499 lpt_bend_clkout_dp(dev_priv, 0);
8500 lpt_enable_clkout_dp(dev_priv, true, true);
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008501 } else {
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008502 lpt_disable_clkout_dp(dev_priv);
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008503 }
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008504}
8505
/*
 * Initialize reference clocks when the driver loads
 *
 * Dispatches to the PCH-generation specific init routine; PCH types
 * without a handler here need no refclk setup at load time.
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
8516
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008517static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
Paulo Zanonic8203562012-09-12 10:06:29 -03008518{
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008519 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8520 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8521 enum pipe pipe = crtc->pipe;
Jani Nikulaba3f4d02019-01-18 14:01:23 +02008522 u32 val;
Paulo Zanonic8203562012-09-12 10:06:29 -03008523
Daniel Vetter78114072013-06-13 00:54:57 +02008524 val = 0;
Paulo Zanonic8203562012-09-12 10:06:29 -03008525
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008526 switch (crtc_state->pipe_bpp) {
Paulo Zanonic8203562012-09-12 10:06:29 -03008527 case 18:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008528 val |= PIPECONF_6BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008529 break;
8530 case 24:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008531 val |= PIPECONF_8BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008532 break;
8533 case 30:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008534 val |= PIPECONF_10BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008535 break;
8536 case 36:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008537 val |= PIPECONF_12BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008538 break;
8539 default:
Paulo Zanonicc769b62012-09-20 18:36:03 -03008540 /* Case prevented by intel_choose_pipe_bpp_dither. */
8541 BUG();
Paulo Zanonic8203562012-09-12 10:06:29 -03008542 }
8543
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008544 if (crtc_state->dither)
Paulo Zanonic8203562012-09-12 10:06:29 -03008545 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8546
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008547 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
Paulo Zanonic8203562012-09-12 10:06:29 -03008548 val |= PIPECONF_INTERLACED_ILK;
8549 else
8550 val |= PIPECONF_PROGRESSIVE;
8551
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008552 if (crtc_state->limited_color_range)
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02008553 val |= PIPECONF_COLOR_RANGE_SELECT;
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02008554
Paulo Zanonic8203562012-09-12 10:06:29 -03008555 I915_WRITE(PIPECONF(pipe), val);
8556 POSTING_READ(PIPECONF(pipe));
8557}
8558
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008559static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008560{
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008561 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8562 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8563 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
Jani Nikula391bf042016-03-18 17:05:40 +02008564 u32 val = 0;
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008565
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008566 if (IS_HASWELL(dev_priv) && crtc_state->dither)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008567 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8568
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008569 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008570 val |= PIPECONF_INTERLACED_ILK;
8571 else
8572 val |= PIPECONF_PROGRESSIVE;
8573
Paulo Zanoni702e7a52012-10-23 18:29:59 -02008574 I915_WRITE(PIPECONF(cpu_transcoder), val);
8575 POSTING_READ(PIPECONF(cpu_transcoder));
Jani Nikula391bf042016-03-18 17:05:40 +02008576}
8577
/*
 * Program PIPEMISC (BDW and gen9+ only): dither depth/enable and the
 * YCbCr output colorspace / 4:2:0 blend bits. A no-op on plain HSW,
 * which has no PIPEMISC handling here.
 */
static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 val = 0;

		/* Dither target depth mirrors the pipe bpp. */
		switch (crtc_state->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (crtc_state->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		/* Both YCbCr 4:4:4 and 4:2:0 select the YUV colorspace... */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
			val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

		/* ...but only 4:2:0 additionally enables the pipe's
		 * 4:2:0 downsampling in full-blend mode. */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			val |= PIPEMISC_YUV420_ENABLE |
				PIPEMISC_YUV420_MODE_FULL_BLEND;

		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
	}
}
8618
Paulo Zanonid4b19312012-11-29 11:29:32 -02008619int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8620{
8621 /*
8622 * Account for spread spectrum to avoid
8623 * oversubscribing the link. Max center spread
8624 * is 2.5%; use 5% for safety's sake.
8625 */
8626 u32 bps = target_clock * bpp * 21 / 20;
Ville Syrjälä619d4d02014-02-27 14:23:14 +02008627 return DIV_ROUND_UP(bps, link_bw * 8);
Paulo Zanonid4b19312012-11-29 11:29:32 -02008628}
8629
Daniel Vetter7429e9d2013-04-20 17:19:46 +02008630static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
Daniel Vetter6cf86a52013-04-02 23:38:10 +02008631{
Daniel Vetter7429e9d2013-04-20 17:19:46 +02008632 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
Paulo Zanonif48d8f22012-09-20 18:36:04 -03008633}
8634
/*
 * Assemble the ILK DPLL, FP0 and FP1 register values for @crtc_state
 * (and the optional @reduced_clock for FP1) into
 * crtc_state->dpll_hw_state. Nothing is written to hardware here.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* LVDS with 100 MHz SSC, or dual-link LVDS on IBX,
		 * uses a larger tuning factor. */
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		/* FP1 carries the reduced (downclocked) dividers, with
		 * the same CB tune criterion applied to them. */
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference input: spread spectrum only for LVDS panels
	 * configured to use SSC. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8736
/*
 * Compute PCH PLL settings for an ILK-style crtc: pick the dpll limit
 * table based on output type / refclk, find dividers, fill in
 * dpll_hw_state and reserve a shared DPLL.
 *
 * Returns 0 on success, -EINVAL if no divider or shared DPLL can be
 * found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* SSC, when in use, replaces the 120 MHz reference. */
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* Limit table depends on single vs dual link and on a
		 * 100 MHz vs 120 MHz reference. */
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Respect pre-set dividers (crtc_state->clock_set); otherwise
	 * search for a divider combination within the limits. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
			      pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
8791
/*
 * Read back the PCH transcoder M1/N1 link and data values for
 * @crtc's pipe into @m_n. The TU size is stored in the DATA_M1
 * register alongside gmch_m, hence the masking/shifting.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* TU field is stored as (tu - 1) in hardware. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
8807
/*
 * Read back the CPU transcoder M/N values into @m_n (and, where the
 * transcoder has a second M2/N2 set and @m2_n2 is non-NULL, into
 * @m2_n2 as well). Pre-gen5 uses the G4X per-pipe registers instead
 * of per-transcoder ones.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU field is stored as (tu - 1) in hardware. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
8844
8845void intel_dp_get_m_n(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008846 struct intel_crtc_state *pipe_config)
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008847{
Ander Conselvan de Oliveira681a8502015-01-15 14:55:24 +02008848 if (pipe_config->has_pch_encoder)
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008849 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8850 else
8851 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
Vandana Kannanb95af8b2014-08-05 07:51:23 -07008852 &pipe_config->dp_m_n,
8853 &pipe_config->dp_m2_n2);
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008854}
8855
/*
 * Read back the FDI link M/N values from the CPU transcoder into
 * pipe_config->fdi_m_n. FDI has no second M2/N2 set, hence NULL.
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
8862
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008863static void skylake_get_pfit_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008864 struct intel_crtc_state *pipe_config)
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008865{
8866 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008867 struct drm_i915_private *dev_priv = to_i915(dev);
Chandra Kondurua1b22782015-04-07 15:28:45 -07008868 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
Jani Nikulaba3f4d02019-01-18 14:01:23 +02008869 u32 ps_ctrl = 0;
Chandra Kondurua1b22782015-04-07 15:28:45 -07008870 int id = -1;
8871 int i;
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008872
Chandra Kondurua1b22782015-04-07 15:28:45 -07008873 /* find scaler attached to this pipe */
8874 for (i = 0; i < crtc->num_scalers; i++) {
8875 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
8876 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
8877 id = i;
8878 pipe_config->pch_pfit.enabled = true;
8879 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
8880 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
8881 break;
8882 }
8883 }
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008884
Chandra Kondurua1b22782015-04-07 15:28:45 -07008885 scaler_state->scaler_id = id;
8886 if (id >= 0) {
8887 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
8888 } else {
8889 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008890 }
8891}
8892
/*
 * Read back the primary plane's hardware state at driver load and
 * reconstruct a framebuffer description (format, modifier, rotation,
 * base, dimensions, stride) in @plane_config, so the firmware-set
 * framebuffer can be inherited. Bails out silently if the plane is
 * disabled, and frees the allocation on an unknown tiling mode.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane is not enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* Gen11 widened the format field in PLANE_CTL. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode moved to PLANE_COLOR_CTL on GLK/gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hardware tiling bits into a DRM modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* Hardware stores (size - 1) for both dimensions. */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in tile-dependent units. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
9019
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009020static void ironlake_get_pfit_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02009021 struct intel_crtc_state *pipe_config)
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009022{
9023 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01009024 struct drm_i915_private *dev_priv = to_i915(dev);
Jani Nikulaba3f4d02019-01-18 14:01:23 +02009025 u32 tmp;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009026
9027 tmp = I915_READ(PF_CTL(crtc->pipe));
9028
9029 if (tmp & PF_ENABLE) {
Chris Wilsonfd4daa92013-08-27 17:04:17 +01009030 pipe_config->pch_pfit.enabled = true;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009031 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9032 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
Daniel Vettercb8b2a32013-06-01 17:16:23 +02009033
9034 /* We currently do not free assignements of panel fitters on
9035 * ivb/hsw (since we don't use the higher upscaling modes which
9036 * differentiates them) so just WARN about this case for now. */
Lucas De Marchicf819ef2018-12-12 10:10:43 -08009037 if (IS_GEN(dev_priv, 7)) {
Daniel Vettercb8b2a32013-06-01 17:16:23 +02009038 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9039 PF_PIPE_SEL_IVB(crtc->pipe));
9040 }
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02009041 }
Jesse Barnes79e53942008-11-07 14:24:08 -08009042}
9043
/*
 * Read out the current hardware state of @crtc into @pipe_config on
 * ironlake-style (PCH display) platforms. Returns true if the pipe is
 * enabled; when false, @pipe_config is only partially filled in.
 * Takes the pipe power domain reference for the duration of the readout.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/* Bail out (pipe can't be enabled) if its power domain is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	/* On these platforms the pipe->transcoder mapping is fixed 1:1. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Decode bits-per-color from PIPECONF into total pipe bpp (3 channels). */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	/* PCH transcoder enabled implies an active FDI link + shared DPLL. */
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT and later select the PLL via PCH_DPLL_SEL. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		/* The PLL must be on if the transcoder is enabled. */
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
							&pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	/* Release the pipe power domain reference taken above. */
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
9143
/*
 * Sanity-check that nothing which depends on LCPLL is still active before
 * the PLL is disabled: every I915_STATE_WARN below fires (once) if the
 * named unit is still enabled.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	/* PWM2 only exists on Haswell; BDW would fault/garbage on this MMIO. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9178
Jani Nikulaba3f4d02019-01-18 14:01:23 +02009179static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03009180{
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01009181 if (IS_HASWELL(dev_priv))
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03009182 return I915_READ(D_COMP_HSW);
9183 else
9184 return I915_READ(D_COMP_BDW);
9185}
9186
Jani Nikulaba3f4d02019-01-18 14:01:23 +02009187static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
Paulo Zanoni3c4c9b82014-03-07 20:12:36 -03009188{
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01009189 if (IS_HASWELL(dev_priv)) {
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01009190 mutex_lock(&dev_priv->pcu_lock);
Paulo Zanoni3c4c9b82014-03-07 20:12:36 -03009191 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9192 val))
Chris Wilson79cf2192016-08-24 11:16:07 +01009193 DRM_DEBUG_KMS("Failed to write to D_COMP\n");
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01009194 mutex_unlock(&dev_priv->pcu_lock);
Paulo Zanoni3c4c9b82014-03-07 20:12:36 -03009195 } else {
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03009196 I915_WRITE(D_COMP_BDW, val);
9197 POSTING_READ(D_COMP_BDW);
Paulo Zanoni3c4c9b82014-03-07 20:12:36 -03009198 }
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009199}
9200
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 *
 * @switch_to_fclk: switch the CD clock source to FCLK before disabling the
 *	PLL (and wait for the switch to complete).
 * @allow_power_down: set LCPLL_POWER_DOWN_ALLOW once the PLL is disabled.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		/* HW acks the source switch via the FCLK_DONE bit. */
		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait for the lock bit to clear. */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP compensation and wait for RCOMP to go idle. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9252
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source. Inverse of hsw_disable_lcpll(); no-op if LCPLL is already locked
 * and in its fully-enabled configuration.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LCPLL_CTL);

	/* Already locked, power-down disallowed and sourcing LCPLL: done. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP compensation before locking the PLL. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* If the CD clock was parked on FCLK, switch it back to LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/* CD clock frequency may have changed; refresh the cached state. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9308
Paulo Zanoni765dab672014-03-07 20:08:18 -03009309/*
9310 * Package states C8 and deeper are really deep PC states that can only be
9311 * reached when all the devices on the system allow it, so even if the graphics
9312 * device allows PC8+, it doesn't mean the system will actually get to these
9313 * states. Our driver only allows PC8+ when going into runtime PM.
9314 *
9315 * The requirements for PC8+ are that all the outputs are disabled, the power
9316 * well is disabled and most interrupts are disabled, and these are also
9317 * requirements for runtime PM. When these conditions are met, we manually do
9318 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9319 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9320 * hang the machine.
9321 *
9322 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9323 * the state of some registers, so when we come back from PC8+ we need to
9324 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9325 * need to take care of the registers kept by RC6. Notice that this happens even
9326 * if we don't put the device in PCI D3 state (which is what currently happens
9327 * because of the runtime PM support).
9328 *
9329 * For more, read "Display Sequences for Package C8" on the hardware
9330 * documentation.
9331 */
Paulo Zanonia14cb6f2014-03-07 20:08:17 -03009332void hsw_enable_pc8(struct drm_i915_private *dev_priv)
Paulo Zanonic67a4702013-08-19 13:18:09 -03009333{
Jani Nikulaba3f4d02019-01-18 14:01:23 +02009334 u32 val;
Paulo Zanonic67a4702013-08-19 13:18:09 -03009335
Paulo Zanonic67a4702013-08-19 13:18:09 -03009336 DRM_DEBUG_KMS("Enabling package C8+\n");
9337
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01009338 if (HAS_PCH_LPT_LP(dev_priv)) {
Paulo Zanonic67a4702013-08-19 13:18:09 -03009339 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9340 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9341 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9342 }
9343
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02009344 lpt_disable_clkout_dp(dev_priv);
Paulo Zanonic67a4702013-08-19 13:18:09 -03009345 hsw_disable_lcpll(dev_priv, true, true);
9346}
9347
Paulo Zanonia14cb6f2014-03-07 20:08:17 -03009348void hsw_disable_pc8(struct drm_i915_private *dev_priv)
Paulo Zanonic67a4702013-08-19 13:18:09 -03009349{
Jani Nikulaba3f4d02019-01-18 14:01:23 +02009350 u32 val;
Paulo Zanonic67a4702013-08-19 13:18:09 -03009351
Paulo Zanonic67a4702013-08-19 13:18:09 -03009352 DRM_DEBUG_KMS("Disabling package C8+\n");
9353
9354 hsw_restore_lcpll(dev_priv);
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02009355 lpt_init_pch_refclk(dev_priv);
Paulo Zanonic67a4702013-08-19 13:18:09 -03009356
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01009357 if (HAS_PCH_LPT_LP(dev_priv)) {
Paulo Zanonic67a4702013-08-19 13:18:09 -03009358 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9359 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9360 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9361 }
Paulo Zanonic67a4702013-08-19 13:18:09 -03009362}
9363
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02009364static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9365 struct intel_crtc_state *crtc_state)
Paulo Zanoni09b4ddf2012-10-05 12:05:55 -03009366{
Madhav Chauhan70a057b2018-11-29 16:12:18 +02009367 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03009368 struct intel_atomic_state *state =
9369 to_intel_atomic_state(crtc_state->base.state);
9370
Madhav Chauhan70a057b2018-11-29 16:12:18 +02009371 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
9372 IS_ICELAKE(dev_priv)) {
Paulo Zanoni44a126b2017-03-22 15:58:45 -03009373 struct intel_encoder *encoder =
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03009374 intel_get_crtc_new_encoder(state, crtc_state);
Paulo Zanoni44a126b2017-03-22 15:58:45 -03009375
9376 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
Chris Wilson43031782018-09-13 14:16:26 +01009377 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9378 pipe_name(crtc->pipe));
Mika Kaholaaf3997b2016-02-05 13:29:28 +02009379 return -EINVAL;
Paulo Zanoni44a126b2017-03-22 15:58:45 -03009380 }
Mika Kaholaaf3997b2016-02-05 13:29:28 +02009381 }
Daniel Vetter716c2e52014-06-25 22:02:02 +03009382
Daniel Vetterc8f7a0d2014-04-24 23:55:04 +02009383 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08009384}
9385
Kahola, Mika8b0f7e02017-06-09 15:26:03 -07009386static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9387 enum port port,
9388 struct intel_crtc_state *pipe_config)
9389{
9390 enum intel_dpll_id id;
9391 u32 temp;
9392
9393 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
Paulo Zanonidfbd4502017-08-25 16:40:04 -03009394 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
Kahola, Mika8b0f7e02017-06-09 15:26:03 -07009395
9396 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9397 return;
9398
9399 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9400}
9401
Paulo Zanoni970888e2018-05-21 17:25:44 -07009402static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9403 enum port port,
9404 struct intel_crtc_state *pipe_config)
9405{
9406 enum intel_dpll_id id;
9407 u32 temp;
9408
9409 /* TODO: TBT pll not implemented. */
Vandita Kulkarni8ea59e62018-10-03 12:51:59 +05309410 if (intel_port_is_combophy(dev_priv, port)) {
Paulo Zanoni970888e2018-05-21 17:25:44 -07009411 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9412 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9413 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
9414
Vandita Kulkarnia54270d2018-10-03 12:52:00 +05309415 if (WARN_ON(!intel_dpll_is_combophy(id)))
Paulo Zanoni970888e2018-05-21 17:25:44 -07009416 return;
Vandita Kulkarni8ea59e62018-10-03 12:51:59 +05309417 } else if (intel_port_is_tc(dev_priv, port)) {
Vandita Kulkarnicb6caf72018-10-03 12:51:58 +05309418 id = icl_port_to_mg_pll_id(port);
Vandita Kulkarni8ea59e62018-10-03 12:51:59 +05309419 } else {
9420 WARN(1, "Invalid port %x\n", port);
Paulo Zanoni970888e2018-05-21 17:25:44 -07009421 return;
9422 }
9423
9424 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9425}
9426
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309427static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9428 enum port port,
9429 struct intel_crtc_state *pipe_config)
9430{
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009431 enum intel_dpll_id id;
9432
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309433 switch (port) {
9434 case PORT_A:
Imre Deak08250c42016-03-14 19:55:34 +02009435 id = DPLL_ID_SKL_DPLL0;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309436 break;
9437 case PORT_B:
Imre Deak08250c42016-03-14 19:55:34 +02009438 id = DPLL_ID_SKL_DPLL1;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309439 break;
9440 case PORT_C:
Imre Deak08250c42016-03-14 19:55:34 +02009441 id = DPLL_ID_SKL_DPLL2;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309442 break;
9443 default:
9444 DRM_ERROR("Incorrect port type\n");
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009445 return;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309446 }
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009447
9448 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309449}
9450
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009451static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9452 enum port port,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02009453 struct intel_crtc_state *pipe_config)
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009454{
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009455 enum intel_dpll_id id;
Ander Conselvan de Oliveiraa3c988e2016-03-08 17:46:27 +02009456 u32 temp;
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009457
9458 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
Ander Conselvan de Oliveirac8560522016-09-01 15:08:07 -07009459 id = temp >> (port * 3 + 1);
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009460
Ander Conselvan de Oliveirac8560522016-09-01 15:08:07 -07009461 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009462 return;
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009463
9464 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
Satheeshakrishna M96b7dfb2014-11-13 14:55:17 +00009465}
9466
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009467static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9468 enum port port,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02009469 struct intel_crtc_state *pipe_config)
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009470{
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009471 enum intel_dpll_id id;
Jani Nikulaba3f4d02019-01-18 14:01:23 +02009472 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009473
Ander Conselvan de Oliveirac8560522016-09-01 15:08:07 -07009474 switch (ddi_pll_sel) {
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009475 case PORT_CLK_SEL_WRPLL1:
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009476 id = DPLL_ID_WRPLL1;
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009477 break;
9478 case PORT_CLK_SEL_WRPLL2:
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009479 id = DPLL_ID_WRPLL2;
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009480 break;
Maarten Lankhorst00490c22015-11-16 14:42:12 +01009481 case PORT_CLK_SEL_SPLL:
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009482 id = DPLL_ID_SPLL;
Ville Syrjälä79bd23d2015-12-01 23:32:07 +02009483 break;
Ander Conselvan de Oliveira9d16da62016-03-08 17:46:26 +02009484 case PORT_CLK_SEL_LCPLL_810:
9485 id = DPLL_ID_LCPLL_810;
9486 break;
9487 case PORT_CLK_SEL_LCPLL_1350:
9488 id = DPLL_ID_LCPLL_1350;
9489 break;
9490 case PORT_CLK_SEL_LCPLL_2700:
9491 id = DPLL_ID_LCPLL_2700;
9492 break;
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009493 default:
Ander Conselvan de Oliveirac8560522016-09-01 15:08:07 -07009494 MISSING_CASE(ddi_pll_sel);
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009495 /* fall through */
9496 case PORT_CLK_SEL_NONE:
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009497 return;
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009498 }
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009499
9500 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
Damien Lespiau7d2c8172014-07-29 18:06:18 +01009501}
9502
/*
 * Figure out which CPU transcoder feeds @crtc (the fixed per-pipe one, or
 * an eDP/DSI panel transcoder), take the transcoder's power domain
 * reference if it is enabled (recording it in @power_domain_mask so the
 * caller can release it) and return whether the transcoder is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	u32 tmp;

	/* ICL adds the two DSI transcoders to the set of panel transcoders. */
	if (IS_ICELAKE(dev_priv))
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Decode which pipe the panel transcoder is driving. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		}

		if (trans_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = panel_transcoder;
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* The caller must not already hold this domain's reference. */
	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9585
/*
 * Check whether @crtc is driven by one of the BXT DSI transcoders. Takes
 * the power domain reference for each powered DSI transcoder it probes
 * (recorded in @power_domain_mask for the caller to release) and, on a
 * match, stores the transcoder in @pipe_config. Returns true if the
 * resulting cpu_transcoder is a DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* Port A pairs with DSI transcoder A, port C with C. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		/* The caller must not already hold this domain's reference. */
		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports not routed to this crtc's pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9635
/*
 * Read out the DDI port state for @crtc into @pipe_config.
 *
 * Determines which DDI port the cpu transcoder is driving from
 * TRANS_DDI_FUNC_CTL, dispatches to the platform-specific DPLL readout,
 * cross-checks the readout against the PLL's actual hardware state, and
 * fills in FDI configuration when the pipe is routed through the (single)
 * PCH transcoder on pre-gen9 platforms.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* Pick the DPLL readout matching the display generation. */
	if (IS_ICELAKE(dev_priv))
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* The readout PLL, if any, must actually be enabled in hardware. */
	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
							&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9681
/*
 * Read out the full hardware pipe configuration for @crtc (HSW and later).
 *
 * All register access is bracketed by power-domain references: the pipe
 * domain is taken up front (bailing out if the pipe's power well is off),
 * and every further domain acquired during readout is accumulated in
 * power_domain_mask so it can be released in one pass at the end.
 *
 * Returns true if the pipe is active; @pipe_config is only fully valid in
 * that case.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* On BXT/GLK the pipe may instead be driven by a DSI transcoder. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    IS_ICELAKE(dev_priv)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);
	intel_get_crtc_ycbcr_config(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	/* Panel fitter state requires its own power domain to read. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		WARN_ON(power_domain_mask & BIT_ULL(power_domain));
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power reference taken during the readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put_unchecked(dev_priv, power_domain);

	return active;
}
9760
/*
 * Compute the CURBASE register value (surface address) for a cursor plane.
 *
 * Uses the physical bus address on platforms that require physically
 * contiguous cursors, the GGTT offset otherwise, plus the plane's surface
 * offset. On GMCH platforms a 180 degree rotation is implemented by
 * pointing the base at the last pixel; ILK+ handle rotation in hardware.
 */
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 base;

	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
		base = obj->phys_handle->busaddr;
	else
		base = intel_plane_ggtt_offset(plane_state);

	base += plane_state->color_plane[0].offset;

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
		base += (plane_state->base.crtc_h *
			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];

	return base;
}
9784
Ville Syrjäläed270222017-03-27 21:55:36 +03009785static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9786{
9787 int x = plane_state->base.crtc_x;
9788 int y = plane_state->base.crtc_y;
9789 u32 pos = 0;
9790
9791 if (x < 0) {
9792 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9793 x = -x;
9794 }
9795 pos |= x << CURSOR_X_SHIFT;
9796
9797 if (y < 0) {
9798 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9799 y = -y;
9800 }
9801 pos |= y << CURSOR_Y_SHIFT;
9802
9803 return pos;
9804}
9805
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009806static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9807{
9808 const struct drm_mode_config *config =
9809 &plane_state->base.plane->dev->mode_config;
9810 int width = plane_state->base.crtc_w;
9811 int height = plane_state->base.crtc_h;
9812
9813 return width > 0 && width <= config->cursor_width &&
9814 height > 0 && height <= config->cursor_height;
9815}
9816
/*
 * Validate and finalize the cursor's surface layout.
 *
 * Fills in the GGTT view and stride, verifies the stride, and computes the
 * aligned surface offset. Cursors cannot be panned within the framebuffer,
 * so any residual src_x/src_y after offset alignment is rejected.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* src coordinates are 16.16 fixed point; take the integer part. */
	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->color_plane[0].offset = offset;

	return 0;
}
9848
/*
 * Common atomic check for cursor planes (shared by the i845 and i9xx
 * variants): rejects tiled framebuffers, clips the plane against the crtc
 * (no scaling allowed), and validates src coordinates and the surface
 * layout for visible planes.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Nothing more to validate if the plane ends up invisible. */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
9881
/* Maximum cursor stride on 845g/865g: fixed at 2048 bytes regardless of
 * format, modifier or rotation.
 */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
9889
/*
 * Compute the CURCNTR register value for the 845g/865g cursor: enable bit,
 * gamma, ARGB format, and the stride encoded into the control register.
 */
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	return CURSOR_ENABLE |
	       CURSOR_GAMMA_ENABLE |
	       CURSOR_FORMAT_ARGB |
	       CURSOR_STRIDE(plane_state->color_plane[0].stride);
}
9898
Ville Syrjälä659056f2017-03-27 21:55:39 +03009899static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9900{
Ville Syrjälä659056f2017-03-27 21:55:39 +03009901 int width = plane_state->base.crtc_w;
Ville Syrjälä659056f2017-03-27 21:55:39 +03009902
9903 /*
9904 * 845g/865g are only limited by the width of their cursors,
9905 * the height is arbitrary up to the precision of the register.
9906 */
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009907 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
Ville Syrjälä659056f2017-03-27 21:55:39 +03009908}
9909
/*
 * Atomic check for the 845g/865g cursor plane: runs the common cursor
 * checks, then enforces the platform's size constraints and the discrete
 * set of supported strides, and finally precomputes the CURCNTR value.
 *
 * Returns 0 on success or a negative error code.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* Visible cursors must scan out at the fb's own pitch. */
	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* Hardware supports only these power-of-two strides. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9951
/*
 * Program the 845g/865g cursor registers from @plane_state, or disable the
 * cursor when @plane_state is NULL / invisible (all register values 0).
 *
 * Register access happens under the uncore lock with the _FW accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		/* CURSIZE packs height in bits 12+ and width in the low bits. */
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, reprogram, then re-enable with new cntl. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; a single CURPOS write suffices. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
9994
/* Disable the 845g/865g cursor by programming it with a NULL plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
10000
/*
 * Read out whether the 845g/865g cursor is enabled in hardware.
 *
 * Takes a power reference for pipe A (the only pipe this cursor can be on)
 * for the duration of the register read; returns false if the power well
 * is off. *@pipe is always set to PIPE_A on success.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	*pipe = PIPE_A;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
10022
/* Maximum cursor stride on i9xx+: the device's max cursor width times
 * 4 bytes per pixel (ARGB), independent of format/modifier/rotation.
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
10030
/*
 * Compute the CURCNTR register value for the i9xx-style cursor.
 *
 * Sets generation-specific control bits (trickle feed, gamma, pipe CSC,
 * pipe select), encodes the cursor mode from the width (64/128/256 ARGB),
 * and applies 180 degree rotation. Returns 0 (cursor off) for unsupported
 * widths.
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	u32 cntl = 0;

	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

	if (INTEL_GEN(dev_priv) <= 10) {
		cntl |= MCURSOR_GAMMA_ENABLE;

		if (HAS_DDI(dev_priv))
			cntl |= MCURSOR_PIPE_CSC_ENABLE;
	}

	/* Pre-gen5 (except g4x) select the target pipe in CURCNTR. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);

	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= MCURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= MCURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= MCURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
		cntl |= MCURSOR_ROTATE_180;

	return cntl;
}
10072
Ville Syrjälä659056f2017-03-27 21:55:39 +030010073static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
Chris Wilson560b85b2010-08-07 11:01:38 +010010074{
Ville Syrjälä024faac2017-03-27 21:55:42 +030010075 struct drm_i915_private *dev_priv =
10076 to_i915(plane_state->base.plane->dev);
Ville Syrjälä659056f2017-03-27 21:55:39 +030010077 int width = plane_state->base.crtc_w;
10078 int height = plane_state->base.crtc_h;
Chris Wilson560b85b2010-08-07 11:01:38 +010010079
Ville Syrjälä3637ecf2017-03-27 21:55:40 +030010080 if (!intel_cursor_size_ok(plane_state))
Ville Syrjälädc41c152014-08-13 11:57:05 +030010081 return false;
10082
Ville Syrjälä024faac2017-03-27 21:55:42 +030010083 /* Cursor width is limited to a few power-of-two sizes */
10084 switch (width) {
Ville Syrjälä659056f2017-03-27 21:55:39 +030010085 case 256:
10086 case 128:
Ville Syrjälä659056f2017-03-27 21:55:39 +030010087 case 64:
10088 break;
10089 default:
10090 return false;
10091 }
10092
Ville Syrjälädc41c152014-08-13 11:57:05 +030010093 /*
Ville Syrjälä024faac2017-03-27 21:55:42 +030010094 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10095 * height from 8 lines up to the cursor width, when the
10096 * cursor is not rotated. Everything else requires square
10097 * cursors.
Ville Syrjälädc41c152014-08-13 11:57:05 +030010098 */
Ville Syrjälä024faac2017-03-27 21:55:42 +030010099 if (HAS_CUR_FBC(dev_priv) &&
Dave Airliea82256b2017-05-30 15:25:28 +100010100 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
Ville Syrjälä024faac2017-03-27 21:55:42 +030010101 if (height < 8 || height > width)
Ville Syrjälädc41c152014-08-13 11:57:05 +030010102 return false;
10103 } else {
Ville Syrjälä024faac2017-03-27 21:55:42 +030010104 if (height != width)
Ville Syrjälädc41c152014-08-13 11:57:05 +030010105 return false;
Ville Syrjälädc41c152014-08-13 11:57:05 +030010106 }
10107
10108 return true;
10109}
10110
/*
 * Atomic check for the i9xx-style cursor plane: runs the common cursor
 * checks, validates the platform size/stride constraints, rejects the
 * known-bad CHV pipe C left-edge configuration, and precomputes CURCNTR.
 *
 * Returns 0 on success or a negative error code.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* Visible cursors must scan out at the fb's own pitch. */
	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The fb must be packed: pitch == width * bytes-per-pixel. */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
10165
/*
 * Program the i9xx-style cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL / invisible (all register values 0).
 *
 * Register access happens under the uncore lock with the _FW accessors;
 * the exact write ordering is significant, see the comment below.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl;

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		/* Position-only change; CURBASE still arms the update. */
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
Ville Syrjälä5efb3e22014-04-09 13:28:53 +030010229
/* Disable the i9xx-style cursor by programming it with a NULL plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
Ville Syrjäläd6e4db12013-09-04 18:25:31 +030010235
/*
 * Read out whether the i9xx-style cursor is enabled in hardware and which
 * pipe it is assigned to.
 *
 * Takes a power reference for the plane's pipe for the duration of the
 * register read; returns false if the power well is off. On gen2-3 the
 * pipe is decoded from the CURCNTR pipe-select bits, otherwise it is
 * simply the plane's own pipe.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(CURCNTR(plane->pipe));

	ret = val & MCURSOR_MODE;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
Chris Wilsoncda4b7d2010-07-09 08:45:04 +010010269
/*
 * VESA 640x480x72Hz mode to set on the pipe.
 * Used by intel_get_load_detect_pipe() as the fallback mode when the
 * caller does not supply one.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10275
Daniel Vettera8bb6812014-02-10 18:00:39 +010010276struct drm_framebuffer *
Chris Wilson24dbf512017-02-15 10:59:18 +000010277intel_framebuffer_create(struct drm_i915_gem_object *obj,
10278 struct drm_mode_fb_cmd2 *mode_cmd)
Chris Wilsond2dff872011-04-19 08:36:26 +010010279{
10280 struct intel_framebuffer *intel_fb;
10281 int ret;
10282
10283 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020010284 if (!intel_fb)
Chris Wilsond2dff872011-04-19 08:36:26 +010010285 return ERR_PTR(-ENOMEM);
Chris Wilsond2dff872011-04-19 08:36:26 +010010286
Chris Wilson24dbf512017-02-15 10:59:18 +000010287 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
Daniel Vetterdd4916c2013-10-09 21:23:51 +020010288 if (ret)
10289 goto err;
Chris Wilsond2dff872011-04-19 08:36:26 +010010290
10291 return &intel_fb->base;
Daniel Vetterdd4916c2013-10-09 21:23:51 +020010292
Lukas Wunnerdcb13942015-07-04 11:50:58 +020010293err:
10294 kfree(intel_fb);
Daniel Vetterdd4916c2013-10-09 21:23:51 +020010295 return ERR_PTR(ret);
Chris Wilsond2dff872011-04-19 08:36:26 +010010296}
10297
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010298static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10299 struct drm_crtc *crtc)
Chris Wilsond2dff872011-04-19 08:36:26 +010010300{
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010301 struct drm_plane *plane;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010302 struct drm_plane_state *plane_state;
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010303 int ret, i;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010304
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010305 ret = drm_atomic_add_affected_planes(state, crtc);
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010306 if (ret)
10307 return ret;
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010308
10309 for_each_new_plane_in_state(state, plane, plane_state, i) {
10310 if (plane_state->crtc != crtc)
10311 continue;
10312
10313 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10314 if (ret)
10315 return ret;
10316
10317 drm_atomic_set_fb_for_plane(plane_state, NULL);
10318 }
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010319
10320 return 0;
10321}
10322
/*
 * intel_get_load_detect_pipe - light up a pipe for load detection
 * @connector: connector to drive during the detection cycle
 * @mode: mode to set, or NULL to use the default load_detect_mode
 * @old: receives the state needed by intel_release_load_detect_pipe()
 * @ctx: modeset lock acquisition context held by the caller
 *
 * Picks a CRTC for @connector (its currently assigned one, or the first
 * unused CRTC the encoder can drive), commits an atomic state enabling
 * that CRTC with @mode and all planes disabled, and stashes a duplicated
 * copy of the previous state in @old->restore_state for later restore.
 *
 * Return value is an int used as a tri-state: true on success, false on
 * failure, or -EDEADLK when the caller must back off @ctx and retry.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	/* Caller must already hold the connection mutex via @ctx. */
	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip CRTCs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* CRTC already in use; drop its lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to commit now, one to restore from later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Load detection wants a bare pipe: shut off all its planes. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the pre-detection state so it can be restored. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* Success: hand the restore state to the caller via @old. */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	/* Drop references to both states on any failure path. */
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK is propagated so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
10480
/*
 * intel_release_load_detect_pipe - undo intel_get_load_detect_pipe()
 * @connector: connector that was driven for load detection
 * @old: state saved by the matching intel_get_load_detect_pipe() call
 * @ctx: modeset lock acquisition context
 *
 * Commits the duplicated pre-detection state saved in @old->restore_state
 * (if any) and drops the reference to it. A commit failure is only
 * logged; there is no way to recover at this point.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	/* Nothing to restore if load detection never committed a state. */
	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
10503
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010504static int i9xx_pll_refclk(struct drm_device *dev,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010505 const struct intel_crtc_state *pipe_config)
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010506{
Chris Wilsonfac5e232016-07-04 11:34:36 +010010507 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010508 u32 dpll = pipe_config->dpll_hw_state.dpll;
10509
10510 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
Ville Syrjäläe91e9412013-12-09 18:54:16 +020010511 return dev_priv->vbt.lvds_ssc_freq;
Tvrtko Ursulin6e266952016-10-13 11:02:53 +010010512 else if (HAS_PCH_SPLIT(dev_priv))
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010513 return 120000;
Lucas De Marchicf819ef2018-12-12 10:10:43 -080010514 else if (!IS_GEN(dev_priv, 2))
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010515 return 96000;
10516 else
10517 return 48000;
10518}
10519
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values captured in
 * pipe_config->dpll_hw_state back into m/n/p divisors and computes the
 * resulting port clock from the reference clock. The result is stored
 * in pipe_config->port_clock; on an unrecognized DPLL mode the function
 * logs and returns without updating it.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* DISPLAY_RATE_SELECT_FPA1 picks which FP divisor register is live. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Pineview uses a different FP register layout for n/m2. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* gen3+: p1 is encoded as a one-hot field. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* p2 depends on the DPLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* gen2: i830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10609
Ville Syrjälä6878da02013-09-13 15:59:11 +030010610int intel_dotclock_calculate(int link_freq,
10611 const struct intel_link_m_n *m_n)
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010612{
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010613 /*
10614 * The calculation for the data clock is:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010615 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010616 * But we want to avoid losing precison if possible, so:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010617 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010618 *
10619 * and the link clock is simpler:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010620 * link_clock = (m * link_clock) / n
Jesse Barnes79e53942008-11-07 14:24:08 -080010621 */
10622
Ville Syrjälä6878da02013-09-13 15:59:11 +030010623 if (!m_n->link_n)
10624 return 0;
10625
Chris Wilson31236982017-09-13 11:51:53 +010010626 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
Ville Syrjälä6878da02013-09-13 15:59:11 +030010627}
10628
/*
 * Read out the clocks for a PCH-connected pipe: port_clock comes from
 * the DPLL readout, and crtc_clock is derived from the FDI link
 * frequency and the FDI M/N values.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
10646
Ville Syrjäläde330812017-10-09 19:19:50 +030010647/* Returns the currently programmed mode of the given encoder. */
10648struct drm_display_mode *
10649intel_encoder_current_mode(struct intel_encoder *encoder)
Jesse Barnes79e53942008-11-07 14:24:08 -080010650{
Ville Syrjäläde330812017-10-09 19:19:50 +030010651 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10652 struct intel_crtc_state *crtc_state;
Jesse Barnes79e53942008-11-07 14:24:08 -080010653 struct drm_display_mode *mode;
Ville Syrjäläde330812017-10-09 19:19:50 +030010654 struct intel_crtc *crtc;
10655 enum pipe pipe;
10656
10657 if (!encoder->get_hw_state(encoder, &pipe))
10658 return NULL;
10659
10660 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Jesse Barnes79e53942008-11-07 14:24:08 -080010661
10662 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10663 if (!mode)
10664 return NULL;
10665
Ville Syrjäläde330812017-10-09 19:19:50 +030010666 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10667 if (!crtc_state) {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +000010668 kfree(mode);
10669 return NULL;
10670 }
10671
Ville Syrjäläde330812017-10-09 19:19:50 +030010672 crtc_state->base.crtc = &crtc->base;
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010673
Ville Syrjäläde330812017-10-09 19:19:50 +030010674 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10675 kfree(crtc_state);
10676 kfree(mode);
10677 return NULL;
10678 }
Ville Syrjäläe30a1542016-04-01 18:37:25 +030010679
Ville Syrjäläde330812017-10-09 19:19:50 +030010680 encoder->get_config(encoder, crtc_state);
Ville Syrjäläe30a1542016-04-01 18:37:25 +030010681
Ville Syrjäläde330812017-10-09 19:19:50 +030010682 intel_mode_from_pipe_config(mode, crtc_state);
Jesse Barnes79e53942008-11-07 14:24:08 -080010683
Ville Syrjäläde330812017-10-09 19:19:50 +030010684 kfree(crtc_state);
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +000010685
Jesse Barnes79e53942008-11-07 14:24:08 -080010686 return mode;
10687}
10688
/* Tear down the DRM CRTC and free the enclosing intel_crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(to_intel_crtc(crtc));
}
10696
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010697/**
10698 * intel_wm_need_update - Check whether watermarks need updating
Chris Wilson6bf19812018-12-31 14:35:05 +000010699 * @cur: current plane state
10700 * @new: new plane state
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010701 *
10702 * Check current plane state versus the new one to determine whether
10703 * watermarks need to be recalculated.
10704 *
10705 * Returns true or false.
10706 */
Matt Ropercd1d3ee2018-12-10 13:54:14 -080010707static bool intel_wm_need_update(struct intel_plane_state *cur,
10708 struct intel_plane_state *new)
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010709{
Matt Roperd21fbe82015-09-24 15:53:12 -070010710 /* Update watermarks on tiling or size changes. */
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010711 if (new->base.visible != cur->base.visible)
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010712 return true;
10713
10714 if (!cur->base.fb || !new->base.fb)
10715 return false;
10716
Ville Syrjäläbae781b2016-11-16 13:33:16 +020010717 if (cur->base.fb->modifier != new->base.fb->modifier ||
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010718 cur->base.rotation != new->base.rotation ||
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010719 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10720 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10721 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10722 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010723 return true;
10724
10725 return false;
10726}
10727
Ville Syrjäläb2b55502017-08-23 18:22:23 +030010728static bool needs_scaling(const struct intel_plane_state *state)
Matt Roperd21fbe82015-09-24 15:53:12 -070010729{
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010730 int src_w = drm_rect_width(&state->base.src) >> 16;
10731 int src_h = drm_rect_height(&state->base.src) >> 16;
10732 int dst_w = drm_rect_width(&state->base.dst);
10733 int dst_h = drm_rect_height(&state->base.dst);
Matt Roperd21fbe82015-09-24 15:53:12 -070010734
10735 return (src_w != dst_w || src_h != dst_h);
10736}
10737
Ville Syrjäläb2b55502017-08-23 18:22:23 +030010738int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
10739 struct drm_crtc_state *crtc_state,
10740 const struct intel_plane_state *old_plane_state,
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010741 struct drm_plane_state *plane_state)
10742{
Maarten Lankhorstab1d3a02015-11-19 16:07:14 +010010743 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010744 struct drm_crtc *crtc = crtc_state->crtc;
10745 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010746 struct intel_plane *plane = to_intel_plane(plane_state->plane);
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010747 struct drm_device *dev = crtc->dev;
Matt Ropered4a6a72016-02-23 17:20:13 -080010748 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010749 bool mode_changed = needs_modeset(crtc_state);
Ville Syrjäläb2b55502017-08-23 18:22:23 +030010750 bool was_crtc_enabled = old_crtc_state->base.active;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010751 bool is_crtc_enabled = crtc_state->active;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010752 bool turn_off, turn_on, visible, was_visible;
10753 struct drm_framebuffer *fb = plane_state->fb;
Ville Syrjälä78108b72016-05-27 20:59:19 +030010754 int ret;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010755
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010756 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010757 ret = skl_update_scaler_plane(
10758 to_intel_crtc_state(crtc_state),
10759 to_intel_plane_state(plane_state));
10760 if (ret)
10761 return ret;
10762 }
10763
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010764 was_visible = old_plane_state->base.visible;
Maarten Lankhorst1d4258d2017-01-12 10:43:45 +010010765 visible = plane_state->visible;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010766
10767 if (!was_crtc_enabled && WARN_ON(was_visible))
10768 was_visible = false;
10769
Maarten Lankhorst35c08f42015-12-03 14:31:07 +010010770 /*
10771 * Visibility is calculated as if the crtc was on, but
10772 * after scaler setup everything depends on it being off
10773 * when the crtc isn't active.
Ville Syrjäläf818ffe2016-04-29 17:31:18 +030010774 *
10775 * FIXME this is wrong for watermarks. Watermarks should also
10776 * be computed as if the pipe would be active. Perhaps move
10777 * per-plane wm computation to the .check_plane() hook, and
10778 * only combine the results from all planes in the current place?
Maarten Lankhorst35c08f42015-12-03 14:31:07 +010010779 */
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010780 if (!is_crtc_enabled) {
Maarten Lankhorst1d4258d2017-01-12 10:43:45 +010010781 plane_state->visible = visible = false;
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010782 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
10783 }
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010784
10785 if (!was_visible && !visible)
10786 return 0;
10787
Maarten Lankhorste8861672016-02-24 11:24:26 +010010788 if (fb != old_plane_state->base.fb)
10789 pipe_config->fb_changed = true;
10790
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010791 turn_off = was_visible && (!visible || mode_changed);
10792 turn_on = visible && (!was_visible || mode_changed);
10793
Ville Syrjälä72660ce2016-05-27 20:59:20 +030010794 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010795 intel_crtc->base.base.id, intel_crtc->base.name,
10796 plane->base.base.id, plane->base.name,
Ville Syrjälä72660ce2016-05-27 20:59:20 +030010797 fb ? fb->base.id : -1);
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010798
Ville Syrjälä72660ce2016-05-27 20:59:20 +030010799 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010800 plane->base.base.id, plane->base.name,
Ville Syrjälä72660ce2016-05-27 20:59:20 +030010801 was_visible, visible,
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010802 turn_off, turn_on, mode_changed);
10803
Ville Syrjäläcaed3612016-03-09 19:07:25 +020010804 if (turn_on) {
Ville Syrjälä04548cb2017-04-21 21:14:29 +030010805 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjäläb4ede6d2017-03-02 19:15:01 +020010806 pipe_config->update_wm_pre = true;
Ville Syrjäläcaed3612016-03-09 19:07:25 +020010807
10808 /* must disable cxsr around plane enable/disable */
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010809 if (plane->id != PLANE_CURSOR)
Ville Syrjäläcaed3612016-03-09 19:07:25 +020010810 pipe_config->disable_cxsr = true;
10811 } else if (turn_off) {
Ville Syrjälä04548cb2017-04-21 21:14:29 +030010812 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjäläb4ede6d2017-03-02 19:15:01 +020010813 pipe_config->update_wm_post = true;
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010814
Ville Syrjälä852eb002015-06-24 22:00:07 +030010815 /* must disable cxsr around plane enable/disable */
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010816 if (plane->id != PLANE_CURSOR)
Maarten Lankhorstab1d3a02015-11-19 16:07:14 +010010817 pipe_config->disable_cxsr = true;
Matt Ropercd1d3ee2018-12-10 13:54:14 -080010818 } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
10819 to_intel_plane_state(plane_state))) {
Ville Syrjälä04548cb2017-04-21 21:14:29 +030010820 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
Ville Syrjäläb4ede6d2017-03-02 19:15:01 +020010821 /* FIXME bollocks */
10822 pipe_config->update_wm_pre = true;
10823 pipe_config->update_wm_post = true;
10824 }
Ville Syrjälä852eb002015-06-24 22:00:07 +030010825 }
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010826
Rodrigo Vivi8be6ca82015-08-24 16:38:23 -070010827 if (visible || was_visible)
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010828 pipe_config->fb_bits |= plane->frontbuffer_bit;
Ville Syrjäläa9ff8712015-06-24 21:59:34 +030010829
Maarten Lankhorst31ae71f2016-03-09 10:35:45 +010010830 /*
Ville Syrjälä8e7a4422018-10-04 15:15:27 +030010831 * ILK/SNB DVSACNTR/Sprite Enable
10832 * IVB SPR_CTL/Sprite Enable
10833 * "When in Self Refresh Big FIFO mode, a write to enable the
10834 * plane will be internally buffered and delayed while Big FIFO
10835 * mode is exiting."
Maarten Lankhorst31ae71f2016-03-09 10:35:45 +010010836 *
Ville Syrjälä8e7a4422018-10-04 15:15:27 +030010837 * Which means that enabling the sprite can take an extra frame
10838 * when we start in big FIFO mode (LP1+). Thus we need to drop
10839 * down to LP0 and wait for vblank in order to make sure the
10840 * sprite gets enabled on the next vblank after the register write.
10841 * Doing otherwise would risk enabling the sprite one frame after
10842 * we've already signalled flip completion. We can resume LP1+
10843 * once the sprite has been enabled.
10844 *
10845 *
10846 * WaCxSRDisabledForSpriteScaling:ivb
10847 * IVB SPR_SCALE/Scaling Enable
10848 * "Low Power watermarks must be disabled for at least one
10849 * frame before enabling sprite scaling, and kept disabled
10850 * until sprite scaling is disabled."
10851 *
10852 * ILK/SNB DVSASCALE/Scaling Enable
10853 * "When in Self Refresh Big FIFO mode, scaling enable will be
10854 * masked off while Big FIFO mode is exiting."
10855 *
10856 * Despite the w/a only being listed for IVB we assume that
10857 * the ILK/SNB note has similar ramifications, hence we apply
10858 * the w/a on all three platforms.
Juha-Pekka Heikkilad8af3272018-12-20 13:26:08 +020010859 *
10860 * With experimental results seems this is needed also for primary
10861 * plane, not only sprite plane.
Maarten Lankhorst31ae71f2016-03-09 10:35:45 +010010862 */
Juha-Pekka Heikkilad8af3272018-12-20 13:26:08 +020010863 if (plane->id != PLANE_CURSOR &&
Lucas De Marchif3ce44a2018-12-12 10:10:44 -080010864 (IS_GEN_RANGE(dev_priv, 5, 6) ||
Ville Syrjälä8e7a4422018-10-04 15:15:27 +030010865 IS_IVYBRIDGE(dev_priv)) &&
10866 (turn_on || (!needs_scaling(old_plane_state) &&
10867 needs_scaling(to_intel_plane_state(plane_state)))))
Maarten Lankhorst31ae71f2016-03-09 10:35:45 +010010868 pipe_config->disable_lp_wm = true;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010869
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010870 return 0;
10871}
10872
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010873static bool encoders_cloneable(const struct intel_encoder *a,
10874 const struct intel_encoder *b)
10875{
10876 /* masks could be asymmetric, so check both ways */
10877 return a == b || (a->cloneable & (1 << b->type) &&
10878 b->cloneable & (1 << a->type));
10879}
10880
10881static bool check_single_encoder_cloning(struct drm_atomic_state *state,
10882 struct intel_crtc *crtc,
10883 struct intel_encoder *encoder)
10884{
10885 struct intel_encoder *source_encoder;
10886 struct drm_connector *connector;
10887 struct drm_connector_state *connector_state;
10888 int i;
10889
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010010890 for_each_new_connector_in_state(state, connector, connector_state, i) {
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010891 if (connector_state->crtc != &crtc->base)
10892 continue;
10893
10894 source_encoder =
10895 to_intel_encoder(connector_state->best_encoder);
10896 if (!encoders_cloneable(encoder, source_encoder))
10897 return false;
10898 }
10899
10900 return true;
10901}
10902
Maarten Lankhorst1ab554b2018-10-22 15:51:52 +020010903static int icl_add_linked_planes(struct intel_atomic_state *state)
10904{
10905 struct intel_plane *plane, *linked;
10906 struct intel_plane_state *plane_state, *linked_plane_state;
10907 int i;
10908
10909 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10910 linked = plane_state->linked_plane;
10911
10912 if (!linked)
10913 continue;
10914
10915 linked_plane_state = intel_atomic_get_plane_state(state, linked);
10916 if (IS_ERR(linked_plane_state))
10917 return PTR_ERR(linked_plane_state);
10918
10919 WARN_ON(linked_plane_state->linked_plane != plane);
10920 WARN_ON(linked_plane_state->slave == plane_state->slave);
10921 }
10922
10923 return 0;
10924}
10925
/*
 * Gen11+: planes scanning out NV12 need a second, Y-capable plane linked
 * to them as a slave. (Re)compute those master/slave links for the pipe
 * owned by @crtc_state.
 *
 * Pass 1 tears down all existing links on this pipe, removing invisible
 * slave planes from the active/update plane masks. Pass 2 links a free
 * Y-capable plane to every plane in crtc_state->nv12_planes.
 *
 * Returns 0 on success, -EINVAL when no free Y plane is available, or
 * the error from intel_atomic_get_plane_state().
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+ hardware. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
			continue;

		plane_state->linked_plane = NULL;
		if (plane_state->slave && !plane_state->base.visible) {
			/* Invisible slave: drop it from the pipe's plane set
			 * but make sure it still gets reprogrammed. */
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->slave = false;
	}

	/* Nothing to link when no plane uses an NV12 format. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a Y-capable plane on this pipe that is not in use. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		/* Establish the reciprocal master/slave link. */
		plane_state->linked_plane = linked;

		linked_state->slave = true;
		linked_state->linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
	}

	return 0;
}
10997
/*
 * Atomic check hook for i915 CRTCs (drm_crtc_helper_funcs.atomic_check).
 *
 * Computes and validates the i915-specific derived state for the new
 * CRTC state: port clock / DPLL, color management, pipe watermarks,
 * scalers and NV12 plane links (gen9+/gen11+), and IPS. Returns 0 on
 * success or a negative error code to fail the atomic check.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Modeset that leaves the pipe disabled: request only the
	 * post-update watermark pass. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/*
	 * A full modeset on an enabled pipe needs new clocks. No shared
	 * DPLL may be assigned yet at this point (WARN otherwise).
	 */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(pipe_config);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate watermarks only make sense when target
		 * watermarks were computed above. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state. We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || pipe_config->update_pipe)
			ret = skl_update_scaler_crtc(pipe_config);

		/* The remaining gen9+ checks run only while no earlier
		 * step has failed. */
		if (!ret)
			ret = icl_check_nv12_planes(pipe_config);
		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
11076
/* CRTC helper vtable: routes the DRM core's atomic check to i915. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_check = intel_crtc_atomic_check,
};
11080
/*
 * Sync every connector's atomic state (best_encoder/crtc) with the
 * legacy connector->encoder link, fixing up the connector reference
 * held by the state: the old reference is dropped when the state had a
 * crtc, and a new one is taken when the connector is bound to an
 * encoder.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previously bound state. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* State now points at this connector again. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
11105
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011106static int
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011107compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11108 struct intel_crtc_state *pipe_config)
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011109{
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011110 struct drm_connector *connector = conn_state->connector;
11111 const struct drm_display_info *info = &connector->display_info;
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011112 int bpp;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011113
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011114 switch (conn_state->max_bpc) {
11115 case 6 ... 7:
11116 bpp = 6 * 3;
11117 break;
11118 case 8 ... 9:
11119 bpp = 8 * 3;
11120 break;
11121 case 10 ... 11:
11122 bpp = 10 * 3;
11123 break;
11124 case 12:
11125 bpp = 12 * 3;
11126 break;
11127 default:
11128 return -EINVAL;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011129 }
11130
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011131 if (bpp < pipe_config->pipe_bpp) {
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011132 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11133 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11134 connector->base.id, connector->name,
11135 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011136 pipe_config->pipe_bpp);
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011137
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011138 pipe_config->pipe_bpp = bpp;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011139 }
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011140
Radhakrishna Sripadaf1a12172018-10-22 18:44:00 -070011141 return 0;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011142}
11143
11144static int
11145compute_baseline_pipe_bpp(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020011146 struct intel_crtc_state *pipe_config)
Daniel Vetter050f7ae2013-06-02 13:26:23 +020011147{
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010011148 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011149 struct drm_atomic_state *state = pipe_config->base.state;
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030011150 struct drm_connector *connector;
11151 struct drm_connector_state *connector_state;
Ander Conselvan de Oliveira14860172015-03-20 16:18:09 +020011152 int bpp, i;
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011153
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010011154 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11155 IS_CHERRYVIEW(dev_priv)))
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011156 bpp = 10*3;
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010011157 else if (INTEL_GEN(dev_priv) >= 5)
Daniel Vetterd328c9d2015-04-10 16:22:37 +020011158 bpp = 12*3;
11159 else
11160 bpp = 8*3;
11161
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011162 pipe_config->pipe_bpp = bpp;
11163
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011164 /* Clamp display bpp to connector max bpp */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011165 for_each_new_connector_in_state(state, connector, connector_state, i) {
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011166 int ret;
11167
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030011168 if (connector_state->crtc != &crtc->base)
Ander Conselvan de Oliveira14860172015-03-20 16:18:09 +020011169 continue;
11170
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011171 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11172 if (ret)
11173 return ret;
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011174 }
11175
Ville Syrjäläbcce8d82018-11-07 23:35:22 +020011176 return 0;
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011177}
11178
Daniel Vetter644db712013-09-19 14:53:58 +020011179static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11180{
11181 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11182 "type: 0x%x flags: 0x%x\n",
Damien Lespiau13428302013-09-25 16:45:36 +010011183 mode->crtc_clock,
Daniel Vetter644db712013-09-19 14:53:58 +020011184 mode->crtc_hdisplay, mode->crtc_hsync_start,
11185 mode->crtc_hsync_end, mode->crtc_htotal,
11186 mode->crtc_vdisplay, mode->crtc_vsync_start,
11187 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
11188}
11189
Tvrtko Ursulinf6982332016-11-17 12:30:08 +000011190static inline void
11191intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
Tvrtko Ursulina4309652016-11-17 12:30:09 +000011192 unsigned int lane_count, struct intel_link_m_n *m_n)
Tvrtko Ursulinf6982332016-11-17 12:30:08 +000011193{
Tvrtko Ursulina4309652016-11-17 12:30:09 +000011194 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11195 id, lane_count,
Tvrtko Ursulinf6982332016-11-17 12:30:08 +000011196 m_n->gmch_m, m_n->gmch_n,
11197 m_n->link_m, m_n->link_n, m_n->tu);
11198}
11199
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/*
 * Printable names for the INTEL_OUTPUT_* encoder types, indexed by the
 * INTEL_OUTPUT_* value itself; consumed by snprintf_output_types().
 */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
11218
11219static void snprintf_output_types(char *buf, size_t len,
11220 unsigned int output_types)
11221{
11222 char *str = buf;
11223 int i;
11224
11225 str[0] = '\0';
11226
11227 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11228 int r;
11229
11230 if ((output_types & BIT(i)) == 0)
11231 continue;
11232
11233 r = snprintf(str, len, "%s%s",
11234 str != buf ? "," : "", output_type_str[i]);
11235 if (r >= len)
11236 break;
11237 str += r;
11238 len -= r;
11239
11240 output_types &= ~BIT(i);
11241 }
11242
11243 WARN_ON_ONCE(output_types != 0);
11244}
11245
/* Printable names for enum intel_output_format values, indexed by format. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
11252
11253static const char *output_formats(enum intel_output_format format)
11254{
Shashank Sharma33b7f3e2018-10-12 11:53:08 +053011255 if (format >= ARRAY_SIZE(output_format_str))
Shashank Sharmad9facae2018-10-12 11:53:07 +053011256 format = INTEL_OUTPUT_FORMAT_INVALID;
11257 return output_format_str[format];
11258}
11259
/*
 * Dump the contents of @pipe_config, plus the state of every plane on
 * @crtc's pipe, to the kernel log at KMS debug level. @context is
 * appended to the leading [CRTC:...] line to identify the caller.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("output format: %s\n",
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	/* FDI M/N only exists with a PCH encoder. */
	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* Second M/N set is only valid when DRRS is in use. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms use the gmch pfit; everything else the pch pfit. */
	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* Walk all planes, skipping those that belong to other pipes. */
	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		/* src coordinates are 16.16 fixed point, hence the shifts. */
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
11366
/*
 * Check that no digital port is claimed by more than one encoder in
 * @state, and that MST and SST/HDMI usage is not mixed on the same
 * port. Returns false when a conflict is found.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current one when
		 * the connector is not part of this atomic state. */
		connector_state = drm_atomic_get_new_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
		unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
11430
/*
 * Zero the i915-specific extension of @crtc_state, as if it were
 * freshly kzalloc'd, while leaving the drm base state untouched and
 * preserving the fields that must survive: scaler state, shared DPLL
 * selection and its hw state, pch pfit force_thru, ips_force_disable,
 * and (on G4X/VLV/CHV only) the watermark state.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru, ips_force_disable;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Save the fields that must survive the memset below. */
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	ips_force_disable = crtc_state->ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	/* Restore the preserved fields. */
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	crtc_state->ips_force_disable = ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}
11470
/*
 * Compute the full software pipe configuration for @crtc.
 *
 * Wipes the derived state (clear_intel_crtc_state()), seeds defaults,
 * then lets every connected encoder's .compute_config() hook and the
 * CRTC's own compute step adjust or reject the mode. A single RETRY
 * round-trip is allowed for bandwidth-constrained configs.
 *
 * Returns 0 on success or a negative errno; -EDEADLK is passed through
 * silently so the atomic core can back off and retry locking.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	/* One RETRY from intel_crtc_compute_config() is honoured, a second
	 * would indicate a loop and is turned into -EINVAL below. */
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* Pick the starting pipe_bpp; encoders/CRTC may lower it later. */
	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the baseline for the debug print at the end. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	/* First connector pass: validate cloning and collect output_types. */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is normal backoff, don't spam the log. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
11605
/*
 * Fuzzy equality for clock values: identical clocks always match, a
 * zero clock never matches a non-zero one, and otherwise the clocks
 * match when their delta stays below ~5% of their sum (integer math).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	delta = abs(clock1 - clock2);

	return (delta + sum) * 100 / sum < 105;
}
11623
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011624static bool
11625intel_compare_m_n(unsigned int m, unsigned int n,
11626 unsigned int m2, unsigned int n2,
11627 bool exact)
11628{
11629 if (m == m2 && n == n2)
11630 return true;
11631
11632 if (exact || !m || !n || !m2 || !n2)
11633 return false;
11634
11635 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11636
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011637 if (n > n2) {
11638 while (n > n2) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011639 m2 <<= 1;
11640 n2 <<= 1;
11641 }
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011642 } else if (n < n2) {
11643 while (n < n2) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011644 m <<= 1;
11645 n <<= 1;
11646 }
11647 }
11648
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011649 if (n != n2)
11650 return false;
11651
11652 return intel_fuzzy_clock_check(m, m2);
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011653}
11654
11655static bool
11656intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11657 struct intel_link_m_n *m2_n2,
11658 bool adjust)
11659{
11660 if (m_n->tu == m2_n2->tu &&
11661 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11662 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11663 intel_compare_m_n(m_n->link_m, m_n->link_n,
11664 m2_n2->link_m, m2_n2->link_n, !adjust)) {
11665 if (adjust)
11666 *m2_n2 = *m_n;
11667
11668 return true;
11669 }
11670
11671 return false;
11672}
11673
/*
 * Report a pipe config mismatch for field @name with a printf-style
 * detail message. When @adjust is set (fastset path) the mismatch is
 * only a debug message; otherwise it is a real error.
 */
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	/* vaf carries the format + args so "%pV" can expand them; the
	 * va_list must stay live until after the drm_dbg/drm_err call. */
	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (adjust)
		drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
	else
		drm_err("mismatch in %s %pV", name, &vaf);

	va_end(args);
}
11691
/*
 * Compare two crtc states field by field, reporting every mismatch via
 * pipe_config_err(). Returns true when the states are considered equal.
 *
 * @adjust selects the fastset path: mismatches log at debug level
 * instead of error, M/N values are compared fuzzily, and on a fuzzy
 * M/N match intel_compare_link_m_n() copies the expected values into
 * @pipe_config. All checks run to completion (no early return) so
 * every mismatch is reported, accumulating into @ret.
 */
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;
	/* True when fastsetting on top of state inherited from the
	 * firmware (I915_MODE_FLAG_INHERITED set on old, clear on new). */
	bool fixup_inherited = adjust &&
		(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
		!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);

	/* Without fastboot the inherited state always forces a modeset. */
	if (fixup_inherited && !i915_modparams.fastboot) {
		DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
		ret = false;
	}

/* Exact compare, reported in hex. */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Exact compare, reported in decimal. */
#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Exact compare of a bool, reported as yes/no. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %s, found %s)\n", \
			  yesno(current_config->name), \
			  yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_err(adjust, __stringify(name), \
			  "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
			  yesno(current_config->name), \
			  yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/* Exact compare of a pointer (e.g. the shared DPLL). */
#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %p, found %p)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* Link M/N compare; fuzzy (and self-adjusting) when adjust is set. */
#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "or tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  current_config->alt_name.tu, \
			  current_config->alt_name.gmch_m, \
			  current_config->alt_name.gmch_n, \
			  current_config->alt_name.link_m, \
			  current_config->alt_name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* Compare only the bits selected by mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(%x) (expected %i, found %i)\n", \
			  (mask), \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

/* Clock compare with intel_fuzzy_clock_check() tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_err(adjust, __stringify(name), \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	} \
} while (0)

/* True when either state carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* Adjusted mode timings, horizontal then vertical. */
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/* These checks are skipped on the fastset (adjust) path. */
	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	/* DPLL selection and the full per-platform DPLL hw state. */
	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return ret;
}
11980
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020011981static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11982 const struct intel_crtc_state *pipe_config)
11983{
11984 if (pipe_config->has_pch_encoder) {
Ville Syrjälä21a727b2016-02-17 21:41:10 +020011985 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020011986 &pipe_config->fdi_m_n);
11987 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11988
11989 /*
11990 * FDI already provided one idea for the dotclock.
11991 * Yell if the encoder disagrees.
11992 */
11993 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11994 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11995 fdi_dotclock, dotclock);
11996 }
11997}
11998
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020011999static void verify_wm_state(struct drm_crtc *crtc,
12000 struct drm_crtc_state *new_state)
Damien Lespiau08db6652014-11-04 17:06:52 +000012001{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000012002 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Damien Lespiau08db6652014-11-04 17:06:52 +000012003 struct skl_ddb_allocation hw_ddb, *sw_ddb;
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012004 struct skl_pipe_wm hw_wm, *sw_wm;
12005 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12006 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
Ville Syrjäläff43bc32018-11-27 18:59:00 +020012007 struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
12008 struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012009 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12010 const enum pipe pipe = intel_crtc->pipe;
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012011 int plane, level, max_level = ilk_wm_max_level(dev_priv);
Damien Lespiau08db6652014-11-04 17:06:52 +000012012
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000012013 if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
Damien Lespiau08db6652014-11-04 17:06:52 +000012014 return;
12015
Matt Ropercd1d3ee2018-12-10 13:54:14 -080012016 skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
Maarten Lankhorst03af79e2016-10-26 15:41:36 +020012017 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012018
Ville Syrjäläff43bc32018-11-27 18:59:00 +020012019 skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
12020
Damien Lespiau08db6652014-11-04 17:06:52 +000012021 skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12022 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12023
Mahesh Kumar74bd8002018-04-26 19:55:15 +053012024 if (INTEL_GEN(dev_priv) >= 11)
12025 if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
12026 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12027 sw_ddb->enabled_slices,
12028 hw_ddb.enabled_slices);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012029 /* planes */
Matt Roper8b364b42016-10-26 15:51:28 -070012030 for_each_universal_plane(dev_priv, pipe, plane) {
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012031 hw_plane_wm = &hw_wm.planes[plane];
12032 sw_plane_wm = &sw_wm->planes[plane];
Damien Lespiau08db6652014-11-04 17:06:52 +000012033
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012034 /* Watermarks */
12035 for (level = 0; level <= max_level; level++) {
12036 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12037 &sw_plane_wm->wm[level]))
12038 continue;
Damien Lespiau08db6652014-11-04 17:06:52 +000012039
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012040 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12041 pipe_name(pipe), plane + 1, level,
12042 sw_plane_wm->wm[level].plane_en,
12043 sw_plane_wm->wm[level].plane_res_b,
12044 sw_plane_wm->wm[level].plane_res_l,
12045 hw_plane_wm->wm[level].plane_en,
12046 hw_plane_wm->wm[level].plane_res_b,
12047 hw_plane_wm->wm[level].plane_res_l);
12048 }
12049
12050 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12051 &sw_plane_wm->trans_wm)) {
12052 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12053 pipe_name(pipe), plane + 1,
12054 sw_plane_wm->trans_wm.plane_en,
12055 sw_plane_wm->trans_wm.plane_res_b,
12056 sw_plane_wm->trans_wm.plane_res_l,
12057 hw_plane_wm->trans_wm.plane_en,
12058 hw_plane_wm->trans_wm.plane_res_b,
12059 hw_plane_wm->trans_wm.plane_res_l);
12060 }
12061
12062 /* DDB */
Ville Syrjäläff43bc32018-11-27 18:59:00 +020012063 hw_ddb_entry = &hw_ddb_y[plane];
12064 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012065
12066 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
cpaul@redhat.comfaccd992016-10-14 17:31:58 -040012067 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012068 pipe_name(pipe), plane + 1,
12069 sw_ddb_entry->start, sw_ddb_entry->end,
12070 hw_ddb_entry->start, hw_ddb_entry->end);
12071 }
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012072 }
12073
Lyude27082492016-08-24 07:48:10 +020012074 /*
12075 * cursor
12076 * If the cursor plane isn't active, we may not have updated it's ddb
12077 * allocation. In that case since the ddb allocation will be updated
12078 * once the plane becomes visible, we can skip this check
12079 */
Ville Syrjäläcd5dcbf2017-03-27 21:55:35 +030012080 if (1) {
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012081 hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
12082 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012083
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012084 /* Watermarks */
12085 for (level = 0; level <= max_level; level++) {
12086 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12087 &sw_plane_wm->wm[level]))
12088 continue;
12089
12090 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12091 pipe_name(pipe), level,
12092 sw_plane_wm->wm[level].plane_en,
12093 sw_plane_wm->wm[level].plane_res_b,
12094 sw_plane_wm->wm[level].plane_res_l,
12095 hw_plane_wm->wm[level].plane_en,
12096 hw_plane_wm->wm[level].plane_res_b,
12097 hw_plane_wm->wm[level].plane_res_l);
12098 }
12099
12100 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12101 &sw_plane_wm->trans_wm)) {
12102 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12103 pipe_name(pipe),
12104 sw_plane_wm->trans_wm.plane_en,
12105 sw_plane_wm->trans_wm.plane_res_b,
12106 sw_plane_wm->trans_wm.plane_res_l,
12107 hw_plane_wm->trans_wm.plane_en,
12108 hw_plane_wm->trans_wm.plane_res_b,
12109 hw_plane_wm->trans_wm.plane_res_l);
12110 }
12111
12112 /* DDB */
Ville Syrjäläff43bc32018-11-27 18:59:00 +020012113 hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
12114 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012115
12116 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
cpaul@redhat.comfaccd992016-10-14 17:31:58 -040012117 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
Lyude27082492016-08-24 07:48:10 +020012118 pipe_name(pipe),
cpaul@redhat.com3de8a142016-10-14 17:31:57 -040012119 sw_ddb_entry->start, sw_ddb_entry->end,
12120 hw_ddb_entry->start, hw_ddb_entry->end);
Lyude27082492016-08-24 07:48:10 +020012121 }
Damien Lespiau08db6652014-11-04 17:06:52 +000012122 }
12123}
12124
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012125static void
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012126verify_connector_state(struct drm_device *dev,
12127 struct drm_atomic_state *state,
12128 struct drm_crtc *crtc)
Daniel Vetter8af6cf82012-07-10 09:50:11 +020012129{
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +020012130 struct drm_connector *connector;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012131 struct drm_connector_state *new_conn_state;
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012132 int i;
Daniel Vetter8af6cf82012-07-10 09:50:11 +020012133
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012134 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +020012135 struct drm_encoder *encoder = connector->encoder;
Maarten Lankhorst749d98b2017-05-11 10:28:43 +020012136 struct drm_crtc_state *crtc_state = NULL;
Maarten Lankhorstad3c5582015-07-13 16:30:26 +020012137
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012138 if (new_conn_state->crtc != crtc)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012139 continue;
12140
Maarten Lankhorst749d98b2017-05-11 10:28:43 +020012141 if (crtc)
12142 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12143
12144 intel_connector_verify_state(crtc_state, new_conn_state);
Daniel Vetter8af6cf82012-07-10 09:50:11 +020012145
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012146 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
Maarten Lankhorst35dd3c62015-08-06 13:49:22 +020012147 "connector's atomic encoder doesn't match legacy encoder\n");
Daniel Vetter8af6cf82012-07-10 09:50:11 +020012148 }
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012149}
12150
/*
 * Verify encoder software state: every encoder referenced by any connector
 * in @state must have consistent crtc linkage, and an encoder that is not
 * attached to a crtc must really be disabled in hardware.
 */
static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(dev, encoder) {
		/*
		 * found: some connector referenced this encoder in either
		 * the old or the new state (i.e. the encoder is affected
		 * by this commit).
		 * enabled: a connector references it in the new state.
		 */
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
				"connector's crtc doesn't match encoder crtc\n");
		}

		/* Encoders untouched by this commit are not checked. */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
			"encoder's enabled state mismatch "
			"(expected %i, found %i)\n",
			!!encoder->base.crtc, enabled);

		/* A detached encoder must be off in hardware too. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
12199
/*
 * Read the full pipe configuration back from the hardware and compare it
 * against the software state computed during the atomic check phase.
 *
 * NOTE: this destroys the contents of @old_crtc_state and reuses its
 * memory as scratch space for the hardware readout; the caller must not
 * rely on @old_crtc_state afterwards.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* Recycle the old state's allocation as the hw readout buffer,
	 * preserving only the crtc and state back-pointers. */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Only query config from encoders that are really on. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Nothing further to compare for an inactive crtc. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
12269
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012270static void
Ville Syrjäläcff109f2017-11-17 21:19:17 +020012271intel_verify_planes(struct intel_atomic_state *state)
12272{
12273 struct intel_plane *plane;
12274 const struct intel_plane_state *plane_state;
12275 int i;
12276
12277 for_each_new_intel_plane_in_state(state, plane,
12278 plane_state, i)
12279 assert_plane(plane, plane_state->base.visible);
12280}
12281
/*
 * Verify one shared DPLL: compare its software refcount/on tracking with
 * the hardware enable state, and optionally (when @crtc is non-NULL)
 * check that @crtc is (or is not) accounted in the PLL's active/enabled
 * crtc masks according to @new_state->active.
 *
 * With @crtc == NULL only the global bookkeeping is checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs can't meaningfully be compared to sw tracking. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		/* Active crtcs must be a subset of the referencing crtcs. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	/* Regardless of active, the crtc must hold a reference. */
	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* If the PLL is on, the tracked hw state must match readout. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
12336
12337static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012338verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12339 struct drm_crtc_state *old_crtc_state,
12340 struct drm_crtc_state *new_crtc_state)
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012341{
Chris Wilsonfac5e232016-07-04 11:34:36 +010012342 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012343 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12344 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12345
12346 if (new_state->shared_dpll)
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012347 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012348
12349 if (old_state->shared_dpll &&
12350 old_state->shared_dpll != new_state->shared_dpll) {
Ville Syrjälä40560e22018-06-26 22:47:11 +030012351 unsigned int crtc_mask = drm_crtc_mask(crtc);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012352 struct intel_shared_dpll *pll = old_state->shared_dpll;
12353
12354 I915_STATE_WARN(pll->active_mask & crtc_mask,
12355 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12356 pipe_name(drm_crtc_index(crtc)));
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020012357 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012358 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12359 pipe_name(drm_crtc_index(crtc)));
12360 }
12361}
12362
12363static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012364intel_modeset_verify_crtc(struct drm_crtc *crtc,
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012365 struct drm_atomic_state *state,
12366 struct drm_crtc_state *old_state,
12367 struct drm_crtc_state *new_state)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012368{
Daniel Vetter5a21b662016-05-24 17:13:53 +020012369 if (!needs_modeset(new_state) &&
12370 !to_intel_crtc_state(new_state)->update_pipe)
12371 return;
12372
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012373 verify_wm_state(crtc, new_state);
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012374 verify_connector_state(crtc->dev, state, crtc);
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012375 verify_crtc_state(crtc, old_state, new_state);
12376 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012377}
12378
12379static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012380verify_disabled_dpll_state(struct drm_device *dev)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012381{
Chris Wilsonfac5e232016-07-04 11:34:36 +010012382 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012383 int i;
Daniel Vetter53589012013-06-05 13:34:16 +020012384
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012385 for (i = 0; i < dev_priv->num_shared_dpll; i++)
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012386 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012387}
Daniel Vetter53589012013-06-05 13:34:16 +020012388
/*
 * Global (non-per-crtc) verification after a modeset that disabled
 * pipes: encoder linkage, connectors that ended up with no crtc, and
 * the shared DPLL bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}
12397
Maarten Lankhorstf2bdd112018-10-11 12:04:52 +020012398static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
Ville Syrjälä80715b22014-05-15 20:23:23 +030012399{
Maarten Lankhorstf2bdd112018-10-11 12:04:52 +020012400 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010012401 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ville Syrjälä80715b22014-05-15 20:23:23 +030012402
12403 /*
12404 * The scanline counter increments at the leading edge of hsync.
12405 *
12406 * On most platforms it starts counting from vtotal-1 on the
12407 * first active line. That means the scanline counter value is
12408 * always one less than what we would expect. Ie. just after
12409 * start of vblank, which also occurs at start of hsync (on the
12410 * last active line), the scanline counter will read vblank_start-1.
12411 *
12412 * On gen2 the scanline counter starts counting from 1 instead
12413 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12414 * to keep the value positive), instead of adding one.
12415 *
12416 * On HSW+ the behaviour of the scanline counter depends on the output
12417 * type. For DP ports it behaves like most other platforms, but on HDMI
12418 * there's an extra 1 line difference. So we need to add two instead of
12419 * one to the value.
Ville Syrjäläec1b4ee2016-12-15 19:47:34 +020012420 *
12421 * On VLV/CHV DSI the scanline counter would appear to increment
12422 * approx. 1/3 of a scanline before start of vblank. Unfortunately
12423 * that means we can't tell whether we're in vblank or not while
12424 * we're on that particular line. We must still set scanline_offset
12425 * to 1 so that the vblank timestamps come out correct when we query
12426 * the scanline counter from within the vblank interrupt handler.
12427 * However if queried just before the start of vblank we'll get an
12428 * answer that's slightly in the future.
Ville Syrjälä80715b22014-05-15 20:23:23 +030012429 */
Lucas De Marchicf819ef2018-12-12 10:10:43 -080012430 if (IS_GEN(dev_priv, 2)) {
Maarten Lankhorstf2bdd112018-10-11 12:04:52 +020012431 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
Ville Syrjälä80715b22014-05-15 20:23:23 +030012432 int vtotal;
12433
Ville Syrjälä124abe02015-09-08 13:40:45 +030012434 vtotal = adjusted_mode->crtc_vtotal;
12435 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
Ville Syrjälä80715b22014-05-15 20:23:23 +030012436 vtotal /= 2;
12437
12438 crtc->scanline_offset = vtotal - 1;
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010012439 } else if (HAS_DDI(dev_priv) &&
Maarten Lankhorstf2bdd112018-10-11 12:04:52 +020012440 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
Ville Syrjälä80715b22014-05-15 20:23:23 +030012441 crtc->scanline_offset = 2;
12442 } else
12443 crtc->scanline_offset = 1;
12444}
12445
Maarten Lankhorstad421372015-06-15 12:33:42 +020012446static void intel_modeset_clear_plls(struct drm_atomic_state *state)
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012447{
Ander Conselvan de Oliveira225da592015-04-02 14:47:57 +030012448 struct drm_device *dev = state->dev;
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012449 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira0a9ab302015-04-21 17:13:04 +030012450 struct drm_crtc *crtc;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012451 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Ander Conselvan de Oliveira0a9ab302015-04-21 17:13:04 +030012452 int i;
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012453
12454 if (!dev_priv->display.crtc_compute_clock)
Maarten Lankhorstad421372015-06-15 12:33:42 +020012455 return;
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012456
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012457 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
Maarten Lankhorstfb1a38a2016-02-09 13:02:17 +010012458 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020012459 struct intel_shared_dpll *old_dpll =
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012460 to_intel_crtc_state(old_crtc_state)->shared_dpll;
Maarten Lankhorstad421372015-06-15 12:33:42 +020012461
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012462 if (!needs_modeset(new_crtc_state))
Ander Conselvan de Oliveira225da592015-04-02 14:47:57 +030012463 continue;
12464
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012465 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
Maarten Lankhorstfb1a38a2016-02-09 13:02:17 +010012466
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020012467 if (!old_dpll)
Maarten Lankhorstfb1a38a2016-02-09 13:02:17 +010012468 continue;
Ander Conselvan de Oliveira0a9ab302015-04-21 17:13:04 +030012469
Ander Conselvan de Oliveiraa1c414e2016-12-29 17:22:07 +020012470 intel_release_shared_dpll(old_dpll, intel_crtc, state);
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012471 }
Ander Conselvan de Oliveiraed6739e2015-01-29 16:55:08 +020012472}
12473
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being enabled; one of them
		 * will carry the workaround pipe of the other. */
		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* Pulls the crtc state (and its lock) into @state. */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		/* Only crtcs that stay enabled untouched count. */
		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/* The newly enabled pipe(s) must wait on whichever pipe is
	 * already running; otherwise the second new pipe waits on the
	 * first. */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
12538
Ville Syrjälä8d965612016-11-14 18:35:10 +020012539static int intel_lock_all_pipes(struct drm_atomic_state *state)
12540{
12541 struct drm_crtc *crtc;
12542
12543 /* Add all pipes to the state */
12544 for_each_crtc(state->dev, crtc) {
12545 struct drm_crtc_state *crtc_state;
12546
12547 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12548 if (IS_ERR(crtc_state))
12549 return PTR_ERR(crtc_state);
12550 }
12551
12552 return 0;
12553}
12554
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012555static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12556{
12557 struct drm_crtc *crtc;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012558
Ville Syrjälä8d965612016-11-14 18:35:10 +020012559 /*
12560 * Add all pipes to the state, and force
12561 * a modeset on all the active ones.
12562 */
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012563 for_each_crtc(state->dev, crtc) {
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012564 struct drm_crtc_state *crtc_state;
12565 int ret;
12566
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012567 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12568 if (IS_ERR(crtc_state))
12569 return PTR_ERR(crtc_state);
12570
12571 if (!crtc_state->active || needs_modeset(crtc_state))
12572 continue;
12573
12574 crtc_state->mode_changed = true;
12575
12576 ret = drm_atomic_add_affected_connectors(state, crtc);
12577 if (ret)
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012578 return ret;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012579
12580 ret = drm_atomic_add_affected_planes(state, crtc);
12581 if (ret)
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012582 return ret;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012583 }
12584
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012585 return 0;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012586}
12587
/*
 * Perform the additional checks needed when at least one CRTC in @state
 * requires a full modeset: reject conflicting digital port configs, record
 * the new set of active CRTCs, and recompute the cdclk state.
 *
 * Called from intel_atomic_check() (only when any_ms is set).  Returns 0 on
 * success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Start from the current HW state and fold in this update's changes. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
					      &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	} else {
		/* No cdclk hook: just carry the current logical state over. */
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
12665
Matt Roperaa363132015-09-24 15:53:18 -070012666/*
12667 * Handle calculation of various watermark data at the end of the atomic check
12668 * phase. The code here should be run after the per-crtc and per-plane 'check'
12669 * handlers to ensure that all derived state has been updated.
12670 */
Matt Ropercd1d3ee2018-12-10 13:54:14 -080012671static int calc_watermark_data(struct intel_atomic_state *state)
Matt Roperaa363132015-09-24 15:53:18 -070012672{
Matt Ropercd1d3ee2018-12-10 13:54:14 -080012673 struct drm_device *dev = state->base.dev;
Matt Roper98d39492016-05-12 07:06:03 -070012674 struct drm_i915_private *dev_priv = to_i915(dev);
Matt Roper98d39492016-05-12 07:06:03 -070012675
12676 /* Is there platform-specific watermark information to calculate? */
12677 if (dev_priv->display.compute_global_watermarks)
Matt Roper55994c22016-05-12 07:06:08 -070012678 return dev_priv->display.compute_global_watermarks(state);
12679
12680 return 0;
Matt Roperaa363132015-09-24 15:53:18 -070012681}
12682
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * Driver-level atomic check hook: runs the DRM core modeset checks,
 * recomputes the pipe config for every CRTC that needs a modeset (possibly
 * downgrading a modeset to a fastset when the configs compare equal), and
 * then performs the global modeset/plane/watermark checks.
 *
 * Returns 0 on success, or a negative error code (-EDEADLK is passed
 * straight through so the caller can back off and retry).
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *crtc_state;
	int ret, i;
	bool any_ms = false;

	/* Catch I915_MODE_FLAG_INHERITED */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      crtc_state, i) {
		if (crtc_state->mode.private_flags !=
		    old_crtc_state->mode.private_flags)
			crtc_state->mode_changed = true;
	}

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		if (!needs_modeset(crtc_state))
			continue;

		/* A CRTC being disabled needs no config computation. */
		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret == -EDEADLK)
			return ret;
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		/*
		 * If the new config matches the old one (fastset comparison),
		 * downgrade the modeset to a pipe update.
		 */
		if (intel_pipe_config_compare(dev_priv,
					      to_intel_crtc_state(old_crtc_state),
					      pipe_config, true)) {
			crtc_state->mode_changed = false;
			pipe_config->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		return ret;

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else {
		/* No modeset: logical cdclk state stays as-is. */
		intel_state->cdclk.logical = dev_priv->cdclk.logical;
	}

	ret = icl_add_linked_planes(intel_state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, intel_state);
	return calc_watermark_data(intel_state);
}
12770
/*
 * Prepare the planes in @state for the commit phase.  Currently delegates
 * entirely to the DRM atomic helper; kept as a separate function so driver-
 * specific preparation can be added here.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
12776
Maarten Lankhorsta2991412016-05-17 15:07:48 +020012777u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12778{
12779 struct drm_device *dev = crtc->base.dev;
12780
12781 if (!dev->max_vblank_count)
Dhinakaran Pandiyan734cbbf2018-02-02 21:12:54 -080012782 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
Maarten Lankhorsta2991412016-05-17 15:07:48 +020012783
12784 return dev->driver->get_vblank_counter(dev, crtc->pipe);
12785}
12786
/*
 * Commit the new state of a single CRTC: (re-)enable the pipe on a full
 * modeset or do the pre-plane update on a fastset, adjust FBC, and then
 * program the planes between begin/finish commit.  The ordering of these
 * steps is significant.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);
	/* New state of the primary plane, if it is part of this update. */
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
						 to_intel_plane(crtc->primary));

	if (modeset) {
		update_scanline_offset(pipe_config);
		dev_priv->display.crtc_enable(pipe_config, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(intel_crtc);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);

		if (pipe_config->update_pipe)
			intel_encoders_update_pipe(crtc, pipe_config, state);
	}

	/* Keep FBC state in sync with the new pipe configuration. */
	if (pipe_config->update_pipe && !pipe_config->enable_fbc)
		intel_fbc_disable(intel_crtc);
	else if (new_plane_state)
		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

	intel_begin_crtc_commit(crtc, old_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
	else
		i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);

	intel_finish_crtc_commit(crtc, old_crtc_state);
}
12829
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012830static void intel_update_crtcs(struct drm_atomic_state *state)
Lyude896e5bb2016-08-24 07:48:09 +020012831{
12832 struct drm_crtc *crtc;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012833 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Lyude896e5bb2016-08-24 07:48:09 +020012834 int i;
12835
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012836 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12837 if (!new_crtc_state->active)
Lyude896e5bb2016-08-24 07:48:09 +020012838 continue;
12839
12840 intel_update_crtc(crtc, state, old_crtc_state,
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012841 new_crtc_state);
Lyude896e5bb2016-08-24 07:48:09 +020012842 }
12843}
12844
/*
 * SKL+ .update_crtcs() implementation.  Unlike intel_update_crtcs(), pipes
 * must be committed in an order such that their DDB (data buffer) allocations
 * never transiently overlap; this iterates until every active pipe whose new
 * allocation does not collide with a not-yet-updated pipe has been committed.
 * Also enables/disables the second DBuf slice on gen11+ as required.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;	/* mask of CRTCs already committed */
	bool progress;
	enum pipe pipe;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
	/* Per-pipe DDB entries; start from the old (current) allocations. */
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(new_crtc_state);
			pipe = intel_crtc->pipe;

			/* Skip pipes already done or not active. */
			if (updated & cmask || !cstate->base.active)
				continue;

			/* Defer while the new DDB overlaps a pending pipe. */
			if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
							entries,
							INTEL_INFO(dev_priv)->num_pipes, i))
				continue;

			updated |= cmask;
			entries[i] = cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If the 2nd DBuf slice is no longer required, disable it. */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
12924
/*
 * Drop the final reference on every atomic state queued on the deferred
 * free list.  States are queued there by intel_atomic_commit_ready()'s
 * FENCE_FREE notification (see the llist_add() there).
 */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	/* Atomically take the whole list; new entries may be queued after. */
	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}
12934
/* Work item wrapper: drain the deferred state free list from process context. */
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}
12942
/*
 * Block until the commit_ready fence of @intel_state has signalled, or until
 * a GPU-reset-modeset is flagged.  Waits on both queues simultaneously so a
 * reset can unblock a commit whose fence would otherwise never complete.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/*
		 * Register on both wait queues before testing the conditions,
		 * so a wakeup on either cannot be missed.
		 */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
12966
Chris Wilson8d52e442018-06-23 11:39:51 +010012967static void intel_atomic_cleanup_work(struct work_struct *work)
12968{
12969 struct drm_atomic_state *state =
12970 container_of(work, struct drm_atomic_state, commit_work);
12971 struct drm_i915_private *i915 = to_i915(state->dev);
12972
12973 drm_atomic_helper_cleanup_planes(&i915->drm, state);
12974 drm_atomic_helper_commit_cleanup_done(state);
12975 drm_atomic_state_put(state);
12976
12977 intel_atomic_helper_free_state(i915);
12978}
12979
/*
 * The hardware phase of an atomic commit: disable CRTCs that need a full
 * modeset, apply global (cdclk/SAGV) state, enable/update the remaining
 * CRTCs via dev_priv->display.update_crtcs(), then do post-vblank watermark
 * optimization and power-domain/cleanup bookkeeping.
 *
 * Runs after intel_atomic_commit_fence_wait() guarantees all fences have
 * signalled; invoked from intel_atomic_commit_work() (and presumably also
 * directly for blocking commits — caller is outside this view).  The
 * statement ordering below is load-bearing throughout.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	intel_atomic_commit_fence_wait(intel_state);

	drm_atomic_helper_wait_for_dependencies(state);

	/* Hold display power across the whole modeset sequence. */
	if (intel_state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* First pass: grab power domains and disable pipes needing a modeset. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
		intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {

			/* Released in the post-update loop at the bottom. */
			put_domains[intel_crtc->pipe] =
				modeset_get_crtc_power_domains(crtc,
					new_intel_crtc_state);
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(intel_state, intel_crtc);

			/*
			 * We need to disable pipe CRC before disabling the pipe,
			 * or we race against vblank off.
			 */
			intel_crtc_disable_pipe_crc(intel_crtc);

			dev_priv->display.crtc_disable(old_intel_crtc_state, state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(old_intel_crtc_state);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			/* FIXME unify this for all platforms */
			if (!new_crtc_state->active &&
			    !HAS_GMCH_DISPLAY(dev_priv) &&
			    dev_priv->display.initial_watermarks)
				dev_priv->display.initial_watermarks(intel_state,
								     new_intel_crtc_state);
		}
	}

	/* FIXME: Eventually get rid of our intel_crtc->config pointer */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		/* Program the new cdclk while all modeset pipes are off. */
		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more then one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 * we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      new_intel_crtc_state);
	}

	/* Post-update bookkeeping and release of the domains taken above. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (intel_state->modeset)
		intel_verify_planes(intel_state);

	/* Re-enable SAGV only once the new pipe configuration allows it. */
	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->commit_work);
}
13149
/* Work item wrapper: run the commit tail for a nonblocking atomic commit. */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);

	intel_atomic_commit_tail(state);
}
13157
/*
 * i915_sw_fence notifier for the commit_ready fence embedded in
 * intel_atomic_state.  On FENCE_FREE the state cannot be freed directly
 * from this context, so it is queued on the atomic_helper free list and
 * reaped later by intel_atomic_helper_free_state_worker().
 */
static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		/* we do blocking waits in the worker, nothing to do here */
		break;
	case FENCE_FREE:
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			/* Only the first queuer needs to kick the worker. */
			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
}
13182
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020013183static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13184{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010013185 struct drm_plane_state *old_plane_state, *new_plane_state;
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020013186 struct drm_plane *plane;
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020013187 int i;
13188
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010013189 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
Chris Wilsonfaf5bf02016-08-04 16:32:37 +010013190 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010013191 intel_fb_obj(new_plane_state->fb),
Chris Wilsonfaf5bf02016-08-04 16:32:37 +010013192 to_intel_plane(plane)->frontbuffer_bit);
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020013193}
13194
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Extra state reference held while the commit is in flight. */
	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->legacy_cursor_update = false;
	}

	/* On failure, commit the fence so waiters are not left hanging. */
	ret = intel_atomic_prepare_commit(dev, state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&intel_state->commit_ready);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(state, true);

	if (ret) {
		i915_sw_fence_commit(&intel_state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, state);
		return ret;
	}
	/* Point of no return: state is swapped, the commit must complete. */
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Latch the swapped-in global (cdclk/voltage/crtc mask) state. */
	if (intel_state->modeset) {
		memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
		       sizeof(intel_state->min_cdclk));
		memcpy(dev_priv->min_voltage_level,
		       intel_state->min_voltage_level,
		       sizeof(intel_state->min_voltage_level));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->cdclk.logical = intel_state->cdclk.logical;
		dev_priv->cdclk.actual = intel_state->cdclk.actual;
	}

	/* Reference consumed by intel_atomic_commit_work/commit_tail. */
	drm_atomic_state_get(state);
	INIT_WORK(&state->commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&intel_state->commit_ready);
	if (nonblock && intel_state->modeset) {
		/* Modesets are serialized on the dedicated modeset wq. */
		queue_work(dev_priv->modeset_wq, &state->commit_work);
	} else if (nonblock) {
		queue_work(system_unbound_wq, &state->commit_work);
	} else {
		/* Blocking commit: order behind any queued modesets. */
		if (intel_state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
13295
/* CRTC vfuncs: mostly the generic atomic helpers, plus i915 state and CRC hooks. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
	.verify_crc_source = intel_crtc_verify_crc_source,
	.get_crc_sources = intel_crtc_get_crc_sources,
};
13307
/* One-shot RPS-boost entry armed on a crtc's vblank wait queue. */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;		/* crtc whose vblank fires the boost */
	struct i915_request *request;	/* request to boost; ref dropped in do_rps_boost() */
};
13314
/*
 * Wait-queue callback fired on vblank: boost GPU frequency if the flip's
 * request has not started executing yet, then drop the references taken
 * in add_rps_boost_after_vblank() and free the one-shot wait entry.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		gen6_rps_boost(rq, NULL);
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	/* One-shot: remove ourselves from the queue and free the entry. */
	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
13336
13337static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
13338 struct dma_fence *fence)
13339{
13340 struct wait_rps_boost *wait;
13341
13342 if (!dma_fence_is_i915(fence))
13343 return;
13344
13345 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
13346 return;
13347
13348 if (drm_crtc_vblank_get(crtc))
13349 return;
13350
13351 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
13352 if (!wait) {
13353 drm_crtc_vblank_put(crtc);
13354 return;
13355 }
13356
13357 wait->request = to_request(dma_fence_get(fence));
13358 wait->crtc = crtc;
13359
13360 wait->wait.func = do_rps_boost;
13361 wait->wait.flags = 0;
13362
13363 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
13364}
13365
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013366static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13367{
13368 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13369 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13370 struct drm_framebuffer *fb = plane_state->base.fb;
13371 struct i915_vma *vma;
13372
13373 if (plane->id == PLANE_CURSOR &&
José Roberto de Souzad53db442018-11-30 15:20:48 -080013374 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013375 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13376 const int align = intel_cursor_alignment(dev_priv);
Chris Wilson4a477652018-08-17 09:24:05 +010013377 int err;
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013378
Chris Wilson4a477652018-08-17 09:24:05 +010013379 err = i915_gem_object_attach_phys(obj, align);
13380 if (err)
13381 return err;
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013382 }
13383
13384 vma = intel_pin_and_fence_fb_obj(fb,
Ville Syrjäläf5929c52018-09-07 18:24:06 +030013385 &plane_state->view,
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013386 intel_plane_uses_fence(plane_state),
13387 &plane_state->flags);
13388 if (IS_ERR(vma))
13389 return PTR_ERR(vma);
13390
13391 plane_state->vma = vma;
13392
13393 return 0;
13394}
13395
13396static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13397{
13398 struct i915_vma *vma;
13399
13400 vma = fetch_and_zero(&old_plane_state->vma);
13401 if (vma)
13402 intel_unpin_fb_vma(vma, old_plane_state->flags);
13403}
13404
/* Raise pending rendering against a scanout object to display priority. */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
13413
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Takes struct_mutex internally around the fb pinning; must not be called
 * with it already held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_new_crtc_state(new_state->state,
						      plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			/* Gate the commit on the old fb's reservation. */
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	/* Hold a pages pin across the locked fb pinning below. */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret) {
		i915_gem_object_unpin_pages(obj);
		return ret;
	}

	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));

	mutex_unlock(&dev_priv->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	fb_obj_bump_render_priority(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	if (!new_state->fence) { /* implicit fencing */
		struct dma_fence *fence;

		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		/* Boost against the exclusive (render) fence, if any. */
		fence = reservation_object_get_excl_rcu(obj->resv);
		if (fence) {
			add_rps_boost_after_vblank(new_state->crtc, fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!intel_state->rps_interactive) {
		intel_rps_mark_interactive(dev_priv, true);
		intel_state->rps_interactive = true;
	}

	return 0;
}
13532
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane:
 * drops the interactive RPS bias taken in intel_prepare_plane_fb()
 * and unpins the old fb's vma.
 *
 * Takes struct_mutex internally; must not be called with it held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(old_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);

	if (intel_state->rps_interactive) {
		intel_rps_mark_interactive(dev_priv, false);
		intel_state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_plane_unpin_fb(to_intel_plane_state(old_state));
	mutex_unlock(&dev_priv->drm.struct_mutex);
}
13560
Chandra Konduru6156a452015-04-27 13:48:39 -070013561int
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +030013562skl_max_scale(const struct intel_crtc_state *crtc_state,
13563 u32 pixel_format)
Chandra Konduru6156a452015-04-27 13:48:39 -070013564{
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +030013565 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13566 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Chandra Konduru77224cd2018-04-09 09:11:13 +053013567 int max_scale, mult;
13568 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
Chandra Konduru6156a452015-04-27 13:48:39 -070013569
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +030013570 if (!crtc_state->base.enable)
Chandra Konduru6156a452015-04-27 13:48:39 -070013571 return DRM_PLANE_HELPER_NO_SCALING;
13572
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013573 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13574 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13575
Rodrigo Vivi43037c82017-10-03 15:31:42 -070013576 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013577 max_dotclk *= 2;
13578
13579 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
Chandra Konduru6156a452015-04-27 13:48:39 -070013580 return DRM_PLANE_HELPER_NO_SCALING;
13581
13582 /*
13583 * skl max scale is lower of:
13584 * close to 3 but not 3, -1 is for that purpose
13585 * or
13586 * cdclk/crtc_clock
13587 */
Chandra Konduru77224cd2018-04-09 09:11:13 +053013588 mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13589 tmpclk1 = (1 << 16) * mult - 1;
13590 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13591 max_scale = min(tmpclk1, tmpclk2);
Chandra Konduru6156a452015-04-27 13:48:39 -070013592
13593 return max_scale;
13594}
13595
/*
 * Pre-plane-update hook: program pipe-level state (color management,
 * pipe config, scalers, watermarks) and open the vblank-evasion
 * critical section before the individual plane registers are written.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/* Reload CSC/LUTs only for non-modeset updates that touched them. */
	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(intel_cstate);
		intel_color_load_luts(intel_cstate);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	/* Full modesets skip the fastset-only pipe updates below. */
	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_cstate);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13633
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +020013634void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13635 struct intel_crtc_state *crtc_state)
13636{
13637 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13638
Lucas De Marchicf819ef2018-12-12 10:10:43 -080013639 if (!IS_GEN(dev_priv, 2))
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +020013640 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13641
13642 if (crtc_state->has_pch_encoder) {
13643 enum pipe pch_transcoder =
13644 intel_crtc_pch_transcoder(crtc);
13645
13646 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13647 }
13648}
13649
/*
 * Post-plane-update hook: close the vblank-evasion critical section
 * opened by intel_begin_crtc_commit().
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);

	intel_pipe_update_end(new_crtc_state);

	/*
	 * Arm FIFO underrun reporting on the first fastset over state
	 * inherited from the BIOS (I915_MODE_FLAG_INHERITED).
	 */
	if (new_crtc_state->update_pipe &&
	    !needs_modeset(&new_crtc_state->base) &&
	    old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
		intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
}
13666
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite): detaches the plane from the DRM core, then frees the
 * containing intel_plane.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
13679
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013680static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13681 u32 format, u64 modifier)
Ben Widawsky714244e2017-08-01 09:58:16 -070013682{
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013683 switch (modifier) {
13684 case DRM_FORMAT_MOD_LINEAR:
13685 case I915_FORMAT_MOD_X_TILED:
13686 break;
13687 default:
13688 return false;
13689 }
13690
Ben Widawsky714244e2017-08-01 09:58:16 -070013691 switch (format) {
13692 case DRM_FORMAT_C8:
13693 case DRM_FORMAT_RGB565:
13694 case DRM_FORMAT_XRGB1555:
13695 case DRM_FORMAT_XRGB8888:
13696 return modifier == DRM_FORMAT_MOD_LINEAR ||
13697 modifier == I915_FORMAT_MOD_X_TILED;
13698 default:
13699 return false;
13700 }
13701}
13702
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013703static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13704 u32 format, u64 modifier)
Ben Widawsky714244e2017-08-01 09:58:16 -070013705{
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013706 switch (modifier) {
13707 case DRM_FORMAT_MOD_LINEAR:
13708 case I915_FORMAT_MOD_X_TILED:
13709 break;
13710 default:
13711 return false;
13712 }
13713
Ben Widawsky714244e2017-08-01 09:58:16 -070013714 switch (format) {
13715 case DRM_FORMAT_C8:
13716 case DRM_FORMAT_RGB565:
13717 case DRM_FORMAT_XRGB8888:
13718 case DRM_FORMAT_XBGR8888:
13719 case DRM_FORMAT_XRGB2101010:
13720 case DRM_FORMAT_XBGR2101010:
13721 return modifier == DRM_FORMAT_MOD_LINEAR ||
13722 modifier == I915_FORMAT_MOD_X_TILED;
13723 default:
13724 return false;
13725 }
13726}
13727
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013728static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13729 u32 format, u64 modifier)
Ben Widawsky714244e2017-08-01 09:58:16 -070013730{
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013731 return modifier == DRM_FORMAT_MOD_LINEAR &&
13732 format == DRM_FORMAT_ARGB8888;
Ben Widawsky714244e2017-08-01 09:58:16 -070013733}
13734
/* Plane vfuncs for gen4+ primary planes; differs from i8xx only in format support. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
13745
/* Plane vfuncs for gen2/3 primary planes; differs from i965 only in format support. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
13756
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013757static int
13758intel_legacy_cursor_update(struct drm_plane *plane,
13759 struct drm_crtc *crtc,
13760 struct drm_framebuffer *fb,
13761 int crtc_x, int crtc_y,
13762 unsigned int crtc_w, unsigned int crtc_h,
Jani Nikulaba3f4d02019-01-18 14:01:23 +020013763 u32 src_x, u32 src_y,
13764 u32 src_w, u32 src_h,
Daniel Vetter34a2ab52017-03-22 22:50:41 +010013765 struct drm_modeset_acquire_ctx *ctx)
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013766{
13767 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
13768 int ret;
13769 struct drm_plane_state *old_plane_state, *new_plane_state;
13770 struct intel_plane *intel_plane = to_intel_plane(plane);
13771 struct drm_framebuffer *old_fb;
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013772 struct intel_crtc_state *crtc_state =
13773 to_intel_crtc_state(crtc->state);
13774 struct intel_crtc_state *new_crtc_state;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013775
13776 /*
13777 * When crtc is inactive or there is a modeset pending,
13778 * wait for it to complete in the slowpath
13779 */
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013780 if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
13781 crtc_state->update_pipe)
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013782 goto slow;
13783
13784 old_plane_state = plane->state;
Maarten Lankhorst669c9212017-09-04 12:48:38 +020013785 /*
13786 * Don't do an async update if there is an outstanding commit modifying
13787 * the plane. This prevents our async update's changes from getting
13788 * overridden by a previous synchronous update's state.
13789 */
13790 if (old_plane_state->commit &&
13791 !try_wait_for_completion(&old_plane_state->commit->hw_done))
13792 goto slow;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013793
13794 /*
13795 * If any parameters change that may affect watermarks,
13796 * take the slowpath. Only changing fb or position should be
13797 * in the fastpath.
13798 */
13799 if (old_plane_state->crtc != crtc ||
13800 old_plane_state->src_w != src_w ||
13801 old_plane_state->src_h != src_h ||
13802 old_plane_state->crtc_w != crtc_w ||
13803 old_plane_state->crtc_h != crtc_h ||
Ville Syrjäläa5509ab2017-02-17 17:01:59 +020013804 !old_plane_state->fb != !fb)
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013805 goto slow;
13806
13807 new_plane_state = intel_plane_duplicate_state(plane);
13808 if (!new_plane_state)
13809 return -ENOMEM;
13810
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013811 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
13812 if (!new_crtc_state) {
13813 ret = -ENOMEM;
13814 goto out_free;
13815 }
13816
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013817 drm_atomic_set_fb_for_plane(new_plane_state, fb);
13818
13819 new_plane_state->src_x = src_x;
13820 new_plane_state->src_y = src_y;
13821 new_plane_state->src_w = src_w;
13822 new_plane_state->src_h = src_h;
13823 new_plane_state->crtc_x = crtc_x;
13824 new_plane_state->crtc_y = crtc_y;
13825 new_plane_state->crtc_w = crtc_w;
13826 new_plane_state->crtc_h = crtc_h;
13827
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013828 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
13829 to_intel_plane_state(old_plane_state),
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013830 to_intel_plane_state(new_plane_state));
13831 if (ret)
13832 goto out_free;
13833
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013834 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13835 if (ret)
13836 goto out_free;
13837
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013838 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
13839 if (ret)
13840 goto out_unlock;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013841
Dhinakaran Pandiyana694e222018-03-06 19:34:19 -080013842 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013843
Dhinakaran Pandiyan07bcd992018-03-06 19:34:18 -080013844 old_fb = old_plane_state->fb;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013845 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
13846 intel_plane->frontbuffer_bit);
13847
13848 /* Swap plane state */
Maarten Lankhorst669c9212017-09-04 12:48:38 +020013849 plane->state = new_plane_state;
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013850
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013851 /*
13852 * We cannot swap crtc_state as it may be in use by an atomic commit or
13853 * page flip that's running simultaneously. If we swap crtc_state and
13854 * destroy the old state, we will cause a use-after-free there.
13855 *
13856 * Only update active_planes, which is needed for our internal
13857 * bookkeeping. Either value will do the right thing when updating
13858 * planes atomically. If the cursor was part of the atomic update then
13859 * we would have taken the slowpath.
13860 */
13861 crtc_state->active_planes = new_crtc_state->active_planes;
13862
Ville Syrjälä72259532017-03-02 19:15:05 +020013863 if (plane->state->visible) {
13864 trace_intel_update_plane(plane, to_intel_crtc(crtc));
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013865 intel_plane->update_plane(intel_plane, crtc_state,
Ville Syrjäläa5509ab2017-02-17 17:01:59 +020013866 to_intel_plane_state(plane->state));
Ville Syrjälä72259532017-03-02 19:15:05 +020013867 } else {
13868 trace_intel_disable_plane(plane, to_intel_crtc(crtc));
Ville Syrjälä0dd14be2018-11-14 23:07:20 +020013869 intel_plane->disable_plane(intel_plane, crtc_state);
Ville Syrjälä72259532017-03-02 19:15:05 +020013870 }
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013871
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013872 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013873
13874out_unlock:
13875 mutex_unlock(&dev_priv->drm.struct_mutex);
13876out_free:
Maarten Lankhorstc249c5f2018-09-20 12:27:05 +020013877 if (new_crtc_state)
13878 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
Maarten Lankhorst669c9212017-09-04 12:48:38 +020013879 if (ret)
13880 intel_plane_destroy_state(plane, new_plane_state);
13881 else
13882 intel_plane_destroy_state(plane, old_plane_state);
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013883 return ret;
13884
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013885slow:
13886 return drm_atomic_helper_update_plane(plane, crtc, fb,
13887 crtc_x, crtc_y, crtc_w, crtc_h,
Daniel Vetter34a2ab52017-03-22 22:50:41 +010013888 src_x, src_y, src_w, src_h, ctx);
Maarten Lankhorstf79f2692016-12-12 11:34:55 +010013889}
13890
/*
 * Plane vfuncs for the legacy cursor: .update_plane takes the
 * intel_legacy_cursor_update() fastpath defined above, while disable,
 * destroy and the atomic property/state hooks go through the standard
 * atomic helpers and the shared intel plane state helpers.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
13901
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013902static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13903 enum i9xx_plane_id i9xx_plane)
13904{
13905 if (!HAS_FBC(dev_priv))
13906 return false;
13907
13908 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13909 return i9xx_plane == PLANE_A; /* tied to pipe A */
13910 else if (IS_IVYBRIDGE(dev_priv))
13911 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13912 i9xx_plane == PLANE_C;
13913 else if (INTEL_GEN(dev_priv) >= 4)
13914 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13915 else
13916 return i9xx_plane == PLANE_A;
13917}
13918
Ville Syrjäläb079bd172016-10-25 18:58:02 +030013919static struct intel_plane *
Ville Syrjälä580503c2016-10-31 22:37:00 +020013920intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
Matt Roper465c1202014-05-29 08:06:54 -070013921{
Ville Syrjälä881440a2018-10-05 15:58:17 +030013922 struct intel_plane *plane;
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013923 const struct drm_plane_funcs *plane_funcs;
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013924 unsigned int supported_rotations;
Ville Syrjälädeb19682018-10-05 15:58:08 +030013925 unsigned int possible_crtcs;
Ville Syrjälä881440a2018-10-05 15:58:17 +030013926 const u64 *modifiers;
13927 const u32 *formats;
13928 int num_formats;
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013929 int ret;
Matt Roper465c1202014-05-29 08:06:54 -070013930
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013931 if (INTEL_GEN(dev_priv) >= 9)
13932 return skl_universal_plane_create(dev_priv, pipe,
13933 PLANE_PRIMARY);
13934
Ville Syrjälä881440a2018-10-05 15:58:17 +030013935 plane = intel_plane_alloc();
13936 if (IS_ERR(plane))
13937 return plane;
Matt Roperea2c67b2014-12-23 10:41:52 -080013938
Ville Syrjälä881440a2018-10-05 15:58:17 +030013939 plane->pipe = pipe;
Ville Syrjäläe3c566d2016-11-08 16:47:11 +020013940 /*
13941 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
13942 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
13943 */
13944 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
Ville Syrjälä881440a2018-10-05 15:58:17 +030013945 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
Ville Syrjäläe3c566d2016-11-08 16:47:11 +020013946 else
Ville Syrjälä881440a2018-10-05 15:58:17 +030013947 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
13948 plane->id = PLANE_PRIMARY;
13949 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013950
Ville Syrjälä881440a2018-10-05 15:58:17 +030013951 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
13952 if (plane->has_fbc) {
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013953 struct intel_fbc *fbc = &dev_priv->fbc;
13954
Ville Syrjälä881440a2018-10-05 15:58:17 +030013955 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013956 }
13957
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013958 if (INTEL_GEN(dev_priv) >= 4) {
Ville Syrjälä881440a2018-10-05 15:58:17 +030013959 formats = i965_primary_formats;
Damien Lespiau568db4f2015-05-12 16:13:18 +010013960 num_formats = ARRAY_SIZE(i965_primary_formats);
Ben Widawsky714244e2017-08-01 09:58:16 -070013961 modifiers = i9xx_format_modifiers;
Maarten Lankhorsta8d201a2016-01-07 11:54:11 +010013962
Ville Syrjälä881440a2018-10-05 15:58:17 +030013963 plane->max_stride = i9xx_plane_max_stride;
13964 plane->update_plane = i9xx_update_plane;
13965 plane->disable_plane = i9xx_disable_plane;
13966 plane->get_hw_state = i9xx_plane_get_hw_state;
13967 plane->check_plane = i9xx_plane_check;
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013968
13969 plane_funcs = &i965_plane_funcs;
Damien Lespiau6c0fd452015-05-19 12:29:16 +010013970 } else {
Ville Syrjälä881440a2018-10-05 15:58:17 +030013971 formats = i8xx_primary_formats;
Damien Lespiau6c0fd452015-05-19 12:29:16 +010013972 num_formats = ARRAY_SIZE(i8xx_primary_formats);
Ben Widawsky714244e2017-08-01 09:58:16 -070013973 modifiers = i9xx_format_modifiers;
Maarten Lankhorsta8d201a2016-01-07 11:54:11 +010013974
Ville Syrjälä881440a2018-10-05 15:58:17 +030013975 plane->max_stride = i9xx_plane_max_stride;
13976 plane->update_plane = i9xx_update_plane;
13977 plane->disable_plane = i9xx_disable_plane;
13978 plane->get_hw_state = i9xx_plane_get_hw_state;
13979 plane->check_plane = i9xx_plane_check;
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013980
13981 plane_funcs = &i8xx_plane_funcs;
Matt Roper465c1202014-05-29 08:06:54 -070013982 }
13983
Ville Syrjälädeb19682018-10-05 15:58:08 +030013984 possible_crtcs = BIT(pipe);
13985
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013986 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
Ville Syrjälä881440a2018-10-05 15:58:17 +030013987 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
Ville Syrjälädeb19682018-10-05 15:58:08 +030013988 possible_crtcs, plane_funcs,
Ville Syrjälä881440a2018-10-05 15:58:17 +030013989 formats, num_formats, modifiers,
Ville Syrjälä38573dc2016-05-27 20:59:23 +030013990 DRM_PLANE_TYPE_PRIMARY,
13991 "primary %c", pipe_name(pipe));
13992 else
Ville Syrjälä881440a2018-10-05 15:58:17 +030013993 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
Ville Syrjälädeb19682018-10-05 15:58:08 +030013994 possible_crtcs, plane_funcs,
Ville Syrjälä881440a2018-10-05 15:58:17 +030013995 formats, num_formats, modifiers,
Ville Syrjälä38573dc2016-05-27 20:59:23 +030013996 DRM_PLANE_TYPE_PRIMARY,
Ville Syrjäläed150302017-11-17 21:19:10 +020013997 "plane %c",
Ville Syrjälä881440a2018-10-05 15:58:17 +030013998 plane_name(plane->i9xx_plane));
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013999 if (ret)
14000 goto fail;
Sonika Jindal48404c12014-08-22 14:06:04 +053014001
Ville Syrjäläb7c80602018-10-05 15:58:15 +030014002 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
Ville Syrjälä4ea7be22016-11-14 18:54:00 +020014003 supported_rotations =
Robert Fossc2c446a2017-05-19 16:50:17 -040014004 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14005 DRM_MODE_REFLECT_X;
Dave Airlie5481e272016-10-25 16:36:13 +100014006 } else if (INTEL_GEN(dev_priv) >= 4) {
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030014007 supported_rotations =
Robert Fossc2c446a2017-05-19 16:50:17 -040014008 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030014009 } else {
Robert Fossc2c446a2017-05-19 16:50:17 -040014010 supported_rotations = DRM_MODE_ROTATE_0;
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030014011 }
14012
Dave Airlie5481e272016-10-25 16:36:13 +100014013 if (INTEL_GEN(dev_priv) >= 4)
Ville Syrjälä881440a2018-10-05 15:58:17 +030014014 drm_plane_create_rotation_property(&plane->base,
Robert Fossc2c446a2017-05-19 16:50:17 -040014015 DRM_MODE_ROTATE_0,
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030014016 supported_rotations);
Sonika Jindal48404c12014-08-22 14:06:04 +053014017
Ville Syrjälä881440a2018-10-05 15:58:17 +030014018 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
Matt Roperea2c67b2014-12-23 10:41:52 -080014019
Ville Syrjälä881440a2018-10-05 15:58:17 +030014020 return plane;
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000014021
14022fail:
Ville Syrjälä881440a2018-10-05 15:58:17 +030014023 intel_plane_free(plane);
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000014024
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014025 return ERR_PTR(ret);
Matt Roper465c1202014-05-29 08:06:54 -070014026}
14027
/*
 * Create and register the cursor plane for @pipe.
 *
 * Selects the i845/i865 or i9xx cursor hooks based on platform, hooks
 * up the legacy cursor fastpath via intel_cursor_plane_funcs, and adds
 * the rotation property on gen4+.  Returns the new plane or an
 * ERR_PTR() on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	/* i845/i865 have their own cursor register layout and limits. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * NOTE(review): ~0 appears to mark the cached cursor register
	 * values as unknown so the first update writes them out —
	 * confirm against the i845/i9xx cursor update hooks.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	/* The cursor plane is tied to its own pipe only. */
	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* gen4+ cursors support 180 degree rotation. */
	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
14092
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053014093static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14094 struct intel_crtc_state *crtc_state)
Chandra Konduru549e2bf2015-04-07 15:28:38 -070014095{
Ville Syrjälä65edccc2016-10-31 22:37:01 +020014096 struct intel_crtc_scaler_state *scaler_state =
14097 &crtc_state->scaler_state;
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053014098 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Chandra Konduru549e2bf2015-04-07 15:28:38 -070014099 int i;
Chandra Konduru549e2bf2015-04-07 15:28:38 -070014100
Jani Nikula02584042018-12-31 16:56:41 +020014101 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053014102 if (!crtc->num_scalers)
14103 return;
14104
Ville Syrjälä65edccc2016-10-31 22:37:01 +020014105 for (i = 0; i < crtc->num_scalers; i++) {
14106 struct intel_scaler *scaler = &scaler_state->scalers[i];
14107
14108 scaler->in_use = 0;
Maarten Lankhorst0aaf29b2018-09-21 16:44:37 +020014109 scaler->mode = 0;
Chandra Konduru549e2bf2015-04-07 15:28:38 -070014110 }
14111
14112 scaler_state->scaler_id = -1;
14113}
14114
/*
 * Allocate and register an intel_crtc for @pipe together with its
 * primary, sprite and cursor planes, then hook up the CRTC helper
 * funcs, color management and the pipe/plane -> crtc lookup tables.
 *
 * Returns 0 on success or a negative errno.  On failure, planes that
 * were already registered are freed later by drm_mode_config_cleanup();
 * only the crtc and its state allocated here are freed directly.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	/* Link the crtc and its initial atomic state both ways. */
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe must map to exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	/* Pre-gen9 also maintains a legacy plane -> crtc mapping. */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(intel_crtc);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
14203
Ville Syrjälä6a20fe72018-02-07 18:48:41 +020014204int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14205 struct drm_file *file)
Carl Worth08d7b3d2009-04-29 14:43:54 -070014206{
Carl Worth08d7b3d2009-04-29 14:43:54 -070014207 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
Rob Clark7707e652014-07-17 23:30:04 -040014208 struct drm_crtc *drmmode_crtc;
Daniel Vetterc05422d2009-08-11 16:05:30 +020014209 struct intel_crtc *crtc;
Carl Worth08d7b3d2009-04-29 14:43:54 -070014210
Keith Packard418da172017-03-14 23:25:07 -070014211 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
Chris Wilson71240ed2016-06-24 14:00:24 +010014212 if (!drmmode_crtc)
Ville Syrjälä3f2c2052013-10-17 13:35:03 +030014213 return -ENOENT;
Carl Worth08d7b3d2009-04-29 14:43:54 -070014214
Rob Clark7707e652014-07-17 23:30:04 -040014215 crtc = to_intel_crtc(drmmode_crtc);
Daniel Vetterc05422d2009-08-11 16:05:30 +020014216 pipe_from_crtc_id->pipe = crtc->pipe;
Carl Worth08d7b3d2009-04-29 14:43:54 -070014217
Daniel Vetterc05422d2009-08-11 16:05:30 +020014218 return 0;
Carl Worth08d7b3d2009-04-29 14:43:54 -070014219}
14220
Daniel Vetter66a92782012-07-12 20:08:18 +020014221static int intel_encoder_clones(struct intel_encoder *encoder)
Jesse Barnes79e53942008-11-07 14:24:08 -080014222{
Daniel Vetter66a92782012-07-12 20:08:18 +020014223 struct drm_device *dev = encoder->base.dev;
14224 struct intel_encoder *source_encoder;
Jesse Barnes79e53942008-11-07 14:24:08 -080014225 int index_mask = 0;
Jesse Barnes79e53942008-11-07 14:24:08 -080014226 int entry = 0;
14227
Damien Lespiaub2784e12014-08-05 11:29:37 +010014228 for_each_intel_encoder(dev, source_encoder) {
Ville Syrjäläbc079e82014-03-03 16:15:28 +020014229 if (encoders_cloneable(encoder, source_encoder))
Daniel Vetter66a92782012-07-12 20:08:18 +020014230 index_mask |= (1 << entry);
14231
Jesse Barnes79e53942008-11-07 14:24:08 -080014232 entry++;
14233 }
Chris Wilson4ef69c72010-09-09 15:14:28 +010014234
Jesse Barnes79e53942008-11-07 14:24:08 -080014235 return index_mask;
14236}
14237
Jani Nikulaa5916fd2019-01-22 10:23:05 +020014238static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
Chris Wilson4d302442010-12-14 19:21:29 +000014239{
Ville Syrjälä646d5772016-10-31 22:37:14 +020014240 if (!IS_MOBILE(dev_priv))
Chris Wilson4d302442010-12-14 19:21:29 +000014241 return false;
14242
14243 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14244 return false;
14245
Lucas De Marchicf819ef2018-12-12 10:10:43 -080014246 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
Chris Wilson4d302442010-12-14 19:21:29 +000014247 return false;
14248
14249 return true;
14250}
14251
/*
 * Whether a DDI platform (HSW/BDW/SKL era) exposes an analog CRT
 * connector.  Checks the generation, ULT variants, the LPT-H CRT
 * disable fuse, the DDI A lane strap and the VBT before claiming
 * CRT support.
 */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	/* gen9+ has no analog port. */
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	/* ULT parts lack CRT. */
	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	/* CRT can be fused off on LPT-H. */
	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* Finally the VBT must also claim integrated CRT support. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
14273
Imre Deak8090ba82016-08-10 14:07:33 +030014274void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14275{
14276 int pps_num;
14277 int pps_idx;
14278
14279 if (HAS_DDI(dev_priv))
14280 return;
14281 /*
14282 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14283 * everywhere where registers can be write protected.
14284 */
14285 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14286 pps_num = 2;
14287 else
14288 pps_num = 1;
14289
14290 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14291 u32 val = I915_READ(PP_CONTROL(pps_idx));
14292
14293 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14294 I915_WRITE(PP_CONTROL(pps_idx), val);
14295 }
14296}
14297
Imre Deak44cb7342016-08-10 14:07:29 +030014298static void intel_pps_init(struct drm_i915_private *dev_priv)
14299{
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +020014300 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
Imre Deak44cb7342016-08-10 14:07:29 +030014301 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14302 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14303 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14304 else
14305 dev_priv->pps_mmio_base = PPS_BASE;
Imre Deak8090ba82016-08-10 14:07:33 +030014306
14307 intel_pps_unlock_regs_wa(dev_priv);
Imre Deak44cb7342016-08-10 14:07:29 +030014308}
14309
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014310static void intel_setup_outputs(struct drm_i915_private *dev_priv)
Jesse Barnes79e53942008-11-07 14:24:08 -080014311{
Chris Wilson4ef69c72010-09-09 15:14:28 +010014312 struct intel_encoder *encoder;
Adam Jacksoncb0953d2010-07-16 14:46:29 -040014313 bool dpd_is_edp = false;
Jesse Barnes79e53942008-11-07 14:24:08 -080014314
Imre Deak44cb7342016-08-10 14:07:29 +030014315 intel_pps_init(dev_priv);
14316
José Roberto de Souzae1bf0942018-11-30 15:20:47 -080014317 if (!HAS_DISPLAY(dev_priv))
Chris Wilsonfc0c5a92018-08-15 21:12:07 +010014318 return;
14319
Paulo Zanoni00c92d92018-05-21 17:25:47 -070014320 if (IS_ICELAKE(dev_priv)) {
14321 intel_ddi_init(dev_priv, PORT_A);
14322 intel_ddi_init(dev_priv, PORT_B);
14323 intel_ddi_init(dev_priv, PORT_C);
14324 intel_ddi_init(dev_priv, PORT_D);
14325 intel_ddi_init(dev_priv, PORT_E);
Imre Deak3f2e9ed2018-12-20 15:26:03 +020014326 /*
14327 * On some ICL SKUs port F is not present. No strap bits for
14328 * this, so rely on VBT.
14329 */
14330 if (intel_bios_is_port_present(dev_priv, PORT_F))
14331 intel_ddi_init(dev_priv, PORT_F);
14332
Madhav Chauhanbf4d57f2018-10-30 13:56:23 +020014333 icl_dsi_init(dev_priv);
Paulo Zanoni00c92d92018-05-21 17:25:47 -070014334 } else if (IS_GEN9_LP(dev_priv)) {
Vandana Kannanc776eb22014-08-19 12:05:01 +053014335 /*
14336 * FIXME: Broxton doesn't support port detection via the
14337 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14338 * detect the ports.
14339 */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014340 intel_ddi_init(dev_priv, PORT_A);
14341 intel_ddi_init(dev_priv, PORT_B);
14342 intel_ddi_init(dev_priv, PORT_C);
Shashank Sharmac6c794a2016-03-22 12:01:50 +020014343
Jani Nikulae5186342018-07-05 16:25:08 +030014344 vlv_dsi_init(dev_priv);
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010014345 } else if (HAS_DDI(dev_priv)) {
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014346 int found;
14347
Jani Nikula63cb4e62019-01-22 10:23:01 +020014348 if (intel_ddi_crt_present(dev_priv))
14349 intel_crt_init(dev_priv);
14350
Jesse Barnesde31fac2015-03-06 15:53:32 -080014351 /*
14352 * Haswell uses DDI functions to detect digital outputs.
14353 * On SKL pre-D0 the strap isn't connected, so we assume
14354 * it's there.
14355 */
Ville Syrjälä77179402015-09-18 20:03:35 +030014356 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
Jesse Barnesde31fac2015-03-06 15:53:32 -080014357 /* WaIgnoreDDIAStrap: skl */
Rodrigo Vivib976dc52017-01-23 10:32:37 -080014358 if (found || IS_GEN9_BC(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014359 intel_ddi_init(dev_priv, PORT_A);
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014360
Rodrigo Vivi9787e832018-01-29 15:22:22 -080014361 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014362 * register */
14363 found = I915_READ(SFUSE_STRAP);
14364
14365 if (found & SFUSE_STRAP_DDIB_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014366 intel_ddi_init(dev_priv, PORT_B);
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014367 if (found & SFUSE_STRAP_DDIC_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014368 intel_ddi_init(dev_priv, PORT_C);
Eugeni Dodonov0e72a5b2012-05-09 15:37:27 -030014369 if (found & SFUSE_STRAP_DDID_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014370 intel_ddi_init(dev_priv, PORT_D);
Rodrigo Vivi9787e832018-01-29 15:22:22 -080014371 if (found & SFUSE_STRAP_DDIF_DETECTED)
14372 intel_ddi_init(dev_priv, PORT_F);
Rodrigo Vivi2800e4c2015-08-07 17:35:21 -070014373 /*
14374 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14375 */
Rodrigo Vivib976dc52017-01-23 10:32:37 -080014376 if (IS_GEN9_BC(dev_priv) &&
Imre Deake9d49bb2018-12-20 15:26:02 +020014377 intel_bios_is_port_present(dev_priv, PORT_E))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014378 intel_ddi_init(dev_priv, PORT_E);
Rodrigo Vivi2800e4c2015-08-07 17:35:21 -070014379
Tvrtko Ursulin6e266952016-10-13 11:02:53 +010014380 } else if (HAS_PCH_SPLIT(dev_priv)) {
Adam Jacksoncb0953d2010-07-16 14:46:29 -040014381 int found;
Jani Nikula63cb4e62019-01-22 10:23:01 +020014382
Jani Nikula0fafa222019-01-22 10:23:02 +020014383 /*
14384 * intel_edp_init_connector() depends on this completing first,
14385 * to prevent the registration of both eDP and LVDS and the
14386 * incorrect sharing of the PPS.
14387 */
14388 intel_lvds_init(dev_priv);
Jani Nikula74d021e2019-01-22 10:23:07 +020014389 intel_crt_init(dev_priv);
Jani Nikula63cb4e62019-01-22 10:23:01 +020014390
Jani Nikula7b91bf72017-08-18 12:30:19 +030014391 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
Daniel Vetter270b3042012-10-27 15:52:05 +020014392
Jani Nikulaa5916fd2019-01-22 10:23:05 +020014393 if (ilk_has_edp_a(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014394 intel_dp_init(dev_priv, DP_A, PORT_A);
Adam Jacksoncb0953d2010-07-16 14:46:29 -040014395
Paulo Zanonidc0fa712013-02-19 16:21:46 -030014396 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
Zhao Yakui461ed3c2010-03-30 15:11:33 +080014397 /* PCH SDVOB multiplex with HDMIB */
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014398 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080014399 if (!found)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014400 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +080014401 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014402 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080014403 }
14404
Paulo Zanonidc0fa712013-02-19 16:21:46 -030014405 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014406 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080014407
Paulo Zanonidc0fa712013-02-19 16:21:46 -030014408 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014409 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
Zhenyu Wang30ad48b2009-06-05 15:38:43 +080014410
Zhenyu Wang5eb08b62009-07-24 01:00:31 +080014411 if (I915_READ(PCH_DP_C) & DP_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014412 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
Zhenyu Wang5eb08b62009-07-24 01:00:31 +080014413
Daniel Vetter270b3042012-10-27 15:52:05 +020014414 if (I915_READ(PCH_DP_D) & DP_DETECTED)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014415 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014416 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä22f350422016-06-03 12:17:43 +030014417 bool has_edp, has_port;
Chris Wilson457c52d2016-06-01 08:27:50 +010014418
Jani Nikula63cb4e62019-01-22 10:23:01 +020014419 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
14420 intel_crt_init(dev_priv);
14421
Ville Syrjäläe17ac6d2014-10-09 19:37:15 +030014422 /*
14423 * The DP_DETECTED bit is the latched state of the DDC
14424 * SDA pin at boot. However since eDP doesn't require DDC
14425 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14426 * eDP ports may have been muxed to an alternate function.
14427 * Thus we can't rely on the DP_DETECTED bit alone to detect
14428 * eDP ports. Consult the VBT as well as DP_DETECTED to
14429 * detect eDP ports.
Ville Syrjälä22f350422016-06-03 12:17:43 +030014430 *
14431 * Sadly the straps seem to be missing sometimes even for HDMI
14432 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
14433 * and VBT for the presence of the port. Additionally we can't
14434 * trust the port type the VBT declares as we've seen at least
14435 * HDMI ports that the VBT claim are DP or eDP.
Ville Syrjäläe17ac6d2014-10-09 19:37:15 +030014436 */
Jani Nikula7b91bf72017-08-18 12:30:19 +030014437 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014438 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14439 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014440 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014441 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014442 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
Artem Bityutskiy585a94b2013-10-16 18:10:41 +030014443
Jani Nikula7b91bf72017-08-18 12:30:19 +030014444 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014445 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14446 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014447 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014448 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014449 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
Gajanan Bhat19c03922012-09-27 19:13:07 +053014450
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014451 if (IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä22f350422016-06-03 12:17:43 +030014452 /*
14453 * eDP not supported on port D,
14454 * so no need to worry about it
14455 */
14456 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14457 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014458 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
Ville Syrjälä22f350422016-06-03 12:17:43 +030014459 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014460 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
Ville Syrjälä9418c1f2014-04-09 13:28:56 +030014461 }
14462
Jani Nikulae5186342018-07-05 16:25:08 +030014463 vlv_dsi_init(dev_priv);
Jani Nikula63cb4e62019-01-22 10:23:01 +020014464 } else if (IS_PINEVIEW(dev_priv)) {
Jani Nikula0fafa222019-01-22 10:23:02 +020014465 intel_lvds_init(dev_priv);
Jani Nikula74d021e2019-01-22 10:23:07 +020014466 intel_crt_init(dev_priv);
Jani Nikula63cb4e62019-01-22 10:23:01 +020014467 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
Ma Ling27185ae2009-08-24 13:50:23 +080014468 bool found = false;
Eric Anholt7d573822009-01-02 13:33:00 -080014469
Jani Nikula9bedc7e2019-01-22 10:23:03 +020014470 if (IS_MOBILE(dev_priv))
14471 intel_lvds_init(dev_priv);
Jani Nikula0fafa222019-01-22 10:23:02 +020014472
Jani Nikula74d021e2019-01-22 10:23:07 +020014473 intel_crt_init(dev_priv);
Jani Nikula63cb4e62019-01-22 10:23:01 +020014474
Paulo Zanonie2debe92013-02-18 19:00:27 -030014475 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014476 DRM_DEBUG_KMS("probing SDVOB\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014477 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014478 if (!found && IS_G4X(dev_priv)) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014479 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014480 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014481 }
Ma Ling27185ae2009-08-24 13:50:23 +080014482
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014483 if (!found && IS_G4X(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014484 intel_dp_init(dev_priv, DP_B, PORT_B);
Eric Anholt725e30a2009-01-22 13:01:02 -080014485 }
Kristian Høgsberg13520b02009-03-13 15:42:14 -040014486
14487 /* Before G4X SDVOC doesn't have its own detect register */
Kristian Høgsberg13520b02009-03-13 15:42:14 -040014488
Paulo Zanonie2debe92013-02-18 19:00:27 -030014489 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014490 DRM_DEBUG_KMS("probing SDVOC\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014491 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014492 }
Ma Ling27185ae2009-08-24 13:50:23 +080014493
Paulo Zanonie2debe92013-02-18 19:00:27 -030014494 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
Ma Ling27185ae2009-08-24 13:50:23 +080014495
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014496 if (IS_G4X(dev_priv)) {
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014497 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014498 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
Jesse Barnesb01f2c32009-12-11 11:07:17 -080014499 }
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014500 if (IS_G4X(dev_priv))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014501 intel_dp_init(dev_priv, DP_C, PORT_C);
Eric Anholt725e30a2009-01-22 13:01:02 -080014502 }
Ma Ling27185ae2009-08-24 13:50:23 +080014503
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010014504 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014505 intel_dp_init(dev_priv, DP_D, PORT_D);
Jani Nikulad6521462019-01-22 10:23:04 +020014506
14507 if (SUPPORTS_TV(dev_priv))
14508 intel_tv_init(dev_priv);
Jani Nikula63cb4e62019-01-22 10:23:01 +020014509 } else if (IS_GEN(dev_priv, 2)) {
Jani Nikula346073c2019-01-22 10:23:06 +020014510 if (IS_I85X(dev_priv))
Jani Nikula9bedc7e2019-01-22 10:23:03 +020014511 intel_lvds_init(dev_priv);
Jani Nikula0fafa222019-01-22 10:23:02 +020014512
Jani Nikula74d021e2019-01-22 10:23:07 +020014513 intel_crt_init(dev_priv);
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014514 intel_dvo_init(dev_priv);
Jani Nikula63cb4e62019-01-22 10:23:01 +020014515 }
Jesse Barnes79e53942008-11-07 14:24:08 -080014516
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014517 intel_psr_init(dev_priv);
Rodrigo Vivi7c8f8a72014-06-13 05:10:03 -070014518
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014519 for_each_intel_encoder(&dev_priv->drm, encoder) {
Chris Wilson4ef69c72010-09-09 15:14:28 +010014520 encoder->base.possible_crtcs = encoder->crtc_mask;
14521 encoder->base.possible_clones =
Daniel Vetter66a92782012-07-12 20:08:18 +020014522 intel_encoder_clones(encoder);
Jesse Barnes79e53942008-11-07 14:24:08 -080014523 }
Chris Wilson47356eb2011-01-11 17:06:04 +000014524
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014525 intel_init_pch_refclk(dev_priv);
Daniel Vetter270b3042012-10-27 15:52:05 +020014526
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014527 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
Jesse Barnes79e53942008-11-07 14:24:08 -080014528}
14529
14530static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14531{
14532 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
Daniel Stonea5ff7a42018-05-18 15:30:07 +010014533 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Jesse Barnes79e53942008-11-07 14:24:08 -080014534
Daniel Vetteref2d6332014-02-10 18:00:38 +010014535 drm_framebuffer_cleanup(fb);
Chris Wilson70001cd2017-02-16 09:46:21 +000014536
Daniel Stonea5ff7a42018-05-18 15:30:07 +010014537 i915_gem_object_lock(obj);
14538 WARN_ON(!obj->framebuffer_references--);
14539 i915_gem_object_unlock(obj);
Chris Wilsondd689282017-03-01 15:41:28 +000014540
Daniel Stonea5ff7a42018-05-18 15:30:07 +010014541 i915_gem_object_put(obj);
Chris Wilson70001cd2017-02-16 09:46:21 +000014542
Jesse Barnes79e53942008-11-07 14:24:08 -080014543 kfree(intel_fb);
14544}
14545
14546static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
Chris Wilson05394f32010-11-08 19:18:58 +000014547 struct drm_file *file,
Jesse Barnes79e53942008-11-07 14:24:08 -080014548 unsigned int *handle)
14549{
Daniel Stonea5ff7a42018-05-18 15:30:07 +010014550 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Jesse Barnes79e53942008-11-07 14:24:08 -080014551
Chris Wilsoncc917ab2015-10-13 14:22:26 +010014552 if (obj->userptr.mm) {
14553 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14554 return -EINVAL;
14555 }
14556
Chris Wilson05394f32010-11-08 19:18:58 +000014557 return drm_gem_handle_create(file, &obj->base, handle);
Jesse Barnes79e53942008-11-07 14:24:08 -080014558}
14559
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014560static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14561 struct drm_file *file,
14562 unsigned flags, unsigned color,
14563 struct drm_clip_rect *clips,
14564 unsigned num_clips)
14565{
Chris Wilson5a97bcc2017-02-22 11:40:46 +000014566 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014567
Chris Wilson5a97bcc2017-02-22 11:40:46 +000014568 i915_gem_object_flush_if_display(obj);
Chris Wilsond59b21e2017-02-22 11:40:49 +000014569 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014570
14571 return 0;
14572}
14573
/* Framebuffer vfuncs shared by all i915 framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14579
Damien Lespiaub3218032015-02-27 11:15:18 +000014580static
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014581u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
Dhinakaran Pandiyan4c8d3512018-10-26 12:53:42 -070014582 u32 pixel_format, u64 fb_modifier)
Damien Lespiaub3218032015-02-27 11:15:18 +000014583{
Ville Syrjälä645d91f2018-09-07 18:24:03 +030014584 struct intel_crtc *crtc;
14585 struct intel_plane *plane;
Damien Lespiaub3218032015-02-27 11:15:18 +000014586
Ville Syrjälä645d91f2018-09-07 18:24:03 +030014587 /*
14588 * We assume the primary plane for pipe A has
14589 * the highest stride limits of them all.
14590 */
14591 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14592 plane = to_intel_plane(crtc->base.primary);
Ville Syrjäläac484962016-01-20 21:05:26 +020014593
Ville Syrjälä645d91f2018-09-07 18:24:03 +030014594 return plane->max_stride(plane, pixel_format, fb_modifier,
14595 DRM_MODE_ROTATE_0);
Damien Lespiaub3218032015-02-27 11:15:18 +000014596}
14597
/*
 * Fill in and register the intel_framebuffer @intel_fb wrapping @obj,
 * according to the addfb request in @mode_cmd. Validates the requested
 * format / modifier / pitch / offset combination against the object's
 * tiling state and hardware limits before registering the fb with the
 * DRM core.
 *
 * Note: @mode_cmd may be modified (modifier[0] is filled in from the
 * object's tiling for legacy, non-modifier addfb).
 *
 * Returns 0 on success or a negative error code; on failure the
 * framebuffer reference taken on @obj is dropped again.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 pitch_limit;
	unsigned int tiling, stride;
	int ret = -EINVAL; /* default error for the validation failures below */
	int i;

	/*
	 * Take a framebuffer reference on the object and snapshot its
	 * tiling/stride state under the object lock.
	 */
	i915_gem_object_lock(obj);
	obj->framebuffer_references++;
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* The format/modifier pair must be accepted by at least one plane. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
					   mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], pitch_limit);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* NV12 has minimum-size and 4-pixel alignment requirements. */
	if (fb->format->format == DRM_FORMAT_NV12 &&
	    (fb->width < SKL_MIN_YUV_420_SRC_W ||
	     fb->height < SKL_MIN_YUV_420_SRC_H ||
	     (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
		DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
		goto err;
	}

	/*
	 * Per-plane validation: every plane must use the same GEM handle
	 * as plane 0, and each pitch must meet the stride alignment rules.
	 */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);

		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
		    is_ccs_modifier(fb->modifier))
			stride_alignment *= 4;

		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	/* Hand the validated fb over to the DRM core. */
	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Undo the framebuffer reference taken at the top. */
	i915_gem_object_lock(obj);
	obj->framebuffer_references--;
	i915_gem_object_unlock(obj);
	return ret;
}
14740
Jesse Barnes79e53942008-11-07 14:24:08 -080014741static struct drm_framebuffer *
14742intel_user_framebuffer_create(struct drm_device *dev,
14743 struct drm_file *filp,
Ville Syrjälä1eb83452015-11-11 19:11:29 +020014744 const struct drm_mode_fb_cmd2 *user_mode_cmd)
Jesse Barnes79e53942008-11-07 14:24:08 -080014745{
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014746 struct drm_framebuffer *fb;
Chris Wilson05394f32010-11-08 19:18:58 +000014747 struct drm_i915_gem_object *obj;
Ville Syrjälä76dc3762015-11-11 19:11:28 +020014748 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
Jesse Barnes79e53942008-11-07 14:24:08 -080014749
Chris Wilson03ac0642016-07-20 13:31:51 +010014750 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14751 if (!obj)
Chris Wilsoncce13ff2010-08-08 13:36:38 +010014752 return ERR_PTR(-ENOENT);
Jesse Barnes79e53942008-11-07 14:24:08 -080014753
Chris Wilson24dbf512017-02-15 10:59:18 +000014754 fb = intel_framebuffer_create(obj, &mode_cmd);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014755 if (IS_ERR(fb))
Chris Wilsonf0cd5182016-10-28 13:58:43 +010014756 i915_gem_object_put(obj);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014757
14758 return fb;
Jesse Barnes79e53942008-11-07 14:24:08 -080014759}
14760
Chris Wilson778e23a2016-12-05 14:29:39 +000014761static void intel_atomic_state_free(struct drm_atomic_state *state)
14762{
14763 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14764
14765 drm_atomic_state_default_release(state);
14766
14767 i915_sw_fence_fini(&intel_state->commit_ready);
14768
14769 kfree(state);
14770}
14771
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014772static enum drm_mode_status
14773intel_mode_valid(struct drm_device *dev,
14774 const struct drm_display_mode *mode)
14775{
Ville Syrjäläad77c532018-06-15 20:44:05 +030014776 struct drm_i915_private *dev_priv = to_i915(dev);
14777 int hdisplay_max, htotal_max;
14778 int vdisplay_max, vtotal_max;
14779
Ville Syrjäläe4dd27a2018-05-24 15:54:03 +030014780 /*
14781 * Can't reject DBLSCAN here because Xorg ddxen can add piles
14782 * of DBLSCAN modes to the output's mode list when they detect
14783 * the scaling mode property on the connector. And they don't
14784 * ask the kernel to validate those modes in any way until
14785 * modeset time at which point the client gets a protocol error.
14786 * So in order to not upset those clients we silently ignore the
14787 * DBLSCAN flag on such connectors. For other connectors we will
14788 * reject modes with the DBLSCAN flag in encoder->compute_config().
14789 * And we always reject DBLSCAN modes in connector->mode_valid()
14790 * as we never want such modes on the connector's mode list.
14791 */
14792
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014793 if (mode->vscan > 1)
14794 return MODE_NO_VSCAN;
14795
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014796 if (mode->flags & DRM_MODE_FLAG_HSKEW)
14797 return MODE_H_ILLEGAL;
14798
14799 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14800 DRM_MODE_FLAG_NCSYNC |
14801 DRM_MODE_FLAG_PCSYNC))
14802 return MODE_HSYNC;
14803
14804 if (mode->flags & (DRM_MODE_FLAG_BCAST |
14805 DRM_MODE_FLAG_PIXMUX |
14806 DRM_MODE_FLAG_CLKDIV2))
14807 return MODE_BAD;
14808
Ville Syrjäläad77c532018-06-15 20:44:05 +030014809 if (INTEL_GEN(dev_priv) >= 9 ||
14810 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14811 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
14812 vdisplay_max = 4096;
14813 htotal_max = 8192;
14814 vtotal_max = 8192;
14815 } else if (INTEL_GEN(dev_priv) >= 3) {
14816 hdisplay_max = 4096;
14817 vdisplay_max = 4096;
14818 htotal_max = 8192;
14819 vtotal_max = 8192;
14820 } else {
14821 hdisplay_max = 2048;
14822 vdisplay_max = 2048;
14823 htotal_max = 4096;
14824 vtotal_max = 4096;
14825 }
14826
14827 if (mode->hdisplay > hdisplay_max ||
14828 mode->hsync_start > htotal_max ||
14829 mode->hsync_end > htotal_max ||
14830 mode->htotal > htotal_max)
14831 return MODE_H_ILLEGAL;
14832
14833 if (mode->vdisplay > vdisplay_max ||
14834 mode->vsync_start > vtotal_max ||
14835 mode->vsync_end > vtotal_max ||
14836 mode->vtotal > vtotal_max)
14837 return MODE_V_ILLEGAL;
14838
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014839 return MODE_OK;
14840}
14841
/*
 * Mode config vfuncs for the i915 KMS driver: framebuffer creation,
 * format info, mode validation and the atomic state machinery.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14853
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform vfuncs for pipe config readout, initial
 * plane config takeover, CRTC clock computation and CRTC
 * enable/disable, plus the FDI link training and CRTC update
 * implementations. The if/else ladders are ordered newest platform
 * first, so the first matching check wins.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		/* Gen9+: HSW-style pipe readout, SKL initial plane readout. */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* Pre-gen9 DDI platforms. */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* PCH-split platforms use the ironlake hooks. */
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		/* Remaining gen3/gen4 platforms. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* Gen2. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training vfunc, only set on platforms that have FDI. */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;
}
14946
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/*
	 * Set bit 5 of VGA sequencer register SR01 via legacy I/O before
	 * disabling the VGA plane, per the listed workaround. The VGA
	 * arbiter lock serializes the index/data register access pair
	 * against other legacy VGA users.
	 */
	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	/* NOTE(review): 300us settle delay before the disable — presumably
	 * required by the workaround; confirm against bspec. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg); /* flush the write before returning */
}
14965
/*
 * Read the current cdclk configuration out of the hardware and seed the
 * software cdclk bookkeeping from it, so subsequent modesets compute
 * changes relative to what is actually programmed (e.g. by the BIOS).
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
	/* logical and actual state both start out identical to the hw state */
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
14974
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	/* Standard deadlock-avoidance dance: back off and retry on -EDEADLK. */
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Copy the computed watermarks into the live crtc state. */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15061
Chris Wilson58ecd9d2017-11-05 13:49:05 +000015062static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15063{
Lucas De Marchicf819ef2018-12-12 10:10:43 -080015064 if (IS_GEN(dev_priv, 5)) {
Chris Wilson58ecd9d2017-11-05 13:49:05 +000015065 u32 fdi_pll_clk =
15066 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15067
15068 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
Lucas De Marchicf819ef2018-12-12 10:10:43 -080015069 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
Chris Wilson58ecd9d2017-11-05 13:49:05 +000015070 dev_priv->fdi_pll_freq = 270000;
15071 } else {
15072 return;
15073 }
15074
15075 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15076}
15077
/*
 * Perform an initial atomic commit covering every active crtc and its
 * planes, so that the derived software state is fully computed right
 * after hardware state takeover. Returns 0 or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	drm_for_each_crtc(crtc, dev) {
		/* Pulls the crtc into the atomic state, taking its lock. */
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* On lock contention, reset the state and redo the whole build. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
15133
Ville Syrjäläb079bd172016-10-25 18:58:02 +030015134int intel_modeset_init(struct drm_device *dev)
Jesse Barnes79e53942008-11-07 14:24:08 -080015135{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +030015136 struct drm_i915_private *dev_priv = to_i915(dev);
15137 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Damien Lespiau8cc87b72014-03-03 17:31:44 +000015138 enum pipe pipe;
Jesse Barnes46f297f2014-03-07 08:57:48 -080015139 struct intel_crtc *crtc;
Azhar Shaikh516a49c2018-07-06 11:37:30 -070015140 int ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080015141
Ville Syrjälä757fffc2017-11-13 15:36:22 +020015142 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15143
Jesse Barnes79e53942008-11-07 14:24:08 -080015144 drm_mode_config_init(dev);
15145
15146 dev->mode_config.min_width = 0;
15147 dev->mode_config.min_height = 0;
15148
Dave Airlie019d96c2011-09-29 16:20:42 +010015149 dev->mode_config.preferred_depth = 24;
15150 dev->mode_config.prefer_shadow = 1;
15151
Tvrtko Ursulin25bab382015-02-10 17:16:16 +000015152 dev->mode_config.allow_fb_modifiers = true;
15153
Laurent Pincharte6ecefa2012-05-17 13:27:23 +020015154 dev->mode_config.funcs = &intel_mode_funcs;
Jesse Barnes79e53942008-11-07 14:24:08 -080015155
Andrea Arcangeli400c19d2017-04-07 01:23:45 +020015156 init_llist_head(&dev_priv->atomic_helper.free_list);
Chris Wilsoneb955ee2017-01-23 21:29:39 +000015157 INIT_WORK(&dev_priv->atomic_helper.free_work,
Chris Wilsonba318c62017-02-02 20:47:41 +000015158 intel_atomic_helper_free_state_worker);
Chris Wilsoneb955ee2017-01-23 21:29:39 +000015159
Jani Nikula27a981b2018-10-17 12:35:39 +030015160 intel_init_quirks(dev_priv);
Jesse Barnesb690e962010-07-19 13:53:12 -070015161
José Roberto de Souzaacde44b2018-11-07 16:16:45 -080015162 intel_fbc_init(dev_priv);
15163
Ville Syrjälä62d75df2016-10-31 22:37:25 +020015164 intel_init_pm(dev_priv);
Eugeni Dodonov1fa61102012-04-18 15:29:26 -030015165
Lukas Wunner69f92f62015-07-15 13:57:35 +020015166 /*
15167 * There may be no VBT; and if the BIOS enabled SSC we can
15168 * just keep using it to avoid unnecessary flicker. Whereas if the
15169 * BIOS isn't using it, don't assume it will work even if the VBT
15170 * indicates as much.
15171 */
Tvrtko Ursulin6e266952016-10-13 11:02:53 +010015172 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
Lukas Wunner69f92f62015-07-15 13:57:35 +020015173 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15174 DREF_SSC1_ENABLE);
15175
15176 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15177 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15178 bios_lvds_use_ssc ? "en" : "dis",
15179 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15180 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15181 }
15182 }
15183
Ville Syrjäläad77c532018-06-15 20:44:05 +030015184 /* maximum framebuffer dimensions */
Lucas De Marchicf819ef2018-12-12 10:10:43 -080015185 if (IS_GEN(dev_priv, 2)) {
Chris Wilsona6c45cf2010-09-17 00:32:17 +010015186 dev->mode_config.max_width = 2048;
15187 dev->mode_config.max_height = 2048;
Lucas De Marchicf819ef2018-12-12 10:10:43 -080015188 } else if (IS_GEN(dev_priv, 3)) {
Keith Packard5e4d6fa2009-07-12 23:53:17 -070015189 dev->mode_config.max_width = 4096;
15190 dev->mode_config.max_height = 4096;
Jesse Barnes79e53942008-11-07 14:24:08 -080015191 } else {
Chris Wilsona6c45cf2010-09-17 00:32:17 +010015192 dev->mode_config.max_width = 8192;
15193 dev->mode_config.max_height = 8192;
Jesse Barnes79e53942008-11-07 14:24:08 -080015194 }
Damien Lespiau068be562014-03-28 14:17:49 +000015195
Jani Nikula2a307c22016-11-30 17:43:04 +020015196 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15197 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
Ville Syrjälädc41c152014-08-13 11:57:05 +030015198 dev->mode_config.cursor_height = 1023;
Lucas De Marchicf819ef2018-12-12 10:10:43 -080015199 } else if (IS_GEN(dev_priv, 2)) {
Ville Syrjälä98fac1d2018-06-15 20:44:04 +030015200 dev->mode_config.cursor_width = 64;
15201 dev->mode_config.cursor_height = 64;
Damien Lespiau068be562014-03-28 14:17:49 +000015202 } else {
Ville Syrjälä98fac1d2018-06-15 20:44:04 +030015203 dev->mode_config.cursor_width = 256;
15204 dev->mode_config.cursor_height = 256;
Damien Lespiau068be562014-03-28 14:17:49 +000015205 }
15206
Matthew Auld73ebd502017-12-11 15:18:20 +000015207 dev->mode_config.fb_base = ggtt->gmadr.start;
Jesse Barnes79e53942008-11-07 14:24:08 -080015208
Zhao Yakui28c97732009-10-09 11:39:41 +080015209 DRM_DEBUG_KMS("%d display pipe%s available.\n",
Tvrtko Ursulinb7f05d42016-11-09 11:30:45 +000015210 INTEL_INFO(dev_priv)->num_pipes,
15211 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
Jesse Barnes79e53942008-11-07 14:24:08 -080015212
Damien Lespiau055e3932014-08-18 13:49:10 +010015213 for_each_pipe(dev_priv, pipe) {
Ville Syrjälä5ab0d852016-10-31 22:37:11 +020015214 ret = intel_crtc_init(dev_priv, pipe);
Ville Syrjäläb079bd172016-10-25 18:58:02 +030015215 if (ret) {
15216 drm_mode_config_cleanup(dev);
15217 return ret;
15218 }
Jesse Barnes79e53942008-11-07 14:24:08 -080015219 }
15220
Daniel Vettere72f9fb2013-06-05 13:34:06 +020015221 intel_shared_dpll_init(dev);
Chris Wilson58ecd9d2017-11-05 13:49:05 +000015222 intel_update_fdi_pll_freq(dev_priv);
Jesse Barnesee7b9f92012-04-20 17:11:53 +010015223
Ville Syrjälä5be6e332017-02-20 16:04:43 +020015224 intel_update_czclk(dev_priv);
15225 intel_modeset_init_hw(dev);
15226
Ville Syrjäläb2045352016-05-13 23:41:27 +030015227 if (dev_priv->max_cdclk_freq == 0)
Ville Syrjälä4c75b942016-10-31 22:37:12 +020015228 intel_update_max_cdclk(dev_priv);
Ville Syrjäläb2045352016-05-13 23:41:27 +030015229
Jesse Barnes9cce37f2010-08-13 15:11:26 -070015230 /* Just disable it once at startup */
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015231 i915_disable_vga(dev_priv);
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020015232 intel_setup_outputs(dev_priv);
Chris Wilson11be49e2012-11-15 11:32:20 +000015233
Daniel Vetter6e9f7982014-05-29 23:54:47 +020015234 drm_modeset_lock_all(dev);
Ville Syrjäläaecd36b2017-06-01 17:36:13 +030015235 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
Daniel Vetter6e9f7982014-05-29 23:54:47 +020015236 drm_modeset_unlock_all(dev);
Jesse Barnes46f297f2014-03-07 08:57:48 -080015237
Damien Lespiaud3fcc802014-05-13 23:32:22 +010015238 for_each_intel_crtc(dev, crtc) {
Maarten Lankhorsteeebeac2015-07-14 12:33:29 +020015239 struct intel_initial_plane_config plane_config = {};
15240
Jesse Barnes46f297f2014-03-07 08:57:48 -080015241 if (!crtc->active)
15242 continue;
15243
Jesse Barnes46f297f2014-03-07 08:57:48 -080015244 /*
Jesse Barnes46f297f2014-03-07 08:57:48 -080015245 * Note that reserving the BIOS fb up front prevents us
15246 * from stuffing other stolen allocations like the ring
15247 * on top. This prevents some ugliness at boot time, and
15248 * can even allow for smooth boot transitions if the BIOS
15249 * fb is large enough for the active pipe configuration.
15250 */
Maarten Lankhorsteeebeac2015-07-14 12:33:29 +020015251 dev_priv->display.get_initial_plane_config(crtc,
15252 &plane_config);
15253
15254 /*
15255 * If the fb is shared between multiple heads, we'll
15256 * just get the first one.
15257 */
15258 intel_find_initial_plane_obj(crtc, &plane_config);
Jesse Barnes46f297f2014-03-07 08:57:48 -080015259 }
Matt Roperd93c0372015-12-03 11:37:41 -080015260
15261 /*
15262 * Make sure hardware watermarks really match the state we read out.
15263 * Note that we need to do this after reconstructing the BIOS fb's
15264 * since the watermark calculation done here will use pstate->fb.
15265 */
Ville Syrjälä602ae832017-03-02 19:15:02 +020015266 if (!HAS_GMCH_DISPLAY(dev_priv))
15267 sanitize_watermarks(dev);
Ville Syrjäläb079bd172016-10-25 18:58:02 +030015268
Azhar Shaikh516a49c2018-07-06 11:37:30 -070015269 /*
15270 * Force all active planes to recompute their states. So that on
15271 * mode_setcrtc after probe, all the intel_plane_state variables
15272 * are already calculated and there is no assert_plane warnings
15273 * during bootup.
15274 */
15275 ret = intel_initial_commit(dev);
15276 if (ret)
15277 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15278
Ville Syrjäläb079bd172016-10-25 18:58:02 +030015279 return 0;
Chris Wilson2c7111d2011-03-29 10:40:27 +010015280}
Jesse Barnesd5bb0812011-01-05 12:01:26 -080015281
/*
 * Force-enable pipe @pipe with a fixed 640x480@60Hz timing. Used by the
 * force-pipe quirk (see the debug message below). The exact register
 * programming order below matters; do not reorder.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity-check the divider setup against the 48 MHz reference. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	/* Preserve only the DVO 2x bit from the current DPLL value. */
	fp = i9xx_dpll_compute_fp(&clock);
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Program the fixed 640x480 timings (all values are N-1 encoded). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe is actually running before returning. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15351
/*
 * Force-disable pipe @pipe (counterpart of i830_enable_pipe() for the
 * force-pipe quirk). All planes and cursors must already be off.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* Complain if anything is still scanning out of this pipe. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Wait until scanout has actually stopped before killing the DPLL. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15373
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015374static void
15375intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15376{
15377 struct intel_crtc *crtc;
Daniel Vetterfa555832012-10-10 23:14:00 +020015378
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015379 if (INTEL_GEN(dev_priv) >= 4)
15380 return;
Daniel Vetterfa555832012-10-10 23:14:00 +020015381
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015382 for_each_intel_crtc(&dev_priv->drm, crtc) {
15383 struct intel_plane *plane =
15384 to_intel_plane(crtc->base.primary);
Ville Syrjälä62358aa2018-10-03 17:50:17 +030015385 struct intel_crtc *plane_crtc;
15386 enum pipe pipe;
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015387
Ville Syrjälä62358aa2018-10-03 17:50:17 +030015388 if (!plane->get_hw_state(plane, &pipe))
15389 continue;
15390
15391 if (pipe == crtc->pipe)
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015392 continue;
15393
Ville Syrjälä7a4a2a42018-10-03 17:50:52 +030015394 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
15395 plane->base.base.id, plane->base.name);
Ville Syrjälä62358aa2018-10-03 17:50:17 +030015396
15397 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
15398 intel_plane_disable_noatomic(plane_crtc, plane);
Ville Syrjäläb1e01592017-11-17 21:19:09 +020015399 }
Daniel Vetterfa555832012-10-10 23:14:00 +020015400}
15401
Ville Syrjälä02e93c32015-08-26 19:39:19 +030015402static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15403{
15404 struct drm_device *dev = crtc->base.dev;
15405 struct intel_encoder *encoder;
15406
15407 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15408 return true;
15409
15410 return false;
15411}
15412
Maarten Lankhorst496b0fc2016-08-23 16:18:07 +020015413static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15414{
15415 struct drm_device *dev = encoder->base.dev;
15416 struct intel_connector *connector;
15417
15418 for_each_connector_on_encoder(dev, &encoder->base, connector)
15419 return connector;
15420
15421 return NULL;
15422}
15423
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015424static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
Ville Syrjäläecf837d92017-10-10 15:55:56 +030015425 enum pipe pch_transcoder)
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015426{
15427 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
Ville Syrjäläecf837d92017-10-10 15:55:56 +030015428 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015429}
15430
/*
 * Bring a single crtc's state into a known-good shape after hardware
 * state readout: clear BIOS debug leftovers, turn off stray non-primary
 * planes, disable the pipe if no encoder drives it, and initialize the
 * FIFO underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (crtc_state->base.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15494
/*
 * Fix up an encoder whose software state disagrees with the hardware:
 * an encoder that has connectors but no active pipe is manually shut
 * down and its links are cleared.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			/* disable/post_disable hooks are optional per encoder */
			if (encoder->disable)
				encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	/* gen11+ additionally needs its DDI clock/PLL mapping sanitized */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
15543
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015544void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
Krzysztof Mazur0fde9012012-12-19 11:03:41 +010015545{
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010015546 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
Krzysztof Mazur0fde9012012-12-19 11:03:41 +010015547
Imre Deak04098752014-02-18 00:02:16 +020015548 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15549 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015550 i915_disable_vga(dev_priv);
Imre Deak04098752014-02-18 00:02:16 +020015551 }
15552}
15553
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses.
	 */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_VGA);
	/* Power well off: nothing can be scanning out VGA, so nothing to do. */
	if (!wakeref)
		return;

	i915_redisable_vga_power_on(dev_priv);

	/* Drop the wakeref taken above; must balance the _get_if_enabled(). */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
}
15576
/* FIXME read out full plane state for all planes */
/*
 * Read back the hardware enable state of every plane and record which
 * pipe each plane is currently assigned to, then fix up the per-crtc
 * active plane bookkeeping to match what was read out.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* Default in case .get_hw_state() doesn't set the pipe. */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* Attach the plane to the crtc the hardware says it's on. */
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	/* Second pass: reconcile each crtc's active-plane mask. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
15609
/*
 * Read the current display hardware state (crtcs, planes, shared DPLLs,
 * encoders and connectors) back into the software state structures.
 *
 * The readout order matters: crtc state first (it is rebuilt from
 * scratch), then planes, PLLs and encoders which hang state off the
 * crtcs, and finally the derived per-crtc mode/cdclk bookkeeping.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Throw away the stale state and rebuild it from the HW. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	readout_plane_state(dev_priv);

	/* Rebuild each shared DPLL's on/off state and user crtc mask. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							 &pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Link each active encoder to the crtc the hardware reports. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Sync connector dpms/encoder links with the readout result. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derive modes, cdclk and voltage level from the read-out state. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				/* Clamp a bogus readout to a safe value. */
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc_state);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
15762
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +020015763static void
15764get_encoder_power_domains(struct drm_i915_private *dev_priv)
15765{
15766 struct intel_encoder *encoder;
15767
15768 for_each_intel_encoder(&dev_priv->drm, encoder) {
15769 u64 get_domains;
15770 enum intel_display_power_domain domain;
Imre Deak52528052018-06-21 21:44:49 +030015771 struct intel_crtc_state *crtc_state;
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +020015772
15773 if (!encoder->get_power_domains)
15774 continue;
15775
Imre Deak52528052018-06-21 21:44:49 +030015776 /*
Imre Deakb79ebe72018-07-05 15:26:54 +030015777 * MST-primary and inactive encoders don't have a crtc state
15778 * and neither of these require any power domain references.
Imre Deak52528052018-06-21 21:44:49 +030015779 */
Imre Deakb79ebe72018-07-05 15:26:54 +030015780 if (!encoder->base.crtc)
15781 continue;
Imre Deak52528052018-06-21 21:44:49 +030015782
Imre Deakb79ebe72018-07-05 15:26:54 +030015783 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
Imre Deak52528052018-06-21 21:44:49 +030015784 get_domains = encoder->get_power_domains(encoder, crtc_state);
Ander Conselvan de Oliveira62b69562017-02-24 16:19:59 +020015785 for_each_power_domain(domain, get_domains)
15786 intel_display_power_get(dev_priv, domain);
15787 }
15788}
15789
/*
 * Apply early display workarounds that must be in place before any
 * other display hardware state is touched.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
15806
Ville Syrjälä3aefb672018-11-08 16:36:35 +020015807static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
15808 enum port port, i915_reg_t hdmi_reg)
15809{
15810 u32 val = I915_READ(hdmi_reg);
15811
15812 if (val & SDVO_ENABLE ||
15813 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
15814 return;
15815
15816 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
15817 port_name(port));
15818
15819 val &= ~SDVO_PIPE_SEL_MASK;
15820 val |= SDVO_PIPE_SEL(PIPE_A);
15821
15822 I915_WRITE(hdmi_reg, val);
15823}
15824
15825static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
15826 enum port port, i915_reg_t dp_reg)
15827{
15828 u32 val = I915_READ(dp_reg);
15829
15830 if (val & DP_PORT_EN ||
15831 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
15832 return;
15833
15834 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
15835 port_name(port));
15836
15837 val &= ~DP_PIPE_SEL_MASK;
15838 val |= DP_PIPE_SEL(PIPE_A);
15839
15840 I915_WRITE(dp_reg, val);
15841}
15842
/* Sanitize the transcoder select bits on all PCH DP and HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
15865
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 *
 * The sequence here is deliberate: apply early workarounds, read the
 * HW state, then sanitize ports/planes/encoders/crtcs (vblank
 * interrupts must be restored before plane sanitization), and finally
 * release unused PLLs and verify no crtc holds stray power domains.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	/* Hold everything powered up for the duration of the readout. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		drm_crtc_vblank_reset(&crtc->base);

		if (crtc->base.state->active)
			drm_crtc_vblank_on(&crtc->base);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc_state,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off shared DPLLs that are on but unused by any crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Platform-specific watermark readout (and sanitize where needed). */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/* No crtc should be left holding power domains at this point. */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	intel_fbc_init_pipe_state(dev_priv);
}
Ville Syrjälä7d0bc1e2013-09-16 17:38:33 +030015954
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015955void intel_display_resume(struct drm_device *dev)
15956{
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015957 struct drm_i915_private *dev_priv = to_i915(dev);
15958 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
15959 struct drm_modeset_acquire_ctx ctx;
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015960 int ret;
Daniel Vetterf30da182013-04-11 20:22:50 +020015961
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015962 dev_priv->modeset_restore_state = NULL;
Maarten Lankhorst73974892016-08-05 23:28:27 +030015963 if (state)
15964 state->acquire_ctx = &ctx;
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015965
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015966 drm_modeset_acquire_init(&ctx, 0);
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015967
Maarten Lankhorst73974892016-08-05 23:28:27 +030015968 while (1) {
15969 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15970 if (ret != -EDEADLK)
15971 break;
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015972
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015973 drm_modeset_backoff(&ctx);
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015974 }
15975
Maarten Lankhorst73974892016-08-05 23:28:27 +030015976 if (!ret)
Maarten Lankhorst581e49f2017-01-16 10:37:38 +010015977 ret = __intel_display_resume(dev, state, &ctx);
Maarten Lankhorst73974892016-08-05 23:28:27 +030015978
Kumar, Mahesh2503a0f2017-08-17 19:15:28 +053015979 intel_enable_ipc(dev_priv);
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015980 drm_modeset_drop_locks(&ctx);
15981 drm_modeset_acquire_fini(&ctx);
Maarten Lankhorst043e9bd2015-07-13 16:30:25 +020015982
Chris Wilson08536952016-10-14 13:18:18 +010015983 if (ret)
Maarten Lankhorste2c8b872016-02-16 10:06:14 +010015984 DRM_ERROR("Restoring old state failed with %i\n", ret);
Chris Wilson3c5e37f2017-01-15 12:58:25 +000015985 if (state)
15986 drm_atomic_state_put(state);
Chris Wilson2c7111d2011-03-29 10:40:27 +010015987}
15988
/*
 * Cancel all per-connector work (modeset retry, HDCP check/prop work)
 * that may have been queued by hotplug handling.
 */
static void intel_hpd_poll_fini(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		/* HDCP work exists only on connectors with an hdcp shim. */
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
16006
/*
 * Tear down modeset state on driver unload. The teardown order is
 * load-bearing: pending work is flushed first, interrupts and polling
 * are shut down before anything that could race with them, and the
 * modeset workqueue is destroyed only after drm_mode_config_cleanup().
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_workqueue(dev_priv->modeset_wq);

	/* Nothing may be left on the atomic-helper free list. */
	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_overlay_cleanup(dev_priv);

	intel_teardown_gmbus(dev_priv);

	destroy_workqueue(dev_priv->modeset_wq);

	intel_fbc_cleanup_cfb(dev_priv);
}
16049
Dave Airlie28d52042009-09-21 14:33:58 +100016050/*
16051 * set vga decode state - true == enable VGA decode
16052 */
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000016053int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
Dave Airlie28d52042009-09-21 14:33:58 +100016054{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000016055 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
Dave Airlie28d52042009-09-21 14:33:58 +100016056 u16 gmch_ctrl;
16057
Chris Wilson75fa0412014-02-07 18:37:02 -020016058 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16059 DRM_ERROR("failed to read control word\n");
16060 return -EIO;
16061 }
16062
Chris Wilsonc0cc8a52014-02-07 18:37:03 -020016063 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16064 return 0;
16065
Dave Airlie28d52042009-09-21 14:33:58 +100016066 if (state)
16067 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16068 else
16069 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
Chris Wilson75fa0412014-02-07 18:37:02 -020016070
16071 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16072 DRM_ERROR("failed to write control word\n");
16073 return -EIO;
16074 }
16075
Dave Airlie28d52042009-09-21 14:33:58 +100016076 return 0;
16077}
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016078
Chris Wilson98a2f412016-10-12 10:05:18 +010016079#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16080
/*
 * Snapshot of display-related registers captured on GPU error, one
 * entry per pipe/plane/cursor plus up to four transcoders.
 */
struct intel_display_error_state {

	/* HSW/BDW power well driver control register. */
	u32 power_well_driver;

	/* Number of valid entries in transcoder[] below. */
	int num_transcoders;

	/* Cursor registers, per pipe. */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	/* Pipe registers, per pipe. */
	struct intel_pipe_error_state {
		/* False if the pipe's power domain was off at capture time;
		 * the register fields below are then not filled in. */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* Primary plane registers, per pipe. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	/* Transcoder timing registers; [3] is eDP on DDI platforms. */
	struct intel_transcoder_error_state {
		/* As above: registers valid only if the domain was on. */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
16124
16125struct intel_display_error_state *
Chris Wilsonc0336662016-05-06 15:40:21 +010016126intel_display_capture_error_state(struct drm_i915_private *dev_priv)
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016127{
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016128 struct intel_display_error_state *error;
Chris Wilson63b66e52013-08-08 15:12:06 +020016129 int transcoders[] = {
16130 TRANSCODER_A,
16131 TRANSCODER_B,
16132 TRANSCODER_C,
16133 TRANSCODER_EDP,
16134 };
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016135 int i;
16136
José Roberto de Souzae1bf0942018-11-30 15:20:47 -080016137 if (!HAS_DISPLAY(dev_priv))
Chris Wilson63b66e52013-08-08 15:12:06 +020016138 return NULL;
16139
Paulo Zanoni9d1cb912013-11-01 13:32:08 -020016140 error = kzalloc(sizeof(*error), GFP_ATOMIC);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016141 if (error == NULL)
16142 return NULL;
16143
Chris Wilsonc0336662016-05-06 15:40:21 +010016144 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Imre Deak75e39682018-08-06 12:58:39 +030016145 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
Paulo Zanoniff57f1b2013-05-03 12:15:37 -030016146
Damien Lespiau055e3932014-08-18 13:49:10 +010016147 for_each_pipe(dev_priv, i) {
Imre Deakddf9c532013-11-27 22:02:02 +020016148 error->pipe[i].power_domain_on =
Daniel Vetterf458ebb2014-09-30 10:56:39 +020016149 __intel_display_power_is_enabled(dev_priv,
16150 POWER_DOMAIN_PIPE(i));
Imre Deakddf9c532013-11-27 22:02:02 +020016151 if (!error->pipe[i].power_domain_on)
Paulo Zanoni9d1cb912013-11-01 13:32:08 -020016152 continue;
16153
Ville Syrjälä5efb3e22014-04-09 13:28:53 +030016154 error->cursor[i].control = I915_READ(CURCNTR(i));
16155 error->cursor[i].position = I915_READ(CURPOS(i));
16156 error->cursor[i].base = I915_READ(CURBASE(i));
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016157
16158 error->plane[i].control = I915_READ(DSPCNTR(i));
16159 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
Chris Wilsonc0336662016-05-06 15:40:21 +010016160 if (INTEL_GEN(dev_priv) <= 3) {
Paulo Zanoni51889b32013-03-06 20:03:13 -030016161 error->plane[i].size = I915_READ(DSPSIZE(i));
Paulo Zanoni80ca3782013-03-22 14:20:57 -030016162 error->plane[i].pos = I915_READ(DSPPOS(i));
16163 }
Chris Wilsonc0336662016-05-06 15:40:21 +010016164 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
Paulo Zanonica291362013-03-06 20:03:14 -030016165 error->plane[i].addr = I915_READ(DSPADDR(i));
Chris Wilsonc0336662016-05-06 15:40:21 +010016166 if (INTEL_GEN(dev_priv) >= 4) {
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016167 error->plane[i].surface = I915_READ(DSPSURF(i));
16168 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16169 }
16170
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016171 error->pipe[i].source = I915_READ(PIPESRC(i));
Imre Deakf301b1e2014-04-18 15:55:04 +030016172
Chris Wilsonc0336662016-05-06 15:40:21 +010016173 if (HAS_GMCH_DISPLAY(dev_priv))
Imre Deakf301b1e2014-04-18 15:55:04 +030016174 error->pipe[i].stat = I915_READ(PIPESTAT(i));
Chris Wilson63b66e52013-08-08 15:12:06 +020016175 }
16176
Jani Nikula4d1de972016-03-18 17:05:42 +020016177 /* Note: this does not include DSI transcoders. */
Chris Wilsonc0336662016-05-06 15:40:21 +010016178 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +030016179 if (HAS_DDI(dev_priv))
Chris Wilson63b66e52013-08-08 15:12:06 +020016180 error->num_transcoders++; /* Account for eDP. */
16181
16182 for (i = 0; i < error->num_transcoders; i++) {
16183 enum transcoder cpu_transcoder = transcoders[i];
16184
Imre Deakddf9c532013-11-27 22:02:02 +020016185 error->transcoder[i].power_domain_on =
Daniel Vetterf458ebb2014-09-30 10:56:39 +020016186 __intel_display_power_is_enabled(dev_priv,
Paulo Zanoni38cc1da2013-12-20 15:09:41 -020016187 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
Imre Deakddf9c532013-11-27 22:02:02 +020016188 if (!error->transcoder[i].power_domain_on)
Paulo Zanoni9d1cb912013-11-01 13:32:08 -020016189 continue;
16190
Chris Wilson63b66e52013-08-08 15:12:06 +020016191 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16192
16193 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16194 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16195 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16196 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16197 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16198 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16199 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016200 }
16201
16202 return error;
16203}
16204
/* Shorthand for appending formatted text to the error-state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16206
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016207void
Mika Kuoppalaedc3d882013-05-23 13:55:35 +030016208intel_display_print_error_state(struct drm_i915_error_state_buf *m,
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016209 struct intel_display_error_state *error)
16210{
Chris Wilson5a4c6f12017-02-14 16:46:11 +000016211 struct drm_i915_private *dev_priv = m->i915;
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016212 int i;
16213
Chris Wilson63b66e52013-08-08 15:12:06 +020016214 if (!error)
16215 return;
16216
Tvrtko Ursulinb7f05d42016-11-09 11:30:45 +000016217 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
Tvrtko Ursulin86527442016-10-13 11:03:00 +010016218 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
Mika Kuoppalaedc3d882013-05-23 13:55:35 +030016219 err_printf(m, "PWR_WELL_CTL2: %08x\n",
Paulo Zanoniff57f1b2013-05-03 12:15:37 -030016220 error->power_well_driver);
Damien Lespiau055e3932014-08-18 13:49:10 +010016221 for_each_pipe(dev_priv, i) {
Mika Kuoppalaedc3d882013-05-23 13:55:35 +030016222 err_printf(m, "Pipe [%d]:\n", i);
Imre Deakddf9c532013-11-27 22:02:02 +020016223 err_printf(m, " Power: %s\n",
Jani Nikula87ad3212016-01-14 12:53:34 +020016224 onoff(error->pipe[i].power_domain_on));
Mika Kuoppalaedc3d882013-05-23 13:55:35 +030016225 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
Imre Deakf301b1e2014-04-18 15:55:04 +030016226 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016227
Mika Kuoppalaedc3d882013-05-23 13:55:35 +030016228 err_printf(m, "Plane [%d]:\n", i);
16229 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
16230 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
Tvrtko Ursulin5f56d5f2016-11-16 08:55:37 +000016231 if (INTEL_GEN(dev_priv) <= 3) {
Mika Kuoppalaedc3d882013-05-23 13:55:35 +030016232 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
16233 err_printf(m, " POS: %08x\n", error->plane[i].pos);
Paulo Zanoni80ca3782013-03-22 14:20:57 -030016234 }
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +010016235 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
Mika Kuoppalaedc3d882013-05-23 13:55:35 +030016236 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
Tvrtko Ursulin5f56d5f2016-11-16 08:55:37 +000016237 if (INTEL_GEN(dev_priv) >= 4) {
Mika Kuoppalaedc3d882013-05-23 13:55:35 +030016238 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
16239 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016240 }
16241
Mika Kuoppalaedc3d882013-05-23 13:55:35 +030016242 err_printf(m, "Cursor [%d]:\n", i);
16243 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
16244 err_printf(m, " POS: %08x\n", error->cursor[i].position);
16245 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016246 }
Chris Wilson63b66e52013-08-08 15:12:06 +020016247
16248 for (i = 0; i < error->num_transcoders; i++) {
Jani Nikulada205632016-03-15 21:51:10 +020016249 err_printf(m, "CPU transcoder: %s\n",
Chris Wilson63b66e52013-08-08 15:12:06 +020016250 transcoder_name(error->transcoder[i].cpu_transcoder));
Imre Deakddf9c532013-11-27 22:02:02 +020016251 err_printf(m, " Power: %s\n",
Jani Nikula87ad3212016-01-14 12:53:34 +020016252 onoff(error->transcoder[i].power_domain_on));
Chris Wilson63b66e52013-08-08 15:12:06 +020016253 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
16254 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
16255 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
16256 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
16257 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
16258 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
16259 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
16260 }
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000016261}
Chris Wilson98a2f412016-10-12 10:05:18 +010016262
16263#endif