blob: 3999d02cd18ce0f4420b2ea99c26c068a7a442bf [file] [log] [blame]
Jesse Barnes79e53942008-11-07 14:24:08 -08001/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
Jesse Barnesc1c7af62009-09-10 15:28:03 -070027#include <linux/module.h>
28#include <linux/input.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080029#include <linux/i2c.h>
Shaohua Li7662c8b2009-06-26 11:23:55 +080030#include <linux/kernel.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090031#include <linux/slab.h>
Jesse Barnes9cce37f2010-08-13 15:11:26 -070032#include <linux/vgaarb.h>
Wu Fengguange0dac652011-09-05 14:25:34 +080033#include <drm/drm_edid.h>
David Howells760285e2012-10-02 18:01:07 +010034#include <drm/drmP.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080035#include "intel_drv.h"
Chris Wilson5d723d72016-08-04 16:32:35 +010036#include "intel_frontbuffer.h"
David Howells760285e2012-10-02 18:01:07 +010037#include <drm/i915_drm.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080038#include "i915_drv.h"
Chris Wilson57822dc2017-02-22 11:40:48 +000039#include "i915_gem_clflush.h"
Imre Deakdb18b6a2016-03-24 12:41:40 +020040#include "intel_dsi.h"
Jesse Barnese5510fa2010-07-01 16:48:37 -070041#include "i915_trace.h"
Xi Ruoyao319c1d42015-03-12 20:16:32 +080042#include <drm/drm_atomic.h>
Matt Roperc196e1d2015-01-21 16:35:48 -080043#include <drm/drm_atomic_helper.h>
David Howells760285e2012-10-02 18:01:07 +010044#include <drm/drm_dp_helper.h>
45#include <drm/drm_crtc_helper.h>
Matt Roper465c1202014-05-29 08:06:54 -070046#include <drm/drm_plane_helper.h>
47#include <drm/drm_rect.h>
Daniel Vetter72fdb40c2018-09-05 15:57:11 +020048#include <drm/drm_atomic_uapi.h>
Keith Packardc0f372b32011-11-16 22:24:52 -080049#include <linux/dma_remapping.h>
Alex Goinsfd8e0582015-11-25 18:43:38 -080050#include <linux/reservation.h>
Jesse Barnes79e53942008-11-07 14:24:08 -080051
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/*
 * Framebuffer modifiers for i9xx-style planes: X-tiled or linear only.
 * The list is DRM_FORMAT_MOD_INVALID terminated, as DRM requires.
 */
static const uint64_t i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only take linear buffers (INVALID-terminated list). */
static const uint64_t cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
85
Jesse Barnesf1f644d2013-06-27 00:39:25 +030086static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020087 struct intel_crtc_state *pipe_config);
Ville Syrjälä18442d02013-09-13 16:00:08 +030088static void ironlake_pch_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020089 struct intel_crtc_state *pipe_config);
Jesse Barnesf1f644d2013-06-27 00:39:25 +030090
Chris Wilson24dbf512017-02-15 10:59:18 +000091static int intel_framebuffer_init(struct intel_framebuffer *ifb,
92 struct drm_i915_gem_object *obj,
93 struct drm_mode_fb_cmd2 *mode_cmd);
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +020094static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
95static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
Maarten Lankhorst4c354752018-10-11 12:04:49 +020096static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
97 const struct intel_link_m_n *m_n,
98 const struct intel_link_m_n *m2_n2);
Maarten Lankhorstfdf73512018-10-04 11:45:52 +020099static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
100static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
101static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
102static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
Ville Syrjäläd288f652014-10-28 13:20:22 +0200103static void vlv_prepare_pll(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +0200104 const struct intel_crtc_state *pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +0200105static void chv_prepare_pll(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +0200106 const struct intel_crtc_state *pipe_config);
Daniel Vetter5a21b662016-05-24 17:13:53 +0200107static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
108static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
Nabendu Maiti1c74eea2016-11-29 11:23:14 +0530109static void intel_crtc_init_scalers(struct intel_crtc *crtc,
110 struct intel_crtc_state *crtc_state);
Maarten Lankhorstb2562712018-10-04 11:45:53 +0200111static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
112static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
113static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
Ville Syrjäläaecd36b2017-06-01 17:36:13 +0300114static void intel_modeset_setup_hw_state(struct drm_device *dev,
115 struct drm_modeset_acquire_ctx *ctx);
Ville Syrjälä2622a082016-03-09 19:07:26 +0200116static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
Damien Lespiaue7457a92013-08-08 22:28:59 +0100117
/*
 * Per-platform/per-output PLL divider limits. Frequencies are in kHz.
 * These bound the dividers fed into the *_calc_dpll_params() helpers
 * and are range-checked by intel_PLL_is_valid().
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/*
	 * The p2 post divider is two-valued: p2_slow below dot_limit,
	 * p2_fast at or above it (see i9xx_select_p2_div()).
	 */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
Jesse Barnes79e53942008-11-07 14:24:08 -0800128
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300129/* returns HPLL frequency in kHz */
Ville Syrjälä49cd97a2017-02-07 20:33:45 +0200130int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300131{
132 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
133
134 /* Obtain SKU information */
135 mutex_lock(&dev_priv->sb_lock);
136 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
137 CCK_FUSE_HPLL_FREQ_MASK;
138 mutex_unlock(&dev_priv->sb_lock);
139
140 return vco_freq[hpll_freq] * 1000;
141}
142
Ville Syrjäläc30fec62016-03-04 21:43:02 +0200143int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
144 const char *name, u32 reg, int ref_freq)
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300145{
146 u32 val;
147 int divider;
148
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300149 mutex_lock(&dev_priv->sb_lock);
150 val = vlv_cck_read(dev_priv, reg);
151 mutex_unlock(&dev_priv->sb_lock);
152
153 divider = val & CCK_FREQUENCY_VALUES;
154
155 WARN((val & CCK_FREQUENCY_STATUS) !=
156 (divider << CCK_FREQUENCY_STATUS_SHIFT),
157 "%s change in progress\n", name);
158
Ville Syrjäläc30fec62016-03-04 21:43:02 +0200159 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
160}
161
Ville Syrjälä7ff89ca2017-02-07 20:33:05 +0200162int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
163 const char *name, u32 reg)
Ville Syrjäläc30fec62016-03-04 21:43:02 +0200164{
165 if (dev_priv->hpll_freq == 0)
Ville Syrjälä49cd97a2017-02-07 20:33:45 +0200166 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
Ville Syrjäläc30fec62016-03-04 21:43:02 +0200167
168 return vlv_get_cck_clock(dev_priv, name, reg,
169 dev_priv->hpll_freq);
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300170}
171
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300172static void intel_update_czclk(struct drm_i915_private *dev_priv)
173{
Wayne Boyer666a4532015-12-09 12:29:35 -0800174 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
Ville Syrjäläbfa7df02015-09-24 23:29:18 +0300175 return;
176
177 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
178 CCK_CZ_CLOCK_CONTROL);
179
180 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
181}
182
Chris Wilson021357a2010-09-07 20:54:59 +0100183static inline u32 /* units of 100MHz */
Ville Syrjälä21a727b2016-02-17 21:41:10 +0200184intel_fdi_link_freq(struct drm_i915_private *dev_priv,
185 const struct intel_crtc_state *pipe_config)
Chris Wilson021357a2010-09-07 20:54:59 +0100186{
Ville Syrjälä21a727b2016-02-17 21:41:10 +0200187 if (HAS_DDI(dev_priv))
188 return pipe_config->port_clock; /* SPLL */
Ville Syrjäläe3b247d2016-02-17 21:41:09 +0200189 else
Chris Wilson58ecd9d2017-11-05 13:49:05 +0000190 return dev_priv->fdi_pll_freq;
Chris Wilson021357a2010-09-07 20:54:59 +0100191}
192
/* gen2: DAC output limits. */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2: DVO limits; differ from DAC only in p2_fast. */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2: LVDS limits; note the much narrower p1 range. */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
Eric Anholt273e27c2011-03-30 13:01:10 -0700231
/* gen3/gen4: SDVO limits. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* gen3/gen4: LVDS limits. */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
257
Eric Anholt273e27c2011-03-30 13:01:10 -0700258
/* G4x: SDVO limits. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x: HDMI limits. */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x: single-channel LVDS; dot_limit 0 means p2 is effectively fixed. */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x: dual-channel LVDS; again a single fixed p2 value. */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
314
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview LVDS: same divider structure as SDVO, different p/p2. */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
342
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB single-channel LVDS. */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB dual-channel LVDS. */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel LVDS with a 100 MHz reference clock. */
static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
413
static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 carries a 22-bit fractional part (see chv_calc_dpll_params). */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
457
/*
 * Apparently display workaround #0528: toggle the FBC-queue-disable
 * chicken bit for the given pipe. Skipped on SKL and BXT; applied on
 * the other gen9+ platforms that reach this path.
 */
static void
skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (enable)
		I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
	else
		/*
		 * NOTE(review): both paths write the register outright
		 * instead of doing a read-modify-write like the disable
		 * path of skl_wa_clkgate() below, so any other bits set
		 * in CHICKEN_PIPESL_1 are clobbered here -- confirm this
		 * is intentional.
		 */
		I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
}
469
/*
 * Toggle the DUPS1/DUPS2 clock-gating-disable bits for the given pipe.
 * Skipped on SKL and BXT. The enable path overwrites the register with
 * just the two bits; the disable path preserves the other bits via a
 * read-modify-write.
 */
static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (enable)
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		I915_WRITE(CLKGATE_DIS_PSL(pipe),
			   I915_READ(CLKGATE_DIS_PSL(pipe)) &
			   ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
484
/* Convenience wrapper: does this atomic CRTC state require a full modeset? */
static bool
needs_modeset(const struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
490
Imre Deakdccbea32015-06-22 23:35:51 +0300491/*
492 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
493 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
494 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
495 * The helpers' return value is the rate of the clock that is fed to the
496 * display engine's pipe which can be the above fast dot clock rate or a
497 * divided-down version of it.
498 */
Adam Jacksonf2b115e2009-12-03 17:14:42 -0500499/* m1 is reserved as 0 in Pineview, n is a ring counter */
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300500static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
Jesse Barnes79e53942008-11-07 14:24:08 -0800501{
Shaohua Li21778322009-02-23 15:19:16 +0800502 clock->m = clock->m2 + 2;
503 clock->p = clock->p1 * clock->p2;
Ville Syrjäläed5ca772013-12-02 19:00:45 +0200504 if (WARN_ON(clock->n == 0 || clock->p == 0))
Imre Deakdccbea32015-06-22 23:35:51 +0300505 return 0;
Ville Syrjäläfb03ac02013-10-14 14:50:30 +0300506 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
507 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
Imre Deakdccbea32015-06-22 23:35:51 +0300508
509 return clock->dot;
Shaohua Li21778322009-02-23 15:19:16 +0800510}
511
/* Combined loopback divider for i9xx DPLLs: M = 5 * (M1 + 2) + (M2 + 2). */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
516
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300517static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
Shaohua Li21778322009-02-23 15:19:16 +0800518{
Daniel Vetter7429e9d2013-04-20 17:19:46 +0200519 clock->m = i9xx_dpll_compute_m(clock);
Jesse Barnes79e53942008-11-07 14:24:08 -0800520 clock->p = clock->p1 * clock->p2;
Ville Syrjäläed5ca772013-12-02 19:00:45 +0200521 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
Imre Deakdccbea32015-06-22 23:35:51 +0300522 return 0;
Ville Syrjäläfb03ac02013-10-14 14:50:30 +0300523 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
524 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
Imre Deakdccbea32015-06-22 23:35:51 +0300525
526 return clock->dot;
Jesse Barnes79e53942008-11-07 14:24:08 -0800527}
528
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300529static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
Imre Deak589eca62015-06-22 23:35:50 +0300530{
531 clock->m = clock->m1 * clock->m2;
532 clock->p = clock->p1 * clock->p2;
533 if (WARN_ON(clock->n == 0 || clock->p == 0))
Imre Deakdccbea32015-06-22 23:35:51 +0300534 return 0;
Imre Deak589eca62015-06-22 23:35:50 +0300535 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
536 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
Imre Deakdccbea32015-06-22 23:35:51 +0300537
538 return clock->dot / 5;
Imre Deak589eca62015-06-22 23:35:50 +0300539}
540
/*
 * CHV variant: m2 carries a 22-bit fractional part, hence the 64-bit
 * multiply against a "n << 22" denominator. As on VLV the computed dot
 * clock is the fast clock, so the pipe rate returned is dot / 5.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
553
/*
 * Beware: this macro expands to an early "return false" from the
 * calling function, so every use below is a guard-clause exit.
 */
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* PNV/VLV/CHV/gen9-LP are exempt from the m1 > m2 ordering rule. */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/*
	 * The combined p and m values are only range-checked on platforms
	 * other than VLV/CHV/gen9-LP (their limit tables do not populate
	 * .p/.m -- compare intel_limits_vlv and friends above).
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
596
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300597static int
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300598i9xx_select_p2_div(const struct intel_limit *limit,
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300599 const struct intel_crtc_state *crtc_state,
600 int target)
Jesse Barnes79e53942008-11-07 14:24:08 -0800601{
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300602 struct drm_device *dev = crtc_state->base.crtc->dev;
Jesse Barnes79e53942008-11-07 14:24:08 -0800603
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +0300604 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Jesse Barnes79e53942008-11-07 14:24:08 -0800605 /*
Daniel Vettera210b022012-11-26 17:22:08 +0100606 * For LVDS just rely on its current settings for dual-channel.
607 * We haven't figured out how to reliably set up different
608 * single/dual channel state, if we even can.
Jesse Barnes79e53942008-11-07 14:24:08 -0800609 */
Daniel Vetter1974cad2012-11-26 17:22:09 +0100610 if (intel_is_dual_link_lvds(dev))
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300611 return limit->p2.p2_fast;
Jesse Barnes79e53942008-11-07 14:24:08 -0800612 else
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300613 return limit->p2.p2_slow;
Jesse Barnes79e53942008-11-07 14:24:08 -0800614 } else {
615 if (target < limit->p2.dot_limit)
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300616 return limit->p2.p2_slow;
Jesse Barnes79e53942008-11-07 14:24:08 -0800617 else
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300618 return limit->p2.p2_fast;
Jesse Barnes79e53942008-11-07 14:24:08 -0800619 }
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300620}
621
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +0200622/*
623 * Returns a set of divisors for the desired target clock with the given
624 * refclk, or FALSE. The returned values represent the clock equation:
625 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
626 *
627 * Target and reference clocks are specified in kHz.
628 *
629 * If match_clock is provided, then best_clock P divider must match the P
630 * divider from @match_clock used for LVDS downclocking.
631 */
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300632static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300633i9xx_find_best_dpll(const struct intel_limit *limit,
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300634 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300635 int target, int refclk, struct dpll *match_clock,
636 struct dpll *best_clock)
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300637{
638 struct drm_device *dev = crtc_state->base.crtc->dev;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300639 struct dpll clock;
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300640 int err = target;
Jesse Barnes79e53942008-11-07 14:24:08 -0800641
Akshay Joshi0206e352011-08-16 15:34:10 -0400642 memset(best_clock, 0, sizeof(*best_clock));
Jesse Barnes79e53942008-11-07 14:24:08 -0800643
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300644 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
645
Zhao Yakui42158662009-11-20 11:24:18 +0800646 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
647 clock.m1++) {
648 for (clock.m2 = limit->m2.min;
649 clock.m2 <= limit->m2.max; clock.m2++) {
Daniel Vetterc0efc382013-06-03 20:56:24 +0200650 if (clock.m2 >= clock.m1)
Zhao Yakui42158662009-11-20 11:24:18 +0800651 break;
652 for (clock.n = limit->n.min;
653 clock.n <= limit->n.max; clock.n++) {
654 for (clock.p1 = limit->p1.min;
655 clock.p1 <= limit->p1.max; clock.p1++) {
Jesse Barnes79e53942008-11-07 14:24:08 -0800656 int this_err;
657
Imre Deakdccbea32015-06-22 23:35:51 +0300658 i9xx_calc_dpll_params(refclk, &clock);
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100659 if (!intel_PLL_is_valid(to_i915(dev),
660 limit,
Chris Wilson1b894b52010-12-14 20:04:54 +0000661 &clock))
Jesse Barnes79e53942008-11-07 14:24:08 -0800662 continue;
Sean Paulcec2f352012-01-10 15:09:36 -0800663 if (match_clock &&
664 clock.p != match_clock->p)
665 continue;
Jesse Barnes79e53942008-11-07 14:24:08 -0800666
667 this_err = abs(clock.dot - target);
668 if (this_err < err) {
669 *best_clock = clock;
670 err = this_err;
671 }
672 }
673 }
674 }
675 }
676
677 return (err != target);
678}
679
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +0200680/*
681 * Returns a set of divisors for the desired target clock with the given
682 * refclk, or FALSE. The returned values represent the clock equation:
683 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
684 *
685 * Target and reference clocks are specified in kHz.
686 *
687 * If match_clock is provided, then best_clock P divider must match the P
688 * divider from @match_clock used for LVDS downclocking.
689 */
Ma Lingd4906092009-03-18 20:13:27 +0800690static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300691pnv_find_best_dpll(const struct intel_limit *limit,
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200692 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300693 int target, int refclk, struct dpll *match_clock,
694 struct dpll *best_clock)
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200695{
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300696 struct drm_device *dev = crtc_state->base.crtc->dev;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300697 struct dpll clock;
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200698 int err = target;
699
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200700 memset(best_clock, 0, sizeof(*best_clock));
701
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300702 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
703
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200704 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
705 clock.m1++) {
706 for (clock.m2 = limit->m2.min;
707 clock.m2 <= limit->m2.max; clock.m2++) {
Daniel Vetterac58c3f2013-06-01 17:16:17 +0200708 for (clock.n = limit->n.min;
709 clock.n <= limit->n.max; clock.n++) {
710 for (clock.p1 = limit->p1.min;
711 clock.p1 <= limit->p1.max; clock.p1++) {
712 int this_err;
713
Imre Deakdccbea32015-06-22 23:35:51 +0300714 pnv_calc_dpll_params(refclk, &clock);
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100715 if (!intel_PLL_is_valid(to_i915(dev),
716 limit,
Jesse Barnes79e53942008-11-07 14:24:08 -0800717 &clock))
718 continue;
719 if (match_clock &&
720 clock.p != match_clock->p)
721 continue;
722
723 this_err = abs(clock.dot - target);
724 if (this_err < err) {
725 *best_clock = clock;
726 err = this_err;
727 }
728 }
729 }
730 }
731 }
732
733 return (err != target);
734}
735
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +0200736/*
737 * Returns a set of divisors for the desired target clock with the given
738 * refclk, or FALSE. The returned values represent the clock equation:
739 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +0200740 *
741 * Target and reference clocks are specified in kHz.
742 *
743 * If match_clock is provided, then best_clock P divider must match the P
744 * divider from @match_clock used for LVDS downclocking.
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +0200745 */
Ma Lingd4906092009-03-18 20:13:27 +0800746static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300747g4x_find_best_dpll(const struct intel_limit *limit,
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200748 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300749 int target, int refclk, struct dpll *match_clock,
750 struct dpll *best_clock)
Ma Lingd4906092009-03-18 20:13:27 +0800751{
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300752 struct drm_device *dev = crtc_state->base.crtc->dev;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300753 struct dpll clock;
Ma Lingd4906092009-03-18 20:13:27 +0800754 int max_n;
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300755 bool found = false;
Adam Jackson6ba770d2010-07-02 16:43:30 -0400756 /* approximately equals target * 0.00585 */
757 int err_most = (target >> 8) + (target >> 9);
Ma Lingd4906092009-03-18 20:13:27 +0800758
759 memset(best_clock, 0, sizeof(*best_clock));
Ville Syrjälä3b1429d2015-06-18 13:47:22 +0300760
761 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
762
Ma Lingd4906092009-03-18 20:13:27 +0800763 max_n = limit->n.max;
Gilles Espinassef77f13e2010-03-29 15:41:47 +0200764 /* based on hardware requirement, prefer smaller n to precision */
Ma Lingd4906092009-03-18 20:13:27 +0800765 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
Gilles Espinassef77f13e2010-03-29 15:41:47 +0200766 /* based on hardware requirement, prefere larger m1,m2 */
Ma Lingd4906092009-03-18 20:13:27 +0800767 for (clock.m1 = limit->m1.max;
768 clock.m1 >= limit->m1.min; clock.m1--) {
769 for (clock.m2 = limit->m2.max;
770 clock.m2 >= limit->m2.min; clock.m2--) {
771 for (clock.p1 = limit->p1.max;
772 clock.p1 >= limit->p1.min; clock.p1--) {
773 int this_err;
774
Imre Deakdccbea32015-06-22 23:35:51 +0300775 i9xx_calc_dpll_params(refclk, &clock);
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100776 if (!intel_PLL_is_valid(to_i915(dev),
777 limit,
Chris Wilson1b894b52010-12-14 20:04:54 +0000778 &clock))
Ma Lingd4906092009-03-18 20:13:27 +0800779 continue;
Chris Wilson1b894b52010-12-14 20:04:54 +0000780
781 this_err = abs(clock.dot - target);
Ma Lingd4906092009-03-18 20:13:27 +0800782 if (this_err < err_most) {
783 *best_clock = clock;
784 err_most = this_err;
785 max_n = clock.n;
786 found = true;
787 }
788 }
789 }
790 }
791 }
Zhenyu Wang2c072452009-06-05 15:38:42 +0800792 return found;
793}
Ma Lingd4906092009-03-18 20:13:27 +0800794
Imre Deakd5dd62b2015-03-17 11:40:03 +0200795/*
796 * Check if the calculated PLL configuration is more optimal compared to the
797 * best configuration and error found so far. Return the calculated error.
798 */
799static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300800 const struct dpll *calculated_clock,
801 const struct dpll *best_clock,
Imre Deakd5dd62b2015-03-17 11:40:03 +0200802 unsigned int best_error_ppm,
803 unsigned int *error_ppm)
804{
Imre Deak9ca3ba02015-03-17 11:40:05 +0200805 /*
806 * For CHV ignore the error and consider only the P value.
807 * Prefer a bigger P value based on HW requirements.
808 */
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +0100809 if (IS_CHERRYVIEW(to_i915(dev))) {
Imre Deak9ca3ba02015-03-17 11:40:05 +0200810 *error_ppm = 0;
811
812 return calculated_clock->p > best_clock->p;
813 }
814
Imre Deak24be4e42015-03-17 11:40:04 +0200815 if (WARN_ON_ONCE(!target_freq))
816 return false;
817
Imre Deakd5dd62b2015-03-17 11:40:03 +0200818 *error_ppm = div_u64(1000000ULL *
819 abs(target_freq - calculated_clock->dot),
820 target_freq);
821 /*
822 * Prefer a better P value over a better (smaller) error if the error
823 * is small. Ensure this preference for future configurations too by
824 * setting the error to 0.
825 */
826 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
827 *error_ppm = 0;
828
829 return true;
830 }
831
832 return *error_ppm + 10 < best_error_ppm;
833}
834
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200835/*
836 * Returns a set of divisors for the desired target clock with the given
837 * refclk, or FALSE. The returned values represent the clock equation:
838 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
839 */
Zhenyu Wang2c072452009-06-05 15:38:42 +0800840static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300841vlv_find_best_dpll(const struct intel_limit *limit,
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200842 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300843 int target, int refclk, struct dpll *match_clock,
844 struct dpll *best_clock)
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700845{
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200846 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ander Conselvan de Oliveiraa919ff12014-10-20 13:46:43 +0300847 struct drm_device *dev = crtc->base.dev;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300848 struct dpll clock;
Ville Syrjälä69e4f9002013-09-24 21:26:20 +0300849 unsigned int bestppm = 1000000;
Ville Syrjälä27e639b2013-09-24 21:26:24 +0300850 /* min update 19.2 MHz */
851 int max_n = min(limit->n.max, refclk / 19200);
Ville Syrjälä49e497e2013-09-24 21:26:31 +0300852 bool found = false;
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700853
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300854 target *= 5; /* fast clock */
855
856 memset(best_clock, 0, sizeof(*best_clock));
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700857
858 /* based on hardware requirement, prefer smaller n to precision */
Ville Syrjälä27e639b2013-09-24 21:26:24 +0300859 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
Ville Syrjälä811bbf02013-09-24 21:26:25 +0300860 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
Ville Syrjälä889059d2013-09-24 21:26:27 +0300861 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
Ville Syrjäläc1a9ae42013-09-24 21:26:23 +0300862 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300863 clock.p = clock.p1 * clock.p2;
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700864 /* based on hardware requirement, prefer bigger m1,m2 values */
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300865 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
Imre Deakd5dd62b2015-03-17 11:40:03 +0200866 unsigned int ppm;
Ville Syrjälä69e4f9002013-09-24 21:26:20 +0300867
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300868 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
869 refclk * clock.m1);
Ville Syrjälä43b0ac52013-09-24 21:26:18 +0300870
Imre Deakdccbea32015-06-22 23:35:51 +0300871 vlv_calc_dpll_params(refclk, &clock);
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300872
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100873 if (!intel_PLL_is_valid(to_i915(dev),
874 limit,
Ville Syrjäläf01b7962013-09-27 16:55:49 +0300875 &clock))
Ville Syrjälä43b0ac52013-09-24 21:26:18 +0300876 continue;
877
Imre Deakd5dd62b2015-03-17 11:40:03 +0200878 if (!vlv_PLL_is_optimal(dev, target,
879 &clock,
880 best_clock,
881 bestppm, &ppm))
882 continue;
Ville Syrjälä6b4bf1c2013-09-27 16:54:19 +0300883
Imre Deakd5dd62b2015-03-17 11:40:03 +0200884 *best_clock = clock;
885 bestppm = ppm;
886 found = true;
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700887 }
888 }
889 }
890 }
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700891
Ville Syrjälä49e497e2013-09-24 21:26:31 +0300892 return found;
Jesse Barnesa0c4da242012-06-15 11:55:13 -0700893}
Keith Packarda4fc5ed2009-04-07 16:16:42 -0700894
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200895/*
896 * Returns a set of divisors for the desired target clock with the given
897 * refclk, or FALSE. The returned values represent the clock equation:
898 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
899 */
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300900static bool
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300901chv_find_best_dpll(const struct intel_limit *limit,
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200902 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300903 int target, int refclk, struct dpll *match_clock,
904 struct dpll *best_clock)
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300905{
Ander Conselvan de Oliveiraa93e2552015-03-20 16:18:17 +0200906 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
Ander Conselvan de Oliveiraa919ff12014-10-20 13:46:43 +0300907 struct drm_device *dev = crtc->base.dev;
Imre Deak9ca3ba02015-03-17 11:40:05 +0200908 unsigned int best_error_ppm;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300909 struct dpll clock;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300910 uint64_t m2;
911 int found = false;
912
913 memset(best_clock, 0, sizeof(*best_clock));
Imre Deak9ca3ba02015-03-17 11:40:05 +0200914 best_error_ppm = 1000000;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300915
916 /*
917 * Based on hardware doc, the n always set to 1, and m1 always
918 * set to 2. If requires to support 200Mhz refclk, we need to
919 * revisit this because n may not 1 anymore.
920 */
921 clock.n = 1, clock.m1 = 2;
922 target *= 5; /* fast clock */
923
924 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
925 for (clock.p2 = limit->p2.p2_fast;
926 clock.p2 >= limit->p2.p2_slow;
927 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
Imre Deak9ca3ba02015-03-17 11:40:05 +0200928 unsigned int error_ppm;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300929
930 clock.p = clock.p1 * clock.p2;
931
932 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
933 clock.n) << 22, refclk * clock.m1);
934
935 if (m2 > INT_MAX/clock.m1)
936 continue;
937
938 clock.m2 = m2;
939
Imre Deakdccbea32015-06-22 23:35:51 +0300940 chv_calc_dpll_params(refclk, &clock);
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300941
Tvrtko Ursuline2d214a2016-10-13 11:03:04 +0100942 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300943 continue;
944
Imre Deak9ca3ba02015-03-17 11:40:05 +0200945 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
946 best_error_ppm, &error_ppm))
947 continue;
948
949 *best_clock = clock;
950 best_error_ppm = error_ppm;
951 found = true;
Chon Ming Leeef9348c2014-04-09 13:28:18 +0300952 }
953 }
954
955 return found;
956}
957
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200958bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +0300959 struct dpll *best_clock)
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200960{
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200961 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +0300962 const struct intel_limit *limit = &intel_limits_bxt;
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200963
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +0200964 return chv_find_best_dpll(limit, crtc_state,
Imre Deak5ab7b0b2015-03-06 03:29:25 +0200965 target_clock, refclk, NULL, best_clock);
966}
967
Ville Syrjälä525b9312016-10-31 22:37:02 +0200968bool intel_crtc_active(struct intel_crtc *crtc)
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300969{
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300970 /* Be paranoid as we can arrive here with only partial
971 * state retrieved from the hardware during setup.
972 *
Damien Lespiau241bfc32013-09-25 16:45:37 +0100973 * We can ditch the adjusted_mode.crtc_clock check as soon
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300974 * as Haswell has gained clock readout/fastboot support.
975 *
Ville Syrjäläcd30fbc2018-05-25 21:50:40 +0300976 * We can ditch the crtc->primary->state->fb check as soon as we can
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300977 * properly reconstruct framebuffers.
Matt Roperc3d1f432015-03-09 10:19:23 -0700978 *
979 * FIXME: The intel_crtc->active here should be switched to
980 * crtc->state->active once we have proper CRTC states wired up
981 * for atomic.
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300982 */
Ville Syrjälä525b9312016-10-31 22:37:02 +0200983 return crtc->active && crtc->base.primary->state->fb &&
984 crtc->config->base.adjusted_mode.crtc_clock;
Ville Syrjälä20ddf662013-09-04 18:25:25 +0300985}
986
Paulo Zanonia5c961d2012-10-24 15:59:34 -0200987enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
988 enum pipe pipe)
989{
Ville Syrjälä98187832016-10-31 22:37:10 +0200990 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Paulo Zanonia5c961d2012-10-24 15:59:34 -0200991
Ville Syrjäläe2af48c2016-10-31 22:37:05 +0200992 return crtc->config->cpu_transcoder;
Paulo Zanonia5c961d2012-10-24 15:59:34 -0200993}
994
Ville Syrjälä8fedd642017-11-29 17:37:30 +0200995static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
996 enum pipe pipe)
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300997{
Ville Syrjäläf0f59a02015-11-18 15:33:26 +0200998 i915_reg_t reg = PIPEDSL(pipe);
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +0300999 u32 line1, line2;
1000 u32 line_mask;
1001
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01001002 if (IS_GEN2(dev_priv))
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +03001003 line_mask = DSL_LINEMASK_GEN2;
1004 else
1005 line_mask = DSL_LINEMASK_GEN3;
1006
1007 line1 = I915_READ(reg) & line_mask;
Daniel Vetter6adfb1e2015-07-07 09:10:40 +02001008 msleep(5);
Ville Syrjäläfbf49ea2013-10-11 14:21:31 +03001009 line2 = I915_READ(reg) & line_mask;
1010
Ville Syrjälä8fedd642017-11-29 17:37:30 +02001011 return line1 != line2;
1012}
1013
/*
 * Poll until the pipe's scanline counter is moving (@state == true) or
 * stopped (@state == false), with a 100 ms timeout; logs an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1024
/* Wait (up to 100 ms) for the pipe's scanline counter to stop advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1029
/* Wait (up to 100 ms) for the pipe's scanline counter to start advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1034
Ville Syrjälä4972f702017-11-29 17:37:32 +02001035static void
1036intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
Jesse Barnes9d0498a2010-08-18 13:20:54 -07001037{
Ville Syrjälä4972f702017-11-29 17:37:32 +02001038 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001039 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnes9d0498a2010-08-18 13:20:54 -07001040
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001041 if (INTEL_GEN(dev_priv) >= 4) {
Ville Syrjälä4972f702017-11-29 17:37:32 +02001042 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001043 i915_reg_t reg = PIPECONF(cpu_transcoder);
Jesse Barnes9d0498a2010-08-18 13:20:54 -07001044
Keith Packardab7ad7f2010-10-03 00:33:06 -07001045 /* Wait for the Pipe State to go off */
Chris Wilsonb8511f52016-06-30 15:32:53 +01001046 if (intel_wait_for_register(dev_priv,
1047 reg, I965_PIPECONF_ACTIVE, 0,
1048 100))
Daniel Vetter284637d2012-07-09 09:51:57 +02001049 WARN(1, "pipe_off wait timed out\n");
Keith Packardab7ad7f2010-10-03 00:33:06 -07001050 } else {
Ville Syrjälä8fedd642017-11-29 17:37:30 +02001051 intel_wait_for_pipe_scanline_stopped(crtc);
Keith Packardab7ad7f2010-10-03 00:33:06 -07001052 }
Jesse Barnes79e53942008-11-07 14:24:08 -08001053}
1054
Jesse Barnesb24e7172011-01-04 15:09:30 -08001055/* Only for pre-ILK configs */
Daniel Vetter55607e82013-06-16 21:42:39 +02001056void assert_pll(struct drm_i915_private *dev_priv,
1057 enum pipe pipe, bool state)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001058{
Jesse Barnesb24e7172011-01-04 15:09:30 -08001059 u32 val;
1060 bool cur_state;
1061
Ville Syrjälä649636e2015-09-22 19:50:01 +03001062 val = I915_READ(DPLL(pipe));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001063 cur_state = !!(val & DPLL_VCO_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001064 I915_STATE_WARN(cur_state != state,
Jesse Barnesb24e7172011-01-04 15:09:30 -08001065 "PLL state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001066 onoff(state), onoff(cur_state));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001067}
Jesse Barnesb24e7172011-01-04 15:09:30 -08001068
Jani Nikula23538ef2013-08-27 15:12:22 +03001069/* XXX: the dsi pll is shared between MIPI DSI ports */
Lionel Landwerlin8563b1e2016-03-16 10:57:14 +00001070void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
Jani Nikula23538ef2013-08-27 15:12:22 +03001071{
1072 u32 val;
1073 bool cur_state;
1074
Ville Syrjäläa5805162015-05-26 20:42:30 +03001075 mutex_lock(&dev_priv->sb_lock);
Jani Nikula23538ef2013-08-27 15:12:22 +03001076 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
Ville Syrjäläa5805162015-05-26 20:42:30 +03001077 mutex_unlock(&dev_priv->sb_lock);
Jani Nikula23538ef2013-08-27 15:12:22 +03001078
1079 cur_state = val & DSI_PLL_VCO_EN;
Rob Clarke2c719b2014-12-15 13:56:32 -05001080 I915_STATE_WARN(cur_state != state,
Jani Nikula23538ef2013-08-27 15:12:22 +03001081 "DSI PLL state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001082 onoff(state), onoff(cur_state));
Jani Nikula23538ef2013-08-27 15:12:22 +03001083}
Jani Nikula23538ef2013-08-27 15:12:22 +03001084
Jesse Barnes040484a2011-01-03 12:14:26 -08001085static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1086 enum pipe pipe, bool state)
1087{
Jesse Barnes040484a2011-01-03 12:14:26 -08001088 bool cur_state;
Paulo Zanoniad80a812012-10-24 16:06:19 -02001089 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1090 pipe);
Jesse Barnes040484a2011-01-03 12:14:26 -08001091
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001092 if (HAS_DDI(dev_priv)) {
Paulo Zanoniaffa9352012-11-23 15:30:39 -02001093 /* DDI does not have a specific FDI_TX register */
Ville Syrjälä649636e2015-09-22 19:50:01 +03001094 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
Paulo Zanoniad80a812012-10-24 16:06:19 -02001095 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001096 } else {
Ville Syrjälä649636e2015-09-22 19:50:01 +03001097 u32 val = I915_READ(FDI_TX_CTL(pipe));
Eugeni Dodonovbf507ef2012-05-09 15:37:18 -03001098 cur_state = !!(val & FDI_TX_ENABLE);
1099 }
Rob Clarke2c719b2014-12-15 13:56:32 -05001100 I915_STATE_WARN(cur_state != state,
Jesse Barnes040484a2011-01-03 12:14:26 -08001101 "FDI TX state assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001102 onoff(state), onoff(cur_state));
Jesse Barnes040484a2011-01-03 12:14:26 -08001103}
1104#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1105#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1106
/* Assert the FDI receiver enable state for @pipe matches @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1121
/*
 * Assert that the FDI TX PLL is enabled on platforms where software
 * controls it; skipped on ILK (always on) and DDI platforms.
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN5(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1138
Daniel Vetter55607e82013-06-16 21:42:39 +02001139void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1140 enum pipe pipe, bool state)
Jesse Barnes040484a2011-01-03 12:14:26 -08001141{
Jesse Barnes040484a2011-01-03 12:14:26 -08001142 u32 val;
Daniel Vetter55607e82013-06-16 21:42:39 +02001143 bool cur_state;
Jesse Barnes040484a2011-01-03 12:14:26 -08001144
Ville Syrjälä649636e2015-09-22 19:50:01 +03001145 val = I915_READ(FDI_RX_CTL(pipe));
Daniel Vetter55607e82013-06-16 21:42:39 +02001146 cur_state = !!(val & FDI_RX_PLL_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001147 I915_STATE_WARN(cur_state != state,
Daniel Vetter55607e82013-06-16 21:42:39 +02001148 "FDI RX PLL assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001149 onoff(state), onoff(cur_state));
Jesse Barnes040484a2011-01-03 12:14:26 -08001150}
1151
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01001152void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
Jesse Barnesea0760c2011-01-04 15:09:32 -08001153{
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001154 i915_reg_t pp_reg;
Jesse Barnesea0760c2011-01-04 15:09:32 -08001155 u32 val;
Ville Syrjälä10ed55e2018-05-23 17:57:18 +03001156 enum pipe panel_pipe = INVALID_PIPE;
Thomas Jarosch0de3b482011-08-25 15:37:45 +02001157 bool locked = true;
Jesse Barnesea0760c2011-01-04 15:09:32 -08001158
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01001159 if (WARN_ON(HAS_DDI(dev_priv)))
Jani Nikulabedd4db2014-08-22 15:04:13 +03001160 return;
1161
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01001162 if (HAS_PCH_SPLIT(dev_priv)) {
Jani Nikulabedd4db2014-08-22 15:04:13 +03001163 u32 port_sel;
1164
Imre Deak44cb7342016-08-10 14:07:29 +03001165 pp_reg = PP_CONTROL(0);
1166 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
Jani Nikulabedd4db2014-08-22 15:04:13 +03001167
Ville Syrjälä4c23dea2018-05-18 18:29:30 +03001168 switch (port_sel) {
1169 case PANEL_PORT_SELECT_LVDS:
Ville Syrjäläa44628b2018-05-14 21:28:27 +03001170 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
Ville Syrjälä4c23dea2018-05-18 18:29:30 +03001171 break;
1172 case PANEL_PORT_SELECT_DPA:
1173 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1174 break;
1175 case PANEL_PORT_SELECT_DPC:
1176 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1177 break;
1178 case PANEL_PORT_SELECT_DPD:
1179 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1180 break;
1181 default:
1182 MISSING_CASE(port_sel);
1183 break;
1184 }
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +01001185 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
Jani Nikulabedd4db2014-08-22 15:04:13 +03001186 /* presumably write lock depends on pipe, not port select */
Imre Deak44cb7342016-08-10 14:07:29 +03001187 pp_reg = PP_CONTROL(pipe);
Jani Nikulabedd4db2014-08-22 15:04:13 +03001188 panel_pipe = pipe;
Jesse Barnesea0760c2011-01-04 15:09:32 -08001189 } else {
Ville Syrjäläf0d2b752018-05-18 18:29:31 +03001190 u32 port_sel;
1191
Imre Deak44cb7342016-08-10 14:07:29 +03001192 pp_reg = PP_CONTROL(0);
Ville Syrjäläf0d2b752018-05-18 18:29:31 +03001193 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1194
1195 WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
Ville Syrjäläa44628b2018-05-14 21:28:27 +03001196 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
Jesse Barnesea0760c2011-01-04 15:09:32 -08001197 }
1198
1199 val = I915_READ(pp_reg);
1200 if (!(val & PANEL_POWER_ON) ||
Jani Nikulaec49ba22014-08-21 15:06:25 +03001201 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
Jesse Barnesea0760c2011-01-04 15:09:32 -08001202 locked = false;
1203
Rob Clarke2c719b2014-12-15 13:56:32 -05001204 I915_STATE_WARN(panel_pipe == pipe && locked,
Jesse Barnesea0760c2011-01-04 15:09:32 -08001205 "panel assertion failure, pipe %c regs locked\n",
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001206 pipe_name(pipe));
Jesse Barnesea0760c2011-01-04 15:09:32 -08001207}
1208
Jesse Barnesb840d907f2011-12-13 13:19:38 -08001209void assert_pipe(struct drm_i915_private *dev_priv,
1210 enum pipe pipe, bool state)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001211{
Jesse Barnes63d7bbe2011-01-04 15:09:33 -08001212 bool cur_state;
Paulo Zanoni702e7a52012-10-23 18:29:59 -02001213 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1214 pipe);
Imre Deak4feed0e2016-02-12 18:55:14 +02001215 enum intel_display_power_domain power_domain;
Jesse Barnesb24e7172011-01-04 15:09:30 -08001216
Ville Syrjäläe56134b2017-06-01 17:36:19 +03001217 /* we keep both pipes enabled on 830 */
1218 if (IS_I830(dev_priv))
Daniel Vetter8e636782012-01-22 01:36:48 +01001219 state = true;
1220
Imre Deak4feed0e2016-02-12 18:55:14 +02001221 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1222 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
Ville Syrjälä649636e2015-09-22 19:50:01 +03001223 u32 val = I915_READ(PIPECONF(cpu_transcoder));
Paulo Zanoni69310162013-01-29 16:35:19 -02001224 cur_state = !!(val & PIPECONF_ENABLE);
Imre Deak4feed0e2016-02-12 18:55:14 +02001225
1226 intel_display_power_put(dev_priv, power_domain);
1227 } else {
1228 cur_state = false;
Paulo Zanoni69310162013-01-29 16:35:19 -02001229 }
1230
Rob Clarke2c719b2014-12-15 13:56:32 -05001231 I915_STATE_WARN(cur_state != state,
Jesse Barnes63d7bbe2011-01-04 15:09:33 -08001232 "pipe %c assertion failure (expected %s, current %s)\n",
Jani Nikula87ad3212016-01-14 12:53:34 +02001233 pipe_name(pipe), onoff(state), onoff(cur_state));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001234}
1235
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001236static void assert_plane(struct intel_plane *plane, bool state)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001237{
Ville Syrjäläeade6c82018-01-30 22:38:03 +02001238 enum pipe pipe;
1239 bool cur_state;
1240
1241 cur_state = plane->get_hw_state(plane, &pipe);
Jesse Barnesb24e7172011-01-04 15:09:30 -08001242
Rob Clarke2c719b2014-12-15 13:56:32 -05001243 I915_STATE_WARN(cur_state != state,
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001244 "%s assertion failure (expected %s, current %s)\n",
1245 plane->base.name, onoff(state), onoff(cur_state));
Jesse Barnesb24e7172011-01-04 15:09:30 -08001246}
1247
/* Convenience wrappers around assert_plane() for the two common checks. */
#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
Chris Wilson931872f2012-01-16 23:01:13 +00001250
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001251static void assert_planes_disabled(struct intel_crtc *crtc)
Jesse Barnesb24e7172011-01-04 15:09:30 -08001252{
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001253 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1254 struct intel_plane *plane;
Jesse Barnesb24e7172011-01-04 15:09:30 -08001255
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02001256 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1257 assert_plane_disabled(plane);
Jesse Barnes19332d72013-03-28 09:55:38 -07001258}
1259
/*
 * Assert that vblank interrupts are off for @crtc. If vblank_get()
 * succeeds (returns 0), vblanks were still enabled — warn, and drop
 * the reference we just took to keep the refcount balanced.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	int ret = drm_crtc_vblank_get(crtc);

	if (I915_STATE_WARN_ON(ret == 0))
		drm_crtc_vblank_put(crtc);
}
1265
Ander Conselvan de Oliveira7abd4b32016-03-08 17:46:15 +02001266void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1267 enum pipe pipe)
Jesse Barnes92f25842011-01-04 15:09:34 -08001268{
Jesse Barnes92f25842011-01-04 15:09:34 -08001269 u32 val;
1270 bool enabled;
1271
Ville Syrjälä649636e2015-09-22 19:50:01 +03001272 val = I915_READ(PCH_TRANSCONF(pipe));
Jesse Barnes92f25842011-01-04 15:09:34 -08001273 enabled = !!(val & TRANS_ENABLE);
Rob Clarke2c719b2014-12-15 13:56:32 -05001274 I915_STATE_WARN(enabled,
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08001275 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1276 pipe_name(pipe));
Jesse Barnes92f25842011-01-04 15:09:34 -08001277}
1278
Jesse Barnes291906f2011-02-02 12:28:03 -08001279static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001280 enum pipe pipe, enum port port,
1281 i915_reg_t dp_reg)
Jesse Barnes291906f2011-02-02 12:28:03 -08001282{
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001283 enum pipe port_pipe;
1284 bool state;
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001285
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001286 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1287
1288 I915_STATE_WARN(state && port_pipe == pipe,
1289 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1290 port_name(port), pipe_name(pipe));
1291
1292 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1293 "IBX PCH DP %c still using transcoder B\n",
1294 port_name(port));
Jesse Barnes291906f2011-02-02 12:28:03 -08001295}
1296
1297static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
Ville Syrjälä76203462018-05-14 20:24:21 +03001298 enum pipe pipe, enum port port,
1299 i915_reg_t hdmi_reg)
Jesse Barnes291906f2011-02-02 12:28:03 -08001300{
Ville Syrjälä76203462018-05-14 20:24:21 +03001301 enum pipe port_pipe;
1302 bool state;
Daniel Vetterde9a35a2012-06-05 11:03:40 +02001303
Ville Syrjälä76203462018-05-14 20:24:21 +03001304 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1305
1306 I915_STATE_WARN(state && port_pipe == pipe,
1307 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1308 port_name(port), pipe_name(pipe));
1309
1310 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1311 "IBX PCH HDMI %c still using transcoder B\n",
1312 port_name(port));
Jesse Barnes291906f2011-02-02 12:28:03 -08001313}
1314
1315static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1316 enum pipe pipe)
1317{
Ville Syrjälä6102a8e2018-05-14 20:24:19 +03001318 enum pipe port_pipe;
Jesse Barnes291906f2011-02-02 12:28:03 -08001319
Ville Syrjälä59b74c42018-05-18 18:29:28 +03001320 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1321 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1322 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
Jesse Barnes291906f2011-02-02 12:28:03 -08001323
Ville Syrjälä6102a8e2018-05-14 20:24:19 +03001324 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1325 port_pipe == pipe,
1326 "PCH VGA enabled on transcoder %c, should be disabled\n",
1327 pipe_name(pipe));
Jesse Barnes291906f2011-02-02 12:28:03 -08001328
Ville Syrjäläa44628b2018-05-14 21:28:27 +03001329 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1330 port_pipe == pipe,
1331 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1332 pipe_name(pipe));
Jesse Barnes291906f2011-02-02 12:28:03 -08001333
Ville Syrjälä76203462018-05-14 20:24:21 +03001334 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1335 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1336 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
Jesse Barnes291906f2011-02-02 12:28:03 -08001337}
1338
/*
 * Actually spin up the VLV DPLL for @crtc: write the precomputed DPLL
 * value, wait for the hardware to settle, then poll for the lock bit.
 * Callers (vlv_enable_pll()) have already done the pipe/panel asserts.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	/* let the PLL spin up before polling for lock */
	udelay(150);

	/* wait up to 1ms for DPLL_LOCK_VLV to be set */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1356
/*
 * Enable the DPLL for @crtc on VLV.
 *
 * The pipe must be disabled, and since the PLL registers are protected
 * by the panel power sequencer the registers must be unlocked first.
 * The VCO is only spun up (via _vlv_enable_pll()) when the computed
 * DPLL value actually has DPLL_VCO_ENABLE set; DPLL_MD is programmed
 * unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1374
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03001375
/*
 * Actually spin up the CHV DPLL for @crtc: first re-enable the 10bit
 * display clock through the DPIO sideband (under sb_lock), then enable
 * the PLL and poll for lock. Callers (chv_enable_pll()) have already
 * done the pipe/panel asserts.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked (up to 1ms) */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1407
/*
 * Enable the DPLL for @crtc on CHV. The pipe must be off and the
 * panel-protected PLL registers unlocked. The VCO itself is only spun
 * up (via _chv_enable_pll()) when DPLL_VCO_ENABLE is set in the
 * precomputed value; DPLL_MD is always programmed, using the chicken
 * bit workaround for pipes B/C since their DPLL_MD register is absent.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* cache the value we couldn't write to the real register */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1444
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001445static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03001446{
1447 struct intel_crtc *crtc;
1448 int count = 0;
1449
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00001450 for_each_intel_crtc(&dev_priv->drm, crtc) {
Maarten Lankhorst3538b9d2015-06-01 12:50:10 +02001451 count += crtc->base.state->active &&
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03001452 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1453 }
Ville Syrjälä1c4e0272014-09-05 21:52:42 +03001454
1455 return count;
1456}
1457
/*
 * Enable the pre-ILK (i9xx-style) DPLL for @crtc.
 *
 * The sequence is order-sensitive: DVO 2x mode first (on the *other*
 * pipe's PLL), then a zero write to reset the dividers with VGA mode,
 * the real DPLL value, a stabilization delay, the pixel multiplier
 * (DPLL_MD on gen4+, a DPLL rewrite on older parts), and finally the
 * traditional three warmup rewrites.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1517
/*
 * Disable the pre-ILK (i9xx-style) DPLL for the CRTC behind
 * @crtc_state. Clears the DVO 2x clock from both PLLs when this was
 * the last active DVO pipe, then (except on 830, where both pipes and
 * their PLLs stay on) parks the PLL with only VGA mode disable set.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1544
Jesse Barnesf6071162013-10-01 10:41:38 -07001545static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1546{
Ville Syrjäläb8afb912015-06-29 15:25:48 +03001547 u32 val;
Jesse Barnesf6071162013-10-01 10:41:38 -07001548
1549 /* Make sure the pipe isn't still relying on us */
1550 assert_pipe_disabled(dev_priv, pipe);
1551
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02001552 val = DPLL_INTEGRATED_REF_CLK_VLV |
1553 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1554 if (pipe != PIPE_A)
1555 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1556
Jesse Barnesf6071162013-10-01 10:41:38 -07001557 I915_WRITE(DPLL(pipe), val);
1558 POSTING_READ(DPLL(pipe));
Chon Ming Lee076ed3b2014-04-09 13:28:17 +03001559}
1560
/*
 * Disable the CHV DPLL for @pipe: park the DPLL register with only the
 * always-on bits set, then turn off the 10bit display clock through
 * the DPIO sideband (under sb_lock) — the reverse of the enable
 * sequence in _chv_enable_pll().
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* keep the integrated CRI clock running on pipes other than A */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1586
/*
 * Poll (up to 1ms) for @dport's PHY ready bits to match
 * @expected_mask, warning on timeout. Which register and bitfield to
 * watch depends on the port: B and C share DPLL(0) (C's bits sit four
 * positions above B's, hence the shift), while D is reported in
 * DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* move the caller's mask into PORT_C's bit position */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1619
/*
 * Enable the PCH transcoder feeding this CRTC's pipe (ILK-style PCH).
 *
 * Preconditions (asserted): the shared DPLL is running and both FDI TX
 * and RX are already enabled, since the PCH transcoder gets its pixel
 * data over FDI. On CPT a timing-override chicken bit must be set
 * before the transcoder is enabled; on IBX the transcoder's BPC and
 * interlace fields are programmed to match the pipe.
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* mirror the pipe's interlace mode; IBX SDVO uses the legacy bit */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	/* wait up to 100ms for the transcoder to report enabled */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1677
/*
 * Enable the single LPT PCH transcoder, fed over FDI from
 * @cpu_transcoder. On LPT the PCH transcoder hardware is tied to
 * pipe A resources, hence the PIPE_A-based FDI RX assert and chicken
 * register.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* mirror the CPU transcoder's interlace mode */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	/* wait up to 100ms for the transcoder to report enabled */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1709
/*
 * Disable the PCH transcoder for @pipe (ILK-style PCH). FDI and all
 * PCH ports must already be off (asserted). On CPT the
 * timing-override chicken bit set at enable time is cleared again
 * afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1741
/*
 * Disable the single LPT PCH transcoder and clear the timing-override
 * chicken bit that lpt_enable_pch_transcoder() set.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1760
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001761enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
Ville Syrjälä65f21302016-10-14 20:02:53 +03001762{
1763 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1764
Ville Syrjälä65f21302016-10-14 20:02:53 +03001765 if (HAS_PCH_LPT(dev_priv))
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001766 return PIPE_A;
Ville Syrjälä65f21302016-10-14 20:02:53 +03001767 else
Matthias Kaehlckea2196032017-07-17 11:14:03 -07001768 return crtc->pipe;
Ville Syrjälä65f21302016-10-14 20:02:53 +03001769}
1770
/*
 * Enable the pipe for @new_crtc_state.
 *
 * All planes must already be disabled (asserted), and the relevant
 * clock source — DPLL/DSI PLL on GMCH platforms, FDI PLLs when a PCH
 * encoder is in use — must be running. Returns early if PIPECONF is
 * already enabled, which is only legitimate on 830 where both pipes
 * are kept on permanently.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (dev_priv->drm.max_vblank_count == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1826
/*
 * Disable the pipe described by @old_crtc_state.
 *
 * All planes must already be disabled (asserted below). The write is
 * skipped if PIPECONF is already disabled. On I830 the pipe enable bit
 * is deliberately left set (both pipes stay enabled on that platform),
 * in which case no pipe-off wait is needed.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* Already disabled, nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for the pipe to stop if we actually disabled it. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1864
/* GTT tile size in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
1869
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001870static unsigned int
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001871intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001872{
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001873 struct drm_i915_private *dev_priv = to_i915(fb->dev);
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001874 unsigned int cpp = fb->format->cpp[color_plane];
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001875
1876 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07001877 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001878 return cpp;
1879 case I915_FORMAT_MOD_X_TILED:
1880 if (IS_GEN2(dev_priv))
1881 return 128;
1882 else
1883 return 512;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001884 case I915_FORMAT_MOD_Y_TILED_CCS:
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001885 if (color_plane == 1)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001886 return 128;
1887 /* fall through */
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001888 case I915_FORMAT_MOD_Y_TILED:
1889 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
1890 return 128;
1891 else
1892 return 512;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001893 case I915_FORMAT_MOD_Yf_TILED_CCS:
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001894 if (color_plane == 1)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07001895 return 128;
1896 /* fall through */
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001897 case I915_FORMAT_MOD_Yf_TILED:
1898 switch (cpp) {
1899 case 1:
1900 return 64;
1901 case 2:
1902 case 4:
1903 return 128;
1904 case 8:
1905 case 16:
1906 return 256;
1907 default:
1908 MISSING_CASE(cpp);
1909 return cpp;
1910 }
1911 break;
1912 default:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001913 MISSING_CASE(fb->modifier);
Ville Syrjälä7b49f942016-01-12 21:08:32 +02001914 return cpp;
1915 }
1916}
1917
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001918static unsigned int
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001919intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
Jesse Barnesa57ce0b2014-02-07 12:10:35 -08001920{
Ben Widawsky2f075562017-03-24 14:29:48 -07001921 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
Ville Syrjälä832be822016-01-12 21:08:33 +02001922 return 1;
1923 else
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001924 return intel_tile_size(to_i915(fb->dev)) /
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001925 intel_tile_width_bytes(fb, color_plane);
Tvrtko Ursulin6761dd32015-03-23 11:10:32 +00001926}
1927
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001928/* Return the tile dimensions in pixel units */
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001929static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001930 unsigned int *tile_width,
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001931 unsigned int *tile_height)
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001932{
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001933 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1934 unsigned int cpp = fb->format->cpp[color_plane];
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001935
1936 *tile_width = tile_width_bytes / cpp;
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001937 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02001938}
1939
/* Round @height up to a whole number of tile rows for the given fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1948
Ville Syrjälä1663b9d2016-02-15 22:54:45 +02001949unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1950{
1951 unsigned int size = 0;
1952 int i;
1953
1954 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1955 size += rot_info->plane[i].width * rot_info->plane[i].height;
1956
1957 return size;
1958}
1959
Daniel Vetter75c82a52015-10-14 16:51:04 +02001960static void
Ville Syrjälä3465c582016-02-15 22:54:43 +02001961intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
1962 const struct drm_framebuffer *fb,
1963 unsigned int rotation)
Tvrtko Ursulinf64b98c2015-03-23 11:10:35 +00001964{
Chris Wilson7b92c042017-01-14 00:28:26 +00001965 view->type = I915_GGTT_VIEW_NORMAL;
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03001966 if (drm_rotation_90_or_270(rotation)) {
Chris Wilson7b92c042017-01-14 00:28:26 +00001967 view->type = I915_GGTT_VIEW_ROTATED;
Chris Wilson8bab11932017-01-14 00:28:25 +00001968 view->rotated = to_intel_framebuffer(fb)->rot_info;
Ville Syrjälä2d7a2152016-02-15 22:54:47 +02001969 }
1970}
1971
/* Cursor surface base address alignment, which varies per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
1983
/* Surface base address alignment for linear (untiled) scanout buffers. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
1996
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02001997static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03001998 int color_plane)
Ville Syrjälä603525d2016-01-12 21:08:37 +02001999{
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002000 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2001
Ville Syrjäläb90c1ee2017-03-07 21:42:07 +02002002 /* AUX_DIST needs only 4K alignment */
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002003 if (color_plane == 1)
Ville Syrjäläb90c1ee2017-03-07 21:42:07 +02002004 return 4096;
2005
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002006 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07002007 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjälä603525d2016-01-12 21:08:37 +02002008 return intel_linear_alignment(dev_priv);
2009 case I915_FORMAT_MOD_X_TILED:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002010 if (INTEL_GEN(dev_priv) >= 9)
Ville Syrjälä603525d2016-01-12 21:08:37 +02002011 return 256 * 1024;
2012 return 0;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002013 case I915_FORMAT_MOD_Y_TILED_CCS:
2014 case I915_FORMAT_MOD_Yf_TILED_CCS:
Ville Syrjälä603525d2016-01-12 21:08:37 +02002015 case I915_FORMAT_MOD_Y_TILED:
2016 case I915_FORMAT_MOD_Yf_TILED:
2017 return 1 * 1024 * 1024;
2018 default:
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02002019 MISSING_CASE(fb->modifier);
Ville Syrjälä603525d2016-01-12 21:08:37 +02002020 return 0;
2021 }
2022}
2023
Ville Syrjäläf7a02ad2018-02-21 20:48:07 +02002024static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2025{
2026 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2027 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2028
Ville Syrjälä32febd92018-02-21 18:02:33 +02002029 return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
Ville Syrjäläf7a02ad2018-02-21 20:48:07 +02002030}
2031
/*
 * Pin @fb's backing object into the GGTT for scanout through @view and,
 * when @uses_fence allows it, install a fence for tiled scan-out.
 *
 * Returns the pinned vma (with an extra reference held for the caller;
 * released by intel_unpin_fb_vma()) or an ERR_PTR. PLANE_HAS_FENCE is
 * OR'd into *@out_flags when a fence was installed.
 *
 * Caller must hold struct_mutex (asserted below).
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	/* Keep the GPU error handler aware of in-flight fb pins. */
	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression. For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		/* Only pre-gen4 treats a missing fence as fatal. */
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference handed to the caller along with the pin. */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv);
	return vma;
}
2122
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if @flags has
 * PLANE_HAS_FENCE), the display-plane pin, and the vma reference.
 * Caller must hold struct_mutex (asserted below).
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
2132
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002133static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002134 unsigned int rotation)
2135{
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002136 if (drm_rotation_90_or_270(rotation))
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002137 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002138 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002139 return fb->pitches[color_plane];
Ville Syrjäläef78ec92015-10-13 22:48:39 +03002140}
2141
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002142/*
Ville Syrjälä6687c902015-09-15 13:16:41 +03002143 * Convert the x/y offsets into a linear offset.
2144 * Only valid with 0/180 degree rotation, which is fine since linear
2145 * offset is only used with linear buffers on pre-hsw and tiled buffers
2146 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2147 */
2148u32 intel_fb_xy_to_linear(int x, int y,
Ville Syrjälä29490562016-01-20 18:02:50 +02002149 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002150 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002151{
Ville Syrjälä29490562016-01-20 18:02:50 +02002152 const struct drm_framebuffer *fb = state->base.fb;
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002153 unsigned int cpp = fb->format->cpp[color_plane];
2154 unsigned int pitch = state->color_plane[color_plane].stride;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002155
2156 return y * pitch + x * cpp;
2157}
2158
2159/*
2160 * Add the x/y offsets derived from fb->offsets[] to the user
2161 * specified plane src x/y offsets. The resulting x/y offsets
2162 * specify the start of scanout from the beginning of the gtt mapping.
2163 */
2164void intel_add_fb_offsets(int *x, int *y,
Ville Syrjälä29490562016-01-20 18:02:50 +02002165 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002166 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002167
2168{
Ville Syrjälä29490562016-01-20 18:02:50 +02002169 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2170 unsigned int rotation = state->base.rotation;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002171
Ville Syrjäläbd2ef252016-09-26 19:30:46 +03002172 if (drm_rotation_90_or_270(rotation)) {
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002173 *x += intel_fb->rotated[color_plane].x;
2174 *y += intel_fb->rotated[color_plane].y;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002175 } else {
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002176 *x += intel_fb->normal[color_plane].x;
2177 *y += intel_fb->normal[color_plane].y;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002178 }
2179}
2180
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002181static u32 intel_adjust_tile_offset(int *x, int *y,
2182 unsigned int tile_width,
2183 unsigned int tile_height,
2184 unsigned int tile_size,
2185 unsigned int pitch_tiles,
2186 u32 old_offset,
2187 u32 new_offset)
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002188{
Ville Syrjäläb9b24032016-02-08 18:28:00 +02002189 unsigned int pitch_pixels = pitch_tiles * tile_width;
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002190 unsigned int tiles;
2191
2192 WARN_ON(old_offset & (tile_size - 1));
2193 WARN_ON(new_offset & (tile_size - 1));
2194 WARN_ON(new_offset > old_offset);
2195
2196 tiles = (old_offset - new_offset) / tile_size;
2197
2198 *y += tiles / pitch_tiles * tile_height;
2199 *x += tiles % pitch_tiles * tile_width;
2200
Ville Syrjäläb9b24032016-02-08 18:28:00 +02002201 /* minimize x in case it got needlessly big */
2202 *y += *x / pitch_pixels * tile_height;
2203 *x %= pitch_pixels;
2204
Ville Syrjälä29cf9492016-02-15 22:54:42 +02002205 return new_offset;
2206}
2207
/*
 * Move the difference between @old_offset and @new_offset (both byte
 * offsets, with @new_offset <= @old_offset) into the x/y coordinates.
 * For tiled fbs this delegates the tile math to
 * intel_adjust_tile_offset(); for linear fbs it is plain pitch/cpp
 * arithmetic. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* In the rotated view the pitch is in tile rows. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold the old x/y into the offset, then re-split. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2246
2247/*
Ville Syrjälä303ba692017-08-24 22:10:49 +03002248 * Adjust the tile offset by moving the difference into
2249 * the x/y offsets.
2250 */
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002251static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2252 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002253 int color_plane,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002254 u32 old_offset, u32 new_offset)
Ville Syrjälä303ba692017-08-24 22:10:49 +03002255{
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002256 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002257 state->base.rotation,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002258 state->color_plane[color_plane].stride,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002259 old_offset, new_offset);
Ville Syrjälä303ba692017-08-24 22:10:49 +03002260}
2261
2262/*
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002263 * Computes the aligned offset to the base tile and adjusts
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002264 * x, y. bytes per pixel is assumed to be a power-of-two.
2265 *
2266 * In the 90/270 rotated case, x and y are assumed
2267 * to be already rotated to match the rotated GTT view, and
2268 * pitch is the tile_height aligned framebuffer height.
Ville Syrjälä6687c902015-09-15 13:16:41 +03002269 *
2270 * This function is used when computing the derived information
2271 * under intel_framebuffer, so using any of that information
2272 * here is not allowed. Anything under drm_framebuffer can be
2273 * used. This is why the user has to pass in the pitch since it
2274 * is specified in the rotated orientation.
Ville Syrjälä8d0deca2016-02-15 22:54:41 +02002275 */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	uint64_t fb_modifier = fb->modifier;
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	/*
	 * Turn the alignment into a mask; the masking below assumes
	 * @alignment is 0 or a power of two.
	 */
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* In the rotated view the pitch is in tile rows. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Reduce x/y to intra-tile coordinates. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Fold the alignment remainder back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* x/y become the remainder below the aligned offset. */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2327
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002328static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2329 const struct intel_plane_state *state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002330 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002331{
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03002332 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2333 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
Ville Syrjälä29490562016-01-20 18:02:50 +02002334 const struct drm_framebuffer *fb = state->base.fb;
2335 unsigned int rotation = state->base.rotation;
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002336 int pitch = state->color_plane[color_plane].stride;
Ville Syrjälä1e7b4fd2017-03-27 21:55:44 +03002337 u32 alignment;
2338
2339 if (intel_plane->id == PLANE_CURSOR)
2340 alignment = intel_cursor_alignment(dev_priv);
2341 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002342 alignment = intel_surf_alignment(fb, color_plane);
Ville Syrjälä6687c902015-09-15 13:16:41 +03002343
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002344 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002345 pitch, rotation, alignment);
Ville Syrjälä6687c902015-09-15 13:16:41 +03002346}
2347
Ville Syrjälä303ba692017-08-24 22:10:49 +03002348/* Convert the fb->offset[] into x/y offsets */
2349static int intel_fb_offset_to_xy(int *x, int *y,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002350 const struct drm_framebuffer *fb,
2351 int color_plane)
Ville Syrjälä6687c902015-09-15 13:16:41 +03002352{
Ville Syrjälä303ba692017-08-24 22:10:49 +03002353 struct drm_i915_private *dev_priv = to_i915(fb->dev);
Ville Syrjälä6687c902015-09-15 13:16:41 +03002354
Ville Syrjälä303ba692017-08-24 22:10:49 +03002355 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002356 fb->offsets[color_plane] % intel_tile_size(dev_priv))
Ville Syrjälä303ba692017-08-24 22:10:49 +03002357 return -EINVAL;
2358
2359 *x = 0;
2360 *y = 0;
2361
Ville Syrjälä6d19a442018-09-07 18:24:01 +03002362 intel_adjust_aligned_offset(x, y,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002363 fb, color_plane, DRM_MODE_ROTATE_0,
2364 fb->pitches[color_plane],
2365 fb->offsets[color_plane], 0);
Ville Syrjälä303ba692017-08-24 22:10:49 +03002366
2367 return 0;
Ville Syrjälä6687c902015-09-15 13:16:41 +03002368}
2369
Ville Syrjälä72618eb2016-02-04 20:38:20 +02002370static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2371{
2372 switch (fb_modifier) {
2373 case I915_FORMAT_MOD_X_TILED:
2374 return I915_TILING_X;
2375 case I915_FORMAT_MOD_Y_TILED:
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002376 case I915_FORMAT_MOD_Y_TILED_CCS:
Ville Syrjälä72618eb2016-02-04 20:38:20 +02002377 return I915_TILING_Y;
2378 default:
2379 return I915_TILING_NONE;
2380 }
2381}
2382
Ville Syrjälä16af25f2018-01-19 16:41:52 +02002383/*
2384 * From the Sky Lake PRM:
2385 * "The Color Control Surface (CCS) contains the compression status of
2386 * the cache-line pairs. The compression state of the cache-line pair
2387 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2388 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2389 * cache-line-pairs. CCS is always Y tiled."
2390 *
2391 * Since cache line pairs refers to horizontally adjacent cache lines,
2392 * each cache line in the CCS corresponds to an area of 32x16 cache
2393 * lines on the main surface. Since each pixel is 4 bytes, this gives
2394 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2395 * main surface.
2396 */
/*
 * Format info for CCS fbs: plane 0 is the 32bpp main surface, plane 1
 * is the aux (CCS) surface with one byte per 8x16 (hsub x vsub) pixel
 * block of the main surface.
 */
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2403
2404static const struct drm_format_info *
2405lookup_format_info(const struct drm_format_info formats[],
2406 int num_formats, u32 format)
2407{
2408 int i;
2409
2410 for (i = 0; i < num_formats; i++) {
2411 if (formats[i].format == format)
2412 return &formats[i];
2413 }
2414
2415 return NULL;
2416}
2417
2418static const struct drm_format_info *
2419intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2420{
2421 switch (cmd->modifier[0]) {
2422 case I915_FORMAT_MOD_Y_TILED_CCS:
2423 case I915_FORMAT_MOD_Yf_TILED_CCS:
2424 return lookup_format_info(ccs_formats,
2425 ARRAY_SIZE(ccs_formats),
2426 cmd->pixel_format);
2427 default:
2428 return NULL;
2429 }
2430}
2431
Dhinakaran Pandiyan63eaf9a2018-08-22 12:38:27 -07002432bool is_ccs_modifier(u64 modifier)
2433{
2434 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2435 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2436}
2437
/*
 * Fill in the layout information for all color planes of @fb: the x/y
 * coordinates of the first pixel relative to both the normal and the
 * 270-degree-rotated GTT mappings (intel_fb->normal[] / rotated[]),
 * plus the rotation info used to construct the rotated GGTT view.
 * Also validates the fb plane offsets, the CCS intra-tile alignment,
 * the fence/stride constraint, and that the fb fits inside the
 * backing object.
 *
 * Returns 0 on success or a negative errno on a bad fb layout.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;
		int ret;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		/* Convert the linear plane offset into x/y coordinates */
		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
		if (ret) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return ret;
		}

		/* CCS AUX plane: check intra-tile alignment vs. the main surface */
		if (is_ccs_modifier(fb->modifier) && i == 1) {
			int hsub = fb->format->hsub;
			int vsub = fb->format->vsub;
			int tile_width, tile_height;
			int main_x, main_y;
			int ccs_x, ccs_y;

			intel_tile_dims(fb, i, &tile_width, &tile_height);
			tile_width *= hsub;
			tile_height *= vsub;

			ccs_x = (x * hsub) % tile_width;
			ccs_y = (y * vsub) % tile_height;
			main_x = intel_fb->normal[0].x % tile_width;
			main_y = intel_fb->normal[0].y % tile_height;

			/*
			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
			 * x/y offsets must match between CCS and the main surface.
			 */
			if (main_x != ccs_x || main_y != ccs_y) {
				DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
					      main_x, main_y,
					      ccs_x, ccs_y,
					      intel_fb->normal[0].x,
					      intel_fb->normal[0].y,
					      x, y);
				return -EINVAL;
			}
		}

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i == 0 && i915_gem_object_is_tiled(obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* Plane offset within the normal GTT mapping, in tile units */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
						      fb->pitches[i],
						      DRM_MODE_ROTATE_0,
						      tile_size);
		offset /= tile_size;

		/* Tiled fbs additionally get a remapped 270-degree rotated view */
		if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			intel_adjust_tile_offset(&x, &y,
						 tile_width, tile_height,
						 tile_size, pitch_tiles,
						 gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	/* mul_u32_u32() avoids 32-bit overflow of max_size * tile_size */
	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
			      mul_u32_u32(max_size, tile_size), obj->base.size);
		return -EINVAL;
	}

	return 0;
}
2602
Damien Lespiaub35d63f2015-01-20 12:51:50 +00002603static int i9xx_format_to_fourcc(int format)
Jesse Barnes46f297f2014-03-07 08:57:48 -08002604{
2605 switch (format) {
2606 case DISPPLANE_8BPP:
2607 return DRM_FORMAT_C8;
2608 case DISPPLANE_BGRX555:
2609 return DRM_FORMAT_XRGB1555;
2610 case DISPPLANE_BGRX565:
2611 return DRM_FORMAT_RGB565;
2612 default:
2613 case DISPPLANE_BGRX888:
2614 return DRM_FORMAT_XRGB8888;
2615 case DISPPLANE_RGBX888:
2616 return DRM_FORMAT_XBGR8888;
2617 case DISPPLANE_BGRX101010:
2618 return DRM_FORMAT_XRGB2101010;
2619 case DISPPLANE_RGBX101010:
2620 return DRM_FORMAT_XBGR2101010;
2621 }
2622}
2623
Mahesh Kumarddf34312018-04-09 09:11:03 +05302624int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00002625{
2626 switch (format) {
2627 case PLANE_CTL_FORMAT_RGB_565:
2628 return DRM_FORMAT_RGB565;
Mahesh Kumarf34a2912018-04-09 09:11:02 +05302629 case PLANE_CTL_FORMAT_NV12:
2630 return DRM_FORMAT_NV12;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00002631 default:
2632 case PLANE_CTL_FORMAT_XRGB_8888:
2633 if (rgb_order) {
2634 if (alpha)
2635 return DRM_FORMAT_ABGR8888;
2636 else
2637 return DRM_FORMAT_XBGR8888;
2638 } else {
2639 if (alpha)
2640 return DRM_FORMAT_ARGB8888;
2641 else
2642 return DRM_FORMAT_XRGB8888;
2643 }
2644 case PLANE_CTL_FORMAT_XRGB_2101010:
2645 if (rgb_order)
2646 return DRM_FORMAT_XBGR2101010;
2647 else
2648 return DRM_FORMAT_XRGB2101010;
2649 }
2650}
2651
/*
 * Try to wrap the BIOS/GOP preallocated scanout memory described by
 * @plane_config in a stolen-memory GEM object and initialize the
 * pre-filled fb with it. Returns true on success; false if the memory
 * cannot be used (zero size, too large for stolen, or object/fb
 * creation failed), in which case nothing is kept.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	/*
	 * NOTE(review): base_aligned is passed for both the stolen and the
	 * GTT offset arguments — confirm against
	 * i915_gem_object_create_stolen_for_preallocated().
	 */
	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	/* Propagate the BIOS X-tiling setup onto the new GEM object */
	if (plane_config->tiling == I915_TILING_X)
		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	i915_gem_object_put(obj);
	return false;
}
2708
Damien Lespiau5724dbd2015-01-20 12:51:52 +00002709static void
Ville Syrjäläe9728bd2017-03-02 19:14:51 +02002710intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2711 struct intel_plane_state *plane_state,
2712 bool visible)
2713{
2714 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2715
2716 plane_state->base.visible = visible;
2717
Ville Syrjälä62358aa2018-10-03 17:50:17 +03002718 if (visible)
Ville Syrjälä40560e22018-06-26 22:47:11 +03002719 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
Ville Syrjälä62358aa2018-10-03 17:50:17 +03002720 else
Ville Syrjälä40560e22018-06-26 22:47:11 +03002721 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
Ville Syrjäläe9728bd2017-03-02 19:14:51 +02002722}
2723
/* Rebuild crtc_state->active_planes from the (unique-id) plane_mask. */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->base.plane_mask)
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
2740
/*
 * Disable @plane on @crtc outside of a full atomic commit: bring the
 * software state (visibility, plane_mask, active_planes) in line, do the
 * pre-disable work required for the primary plane, then write the
 * disable to the hardware.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);

	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	trace_intel_disable_plane(&plane->base, crtc);
	plane->disable_plane(plane, crtc);
}
2762
/*
 * Take over the BIOS-programmed framebuffer for @intel_crtc: either wrap
 * the preallocated stolen memory in a fresh GEM object, or reuse the fb
 * of another already-active CRTC scanning out from the same GGTT base.
 * If neither works, the primary plane is disabled so software and
 * hardware state stay consistent. On success the primary plane state
 * (src/dst rects, fb, vma) is filled in and the fb is pinned.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same scanout address: share the fb, taking a reference */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	obj = intel_fb_obj(fb);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	/* Full-fb scanout; src coordinates are 16.16 fixed point */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
2870
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002871static int skl_max_plane_width(const struct drm_framebuffer *fb,
2872 int color_plane,
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002873 unsigned int rotation)
2874{
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03002875 int cpp = fb->format->cpp[color_plane];
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002876
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002877 switch (fb->modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07002878 case DRM_FORMAT_MOD_LINEAR:
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002879 case I915_FORMAT_MOD_X_TILED:
2880 switch (cpp) {
2881 case 8:
2882 return 4096;
2883 case 4:
2884 case 2:
2885 case 1:
2886 return 8192;
2887 default:
2888 MISSING_CASE(cpp);
2889 break;
2890 }
2891 break;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07002892 case I915_FORMAT_MOD_Y_TILED_CCS:
2893 case I915_FORMAT_MOD_Yf_TILED_CCS:
2894 /* FIXME AUX plane? */
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002895 case I915_FORMAT_MOD_Y_TILED:
2896 case I915_FORMAT_MOD_Yf_TILED:
2897 switch (cpp) {
2898 case 8:
2899 return 2048;
2900 case 4:
2901 return 4096;
2902 case 2:
2903 case 1:
2904 return 8192;
2905 default:
2906 MISSING_CASE(cpp);
2907 break;
2908 }
2909 break;
2910 default:
Ville Syrjäläbae781b2016-11-16 13:33:16 +02002911 MISSING_CASE(fb->modifier);
Ville Syrjäläb63a16f2016-01-28 16:53:54 +02002912 }
2913
2914 return 2048;
2915}
2916
/*
 * The CCS AUX surface has no x/y offset registers of its own, so its
 * intra-tile x/y must match the main surface's. Walk the AUX offset
 * backwards in alignment-sized steps (converting the freed offset into
 * larger x/y) until it lines up with (main_x, main_y), or until we run
 * out of room (offset 0, or we'd overshoot the main surface).
 *
 * Returns true and commits the new AUX offset/x/y into color_plane[1]
 * on success; false if no matching position was found.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Adjust in subsampled (AUX) units, keeping the remainders */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
2954
/*
 * Compute the final main surface (color plane 0) offset and x/y for a
 * SKL+ plane: validate the source size against the per-modifier limits,
 * keep the main offset at or below the AUX offset (the AUX offset is
 * programmed relative to it and must be non-negative), work around the
 * X-tiled "x + width exceeds stride" hardware issue, and for CCS keep
 * the main and AUX intra-tile x/y in agreement. Assumes the AUX plane
 * (color_plane[1]) has already been set up. Returns 0 or -EINVAL.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Trade offset for x until x + w fits within the stride */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	return 0;
}
3031
Maarten Lankhorst5d794282018-05-12 03:03:14 +05303032static int
Ville Syrjälä73266592018-09-07 18:24:11 +03003033skl_check_nv12_surface(struct intel_plane_state *plane_state)
Maarten Lankhorst5d794282018-05-12 03:03:14 +05303034{
3035 /* Display WA #1106 */
3036 if (plane_state->base.rotation !=
3037 (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3038 plane_state->base.rotation != DRM_MODE_ROTATE_270)
3039 return 0;
3040
3041 /*
3042 * src coordinates are rotated here.
3043 * We check height but report it as width
3044 */
3045 if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3046 DRM_DEBUG_KMS("src width must be multiple "
3047 "of 4 for rotated NV12\n");
3048 return -EINVAL;
3049 }
3050
3051 return 0;
3052}
3053
/*
 * Set up the NV12 CbCr (color plane 1) surface offset and x/y.
 * The src rectangle is in 16.16 fixed point; shifting by 17 both drops
 * the fixed point (>> 16) and halves the coordinates (/2) for the
 * 2x2-subsampled chroma plane.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3082
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003083static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3084{
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003085 const struct drm_framebuffer *fb = plane_state->base.fb;
3086 int src_x = plane_state->base.src.x1 >> 16;
3087 int src_y = plane_state->base.src.y1 >> 16;
3088 int hsub = fb->format->hsub;
3089 int vsub = fb->format->vsub;
3090 int x = src_x / hsub;
3091 int y = src_y / vsub;
3092 u32 offset;
3093
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003094 intel_add_fb_offsets(&x, &y, plane_state, 1);
Ville Syrjälä6d19a442018-09-07 18:24:01 +03003095 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003096
Ville Syrjäläc11ada02018-09-07 18:24:04 +03003097 plane_state->color_plane[1].offset = offset;
3098 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3099 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003100
3101 return 0;
3102}
3103
/*
 * Compute the GGTT view, per-plane strides, offsets and x/y for a SKL+
 * plane state, and validate them. The AUX surface (NV12 CbCr or CCS)
 * is handled before the main surface because the main surface setup
 * depends on the AUX offset. Returns 0 or a negative errno.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
	plane_state->color_plane[1].stride = intel_fb_pitch(fb, 1, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* HW only has 8 bits pixel precision, disable plane if invisible */
	if (!(plane_state->base.alpha >> 8))
		plane_state->base.visible = false;

	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
				fb->width << 16, fb->height << 16,
				DRM_MODE_ROTATE_270);

	/*
	 * Handle the AUX surface first since
	 * the main surface setup depends on it.
	 */
	if (fb->format->format == DRM_FORMAT_NV12) {
		ret = skl_check_nv12_surface(plane_state);
		if (ret)
			return ret;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	} else if (is_ccs_modifier(fb->modifier)) {
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	} else {
		/*
		 * NOTE(review): no AUX plane — ~0xfff looks like an
		 * intentionally-invalid offset marker; confirm against the
		 * plane register programming code.
		 */
		plane_state->color_plane[1].offset = ~0xfff;
		plane_state->color_plane[1].x = 0;
		plane_state->color_plane[1].y = 0;
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
3158
Ville Syrjäläddd57132018-09-07 18:24:02 +03003159unsigned int
3160i9xx_plane_max_stride(struct intel_plane *plane,
3161 u32 pixel_format, u64 modifier,
3162 unsigned int rotation)
3163{
3164 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3165
3166 if (!HAS_GMCH_DISPLAY(dev_priv)) {
3167 return 32*1024;
3168 } else if (INTEL_GEN(dev_priv) >= 4) {
3169 if (modifier == I915_FORMAT_MOD_X_TILED)
3170 return 16*1024;
3171 else
3172 return 32*1024;
3173 } else if (INTEL_GEN(dev_priv) >= 3) {
3174 if (modifier == I915_FORMAT_MOD_X_TILED)
3175 return 8*1024;
3176 else
3177 return 16*1024;
3178 } else {
3179 if (plane->i9xx_plane == PLANE_C)
3180 return 4*1024;
3181 else
3182 return 8*1024;
3183 }
3184}
3185
/*
 * Compute the DSPCNTR register value for a pre-SKL primary plane from
 * the crtc/plane state. Returns 0 (plane effectively disabled) for an
 * unsupported pixel format, after logging via MISSING_CASE().
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
	    IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Pre-gen5 the plane/pipe assignment is selectable in DSPCNTR. */
	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	/*
	 * Map the DRM fourcc to the hardware format bits. Note the
	 * DISPPLANE_BGRX*/RGBX* naming refers to the memory byte order.
	 */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* X-tiling supported by the plane from gen4 onwards only. */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
Ville Syrjäläde1aa622013-06-07 10:47:01 +03003247
Ville Syrjäläf9407ae2017-03-23 21:27:12 +02003248int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003249{
3250 struct drm_i915_private *dev_priv =
3251 to_i915(plane_state->base.plane->dev);
Ville Syrjälädf79cf42018-09-11 18:01:39 +03003252 const struct drm_framebuffer *fb = plane_state->base.fb;
3253 unsigned int rotation = plane_state->base.rotation;
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003254 int src_x = plane_state->base.src.x1 >> 16;
3255 int src_y = plane_state->base.src.y1 >> 16;
3256 u32 offset;
Ville Syrjäläfc3fed52018-09-18 17:02:43 +03003257 int ret;
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003258
Ville Syrjäläf5929c52018-09-07 18:24:06 +03003259 intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
Ville Syrjälädf79cf42018-09-11 18:01:39 +03003260 plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);
3261
Ville Syrjäläfc3fed52018-09-18 17:02:43 +03003262 ret = intel_plane_check_stride(plane_state);
3263 if (ret)
3264 return ret;
3265
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003266 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
Jesse Barnes81255562010-08-02 12:07:50 -07003267
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00003268 if (INTEL_GEN(dev_priv) >= 4)
Ville Syrjälä6d19a442018-09-07 18:24:01 +03003269 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3270 plane_state, 0);
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003271 else
3272 offset = 0;
Daniel Vettere506a0c2012-07-05 12:17:29 +02003273
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003274 /* HSW/BDW do this automagically in hardware */
3275 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003276 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3277 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3278
Robert Fossc2c446a2017-05-19 16:50:17 -04003279 if (rotation & DRM_MODE_ROTATE_180) {
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003280 src_x += src_w - 1;
3281 src_y += src_h - 1;
Robert Fossc2c446a2017-05-19 16:50:17 -04003282 } else if (rotation & DRM_MODE_REFLECT_X) {
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003283 src_x += src_w - 1;
3284 }
Sonika Jindal48404c12014-08-22 14:06:04 +05303285 }
3286
Ville Syrjäläc11ada02018-09-07 18:24:04 +03003287 plane_state->color_plane[0].offset = offset;
3288 plane_state->color_plane[0].x = src_x;
3289 plane_state->color_plane[0].y = src_y;
Ville Syrjälä5b7fcc42017-03-23 21:27:10 +02003290
3291 return 0;
3292}
3293
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +03003294static int
3295i9xx_plane_check(struct intel_crtc_state *crtc_state,
3296 struct intel_plane_state *plane_state)
3297{
3298 int ret;
3299
Ville Syrjälä25721f82018-09-07 18:24:12 +03003300 ret = chv_plane_check_rotation(plane_state);
3301 if (ret)
3302 return ret;
3303
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +03003304 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3305 &crtc_state->base,
3306 DRM_PLANE_HELPER_NO_SCALING,
3307 DRM_PLANE_HELPER_NO_SCALING,
3308 false, true);
3309 if (ret)
3310 return ret;
3311
3312 if (!plane_state->base.visible)
3313 return 0;
3314
3315 ret = intel_plane_check_src_coordinates(plane_state);
3316 if (ret)
3317 return ret;
3318
3319 ret = i9xx_check_plane_surface(plane_state);
3320 if (ret)
3321 return ret;
3322
3323 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3324
3325 return 0;
3326}
3327
/*
 * Program a pre-SKL primary plane from precomputed plane state.
 *
 * All register writes happen under the uncore lock with the _FW
 * (no forcewake bookkeeping) accessors; the write ordering — size/pos
 * first, then DSPCNTR, then stride/surface — is intentional, with the
 * DSPSURF/DSPADDR write being the one that latches the update.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(i9xx_plane);
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	unsigned long irqflags;
	u32 dspaddr_offset;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 has no surface offset register; use the linear offset. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own size/pos registers. */
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
	} else {
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	}
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3390
/*
 * Disable a pre-SKL primary plane. Clearing DSPCNTR disarms the plane;
 * the subsequent surface-address write (DSPSURF on gen4+, DSPADDR
 * earlier) latches the update, and the posting read flushes it.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
	POSTING_READ_FW(DSPCNTR(i9xx_plane));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3409
/*
 * Read back whether the plane is currently enabled in hardware and
 * which pipe it is attached to. The power well reference must bracket
 * the register read; returns false if the power well is off (plane
 * cannot be enabled then).
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Pre-gen5 the pipe binding is selectable and read from DSPCNTR. */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
3442
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003443static u32
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003444intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
Damien Lespiaub3218032015-02-27 11:15:18 +00003445{
Ben Widawsky2f075562017-03-24 14:29:48 -07003446 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
Ville Syrjälä7b49f942016-01-12 21:08:32 +02003447 return 64;
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003448 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003449 return intel_tile_width_bytes(fb, color_plane);
Damien Lespiaub3218032015-02-27 11:15:18 +00003450}
3451
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003452static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3453{
3454 struct drm_device *dev = intel_crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01003455 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003456
3457 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3458 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3459 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003460}
3461
Chandra Kondurua1b22782015-04-07 15:28:45 -07003462/*
3463 * This function detaches (aka. unbinds) unused scalers in hardware
3464 */
Maarten Lankhorst15cbe5d2018-10-04 11:45:56 +02003465static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
Chandra Kondurua1b22782015-04-07 15:28:45 -07003466{
Maarten Lankhorst15cbe5d2018-10-04 11:45:56 +02003467 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3468 const struct intel_crtc_scaler_state *scaler_state =
3469 &crtc_state->scaler_state;
Chandra Kondurua1b22782015-04-07 15:28:45 -07003470 int i;
3471
Chandra Kondurua1b22782015-04-07 15:28:45 -07003472 /* loop through and disable scalers that aren't in use */
3473 for (i = 0; i < intel_crtc->num_scalers; i++) {
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02003474 if (!scaler_state->scalers[i].in_use)
3475 skl_detach_scaler(intel_crtc, i);
Chandra Kondurua1b22782015-04-07 15:28:45 -07003476 }
3477}
3478
Ville Syrjälädf79cf42018-09-11 18:01:39 +03003479u32 skl_plane_stride(const struct intel_plane_state *plane_state,
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003480 int color_plane)
Ville Syrjäläd2196772016-01-28 18:33:11 +02003481{
Ville Syrjälädf79cf42018-09-11 18:01:39 +03003482 const struct drm_framebuffer *fb = plane_state->base.fb;
3483 unsigned int rotation = plane_state->base.rotation;
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003484 u32 stride = plane_state->color_plane[color_plane].stride;
Ville Syrjälä1b500532017-03-07 21:42:08 +02003485
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003486 if (color_plane >= fb->format->num_planes)
Ville Syrjälä1b500532017-03-07 21:42:08 +02003487 return 0;
3488
Ville Syrjäläd2196772016-01-28 18:33:11 +02003489 /*
3490 * The stride is either expressed as a multiple of 64 bytes chunks for
3491 * linear buffers or in number of tiles for tiled buffers.
3492 */
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003493 if (drm_rotation_90_or_270(rotation))
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003494 stride /= intel_tile_height(fb, color_plane);
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02003495 else
Ville Syrjälä5d2a1952018-09-07 18:24:07 +03003496 stride /= intel_fb_stride_alignment(fb, color_plane);
Ville Syrjäläd2196772016-01-28 18:33:11 +02003497
3498 return stride;
3499}
3500
/*
 * Map a DRM fourcc to the SKL+ PLANE_CTL format (and RGB/BGR order)
 * bits. Unsupported formats log via MISSING_CASE() and return 0.
 * Note the alpha variants map to the same format bits as their X
 * counterparts; alpha handling is configured separately.
 */
static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3534
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003535static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
James Ausmus4036c782017-11-13 10:11:28 -08003536{
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003537 if (!plane_state->base.fb->format->has_alpha)
3538 return PLANE_CTL_ALPHA_DISABLE;
3539
3540 switch (plane_state->base.pixel_blend_mode) {
3541 case DRM_MODE_BLEND_PIXEL_NONE:
3542 return PLANE_CTL_ALPHA_DISABLE;
3543 case DRM_MODE_BLEND_PREMULTI:
James Ausmus4036c782017-11-13 10:11:28 -08003544 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003545 case DRM_MODE_BLEND_COVERAGE:
3546 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
James Ausmus4036c782017-11-13 10:11:28 -08003547 default:
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003548 MISSING_CASE(plane_state->base.pixel_blend_mode);
James Ausmus4036c782017-11-13 10:11:28 -08003549 return PLANE_CTL_ALPHA_DISABLE;
3550 }
3551}
3552
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003553static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
James Ausmus4036c782017-11-13 10:11:28 -08003554{
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003555 if (!plane_state->base.fb->format->has_alpha)
3556 return PLANE_COLOR_ALPHA_DISABLE;
3557
3558 switch (plane_state->base.pixel_blend_mode) {
3559 case DRM_MODE_BLEND_PIXEL_NONE:
3560 return PLANE_COLOR_ALPHA_DISABLE;
3561 case DRM_MODE_BLEND_PREMULTI:
James Ausmus4036c782017-11-13 10:11:28 -08003562 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003563 case DRM_MODE_BLEND_COVERAGE:
3564 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
James Ausmus4036c782017-11-13 10:11:28 -08003565 default:
Maarten Lankhorstb2081522018-08-15 12:34:05 +02003566 MISSING_CASE(plane_state->base.pixel_blend_mode);
James Ausmus4036c782017-11-13 10:11:28 -08003567 return PLANE_COLOR_ALPHA_DISABLE;
3568 }
3569}
3570
Ville Syrjälä2e881262017-03-17 23:17:56 +02003571static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
Chandra Konduru6156a452015-04-27 13:48:39 -07003572{
Chandra Konduru6156a452015-04-27 13:48:39 -07003573 switch (fb_modifier) {
Ben Widawsky2f075562017-03-24 14:29:48 -07003574 case DRM_FORMAT_MOD_LINEAR:
Chandra Konduru6156a452015-04-27 13:48:39 -07003575 break;
3576 case I915_FORMAT_MOD_X_TILED:
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003577 return PLANE_CTL_TILED_X;
Chandra Konduru6156a452015-04-27 13:48:39 -07003578 case I915_FORMAT_MOD_Y_TILED:
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003579 return PLANE_CTL_TILED_Y;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003580 case I915_FORMAT_MOD_Y_TILED_CCS:
Dhinakaran Pandiyan53867b42018-08-21 18:50:53 -07003581 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
Chandra Konduru6156a452015-04-27 13:48:39 -07003582 case I915_FORMAT_MOD_Yf_TILED:
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003583 return PLANE_CTL_TILED_YF;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07003584 case I915_FORMAT_MOD_Yf_TILED_CCS:
Dhinakaran Pandiyan53867b42018-08-21 18:50:53 -07003585 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
Chandra Konduru6156a452015-04-27 13:48:39 -07003586 default:
3587 MISSING_CASE(fb_modifier);
3588 }
Damien Lespiau8cfcba42015-05-12 16:13:14 +01003589
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003590 return 0;
Chandra Konduru6156a452015-04-27 13:48:39 -07003591}
3592
Joonas Lahtinen5f8e3f52017-12-15 13:38:00 -08003593static u32 skl_plane_ctl_rotate(unsigned int rotate)
Chandra Konduru6156a452015-04-27 13:48:39 -07003594{
Joonas Lahtinen5f8e3f52017-12-15 13:38:00 -08003595 switch (rotate) {
Robert Fossc2c446a2017-05-19 16:50:17 -04003596 case DRM_MODE_ROTATE_0:
Chandra Konduru6156a452015-04-27 13:48:39 -07003597 break;
Sonika Jindal1e8df162015-05-20 13:40:48 +05303598 /*
Robert Fossc2c446a2017-05-19 16:50:17 -04003599 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
Sonika Jindal1e8df162015-05-20 13:40:48 +05303600 * while i915 HW rotation is clockwise, thats why this swapping.
3601 */
Robert Fossc2c446a2017-05-19 16:50:17 -04003602 case DRM_MODE_ROTATE_90:
Sonika Jindal1e8df162015-05-20 13:40:48 +05303603 return PLANE_CTL_ROTATE_270;
Robert Fossc2c446a2017-05-19 16:50:17 -04003604 case DRM_MODE_ROTATE_180:
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003605 return PLANE_CTL_ROTATE_180;
Robert Fossc2c446a2017-05-19 16:50:17 -04003606 case DRM_MODE_ROTATE_270:
Sonika Jindal1e8df162015-05-20 13:40:48 +05303607 return PLANE_CTL_ROTATE_90;
Chandra Konduru6156a452015-04-27 13:48:39 -07003608 default:
Joonas Lahtinen5f8e3f52017-12-15 13:38:00 -08003609 MISSING_CASE(rotate);
3610 }
3611
3612 return 0;
3613}
3614
3615static u32 cnl_plane_ctl_flip(unsigned int reflect)
3616{
3617 switch (reflect) {
3618 case 0:
3619 break;
3620 case DRM_MODE_REFLECT_X:
3621 return PLANE_CTL_FLIP_HORIZONTAL;
3622 case DRM_MODE_REFLECT_Y:
3623 default:
3624 MISSING_CASE(reflect);
Chandra Konduru6156a452015-04-27 13:48:39 -07003625 }
3626
Damien Lespiauc34ce3d2015-05-15 15:07:02 +01003627 return 0;
Chandra Konduru6156a452015-04-27 13:48:39 -07003628}
3629
/*
 * Compute the PLANE_CTL register value for a SKL+ universal plane from
 * the crtc/plane state. On GLK/CNL+ the gamma/CSC/alpha controls moved
 * to PLANE_COLOR_CTL (see glk_plane_color_ctl()), hence the gen split.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* Pre-GLK: alpha/gamma/CSC are configured in PLANE_CTL itself. */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip is only supported from CNL (gen10) onwards. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
3671
/*
 * Compute the PLANE_COLOR_CTL register value (GLK/CNL+) holding the
 * alpha blending, gamma/CSC and YUV->RGB conversion controls that
 * lived in PLANE_CTL on earlier gens.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	u32 plane_color_ctl = 0;

	/* On gen11+ the pipe gamma/CSC enables moved out of this register. */
	if (INTEL_GEN(dev_priv) < 11) {
		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
	}
	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv) {
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	}

	return plane_color_ctl;
}
3699
/*
 * Re-read the current display hardware state and then commit @state,
 * a duplicate of the pre-reset/pre-suspend atomic state, on top of it.
 *
 * @state may be NULL, in which case only the hardware readout (and VGA
 * re-disable) is performed. All modeset locks must already be held via
 * @ctx, which is why hitting -EDEADLK here would be a bug (see the
 * WARN_ON below).
 *
 * Returns 0 on success or a negative error code from the commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* Locks are already held, so -EDEADLK should be impossible. */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
3738
Ville Syrjälä4ac2ba22016-08-05 23:28:29 +03003739static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3740{
Ville Syrjäläae981042016-08-05 23:28:30 +03003741 return intel_has_gpu_reset(dev_priv) &&
3742 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
Ville Syrjälä4ac2ba22016-08-05 23:28:29 +03003743}
3744
/*
 * Quiesce the display before a GPU reset that will clobber it.
 *
 * On success this leaves mode_config.mutex and all modeset locks
 * (acquired through dev_priv->reset_ctx) held, the CRTCs disabled, and
 * the duplicated pre-reset state stashed in modeset_restore_state; the
 * matching unlock/restore happens in intel_finish_reset(), which is
 * keyed off the I915_RESET_MODESET flag set here. The error paths also
 * return with the locks held for the same reason.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Hand the duplicated state to intel_finish_reset() for restore. */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3800
/*
 * Counterpart to intel_prepare_reset(): restore the display state saved
 * before the GPU reset and release the locks taken there.
 *
 * Bails out early (without touching locks) if intel_prepare_reset()
 * never set I915_RESET_MODESET. If the reset actually clobbered the
 * display, interrupts, hardware init, clock gating and HPD are all
 * re-initialized before committing the saved state; otherwise only the
 * state restore is done (the force_reset_modeset_test path).
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	/* Claim the saved state; NULL means prepare bailed after locking. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* Drop the locks acquired in intel_prepare_reset(). */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3854
/*
 * Fastset path: update pipe source size and the panel fitter state
 * without a full modeset. Gen9+ uses the plane scalers (detached here);
 * PCH-split platforms toggle the ironlake pfit directly.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC stores (width - 1) in the high half, (height - 1) low. */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}
}
3890
/*
 * Switch the FDI TX/RX for @crtc's pipe from a training pattern to the
 * normal (post-training) link pattern, with enhanced framing enabled on
 * both ends. Called once link training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses a separate "none" (normal) pattern encoding. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses its own pattern field/encoding on the RX side. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3931
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-phase training: pattern 1 until the RX reports bit lock, then
 * pattern 2 until symbol lock, polling FDI_RX_IIR up to 5 times per
 * phase. Failures are logged but not propagated to the caller.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; ack it in IIR by writing the bit back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock to complete training. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4025
/*
 * FDI TX voltage-swing / pre-emphasis levels, tried in order during
 * SNB/IVB link training (applied under FDI_LINK_TRAIN_VOL_EMP_MASK).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4032
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Like the ILK variant, but steps through the snb_b_fdi_train_param
 * voltage/emphasis table for each phase: for every level, the lock bit
 * is polled up to 5 times before moving to the next level. Failures are
 * logged but not propagated.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH has its own RX training pattern field. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Walk the vswing/emphasis table until bit lock is reported. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on lock. */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same table walk, this time waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4165
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Retries the full train-1/train-2 sequence for each voltage/emphasis
 * level (each level tried twice, see the j loop), disabling TX/RX
 * before every attempt. Jumps to train_done as soon as symbol lock is
 * achieved; otherwise logs the failure and moves to the next level.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each table entry is attempted on two consecutive j's. */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock (double-read to catch a late IIR update). */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll for symbol lock; success exits the retry loop entirely. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4285
/*
 * Enable the FDI PLLs for the pipe: PCH RX PLL first (with lane count
 * and BPC copied from PIPECONF), then switch the RX clock from Rawclk
 * to PCDclk, and finally enable the CPU TX PLL if it isn't already on.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Mirror the pipe's BPC setting into the FDI RX BPC field. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4322
/*
 * Disable the FDI PLLs for the pipe, undoing ironlake_fdi_pll_enable()
 * in reverse order: RX back to Rawclk, CPU TX PLL off, then PCH RX PLL
 * off, with a settle delay after each stage.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4352
/*
 * Disable the FDI link for @crtc: turn off the CPU-side transmitter and the
 * PCH-side receiver, apply the Ibex Peak clock-pointer workaround, and leave
 * both ends parked in training pattern 1 so a later re-enable can retrain
 * the link from a known state.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* Bits 18:16 carry the link bpc; refresh them from PIPECONF while
	 * clearing the enable bit. */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCHs use dedicated pattern bits; older PCHs share the TX layout. */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4405
/*
 * Check whether any CRTC still has an atomic commit whose cleanup (and thus
 * framebuffer unpin) has not completed.  For the first such CRTC found, wait
 * one vblank to give the cleanup a chance to make progress, then report
 * true.  Returns false when every commit's cleanup_done has signalled.
 *
 * The commit_lock is held only around the list peek; the vblank wait happens
 * after it is dropped, so the commit may already be gone by then — this is a
 * best-effort nudge, not a synchronization point.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		spin_lock(&crtc->commit_lock);
		/* Oldest commit sits at the head of the per-CRTC list. */
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		/* No pending commit counts as "cleanup already done". */
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		drm_crtc_wait_one_vblank(crtc);

		return true;
	}

	return false;
}
4430
/*
 * Gate the LPT iCLKIP pixel clock: first gate PIXCLK, then set the SSC
 * disable bit in SSCCTL6 over the sideband interface.  sb_lock serializes
 * sideband (SBI) access.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4445
/*
 * Program iCLKIP clock to the desired frequency.
 *
 * The modulator divides a 172.8 MHz virtual root clock down to the mode's
 * pixel clock using an integer divisor (divsel), a 1/64 phase increment
 * (phaseinc) and an auxiliary /2 stage (auxdiv).  The clock is disabled
 * while the divisors are reprogrammed over the sideband bus, then
 * re-enabled and ungated.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock,
		      auxdiv,
		      divsel,
		      phasedir,
		      phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4524
/*
 * Read back the currently programmed iCLKIP frequency (in kHz) by inverting
 * the divisor math of lpt_program_iclkip().  Returns 0 when the pixel clock
 * is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;	/* kHz, see programming side */
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reassemble the effective divisor: (divsel + 2) whole steps of the
	 * 1/64 phase range plus the fractional phase increment. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4561
/*
 * Copy the CPU transcoder's h/v timing registers into the given PCH
 * transcoder so both sides of the FDI link agree on the mode timings.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
						enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4585
/*
 * Set or clear the FDI B/C lane bifurcation chicken bit on CPT/PPT.
 * No-op when the bit already matches @enable.  Flipping the bit while
 * either FDI B or FDI C receiver is running is invalid, hence the WARNs.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* Already in the requested state? */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4605
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004606static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004607{
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004608 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4609 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004610
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004611 switch (crtc->pipe) {
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004612 case PIPE_A:
4613 break;
4614 case PIPE_B:
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004615 if (crtc_state->fdi_lanes > 2)
4616 cpt_set_fdi_bc_bifurcation(dev_priv, false);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004617 else
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004618 cpt_set_fdi_bc_bifurcation(dev_priv, true);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004619
4620 break;
4621 case PIPE_C:
Maarten Lankhorstb0b62d82018-10-11 12:04:56 +02004622 cpt_set_fdi_bc_bifurcation(dev_priv, true);
Daniel Vetter1fbc0d72013-10-29 12:04:08 +01004623
4624 break;
4625 default:
4626 BUG();
4627 }
4628}
4629
/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 *
 * Walks the new connector states in @state and returns the encoder of the
 * (expected to be unique) connector bound to @crtc_state's CRTC.  WARNs if
 * zero or more than one encoder is found; may return NULL in the
 * zero-encoder case.
 */
static struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	int num_encoders = 0;
	int i;

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	WARN(num_encoders != 1, "%d encoders for pipe %c\n",
	     num_encoders, pipe_name(crtc->pipe));

	return encoder;
}
4658
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 *
 * The ordering here is deliberate: TU size before link training, clock
 * selection before enabling the shared DPLL, timings before enabling the
 * PCH transcoder.
 */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB shares FDI B/C lanes; pick the bifurcation state first. */
	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Only ports B..D can feed a CPT PCH DP transcoder. */
		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(crtc_state);
}
4750
/*
 * LPT variant of the PCH enable sequence.  LPT has only one PCH
 * transcoder (modelled as PIPE_A): program the iCLKIP clock, copy the CPU
 * transcoder timings over, then enable the PCH transcoder.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4767
/*
 * Sanity-check that @pipe is actually running after a modeset by watching
 * the pipe's scanline counter (PIPEDSL) advance.  Two 5ms waits are given;
 * if the counter never moves the pipe is stuck and an error is logged.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe dead. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4781
Ville Syrjälä0a599522018-05-21 21:56:13 +03004782/*
4783 * The hardware phase 0.0 refers to the center of the pixel.
4784 * We want to start from the top/left edge which is phase
4785 * -0.5. That matches how the hardware calculates the scaling
4786 * factors (from top-left of the first pixel to bottom-right
4787 * of the last pixel, as opposed to the pixel centers).
4788 *
4789 * For 4:2:0 subsampled chroma planes we obviously have to
4790 * adjust that so that the chroma sample position lands in
4791 * the right spot.
4792 *
4793 * Note that for packed YCbCr 4:2:2 formats there is no way to
4794 * control chroma siting. The hardware simply replicates the
4795 * chroma samples for both of the luma samples, and thus we don't
4796 * actually get the expected MPEG2 chroma siting convention :(
4797 * The same behaviour is observed on pre-SKL platforms as well.
4798 */
4799u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
4800{
4801 int phase = -0x8000;
4802 u16 trip = 0;
4803
4804 if (chroma_cosited)
4805 phase += (sub - 1) * 0x8000 / sub;
4806
4807 if (phase < 0)
4808 phase = 0x10000 + phase;
4809 else
4810 trip = PS_PHASE_TRIP;
4811
4812 return ((phase >> 2) & PS_PHASE_MASK) | trip;
4813}
4814
/*
 * Stage an update to the CRTC's scaler bookkeeping for one scaler user
 * (either the CRTC itself or a plane).
 *
 * @crtc_state: CRTC state whose scaler_state is updated
 * @force_detach: release the scaler regardless of need
 * @scaler_user: bit index identifying the user in scaler_users
 * @scaler_id: in/out; the user's currently assigned scaler (or -1)
 * @src_w/@src_h/@dst_w/@dst_h: source and destination dimensions
 * @plane_scaler_check: true when called for a plane (enables NV12 checks)
 * @pixel_format: plane fourcc, only meaningful when @plane_scaler_check
 *
 * Only the scaler_users mask and in_use flags are staged here; the actual
 * register programming (including freeing) happens later in plane/panel-fit
 * programming, which is why scaler_id is not reset on release.
 *
 * Returns 0 on success, -EINVAL when the requested scaling is unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  bool plane_scaler_check,
		  uint32_t pixel_format)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int need_scaling;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	need_scaling = src_w != dst_w || src_h != dst_h;

	/* NV12 planes always need a scaler, even at 1:1. */
	if (plane_scaler_check)
		if (pixel_format == DRM_FORMAT_NV12)
			need_scaling = true;

	/* YCbCr 4:2:0 output requires the pipe scaler as well. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
	    scaler_user == SKL_CRTC_INDEX)
		need_scaling = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
	    need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* NV12 has hardware minimums on the source dimensions. */
	if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		DRM_DEBUG_KMS("NV12: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (IS_GEN11(dev_priv) &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (!IS_GEN11(dev_priv) &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4912
4913/**
4914 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4915 *
4916 * @state: crtc's scaler state
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004917 *
4918 * Return
4919 * 0 - scaler_usage updated successfully
4920 * error - requested scaling cannot be supported or other error condition
4921 */
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02004922int skl_update_scaler_crtc(struct intel_crtc_state *state)
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004923{
Ville Syrjälä7c5f93b2015-09-08 13:40:49 +03004924 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004925
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02004926 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
Chandra Konduru77224cd2018-04-09 09:11:13 +05304927 &state->scaler_state.scaler_id,
4928 state->pipe_src_w, state->pipe_src_h,
4929 adjusted_mode->crtc_hdisplay,
4930 adjusted_mode->crtc_vdisplay, false, 0);
Maarten Lankhorst86adf9d2015-06-22 09:50:32 +02004931}
4932
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Stages the scaler request derived from the plane's source/destination
 * rectangles, then validates constraints that only apply when a scaler is
 * actually assigned: no color keying, and a pixel format the scaler
 * supports.
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* A plane with no fb or not visible releases its scaler. */
	bool force_detach = !fb || !plane_state->base.visible;

	/* src rect is in 16.16 fixed point; shift down to whole pixels. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->base.src) >> 16,
				drm_rect_height(&plane_state->base.src) >> 16,
				drm_rect_width(&plane_state->base.dst),
				drm_rect_height(&plane_state->base.dst),
				fb ? true : false, fb ? fb->format->format : 0);

	/* Nothing more to validate when no scaler ended up assigned. */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
4997
Maarten Lankhorste435d6e2015-07-13 16:30:15 +02004998static void skylake_scaler_disable(struct intel_crtc *crtc)
4999{
5000 int i;
5001
5002 for (i = 0; i < crtc->num_scalers; i++)
5003 skl_detach_scaler(crtc, i);
5004}
5005
/*
 * Enable the SKL+ panel fitter (pipe scaler) for @crtc_state.
 *
 * Programs the scaler chosen at check time (scaler_state->scaler_id) with
 * medium filtering, default scaler phases and the window pos/size stored in
 * crtc_state->pch_pfit. No-op when the pfit is not enabled in the state.
 */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int id;

		/* The atomic check should have assigned a scaler already. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* Initial phase for 1:1 scale factor, non-chroma-cosited. */
		uv_rgb_hphase = skl_scaler_calc_phase(1, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		/*
		 * NOTE(review): the phase registers use unlocked _FW writes
		 * while the neighbouring writes take the uncore lock — confirm
		 * this mix is intentional/safe in this context.
		 */
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
5035
/*
 * Enable the ILK-HSW panel fitter for @crtc_state.
 *
 * Programs PF_CTL and the fitter window pos/size from
 * crtc_state->pch_pfit. No-op when the pfit is not enabled in the state.
 */
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;

	if (crtc_state->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			/* IVB/HSW carry the pipe selection in PF_CTL itself */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
				   PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
	}
}
5056
/*
 * Enable IPS for @crtc_state.
 *
 * On Broadwell IPS is enabled through the pcode mailbox and the resulting
 * IPS_CTL state cannot be trusted afterwards; on other platforms (Haswell)
 * it is enabled directly via IPS_CTL and we wait for the hardware to latch
 * the enable bit. No-op when the state does not have IPS enabled.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	/* At least one non-cursor plane must be enabled at this point. */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		mutex_unlock(&dev_priv->pcu_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
5096
/*
 * Disable IPS for @crtc_state.
 *
 * Broadwell goes through the pcode mailbox (and we poll IPS_CTL until the
 * hardware reports IPS off); other platforms (Haswell) clear IPS_CTL
 * directly. Ends with a vblank wait so callers may safely disable the
 * plane afterwards. No-op when the state does not have IPS enabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->pcu_lock);
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5127
Maarten Lankhorst7cac9452015-04-21 17:12:55 +03005128static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005129{
Maarten Lankhorst7cac9452015-04-21 17:12:55 +03005130 if (intel_crtc->overlay) {
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005131 struct drm_device *dev = intel_crtc->base.dev;
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005132
5133 mutex_lock(&dev->struct_mutex);
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005134 (void) intel_overlay_switch_off(intel_crtc->overlay);
Ville Syrjäläd3eedb12014-05-08 19:23:13 +03005135 mutex_unlock(&dev->struct_mutex);
5136 }
5137
5138 /* Let userspace switch the overlay on again. In most cases userspace
5139 * has to recompute where to put it anyway.
5140 */
5141}
5142
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005143/**
5144 * intel_post_enable_primary - Perform operations after enabling primary plane
5145 * @crtc: the CRTC whose primary plane was just enabled
Chris Wilsonc38c1452018-02-14 13:49:22 +00005146 * @new_crtc_state: the enabling state
Maarten Lankhorst87d43002015-04-21 17:12:54 +03005147 *
5148 * Performs potentially sleeping operations that must be done after the primary
5149 * plane is enabled, such as updating FBC and IPS. Note that this may be
5150 * called due to an explicit primary plane update, or due to an implicit
5151 * re-enable that is caused when a sprite plane is updated to no longer
5152 * completely hide the primary plane.
5153 */
static void
intel_post_enable_primary(struct drm_crtc *crtc,
			  const struct intel_crtc_state *new_crtc_state)
{
	/* Note: @new_crtc_state is currently unused by this function. */
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
5177
/* FIXME get rid of this and use pre_plane_update */
/*
 * Prepare for disabling the primary plane outside of a full atomic commit:
 * quiesce gen2 underrun reporting, turn off IPS, and disable memory
 * self-refresh (with a vblank wait) so the plane disable truly latches.
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	hsw_disable_ips(to_intel_crtc_state(crtc->state));

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);
}
5209
Maarten Lankhorst24f28452017-11-22 19:39:01 +01005210static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5211 const struct intel_crtc_state *new_crtc_state)
5212{
5213 if (!old_crtc_state->ips_enabled)
5214 return false;
5215
5216 if (needs_modeset(&new_crtc_state->base))
5217 return true;
5218
5219 return !new_crtc_state->ips_enabled;
5220}
5221
5222static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5223 const struct intel_crtc_state *new_crtc_state)
5224{
5225 if (!new_crtc_state->ips_enabled)
5226 return false;
5227
5228 if (needs_modeset(&new_crtc_state->base))
5229 return true;
5230
5231 /*
5232 * We can't read out IPS on broadwell, assume the worst and
5233 * forcibly enable IPS on the first fastset.
5234 */
5235 if (new_crtc_state->update_pipe &&
5236 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5237 return true;
5238
5239 return !old_crtc_state->ips_enabled;
5240}
5241
Maarten Lankhorst8e021152018-05-12 03:03:12 +05305242static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5243 const struct intel_crtc_state *crtc_state)
5244{
5245 if (!crtc_state->nv12_planes)
5246 return false;
5247
5248 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
5249 return false;
5250
5251 if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
5252 IS_CANNONLAKE(dev_priv))
5253 return true;
5254
5255 return false;
5256}
5257
/*
 * Post-vblank half of a plane update on @old_crtc_state's crtc: flush
 * frontbuffer tracking, program post-update watermarks, re-enable IPS/FBC
 * and the primary-plane bookkeeping, and lift the NV12 workaround when it
 * is no longer needed.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	/* The new crtc state lives in the same top-level atomic state. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	/* Only act on the primary plane if it was part of this update. */
	if (old_primary_state) {
		struct drm_plane_state *new_primary_state =
			drm_atomic_get_new_plane_state(old_state, primary);

		intel_fbc_post_update(crtc);

		/* Primary went from hidden to visible (or full modeset). */
		if (new_primary_state->visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}

	/* Display WA 827 */
	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
	    !needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, false);
		skl_wa_528(dev_priv, crtc->pipe, false);
	}
}
5298
/*
 * Pre-vblank half of a plane update on @old_crtc_state's crtc: disable
 * IPS when required, prepare FBC and gen2 underrun reporting, apply the
 * NV12 workaround, quiesce self-refresh/LP watermarks where hardware
 * demands it, and program intermediate watermarks for non-modeset updates.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
		hsw_disable_ips(old_crtc_state);

	/* Only act on the primary plane if it is part of this update. */
	if (old_primary_state) {
		struct intel_plane_state *new_primary_state =
			intel_atomic_get_new_plane_state(old_intel_state,
							 to_intel_plane(primary));

		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So disable underrun reporting before all the planes get disabled.
		 */
		if (IS_GEN2(dev_priv) && old_primary_state->visible &&
		    (modeset || !new_primary_state->base.visible))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	}

	/* Display WA 827 */
	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
	    needs_nv12_wa(dev_priv, pipe_config)) {
		skl_wa_clkgate(dev_priv, crtc->pipe, true);
		skl_wa_528(dev_priv, crtc->pipe, true);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
	    old_crtc_state->base.active)
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done. No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks. For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
5389
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005390static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask)
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005391{
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005392 struct drm_device *dev = crtc->base.dev;
5393 struct intel_plane *plane;
5394 unsigned fb_bits = 0;
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005395
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005396 intel_crtc_dpms_overlay_disable(crtc);
Maarten Lankhorst27321ae2015-04-21 17:12:52 +03005397
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005398 for_each_intel_plane_on_crtc(dev, crtc, plane) {
5399 if (plane_mask & BIT(plane->id)) {
5400 plane->disable_plane(plane, crtc);
Ville Syrjäläf98551a2014-05-22 17:48:06 +03005401
Maarten Lankhorstf59e9702018-09-20 12:27:07 +02005402 fb_bits |= plane->frontbuffer_bit;
5403 }
5404 }
5405
5406 intel_frontbuffer_flip(to_i915(dev), fb_bits);
Ville Syrjäläa5c4d7b2014-03-07 18:32:13 +02005407}
5408
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005409static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005410 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005411 struct drm_atomic_state *old_state)
5412{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005413 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005414 struct drm_connector *conn;
5415 int i;
5416
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005417 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005418 struct intel_encoder *encoder =
5419 to_intel_encoder(conn_state->best_encoder);
5420
5421 if (conn_state->crtc != crtc)
5422 continue;
5423
5424 if (encoder->pre_pll_enable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005425 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005426 }
5427}
5428
5429static void intel_encoders_pre_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005430 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005431 struct drm_atomic_state *old_state)
5432{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005433 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005434 struct drm_connector *conn;
5435 int i;
5436
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005437 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005438 struct intel_encoder *encoder =
5439 to_intel_encoder(conn_state->best_encoder);
5440
5441 if (conn_state->crtc != crtc)
5442 continue;
5443
5444 if (encoder->pre_enable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005445 encoder->pre_enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005446 }
5447}
5448
5449static void intel_encoders_enable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005450 struct intel_crtc_state *crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005451 struct drm_atomic_state *old_state)
5452{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005453 struct drm_connector_state *conn_state;
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005454 struct drm_connector *conn;
5455 int i;
5456
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005457 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005458 struct intel_encoder *encoder =
5459 to_intel_encoder(conn_state->best_encoder);
5460
5461 if (conn_state->crtc != crtc)
5462 continue;
5463
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005464 encoder->enable(encoder, crtc_state, conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005465 intel_opregion_notify_encoder(encoder, true);
5466 }
5467}
5468
5469static void intel_encoders_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005470 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005471 struct drm_atomic_state *old_state)
5472{
5473 struct drm_connector_state *old_conn_state;
5474 struct drm_connector *conn;
5475 int i;
5476
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005477 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005478 struct intel_encoder *encoder =
5479 to_intel_encoder(old_conn_state->best_encoder);
5480
5481 if (old_conn_state->crtc != crtc)
5482 continue;
5483
5484 intel_opregion_notify_encoder(encoder, false);
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005485 encoder->disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005486 }
5487}
5488
5489static void intel_encoders_post_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005490 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005491 struct drm_atomic_state *old_state)
5492{
5493 struct drm_connector_state *old_conn_state;
5494 struct drm_connector *conn;
5495 int i;
5496
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005497 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005498 struct intel_encoder *encoder =
5499 to_intel_encoder(old_conn_state->best_encoder);
5500
5501 if (old_conn_state->crtc != crtc)
5502 continue;
5503
5504 if (encoder->post_disable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005505 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005506 }
5507}
5508
5509static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005510 struct intel_crtc_state *old_crtc_state,
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005511 struct drm_atomic_state *old_state)
5512{
5513 struct drm_connector_state *old_conn_state;
5514 struct drm_connector *conn;
5515 int i;
5516
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +01005517 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005518 struct intel_encoder *encoder =
5519 to_intel_encoder(old_conn_state->best_encoder);
5520
5521 if (old_conn_state->crtc != crtc)
5522 continue;
5523
5524 if (encoder->post_pll_disable)
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005525 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
Maarten Lankhorstfb1c98b2016-08-09 17:04:03 +02005526 }
5527}
5528
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005529static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
5530 struct drm_atomic_state *old_state)
Jesse Barnesf67a5592011-01-05 10:31:48 -08005531{
Maarten Lankhorst4a806552016-08-09 17:04:01 +02005532 struct drm_crtc *crtc = pipe_config->base.crtc;
Jesse Barnesf67a5592011-01-05 10:31:48 -08005533 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01005534 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005535 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5536 int pipe = intel_crtc->pipe;
Maarten Lankhorstccf010f2016-11-08 13:55:32 +01005537 struct intel_atomic_state *old_intel_state =
5538 to_intel_atomic_state(old_state);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005539
Maarten Lankhorst53d9f4e2015-06-01 12:49:52 +02005540 if (WARN_ON(intel_crtc->active))
Jesse Barnesf67a5592011-01-05 10:31:48 -08005541 return;
5542
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005543 /*
5544 * Sometimes spurious CPU pipe underruns happen during FDI
5545 * training, at least with VGA+HDMI cloning. Suppress them.
5546 *
5547 * On ILK we get an occasional spurious CPU pipe underruns
5548 * between eDP port A enable and vdd enable. Also PCH port
5549 * enable seems to result in the occasional CPU pipe underrun.
5550 *
5551 * Spurious PCH underruns also occur during PCH enabling.
5552 */
Ville Syrjälä2b5b6312018-05-24 22:04:06 +03005553 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5554 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
Ville Syrjälä81b088c2015-10-30 19:21:31 +02005555
Maarten Lankhorst65c307f2018-10-05 11:52:44 +02005556 if (pipe_config->has_pch_encoder)
5557 intel_prepare_shared_dpll(pipe_config);
Daniel Vetterb14b1052014-04-24 23:55:13 +02005558
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005559 if (intel_crtc_has_dp_encoder(pipe_config))
Maarten Lankhorst4c354752018-10-11 12:04:49 +02005560 intel_dp_set_m_n(pipe_config, M1_N1);
Daniel Vetter29407aa2014-04-24 23:55:08 +02005561
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02005562 intel_set_pipe_timings(pipe_config);
5563 intel_set_pipe_src_size(pipe_config);
Daniel Vetter29407aa2014-04-24 23:55:08 +02005564
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005565 if (pipe_config->has_pch_encoder) {
Maarten Lankhorst4c354752018-10-11 12:04:49 +02005566 intel_cpu_transcoder_set_m_n(pipe_config,
5567 &pipe_config->fdi_m_n, NULL);
Daniel Vetter29407aa2014-04-24 23:55:08 +02005568 }
5569
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02005570 ironlake_set_pipeconf(pipe_config);
Daniel Vetter29407aa2014-04-24 23:55:08 +02005571
Jesse Barnesf67a5592011-01-05 10:31:48 -08005572 intel_crtc->active = true;
Paulo Zanoni86642812013-04-12 17:57:57 -03005573
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005574 intel_encoders_pre_enable(crtc, pipe_config, old_state);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005575
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005576 if (pipe_config->has_pch_encoder) {
Daniel Vetterfff367c2012-10-27 15:50:28 +02005577 /* Note: FDI PLL enabling _must_ be done before we enable the
5578 * cpu pipes, hence this is separate from all the other fdi/pch
5579 * enabling. */
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02005580 ironlake_fdi_pll_enable(pipe_config);
Daniel Vetter46b6f812012-09-06 22:08:33 +02005581 } else {
5582 assert_fdi_tx_disabled(dev_priv, pipe);
5583 assert_fdi_rx_disabled(dev_priv, pipe);
5584 }
Jesse Barnesf67a5592011-01-05 10:31:48 -08005585
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005586 ironlake_pfit_enable(pipe_config);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005587
Jesse Barnes9c54c0d2011-06-15 23:32:33 +02005588 /*
5589 * On ILK+ LUT must be loaded before the pipe is running but with
5590 * clocks enabled
5591 */
Maarten Lankhorstb95c5322016-03-30 17:16:34 +02005592 intel_color_load_luts(&pipe_config->base);
Jesse Barnes9c54c0d2011-06-15 23:32:33 +02005593
Imre Deak1d5bf5d2016-02-29 22:10:33 +02005594 if (dev_priv->display.initial_watermarks != NULL)
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005595 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
Ville Syrjälä4972f702017-11-29 17:37:32 +02005596 intel_enable_pipe(pipe_config);
Jesse Barnesf67a5592011-01-05 10:31:48 -08005597
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005598 if (pipe_config->has_pch_encoder)
Ville Syrjälä5a0b3852018-05-18 18:29:27 +03005599 ironlake_pch_enable(old_intel_state, pipe_config);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005600
Daniel Vetterf9b61ff2015-01-07 13:54:39 +01005601 assert_vblank_disabled(crtc);
5602 drm_crtc_vblank_on(crtc);
5603
Maarten Lankhorstfd6bbda2016-08-09 17:04:04 +02005604 intel_encoders_enable(crtc, pipe_config, old_state);
Daniel Vetter61b77dd2012-07-02 00:16:19 +02005605
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01005606 if (HAS_PCH_CPT(dev_priv))
Daniel Vettera1520312013-05-03 11:49:50 +02005607 cpt_verify_modeset(dev, intel_crtc->pipe);
Ville Syrjälä37ca8d42015-10-30 19:20:27 +02005608
Ville Syrjäläea80a662018-05-24 22:04:05 +03005609 /*
5610 * Must wait for vblank to avoid spurious PCH FIFO underruns.
5611 * And a second vblank wait is needed at least on ILK with
5612 * some interlaced HDMI modes. Let's do the double wait always
5613 * in case there are more corner cases we don't know about.
5614 */
Maarten Lankhorst6f405632018-10-04 11:46:04 +02005615 if (pipe_config->has_pch_encoder) {
Ville Syrjälä0f0f74b2016-10-31 22:37:06 +02005616 intel_wait_for_vblank(dev_priv, pipe);
Ville Syrjäläea80a662018-05-24 22:04:05 +03005617 intel_wait_for_vblank(dev_priv, pipe);
5618 }
Ville Syrjäläb2c05932016-04-01 21:53:17 +03005619 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
Ville Syrjälä37ca8d42015-10-30 19:20:27 +02005620 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
Jesse Barnes6be4a602010-09-10 10:26:01 -07005621}
5622
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005623/* IPS only exists on ULT machines and is tied to pipe A. */
5624static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5625{
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01005626 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
Paulo Zanoni42db64e2013-05-31 16:33:22 -03005627}
5628
Imre Deaked69cd42017-10-02 10:55:57 +03005629static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5630 enum pipe pipe, bool apply)
5631{
5632 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5633 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5634
5635 if (apply)
5636 val |= mask;
5637 else
5638 val &= ~mask;
5639
5640 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5641}
5642
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005643static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5644{
5645 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5646 enum pipe pipe = crtc->pipe;
5647 uint32_t val;
5648
Rodrigo Vivi443d5e32018-10-04 08:18:14 -07005649 val = MBUS_DBOX_A_CREDIT(2);
5650 val |= MBUS_DBOX_BW_CREDIT(1);
5651 val |= MBUS_DBOX_B_CREDIT(8);
Mahesh Kumarc3cc39c2018-02-05 15:21:31 -02005652
5653 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5654}
5655
/*
 * Full modeset enable sequence for HSW+ (DDI-based) platforms.
 *
 * Brings up the shared DPLL, encoders, CPU transcoder, panel fitter and
 * pipe in the hardware-mandated order, then releases the crtc's vblank
 * and applies the post-enable workarounds. DSI transcoders program their
 * timings/pipeconf elsewhere, hence the transcoder_is_dsi() guards.
 *
 * @pipe_config: the new crtc state being enabled
 * @old_state:   the full atomic state this enable is part of
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;
	u32 pipe_chicken;

	/* Enabling an already-active crtc is a driver logic error. */
	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (pipe_config->shared_dpll)
		intel_enable_shared_dpll(pipe_config);

	/* ICL+: associate the enabled PLLs with their DDI ports. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_map_plls_to_ports(crtc, pipe_config, old_state);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	/* DSI transcoders program their own timings. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(pipe_config);

	intel_set_pipe_src_size(pipe_config);

	/* Pixel multiplier register exists for non-eDP, non-DSI transcoders. */
	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   pipe_config->pixel_multiplier - 1);
	}

	/* FDI M/N values are needed when driving a PCH-connected output. */
	if (pipe_config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(pipe_config,
					     &pipe_config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(pipe_config);

	haswell_set_pipemisc(pipe_config);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 pipe_config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(pipe_config);
	else
		ironlake_pfit_enable(pipe_config);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	/*
	 * Display WA #1153: enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
		if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
			/*
			 * NOTE(review): read uses I915_READ but the write
			 * uses the raw I915_WRITE_FW accessor — confirm a
			 * forcewake/uncore lock section is not required here.
			 */
			I915_WRITE_FW(PIPE_CHICKEN(pipe),
				      pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
	}

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (pipe_config->has_pch_encoder)
		lpt_pch_enable(old_intel_state, pipe_config);

	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	/* Undo WA #1180 after the first vblank with scaling active. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
5776
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005777static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005778{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005779 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
5780 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5781 enum pipe pipe = crtc->pipe;
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005782
5783 /* To avoid upsetting the power well on haswell only disable the pfit if
5784 * it's in use. The hw state code will make sure we get this right. */
Maarten Lankhorstb2562712018-10-04 11:45:53 +02005785 if (old_crtc_state->pch_pfit.enabled) {
Daniel Vetter3f8dce32013-05-08 10:36:30 +02005786 I915_WRITE(PF_CTL(pipe), 0);
5787 I915_WRITE(PF_WIN_POS(pipe), 0);
5788 I915_WRITE(PF_WIN_SZ(pipe), 0);
5789 }
5790}
5791
/*
 * Full modeset disable sequence for ILK/SNB/IVB (PCH-based) platforms.
 *
 * Tears down encoders, pipe, panel fitter, FDI and (when present) the
 * PCH transcoder, in that order, with FIFO underrun reporting suppressed
 * around the sequence to hide known-spurious underruns.
 *
 * @old_crtc_state: the crtc state being torn down
 * @old_state:      the full atomic state this disable is part of
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(old_crtc_state);

	/* FDI feeds the PCH; only active when a PCH encoder was in use. */
	if (old_crtc_state->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (old_crtc_state->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		/* CPT PCH has extra per-pipe DP/DPLL routing to clear. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Everything is off; re-arm underrun reporting. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5850
/*
 * Full modeset disable sequence for HSW+ (DDI-based) platforms.
 *
 * Mirror of haswell_crtc_enable(): encoders, pipe, transcoder function,
 * scaler/pfit, then post-disable hooks and (ICL+) PLL/port unmapping.
 * DSI transcoders skip the pipe/transcoder steps handled elsewhere.
 *
 * @old_crtc_state: the crtc state being torn down
 * @old_state:      the full atomic state this disable is part of
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	/* Release MST virtual channel bandwidth before the transcoder goes. */
	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	/* SKL+ uses the unified scaler; older gens the ILK-style pfit. */
	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
}
5884
/*
 * Enable the GMCH panel fitter for pre-ILK platforms.
 *
 * No-op when the new state doesn't use the fitter. The ratios register
 * is programmed before the control register so the fitter is never
 * enabled with stale scaling ratios.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
5907
Mahesh Kumar176597a2018-10-04 14:20:43 +05305908bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
5909{
5910 if (port == PORT_NONE)
5911 return false;
5912
5913 if (IS_ICELAKE(dev_priv))
5914 return port <= PORT_B;
5915
5916 return false;
5917}
5918
Paulo Zanoniac213c12018-05-21 17:25:37 -07005919bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
5920{
5921 if (IS_ICELAKE(dev_priv))
5922 return port >= PORT_C && port <= PORT_F;
5923
5924 return false;
5925}
5926
5927enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
5928{
5929 if (!intel_port_is_tc(dev_priv, port))
5930 return PORT_TC_NONE;
5931
5932 return port - PORT_C;
5933}
5934
/*
 * Map a DDI port to the display power domain covering its lanes.
 *
 * Unknown ports trigger MISSING_CASE() and fall back to
 * POWER_DOMAIN_PORT_OTHER rather than failing outright.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5955
/*
 * Compute the bitmask (u64, one bit per enum intel_display_power_domain)
 * of power domains required by @crtc in the given @crtc_state.
 *
 * An inactive crtc needs no domains. Otherwise the mask covers the pipe,
 * its cpu transcoder, the panel fitter when used (or force-enabled for
 * pass-through), every bound encoder's domain, audio on DDI platforms,
 * and the shared PLL when one is in use.
 */
static u64 get_crtc_power_domains(struct drm_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Each encoder driven by this crtc contributes its own domain. */
	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_PLLS);

	return mask;
}
5990
Ander Conselvan de Oliveirad2d15012017-02-13 16:57:33 +02005991static u64
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01005992modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5993 struct intel_crtc_state *crtc_state)
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005994{
Chris Wilsonfac5e232016-07-04 11:34:36 +01005995 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005996 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5997 enum intel_display_power_domain domain;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02005998 u64 domains, new_domains, old_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02005999
6000 old_domains = intel_crtc->enabled_power_domains;
Maarten Lankhorst74bff5f2016-02-10 13:49:36 +01006001 intel_crtc->enabled_power_domains = new_domains =
6002 get_crtc_power_domains(crtc, crtc_state);
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006003
Daniel Vetter5a21b662016-05-24 17:13:53 +02006004 domains = new_domains & ~old_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006005
6006 for_each_power_domain(domain, domains)
6007 intel_display_power_get(dev_priv, domain);
6008
Daniel Vetter5a21b662016-05-24 17:13:53 +02006009 return old_domains & ~new_domains;
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006010}
6011
6012static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +02006013 u64 domains)
Maarten Lankhorst292b9902015-07-13 16:30:27 +02006014{
6015 enum intel_display_power_domain domain;
6016
6017 for_each_power_domain(domain, domains)
6018 intel_display_power_put(dev_priv, domain);
6019}
6020
/*
 * Full modeset enable sequence for Valleyview/Cherryview.
 *
 * Programs timings, pipeconf, CSC and (on CHV pipe B) the legacy blend
 * and canvas registers, brings up the platform PLL (chv_* or vlv_*),
 * panel fitter and LUTs, then enables the pipe and encoders.
 *
 * @pipe_config: the new crtc state being enabled
 * @old_state:   the full atomic state this enable is part of
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc is a driver logic error. */
	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	/* CHV pipe B: select legacy blending and clear the canvas color. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(pipe_config);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	/* CHV and VLV have different DPLL programming sequences. */
	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, pipe_config);
		chv_enable_pll(intel_crtc, pipe_config);
	} else {
		vlv_prepare_pll(intel_crtc, pipe_config);
		vlv_enable_pll(intel_crtc, pipe_config);
	}

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(&pipe_config->base);

	/* No NULL check here: VLV/CHV always provide initial_watermarks. */
	dev_priv->display.initial_watermarks(old_intel_state,
					     pipe_config);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6079
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006080static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006081{
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006082 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6083 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006084
Maarten Lankhorstb2354c72018-10-04 11:45:57 +02006085 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6086 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
Daniel Vetterf13c2ef2014-04-24 23:55:10 +02006087}
6088
/*
 * Enable a pipe on gen2-4 style (GMCH) hardware.
 *
 * The ordering below is the required hardware sequence: dividers and
 * timings are programmed while the pipe is still off, the DPLL is
 * enabled after the encoders' pre_enable hooks, and the pipe itself is
 * enabled only after watermarks have been set up.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active crtc indicates broken state tracking. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(pipe_config);

	/* DP needs its link M/N values before the pipe is brought up. */
	if (intel_crtc_has_dp_encoder(pipe_config))
		intel_dp_set_m_n(pipe_config, M1_N1);

	intel_set_pipe_timings(pipe_config);
	intel_set_pipe_src_size(pipe_config);

	i9xx_set_pipeconf(pipe_config);

	intel_crtc->active = true;

	/* Gen2 has no FIFO underrun reporting hardware. */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(pipe_config);

	intel_color_load_luts(&pipe_config->base);

	/* Platforms without an initial_watermarks hook use the legacy path. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6138
Maarten Lankhorstb2562712018-10-04 11:45:53 +02006139static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
Daniel Vetter87476d62013-04-11 16:29:06 +02006140{
Maarten Lankhorstb2562712018-10-04 11:45:53 +02006141 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6142 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetter328d8e82013-05-08 10:36:31 +02006143
Maarten Lankhorstb2562712018-10-04 11:45:53 +02006144 if (!old_crtc_state->gmch_pfit.control)
Daniel Vetter328d8e82013-05-08 10:36:31 +02006145 return;
Daniel Vetter87476d62013-04-11 16:29:06 +02006146
6147 assert_pipe_disabled(dev_priv, crtc->pipe);
6148
Chris Wilson43031782018-09-13 14:16:26 +01006149 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6150 I915_READ(PFIT_CONTROL));
Daniel Vetter328d8e82013-05-08 10:36:31 +02006151 I915_WRITE(PFIT_CONTROL, 0);
Daniel Vetter87476d62013-04-11 16:29:06 +02006152}
6153
/*
 * Disable a pipe on gen2-4 style (GMCH) hardware, mirroring
 * i9xx_crtc_enable() in reverse: encoders first, then the pipe, the
 * panel fitter, and finally the DPLL.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* DSI PLLs are handled by the encoder hooks; skip them here. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	/* Gen2 has no FIFO underrun reporting hardware. */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* Legacy watermark path for platforms without initial_watermarks. */
	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
6202
/*
 * Force a crtc off outside of a full atomic commit (used e.g. during
 * initial hw state sanitization). Disables the planes and the crtc via
 * the normal hooks, then scrubs all software state (crtc/connector/
 * encoder masks, watermarks, shared DPLL, power domains) to match.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off every plane that is still visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/* A transient atomic state is needed to drive the disable hook. */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Bring the software state in line with the now-disabled hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

	/* Release every power domain this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6272
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006273/*
6274 * turn all crtc's off, but do not adjust state
6275 * This has to be paired with a call to intel_modeset_setup_hw_state.
6276 */
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006277int intel_display_suspend(struct drm_device *dev)
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006278{
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006279 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006280 struct drm_atomic_state *state;
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006281 int ret;
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006282
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006283 state = drm_atomic_helper_suspend(dev);
6284 ret = PTR_ERR_OR_ZERO(state);
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006285 if (ret)
6286 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
Maarten Lankhorste2c8b872016-02-16 10:06:14 +01006287 else
6288 dev_priv->modeset_restore_state = state;
Maarten Lankhorst70e0bd72015-07-13 16:30:29 +02006289 return ret;
Maarten Lankhorst6b72d482015-06-01 12:49:47 +02006290}
6291
/* Default encoder destroy hook: tear down DRM state and free the wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_enc = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_enc);
}
6299
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). Each I915_STATE_WARN fires a warning when the
 * tracked software state disagrees with what the connector hardware
 * reports; the checks are ordered so later ones can assume the earlier
 * ones held (hence the early returns on NULL crtc_state / MST encoders). */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* Hardware says the connector is enabled. */
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST connectors share an encoder; skip the 1:1 checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hardware says the connector is off. */
		I915_STATE_WARN(crtc_state && crtc_state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6338
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006339static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
Ville Syrjäläd272ddf2015-03-11 18:52:31 +02006340{
Ander Conselvan de Oliveira6d293982015-03-30 08:33:12 +03006341 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6342 return crtc_state->fdi_lanes;
Ville Syrjäläd272ddf2015-03-11 18:52:31 +02006343
6344 return 0;
6345}
6346
/*
 * Validate the FDI lane count for a pipe against the platform limits and,
 * on 3-pipe parts, against the lanes already claimed by the pipe it
 * shares an FDI link with (B shares with C). Returns 0 on success,
 * -EINVAL when the config can't work, or a PTR_ERR from fetching the
 * other pipe's state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		/* 4 lanes is the absolute hardware maximum. */
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no FDI lane sharing to worry about. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		/* Pipe A has its own FDI link. */
		return 0;
	case PIPE_B:
		/* Up to 2 lanes never conflict with pipe C. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B steals C's lanes; C must not need any. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C's lanes are only free while B uses at most 2. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6418
#define RETRY 1
/*
 * Compute the FDI link configuration (lane count and M/N values) for a
 * PCH-connected pipe. When the lane check fails with -EINVAL and the
 * pipe bpp can still be lowered (down to 6 bpc), the bpp is reduced by
 * one step and the whole computation retried; RETRY is then returned so
 * the caller re-runs compute_config with the reduced bpp.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		/* Drop one bpc (2 bits x 3 channels) and try again. */
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6464
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006465bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006466{
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006467 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6468 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6469
6470 /* IPS only exists on ULT machines and is tied to pipe A. */
6471 if (!hsw_crtc_supports_ips(crtc))
Ville Syrjälä6e644622017-08-17 17:55:09 +03006472 return false;
6473
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006474 if (!i915_modparams.enable_ips)
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006475 return false;
6476
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006477 if (crtc_state->pipe_bpp > 24)
6478 return false;
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006479
6480 /*
Ville Syrjäläb432e5c2015-06-03 15:45:13 +03006481 * We compare against max which means we must take
6482 * the increased cdclk requirement into account when
6483 * calculating the new cdclk.
6484 *
6485 * Should measure whether using a lower cdclk w/o IPS
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006486 */
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006487 if (IS_BROADWELL(dev_priv) &&
6488 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
6489 return false;
6490
6491 return true;
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006492}
6493
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006494static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
Paulo Zanoni42db64e2013-05-31 16:33:22 -03006495{
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006496 struct drm_i915_private *dev_priv =
6497 to_i915(crtc_state->base.crtc->dev);
6498 struct intel_atomic_state *intel_state =
6499 to_intel_atomic_state(crtc_state->base.state);
Ville Syrjälä8cfb3402015-06-03 15:45:11 +03006500
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006501 if (!hsw_crtc_state_ips_capable(crtc_state))
6502 return false;
6503
6504 if (crtc_state->ips_force_disable)
6505 return false;
6506
Maarten Lankhorstadbe5c52017-11-22 19:39:06 +01006507 /* IPS should be fine as long as at least one plane is enabled. */
6508 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
Maarten Lankhorst24f28452017-11-22 19:39:01 +01006509 return false;
6510
6511 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6512 if (IS_BROADWELL(dev_priv) &&
6513 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6514 return false;
6515
6516 return true;
Paulo Zanoni42db64e2013-05-31 16:33:22 -03006517}
6518
Ville Syrjälä39acb4a2015-10-30 23:39:38 +02006519static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6520{
6521 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6522
6523 /* GDG double wide on either pipe, otherwise pipe A only */
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +00006524 return INTEL_GEN(dev_priv) < 4 &&
Ville Syrjälä39acb4a2015-10-30 23:39:38 +02006525 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6526}
6527
Ville Syrjäläceb99322017-01-20 20:22:05 +02006528static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6529{
6530 uint32_t pixel_rate;
6531
6532 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6533
6534 /*
6535 * We only use IF-ID interlacing. If we ever use
6536 * PF-ID we'll need to adjust the pixel_rate here.
6537 */
6538
6539 if (pipe_config->pch_pfit.enabled) {
6540 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
6541 uint32_t pfit_size = pipe_config->pch_pfit.size;
6542
6543 pipe_w = pipe_config->pipe_src_w;
6544 pipe_h = pipe_config->pipe_src_h;
6545
6546 pfit_w = (pfit_size >> 16) & 0xFFFF;
6547 pfit_h = pfit_size & 0xFFFF;
6548 if (pipe_w < pfit_w)
6549 pipe_w = pfit_w;
6550 if (pipe_h < pfit_h)
6551 pipe_h = pfit_h;
6552
6553 if (WARN_ON(!pfit_w || !pfit_h))
6554 return pixel_rate;
6555
6556 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
6557 pfit_w * pfit_h);
6558 }
6559
6560 return pixel_rate;
6561}
6562
Ville Syrjäläa7d1b3f2017-01-26 21:50:31 +02006563static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6564{
6565 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6566
6567 if (HAS_GMCH_DISPLAY(dev_priv))
6568 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6569 crtc_state->pixel_rate =
6570 crtc_state->base.adjusted_mode.crtc_clock;
6571 else
6572 crtc_state->pixel_rate =
6573 ilk_pipe_pixel_rate(crtc_state);
6574}
6575
/*
 * Validate and finish a crtc's mode configuration: clock limits (with
 * gen2/3 double-wide handling), YCbCr/CTM exclusivity, source-width
 * parity restrictions, the hsync-offset workaround, pixel rate, and
 * finally FDI computation for PCH-connected pipes. Returns 0, -EINVAL,
 * or RETRY (via ironlake_fdi_compute_config).
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Single-wide pipes are limited to 90% of cdclk on gen2/3. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH-connected pipes additionally need a valid FDI config. */
	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6650
Zhenyu Wang2c072452009-06-05 15:38:42 +08006651static void
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006652intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
Zhenyu Wang2c072452009-06-05 15:38:42 +08006653{
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006654 while (*num > DATA_LINK_M_N_MASK ||
6655 *den > DATA_LINK_M_N_MASK) {
Zhenyu Wang2c072452009-06-05 15:38:42 +08006656 *num >>= 1;
6657 *den >>= 1;
6658 }
6659}
6660
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006661static void compute_m_n(unsigned int m, unsigned int n,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006662 uint32_t *ret_m, uint32_t *ret_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006663 bool constant_n)
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006664{
Jani Nikula9a86cda2017-03-27 14:33:25 +03006665 /*
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006666 * Several DP dongles in particular seem to be fussy about
6667 * too large link M/N values. Give N value as 0x8000 that
6668 * should be acceptable by specific devices. 0x8000 is the
6669 * specified fixed N value for asynchronous clock mode,
6670 * which the devices expect also in synchronous clock mode.
Jani Nikula9a86cda2017-03-27 14:33:25 +03006671 */
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006672 if (constant_n)
6673 *ret_n = 0x8000;
6674 else
6675 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
Jani Nikula9a86cda2017-03-27 14:33:25 +03006676
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006677 *ret_m = div_u64((uint64_t) m * *ret_n, n);
6678 intel_reduce_m_n_ratio(ret_m, ret_n);
6679}
6680
Daniel Vettere69d0bc2012-11-29 15:59:36 +01006681void
6682intel_link_compute_m_n(int bits_per_pixel, int nlanes,
6683 int pixel_clock, int link_clock,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006684 struct intel_link_m_n *m_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006685 bool constant_n)
Zhenyu Wang2c072452009-06-05 15:38:42 +08006686{
Daniel Vettere69d0bc2012-11-29 15:59:36 +01006687 m_n->tu = 64;
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006688
6689 compute_m_n(bits_per_pixel * pixel_clock,
6690 link_clock * nlanes * 8,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006691 &m_n->gmch_m, &m_n->gmch_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006692 constant_n);
Ville Syrjäläa65851a2013-04-23 15:03:34 +03006693
6694 compute_m_n(pixel_clock, link_clock,
Jani Nikulab31e85e2017-05-18 14:10:25 +03006695 &m_n->link_m, &m_n->link_n,
Lee, Shawn C53ca2ed2018-09-11 23:22:50 -07006696 constant_n);
Zhenyu Wang2c072452009-06-05 15:38:42 +08006697}
6698
Chris Wilsona7615032011-01-12 17:04:08 +00006699static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6700{
Michal Wajdeczko4f044a82017-09-19 19:38:44 +00006701 if (i915_modparams.panel_use_ssc >= 0)
6702 return i915_modparams.panel_use_ssc != 0;
Rodrigo Vivi41aa3442013-05-09 20:03:18 -03006703 return dev_priv->vbt.lvds_use_ssc
Keith Packard435793d2011-07-12 14:56:22 -07006704 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
Chris Wilsona7615032011-01-12 17:04:08 +00006705}
6706
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006707static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
Jesse Barnesc65d77d2011-12-15 12:30:36 -08006708{
Daniel Vetter7df00d72013-05-21 21:54:55 +02006709 return (1 << dpll->n) << 16 | dpll->m2;
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006710}
Daniel Vetterf47709a2013-03-28 10:42:02 +01006711
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006712static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6713{
6714 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
Jesse Barnesc65d77d2011-12-15 12:30:36 -08006715}
6716
Daniel Vetterf47709a2013-03-28 10:42:02 +01006717static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006718 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03006719 struct dpll *reduced_clock)
Jesse Barnesa7516a02011-12-15 12:30:37 -08006720{
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02006721 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006722 u32 fp, fp2 = 0;
6723
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +02006724 if (IS_PINEVIEW(dev_priv)) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006725 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006726 if (reduced_clock)
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006727 fp2 = pnv_dpll_compute_fp(reduced_clock);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006728 } else {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006729 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006730 if (reduced_clock)
Daniel Vetter7429e9d2013-04-20 17:19:46 +02006731 fp2 = i9xx_dpll_compute_fp(reduced_clock);
Jesse Barnesa7516a02011-12-15 12:30:37 -08006732 }
6733
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006734 crtc_state->dpll_hw_state.fp0 = fp;
Jesse Barnesa7516a02011-12-15 12:30:37 -08006735
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03006736 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
Rodrigo Viviab585de2015-03-24 12:40:09 -07006737 reduced_clock) {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006738 crtc_state->dpll_hw_state.fp1 = fp2;
Jesse Barnesa7516a02011-12-15 12:30:37 -08006739 } else {
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02006740 crtc_state->dpll_hw_state.fp1 = fp;
Jesse Barnesa7516a02011-12-15 12:30:37 -08006741 }
6742}
6743
/*
 * Work around PLL B opamp miscalibration on VLV via a fixed sequence of
 * DPIO sideband read-modify-writes.  The exact register values come from
 * hardware programming notes; the order of the writes matters.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* NOTE(review): magic values below are undocumented here — taken
	 * as-is from the original sequence; do not reorder. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte of VLV_PLL_DW9(1) again (force bit back off). */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6772
/*
 * Program the PCH transcoder M1/N1 data and link ratio registers for
 * the pipe owning @crtc_state.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the data M1 register with the GMCH M value. */
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
6785
Maarten Lankhorst4207c8b2018-10-15 11:40:23 +02006786static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
6787 enum transcoder transcoder)
6788{
6789 if (IS_HASWELL(dev_priv))
6790 return transcoder == TRANSCODER_EDP;
6791
6792 /*
6793 * Strictly speaking some registers are available before
6794 * gen7, but we only support DRRS on gen7+
6795 */
6796 return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
6797}
6798
/*
 * Program the CPU transcoder M/N registers.  Gen5+ uses per-transcoder
 * PIPE_*_M1/N1 registers (plus M2/N2 when DRRS applies); older G4X-style
 * hardware uses per-pipe registers and has no M2/N2 set.
 *
 * @m2_n2 may be NULL when no second divider set is to be programmed.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
				   TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* Pre-gen5: per-pipe G4X-style registers, no M2/N2. */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
6832
/*
 * Program the DP link M/N set selected by @m_n into the hardware.
 * M1_N1 programs both the M1/N1 registers and, where present, M2/N2.
 * M2_N2 programs the m2_n2 divider values into the M1/N1 registers,
 * for transcoders that lack dedicated M2_N2 registers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs &crtc_state->dp_m_n,
	 * ignoring the dp_m_n selection made above for the M2_N2 case —
	 * presumably M2_N2 never applies to PCH encoders; confirm.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
6857
Daniel Vetter251ac862015-06-18 10:30:24 +02006858static void vlv_compute_dpll(struct intel_crtc *crtc,
6859 struct intel_crtc_state *pipe_config)
Jesse Barnesa0c4da242012-06-15 11:55:13 -07006860{
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006861 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006862 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006863 if (crtc->pipe != PIPE_A)
6864 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006865
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006866 /* DPLL not used with DSI, but still need the rest set up */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03006867 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006868 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
6869 DPLL_EXT_BUFFER_ENABLE_VLV;
6870
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006871 pipe_config->dpll_hw_state.dpll_md =
6872 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
6873}
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006874
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006875static void chv_compute_dpll(struct intel_crtc *crtc,
6876 struct intel_crtc_state *pipe_config)
6877{
6878 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006879 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006880 if (crtc->pipe != PIPE_A)
6881 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
6882
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006883 /* DPLL not used with DSI, but still need the rest set up */
Ville Syrjäläd7edc4e2016-06-22 21:57:07 +03006884 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
Ville Syrjäläcd2d34d2016-04-12 22:14:34 +03006885 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
6886
Ville Syrjälä03ed5cbf2016-03-15 16:39:55 +02006887 pipe_config->dpll_hw_state.dpll_md =
6888 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
Daniel Vetterbdd4b6a2014-04-24 23:55:11 +02006889}
6890
/*
 * Program the VLV PLL dividers and analog settings through the DPIO
 * sideband, based on the precomputed values in @pipe_config->dpll.
 * The sequence and magic values follow hardware programming notes;
 * do not reorder the writes.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* All DPIO sideband traffic is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers first, then the calibration-enable bit on top. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock config: keep byte 1, set fixed bits, DP gets one more. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
6989
/*
 * Program the CHV PLL dividers, loop filter and lock-detect settings
 * through the DPIO sideband, based on the precomputed values in
 * @pipe_config->dpll.  M2 is split into an integer part (bits 22+) and
 * a 22-bit fraction.  The sequence follows hardware programming notes;
 * do not reorder the writes.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Split m2 into integer and fractional parts. */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/* All DPIO sideband traffic is serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients depend on the VCO frequency band. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7094
Ville Syrjäläd288f652014-10-28 13:20:22 +02007095/**
7096 * vlv_force_pll_on - forcibly enable just the PLL
7097 * @dev_priv: i915 private structure
7098 * @pipe: pipe PLL to enable
7099 * @dpll: PLL configuration
7100 *
7101 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7102 * in cases where we need the PLL enabled even when @pipe is not going to
7103 * be enabled.
7104 */
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007105int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007106 const struct dpll *dpll)
Ville Syrjäläd288f652014-10-28 13:20:22 +02007107{
Ville Syrjäläb91eb5c2016-10-31 22:37:09 +02007108 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007109 struct intel_crtc_state *pipe_config;
7110
7111 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7112 if (!pipe_config)
7113 return -ENOMEM;
7114
7115 pipe_config->base.crtc = &crtc->base;
7116 pipe_config->pixel_multiplier = 1;
7117 pipe_config->dpll = *dpll;
Ville Syrjäläd288f652014-10-28 13:20:22 +02007118
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007119 if (IS_CHERRYVIEW(dev_priv)) {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007120 chv_compute_dpll(crtc, pipe_config);
7121 chv_prepare_pll(crtc, pipe_config);
7122 chv_enable_pll(crtc, pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007123 } else {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007124 vlv_compute_dpll(crtc, pipe_config);
7125 vlv_prepare_pll(crtc, pipe_config);
7126 vlv_enable_pll(crtc, pipe_config);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007127 }
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +00007128
7129 kfree(pipe_config);
7130
7131 return 0;
Ville Syrjäläd288f652014-10-28 13:20:22 +02007132}
7133
7134/**
7135 * vlv_force_pll_off - forcibly disable just the PLL
7136 * @dev_priv: i915 private structure
7137 * @pipe: pipe PLL to disable
7138 *
7139 * Disable the PLL for @pipe. To be used in cases where we need
7140 * the PLL enabled even when @pipe is not going to be enabled.
7141 */
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007142void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
Ville Syrjäläd288f652014-10-28 13:20:22 +02007143{
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007144 if (IS_CHERRYVIEW(dev_priv))
7145 chv_disable_pll(dev_priv, pipe);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007146 else
Ville Syrjälä30ad9812016-10-31 22:37:07 +02007147 vlv_disable_pll(dev_priv, pipe);
Ville Syrjäläd288f652014-10-28 13:20:22 +02007148}
7149
/*
 * Assemble the i9xx-family DPLL control register value (and dpll_md on
 * gen4+) into @crtc_state->dpll_hw_state, from the precomputed dividers
 * and the attached output types.  Also fills in the FP0/FP1 divisors via
 * i9xx_update_pll_dividers().
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Platforms with the hires SDVO multiplier field encode it here. */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also uses the SDVO high-speed bit. */
	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* G4X also encodes the reduced clock's P1 for downclocking. */
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, panel SSC, or default. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* Gen4+ carries the pixel multiplier in DPLL_MD instead. */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7222
Daniel Vetter251ac862015-06-18 10:30:24 +02007223static void i8xx_compute_dpll(struct intel_crtc *crtc,
7224 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03007225 struct dpll *reduced_clock)
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007226{
Daniel Vetterf47709a2013-03-28 10:42:02 +01007227 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007228 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007229 u32 dpll;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007230 struct dpll *clock = &crtc_state->dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007231
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007232 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
Vijay Purushothaman2a8f64c2012-09-27 19:13:06 +05307233
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007234 dpll = DPLL_VGA_MODE_DIS;
7235
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007236 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007237 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7238 } else {
7239 if (clock->p1 == 2)
7240 dpll |= PLL_P1_DIVIDE_BY_TWO;
7241 else
7242 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7243 if (clock->p2 == 4)
7244 dpll |= PLL_P2_DIVIDE_BY_4;
7245 }
7246
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01007247 if (!IS_I830(dev_priv) &&
7248 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
Daniel Vetter4a33e482013-07-06 12:52:05 +02007249 dpll |= DPLL_DVO_2X_MODE;
7250
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007251 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
Ander Conselvan de Oliveiraceb41002016-03-21 18:00:02 +02007252 intel_panel_use_ssc(dev_priv))
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007253 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7254 else
7255 dpll |= PLL_REF_INPUT_DREFCLK;
7256
7257 dpll |= DPLL_VCO_ENABLE;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02007258 crtc_state->dpll_hw_state.dpll = dpll;
Daniel Vettereb1cbe42012-03-28 23:12:16 +02007259}
7260
/*
 * Program the pipe/transcoder timing registers (H/VTOTAL, H/VBLANK,
 * H/VSYNC, VSYNCSHIFT) from the adjusted mode in @crtc_state.
 *
 * All timing registers take "value - 1" encodings, with the start/active
 * value in the low 16 bits and the end/total value in the high 16 bits.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* Field sync offset for interlaced scanout; SDVO encoders
		 * get a fixed half-line shift, others derive it from the
		 * hsync position. */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7322
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007323static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
Jani Nikulabc58be62016-03-18 17:05:39 +02007324{
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007325 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7326 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7327 enum pipe pipe = crtc->pipe;
Jani Nikulabc58be62016-03-18 17:05:39 +02007328
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007329 /* pipesrc controls the size that is scaled from, which should
7330 * always be the user's requested size.
7331 */
7332 I915_WRITE(PIPESRC(pipe),
Maarten Lankhorst44fe7f32018-10-04 11:45:54 +02007333 ((crtc_state->pipe_src_w - 1) << 16) |
7334 (crtc_state->pipe_src_h - 1));
Paulo Zanonib0e77b92012-10-01 18:10:53 -03007335}
7336
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007337static void intel_get_pipe_timings(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007338 struct intel_crtc_state *pipe_config)
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007339{
7340 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007341 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007342 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7343 uint32_t tmp;
7344
7345 tmp = I915_READ(HTOTAL(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007346 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7347 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007348 tmp = I915_READ(HBLANK(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007349 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7350 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007351 tmp = I915_READ(HSYNC(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007352 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7353 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007354
7355 tmp = I915_READ(VTOTAL(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007356 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7357 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007358 tmp = I915_READ(VBLANK(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007359 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7360 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007361 tmp = I915_READ(VSYNC(cpu_transcoder));
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007362 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7363 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007364
7365 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007366 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7367 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7368 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007369 }
Jani Nikulabc58be62016-03-18 17:05:39 +02007370}
7371
7372static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7373 struct intel_crtc_state *pipe_config)
7374{
7375 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007376 struct drm_i915_private *dev_priv = to_i915(dev);
Jani Nikulabc58be62016-03-18 17:05:39 +02007377 u32 tmp;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007378
7379 tmp = I915_READ(PIPESRC(crtc->pipe));
Ville Syrjälä37327ab2013-09-04 18:25:28 +03007380 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7381 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7382
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007383 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7384 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02007385}
7386
Daniel Vetterf6a83282014-02-11 15:28:57 -08007387void intel_mode_from_pipe_config(struct drm_display_mode *mode,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007388 struct intel_crtc_state *pipe_config)
Jesse Barnesbabea612013-06-26 18:57:38 +03007389{
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007390 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7391 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7392 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7393 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
Jesse Barnesbabea612013-06-26 18:57:38 +03007394
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007395 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7396 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7397 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7398 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
Jesse Barnesbabea612013-06-26 18:57:38 +03007399
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007400 mode->flags = pipe_config->base.adjusted_mode.flags;
Maarten Lankhorstcd13f5a2015-07-14 14:12:02 +02007401 mode->type = DRM_MODE_TYPE_DRIVER;
Jesse Barnesbabea612013-06-26 18:57:38 +03007402
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +02007403 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
Maarten Lankhorstcd13f5a2015-07-14 14:12:02 +02007404
7405 mode->hsync = drm_mode_hsync(mode);
7406 mode->vrefresh = drm_mode_vrefresh(mode);
7407 drm_mode_set_name(mode);
Jesse Barnesbabea612013-06-26 18:57:38 +03007408}
7409
/*
 * Compose and write the PIPECONF register for a gmch (pre-ilk style)
 * pipe from @crtc_state: enable carry-over on 830, double-wide mode,
 * bpc/dither (g4x+), interlace mode and limited color range (vlv/chv).
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Gen3 and earlier, and SDVO outputs, use the field
		 * indication interlace mode; everything else the sync
		 * shift variant. */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(crtc->pipe));
}
7465
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007466static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7467 struct intel_crtc_state *crtc_state)
7468{
7469 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007470 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007471 const struct intel_limit *limit;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007472 int refclk = 48000;
7473
7474 memset(&crtc_state->dpll_hw_state, 0,
7475 sizeof(crtc_state->dpll_hw_state));
7476
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007477 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007478 if (intel_panel_use_ssc(dev_priv)) {
7479 refclk = dev_priv->vbt.lvds_ssc_freq;
7480 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7481 }
7482
7483 limit = &intel_limits_i8xx_lvds;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007484 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007485 limit = &intel_limits_i8xx_dvo;
7486 } else {
7487 limit = &intel_limits_i8xx_dac;
7488 }
7489
7490 if (!crtc_state->clock_set &&
7491 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7492 refclk, NULL, &crtc_state->dpll)) {
7493 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7494 return -EINVAL;
7495 }
7496
7497 i8xx_compute_dpll(crtc, crtc_state, NULL);
7498
7499 return 0;
7500}
7501
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007502static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7503 struct intel_crtc_state *crtc_state)
7504{
7505 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007506 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007507 const struct intel_limit *limit;
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007508 int refclk = 96000;
7509
7510 memset(&crtc_state->dpll_hw_state, 0,
7511 sizeof(crtc_state->dpll_hw_state));
7512
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007513 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007514 if (intel_panel_use_ssc(dev_priv)) {
7515 refclk = dev_priv->vbt.lvds_ssc_freq;
7516 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7517 }
7518
7519 if (intel_is_dual_link_lvds(dev))
7520 limit = &intel_limits_g4x_dual_channel_lvds;
7521 else
7522 limit = &intel_limits_g4x_single_channel_lvds;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007523 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7524 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007525 limit = &intel_limits_g4x_hdmi;
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007526 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
Ander Conselvan de Oliveira19ec6692016-03-21 18:00:15 +02007527 limit = &intel_limits_g4x_sdvo;
7528 } else {
7529 /* The option is for other outputs */
7530 limit = &intel_limits_i9xx_sdvo;
7531 }
7532
7533 if (!crtc_state->clock_set &&
7534 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7535 refclk, NULL, &crtc_state->dpll)) {
7536 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7537 return -EINVAL;
7538 }
7539
7540 i9xx_compute_dpll(crtc, crtc_state, NULL);
7541
7542 return 0;
7543}
7544
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007545static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7546 struct intel_crtc_state *crtc_state)
Jesse Barnes79e53942008-11-07 14:24:08 -08007547{
Ander Conselvan de Oliveirac7653192014-10-20 13:46:44 +03007548 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007549 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007550 const struct intel_limit *limit;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007551 int refclk = 96000;
Jesse Barnes79e53942008-11-07 14:24:08 -08007552
Ander Conselvan de Oliveiradd3cd742015-05-15 13:34:29 +03007553 memset(&crtc_state->dpll_hw_state, 0,
7554 sizeof(crtc_state->dpll_hw_state));
7555
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007556 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007557 if (intel_panel_use_ssc(dev_priv)) {
7558 refclk = dev_priv->vbt.lvds_ssc_freq;
7559 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7560 }
Jesse Barnes79e53942008-11-07 14:24:08 -08007561
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007562 limit = &intel_limits_pineview_lvds;
7563 } else {
7564 limit = &intel_limits_pineview_sdvo;
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007565 }
Jani Nikulaf2335332013-09-13 11:03:09 +03007566
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007567 if (!crtc_state->clock_set &&
7568 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7569 refclk, NULL, &crtc_state->dpll)) {
7570 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7571 return -EINVAL;
7572 }
7573
7574 i9xx_compute_dpll(crtc, crtc_state, NULL);
7575
7576 return 0;
7577}
7578
7579static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7580 struct intel_crtc_state *crtc_state)
7581{
7582 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01007583 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007584 const struct intel_limit *limit;
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007585 int refclk = 96000;
7586
7587 memset(&crtc_state->dpll_hw_state, 0,
7588 sizeof(crtc_state->dpll_hw_state));
7589
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03007590 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007591 if (intel_panel_use_ssc(dev_priv)) {
7592 refclk = dev_priv->vbt.lvds_ssc_freq;
7593 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
Jani Nikulae9fd1c02013-08-27 15:12:23 +03007594 }
Ander Conselvan de Oliveira70e8aa22016-03-21 18:00:16 +02007595
7596 limit = &intel_limits_i9xx_lvds;
7597 } else {
7598 limit = &intel_limits_i9xx_sdvo;
7599 }
7600
7601 if (!crtc_state->clock_set &&
7602 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7603 refclk, NULL, &crtc_state->dpll)) {
7604 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7605 return -EINVAL;
Daniel Vetterf47709a2013-03-28 10:42:02 +01007606 }
Eric Anholtf564048e2011-03-30 13:01:02 -07007607
Ander Conselvan de Oliveira81c97f52016-03-22 15:35:23 +02007608 i9xx_compute_dpll(crtc, crtc_state, NULL);
Eric Anholtf564048e2011-03-30 13:01:02 -07007609
Daniel Vetterc8f7a0d2014-04-24 23:55:04 +02007610 return 0;
Eric Anholtf564048e2011-03-30 13:01:02 -07007611}
7612
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007613static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7614 struct intel_crtc_state *crtc_state)
7615{
7616 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007617 const struct intel_limit *limit = &intel_limits_chv;
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007618
7619 memset(&crtc_state->dpll_hw_state, 0,
7620 sizeof(crtc_state->dpll_hw_state));
7621
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007622 if (!crtc_state->clock_set &&
7623 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7624 refclk, NULL, &crtc_state->dpll)) {
7625 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7626 return -EINVAL;
7627 }
7628
7629 chv_compute_dpll(crtc, crtc_state);
7630
7631 return 0;
7632}
7633
7634static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7635 struct intel_crtc_state *crtc_state)
7636{
7637 int refclk = 100000;
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03007638 const struct intel_limit *limit = &intel_limits_vlv;
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007639
7640 memset(&crtc_state->dpll_hw_state, 0,
7641 sizeof(crtc_state->dpll_hw_state));
7642
Ander Conselvan de Oliveira65b3d6a2016-03-21 18:00:13 +02007643 if (!crtc_state->clock_set &&
7644 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7645 refclk, NULL, &crtc_state->dpll)) {
7646 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7647 return -EINVAL;
7648 }
7649
7650 vlv_compute_dpll(crtc, crtc_state);
7651
7652 return 0;
7653}
7654
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007655static void i9xx_get_pfit_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02007656 struct intel_crtc_state *pipe_config)
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007657{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00007658 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007659 uint32_t tmp;
7660
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +01007661 if (INTEL_GEN(dev_priv) <= 3 &&
7662 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
Ville Syrjälädc9e7dec2014-01-10 14:06:45 +02007663 return;
7664
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007665 tmp = I915_READ(PFIT_CONTROL);
Daniel Vetter06922822013-07-11 13:35:40 +02007666 if (!(tmp & PFIT_ENABLE))
7667 return;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007668
Daniel Vetter06922822013-07-11 13:35:40 +02007669 /* Check whether the pfit is attached to our pipe. */
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +00007670 if (INTEL_GEN(dev_priv) < 4) {
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007671 if (crtc->pipe != PIPE_B)
7672 return;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007673 } else {
7674 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7675 return;
7676 }
7677
Daniel Vetter06922822013-07-11 13:35:40 +02007678 pipe_config->gmch_pfit.control = tmp;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007679 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02007680}
7681
/*
 * Read the VLV DPLL divider word back over the DPIO sideband and
 * convert it into a port clock (kHz) stored in @pipe_config.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;	/* same reference used by vlv_crtc_compute_clock() */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband (DPIO) accesses must be serialized via sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the m1/m2/n/p1/p2 divider fields from PLL_DW3. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
7708
/*
 * Read back the primary plane state programmed by the BIOS/firmware and
 * describe it in @plane_config so the boot framebuffer can be inherited.
 *
 * Allocates an intel_framebuffer (stored in plane_config->fb; caller
 * owns it) and fills in tiling, format, base address, size and pitch
 * from the plane registers.  Bails out silently if the plane is
 * disabled or the allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane is not enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	/* get_hw_state() reports which pipe the plane scans out on;
	 * it is expected to match this crtc's pipe. */
	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* X-tiling readout only exists on gen4+ planes. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* The surface base/offset registers differ by generation:
	 * HSW/BDW use DSPOFFSET, gen4+ DSPTILEOFF/DSPLINOFF depending
	 * on tiling, and older parts have a single DSPADDR. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* Framebuffer dimensions come from the pipe source size. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
7784
/*
 * Read back the currently programmed DPLL dividers for a CHV pipe via the
 * DPIO sideband interface and convert them into a port clock (kHz) in
 * @pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* reference clock in kHz fed to chv_calc_dpll_params() */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* All DPIO sideband accesses must be serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Decode the divider fields: m2 carries the integer part in the top
	 * bits (<< 22) with an optional 22-bit fractional part OR'ed in when
	 * the fractional divider is enabled.
	 */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
7818
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05307819static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
7820 struct intel_crtc_state *pipe_config)
7821{
7822 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7823 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
7824
Shashank Sharma668b6c12018-10-12 11:53:14 +05307825 pipe_config->lspcon_downsampling = false;
7826
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05307827 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
7828 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
7829
7830 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
7831 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
7832 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
7833
7834 if (ycbcr420_enabled) {
7835 /* We support 4:2:0 in full blend mode only */
7836 if (!blend)
7837 output = INTEL_OUTPUT_FORMAT_INVALID;
7838 else if (!(IS_GEMINILAKE(dev_priv) ||
7839 INTEL_GEN(dev_priv) >= 10))
7840 output = INTEL_OUTPUT_FORMAT_INVALID;
7841 else
7842 output = INTEL_OUTPUT_FORMAT_YCBCR420;
Shashank Sharma8c79f842018-10-12 11:53:09 +05307843 } else {
Shashank Sharma668b6c12018-10-12 11:53:14 +05307844 /*
7845 * Currently there is no interface defined to
7846 * check user preference between RGB/YCBCR444
7847 * or YCBCR420. So the only possible case for
7848 * YCBCR444 usage is driving YCBCR420 output
7849 * with LSPCON, when pipe is configured for
7850 * YCBCR444 output and LSPCON takes care of
7851 * downsampling it.
7852 */
7853 pipe_config->lspcon_downsampling = true;
Shashank Sharma8c79f842018-10-12 11:53:09 +05307854 output = INTEL_OUTPUT_FORMAT_YCBCR444;
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05307855 }
7856 }
7857 }
7858
7859 pipe_config->output_format = output;
7860}
7861
/*
 * Read out the current hardware state of a gen2-4/VLV/CHV pipe into
 * @pipe_config. Returns false if the pipe's power well is down or the pipe
 * is disabled; the power reference taken here is dropped on every path
 * that acquired it (at the "out" label).
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	/* Bail without touching registers if the pipe power domain is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only these platforms encode the pipe bpc in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Read back the pixel multiplier; the register layout differs per gen. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	/* Recover port_clock from the per-platform DPLL readout helper. */
	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
7976
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class PCHs. The desired final register value is computed first;
 * if it already matches the hardware we return early, otherwise each clock
 * source is enabled/disabled step by step with the mandated 200us settle
 * delays. The BUG_ON at the end asserts the stepwise sequence converged on
 * exactly the precomputed value.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX an external CK505 (per VBT) replaces the internal SSC. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep the SSC source alive for the DPLLs still using it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	BUG_ON(val != final);
}
8143
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait for the status bit to latch, then de-assert and wait for it to
 * clear. Timeouts (100us each way) are reported but not treated as fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8164
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the sideband interface.
 * Every offset/value pair below is a verbatim magic number from the
 * WaMPhyProgramming workaround sequence; do not reorder or "clean up".
 * Registers come in 0x20xx/0x21xx pairs programmed identically —
 * presumably one per FDI channel; verify against BSpec before relying
 * on that.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8239
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * Invalid combinations are corrected (with a WARN) rather than rejected:
 * FDI forces spread on, and LP PCHs have no FDI at all.
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	uint32_t reg, tmp;

	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	/* SBI (sideband) accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block but keep the path in bypass (PATHALT). */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Release bypass so the spread-spectrum path is active. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* LP PCH uses SBI_GEN0, other LPT variants use SBI_DBUFF0. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8284
/* Sequence to disable CLKOUT_DP */
/*
 * Reverse of lpt_enable_clkout_dp(): clear the buffer-enable bit, then if
 * the SSC block is still active, put the path into bypass (PATHALT, with a
 * 32us settle) before setting the disable bit.
 */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* LP PCH uses SBI_GEN0, other LPT variants use SBI_DBUFF0. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8310
/* Map a bend step in [-50, 50] (multiples of 5) to a table index 0..20. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE low-word values indexed by BEND_IDX(steps). Each pair
 * of adjacent 5-unit steps shares the same divider/phase word; the
 * fractional difference between them is handled by the dither setting in
 * lpt_bend_clkout_dp().
 */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
8336
8337/*
8338 * Bend CLKOUT_DP
8339 * steps -50 to 50 inclusive, in steps of 5
8340 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8341 * change in clock period = -(steps / 10) * 5.787 ps
8342 */
8343static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8344{
8345 uint32_t tmp;
8346 int idx = BEND_IDX(steps);
8347
8348 if (WARN_ON(steps % 5 != 0))
8349 return;
8350
8351 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8352 return;
8353
8354 mutex_lock(&dev_priv->sb_lock);
8355
8356 if (steps % 10 != 0)
8357 tmp = 0xAAAAAAAB;
8358 else
8359 tmp = 0x00000000;
8360 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8361
8362 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8363 tmp &= 0xffff0000;
8364 tmp |= sscdivintphase[idx];
8365 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8366
8367 mutex_unlock(&dev_priv->sb_lock);
8368}
8369
8370#undef BEND_IDX
8371
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008372static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008373{
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008374 struct intel_encoder *encoder;
8375 bool has_vga = false;
8376
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008377 for_each_intel_encoder(&dev_priv->drm, encoder) {
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008378 switch (encoder->type) {
8379 case INTEL_OUTPUT_ANALOG:
8380 has_vga = true;
8381 break;
Paulo Zanoni6847d71b2014-10-27 17:47:52 -02008382 default:
8383 break;
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008384 }
8385 }
8386
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008387 if (has_vga) {
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008388 lpt_bend_clkout_dp(dev_priv, 0);
8389 lpt_enable_clkout_dp(dev_priv, true, true);
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008390 } else {
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +02008391 lpt_disable_clkout_dp(dev_priv);
Ville Syrjäläf7be2c22015-12-04 22:19:39 +02008392 }
Paulo Zanonibf8fa3d2013-07-12 14:19:38 -03008393}
8394
/*
 * Initialize reference clocks when the driver loads.
 * Dispatches to the correct per-PCH-generation setup routine.
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
8405
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008406static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
Paulo Zanonic8203562012-09-12 10:06:29 -03008407{
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008408 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8409 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8410 enum pipe pipe = crtc->pipe;
Paulo Zanonic8203562012-09-12 10:06:29 -03008411 uint32_t val;
8412
Daniel Vetter78114072013-06-13 00:54:57 +02008413 val = 0;
Paulo Zanonic8203562012-09-12 10:06:29 -03008414
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008415 switch (crtc_state->pipe_bpp) {
Paulo Zanonic8203562012-09-12 10:06:29 -03008416 case 18:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008417 val |= PIPECONF_6BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008418 break;
8419 case 24:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008420 val |= PIPECONF_8BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008421 break;
8422 case 30:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008423 val |= PIPECONF_10BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008424 break;
8425 case 36:
Daniel Vetterdfd07d72012-12-17 11:21:38 +01008426 val |= PIPECONF_12BPC;
Paulo Zanonic8203562012-09-12 10:06:29 -03008427 break;
8428 default:
Paulo Zanonicc769b62012-09-20 18:36:03 -03008429 /* Case prevented by intel_choose_pipe_bpp_dither. */
8430 BUG();
Paulo Zanonic8203562012-09-12 10:06:29 -03008431 }
8432
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008433 if (crtc_state->dither)
Paulo Zanonic8203562012-09-12 10:06:29 -03008434 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8435
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008436 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
Paulo Zanonic8203562012-09-12 10:06:29 -03008437 val |= PIPECONF_INTERLACED_ILK;
8438 else
8439 val |= PIPECONF_PROGRESSIVE;
8440
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008441 if (crtc_state->limited_color_range)
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02008442 val |= PIPECONF_COLOR_RANGE_SELECT;
Ville Syrjälä3685a8f2013-01-17 16:31:28 +02008443
Paulo Zanonic8203562012-09-12 10:06:29 -03008444 I915_WRITE(PIPECONF(pipe), val);
8445 POSTING_READ(PIPECONF(pipe));
8446}
8447
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008448static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008449{
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008450 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8451 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8452 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
Jani Nikula391bf042016-03-18 17:05:40 +02008453 u32 val = 0;
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008454
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008455 if (IS_HASWELL(dev_priv) && crtc_state->dither)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008456 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8457
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008458 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008459 val |= PIPECONF_INTERLACED_ILK;
8460 else
8461 val |= PIPECONF_PROGRESSIVE;
8462
Paulo Zanoni702e7a52012-10-23 18:29:59 -02008463 I915_WRITE(PIPECONF(cpu_transcoder), val);
8464 POSTING_READ(PIPECONF(cpu_transcoder));
Jani Nikula391bf042016-03-18 17:05:40 +02008465}
8466
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008467static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
Jani Nikula391bf042016-03-18 17:05:40 +02008468{
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008469 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
8470 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
Jani Nikula391bf042016-03-18 17:05:40 +02008471
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +00008472 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
Jani Nikula391bf042016-03-18 17:05:40 +02008473 u32 val = 0;
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008474
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008475 switch (crtc_state->pipe_bpp) {
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008476 case 18:
8477 val |= PIPEMISC_DITHER_6_BPC;
8478 break;
8479 case 24:
8480 val |= PIPEMISC_DITHER_8_BPC;
8481 break;
8482 case 30:
8483 val |= PIPEMISC_DITHER_10_BPC;
8484 break;
8485 case 36:
8486 val |= PIPEMISC_DITHER_12_BPC;
8487 break;
8488 default:
8489 /* Case prevented by pipe_config_set_bpp. */
8490 BUG();
8491 }
8492
Maarten Lankhorstfdf73512018-10-04 11:45:52 +02008493 if (crtc_state->dither)
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008494 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8495
Shashank Sharma8c79f842018-10-12 11:53:09 +05308496 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
8497 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05308498 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
Shashank Sharma8c79f842018-10-12 11:53:09 +05308499
8500 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
Shashank Sharma33b7f3e2018-10-12 11:53:08 +05308501 val |= PIPEMISC_YUV420_ENABLE |
Shashank Sharmab22ca992017-07-24 19:19:32 +05308502 PIPEMISC_YUV420_MODE_FULL_BLEND;
Shashank Sharmab22ca992017-07-24 19:19:32 +05308503
Jani Nikula391bf042016-03-18 17:05:40 +02008504 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
Paulo Zanoni756f85c2013-11-02 21:07:38 -07008505 }
Paulo Zanoniee2b0b32012-10-05 12:05:57 -03008506}
8507
Paulo Zanonid4b19312012-11-29 11:29:32 -02008508int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8509{
8510 /*
8511 * Account for spread spectrum to avoid
8512 * oversubscribing the link. Max center spread
8513 * is 2.5%; use 5% for safety's sake.
8514 */
8515 u32 bps = target_clock * bpp * 21 / 20;
Ville Syrjälä619d4d02014-02-27 14:23:14 +02008516 return DIV_ROUND_UP(bps, link_bw * 8);
Paulo Zanonid4b19312012-11-29 11:29:32 -02008517}
8518
Daniel Vetter7429e9d2013-04-20 17:19:46 +02008519static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
Daniel Vetter6cf86a52013-04-02 23:38:10 +02008520{
Daniel Vetter7429e9d2013-04-20 17:19:46 +02008521 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
Paulo Zanonif48d8f22012-09-20 18:36:04 -03008522}
8523
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008524static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8525 struct intel_crtc_state *crtc_state,
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +03008526 struct dpll *reduced_clock)
Paulo Zanonide13a2e2012-09-20 18:36:05 -03008527{
8528 struct drm_crtc *crtc = &intel_crtc->base;
8529 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008530 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008531 u32 dpll, fp, fp2;
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008532 int factor;
Jesse Barnes79e53942008-11-07 14:24:08 -08008533
Chris Wilsonc1858122010-12-03 21:35:48 +00008534 /* Enable autotuning of the PLL clock (if permissible) */
Eric Anholt8febb292011-03-30 13:01:07 -07008535 factor = 21;
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008536 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Eric Anholt8febb292011-03-30 13:01:07 -07008537 if ((intel_panel_use_ssc(dev_priv) &&
Ville Syrjäläe91e9412013-12-09 18:54:16 +02008538 dev_priv->vbt.lvds_ssc_freq == 100000) ||
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01008539 (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
Eric Anholt8febb292011-03-30 13:01:07 -07008540 factor = 25;
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008541 } else if (crtc_state->sdvo_tv_clock)
Eric Anholt8febb292011-03-30 13:01:07 -07008542 factor = 20;
Chris Wilsonc1858122010-12-03 21:35:48 +00008543
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008544 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
Chris Wilsonc1858122010-12-03 21:35:48 +00008545
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008546 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8547 fp |= FP_CB_TUNE;
8548
8549 if (reduced_clock) {
8550 fp2 = i9xx_dpll_compute_fp(reduced_clock);
8551
8552 if (reduced_clock->m < factor * reduced_clock->n)
8553 fp2 |= FP_CB_TUNE;
8554 } else {
8555 fp2 = fp;
8556 }
Daniel Vetter9a7c7892013-04-04 22:20:34 +02008557
Chris Wilson5eddb702010-09-11 13:48:45 +01008558 dpll = 0;
Zhenyu Wang2c072452009-06-05 15:38:42 +08008559
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008560 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
Eric Anholta07d6782011-03-30 13:01:08 -07008561 dpll |= DPLLB_MODE_LVDS;
8562 else
8563 dpll |= DPLLB_MODE_DAC_SERIAL;
Daniel Vetter198a037f2013-04-19 11:14:37 +02008564
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008565 dpll |= (crtc_state->pixel_multiplier - 1)
Daniel Vetteref1b4602013-06-01 17:17:04 +02008566 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
Daniel Vetter198a037f2013-04-19 11:14:37 +02008567
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008568 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8569 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
Daniel Vetter4a33e482013-07-06 12:52:05 +02008570 dpll |= DPLL_SDVO_HIGH_SPEED;
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008571
Ville Syrjälä37a56502016-06-22 21:57:04 +03008572 if (intel_crtc_has_dp_encoder(crtc_state))
Daniel Vetter4a33e482013-07-06 12:52:05 +02008573 dpll |= DPLL_SDVO_HIGH_SPEED;
Jesse Barnes79e53942008-11-07 14:24:08 -08008574
Ville Syrjälä7d7f8632016-09-26 11:30:46 +03008575 /*
8576 * The high speed IO clock is only really required for
8577 * SDVO/HDMI/DP, but we also enable it for CRT to make it
8578 * possible to share the DPLL between CRT and HDMI. Enabling
8579 * the clock needlessly does no real harm, except use up a
8580 * bit of power potentially.
8581 *
8582 * We'll limit this to IVB with 3 pipes, since it has only two
8583 * DPLLs and so DPLL sharing is the only way to get three pipes
8584 * driving PCH ports at the same time. On SNB we could do this,
8585 * and potentially avoid enabling the second DPLL, but it's not
8586 * clear if it''s a win or loss power wise. No point in doing
8587 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
8588 */
8589 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
8590 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
8591 dpll |= DPLL_SDVO_HIGH_SPEED;
8592
Eric Anholta07d6782011-03-30 13:01:08 -07008593 /* compute bitmask from p1 value */
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008594 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
Eric Anholta07d6782011-03-30 13:01:08 -07008595 /* also FPA1 */
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008596 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
Eric Anholta07d6782011-03-30 13:01:08 -07008597
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008598 switch (crtc_state->dpll.p2) {
Eric Anholta07d6782011-03-30 13:01:08 -07008599 case 5:
8600 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8601 break;
8602 case 7:
8603 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8604 break;
8605 case 10:
8606 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8607 break;
8608 case 14:
8609 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8610 break;
Jesse Barnes79e53942008-11-07 14:24:08 -08008611 }
8612
Ville Syrjälä3d6e9ee2016-06-22 21:57:03 +03008613 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8614 intel_panel_use_ssc(dev_priv))
Kristian Høgsberg43565a02009-02-13 20:56:52 -05008615 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
Jesse Barnes79e53942008-11-07 14:24:08 -08008616 else
8617 dpll |= PLL_REF_INPUT_DREFCLK;
8618
Ander Conselvan de Oliveirab75ca6f2016-03-21 18:00:11 +02008619 dpll |= DPLL_VCO_ENABLE;
8620
8621 crtc_state->dpll_hw_state.dpll = dpll;
8622 crtc_state->dpll_hw_state.fp0 = fp;
8623 crtc_state->dpll_hw_state.fp1 = fp2;
Paulo Zanonide13a2e2012-09-20 18:36:05 -03008624}
8625
Ander Conselvan de Oliveira190f68c2015-01-15 14:55:23 +02008626static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8627 struct intel_crtc_state *crtc_state)
Jesse Barnes79e53942008-11-07 14:24:08 -08008628{
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008629 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008630 struct drm_i915_private *dev_priv = to_i915(dev);
Ander Conselvan de Oliveira1b6f4952016-05-04 12:11:59 +03008631 const struct intel_limit *limit;
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008632 int refclk = 120000;
Jesse Barnes79e53942008-11-07 14:24:08 -08008633
Ander Conselvan de Oliveiradd3cd742015-05-15 13:34:29 +03008634 memset(&crtc_state->dpll_hw_state, 0,
8635 sizeof(crtc_state->dpll_hw_state));
8636
Ander Conselvan de Oliveiraded220e2016-03-21 18:00:09 +02008637 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8638 if (!crtc_state->has_pch_encoder)
8639 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08008640
Ville Syrjälä2d84d2b2016-06-22 21:57:02 +03008641 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008642 if (intel_panel_use_ssc(dev_priv)) {
8643 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8644 dev_priv->vbt.lvds_ssc_freq);
8645 refclk = dev_priv->vbt.lvds_ssc_freq;
8646 }
8647
8648 if (intel_is_dual_link_lvds(dev)) {
8649 if (refclk == 100000)
8650 limit = &intel_limits_ironlake_dual_lvds_100m;
8651 else
8652 limit = &intel_limits_ironlake_dual_lvds;
8653 } else {
8654 if (refclk == 100000)
8655 limit = &intel_limits_ironlake_single_lvds_100m;
8656 else
8657 limit = &intel_limits_ironlake_single_lvds;
8658 }
8659 } else {
8660 limit = &intel_limits_ironlake_dac;
8661 }
8662
Ander Conselvan de Oliveira364ee292016-03-21 18:00:10 +02008663 if (!crtc_state->clock_set &&
Ander Conselvan de Oliveira997c0302016-03-21 18:00:12 +02008664 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8665 refclk, NULL, &crtc_state->dpll)) {
Ander Conselvan de Oliveira364ee292016-03-21 18:00:10 +02008666 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8667 return -EINVAL;
Daniel Vetterf47709a2013-03-28 10:42:02 +01008668 }
Jesse Barnes79e53942008-11-07 14:24:08 -08008669
Gustavo A. R. Silvacbaa3312017-05-15 16:56:05 -05008670 ironlake_compute_dpll(crtc, crtc_state, NULL);
Daniel Vetter66e985c2013-06-05 13:34:20 +02008671
Gustavo A. R. Silvaefd38b62017-05-15 17:00:28 -05008672 if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
Chris Wilson43031782018-09-13 14:16:26 +01008673 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
8674 pipe_name(crtc->pipe));
Ander Conselvan de Oliveiraded220e2016-03-21 18:00:09 +02008675 return -EINVAL;
Ander Conselvan de Oliveira3fb37702014-10-29 11:32:35 +02008676 }
Jesse Barnes79e53942008-11-07 14:24:08 -08008677
Daniel Vetterc8f7a0d2014-04-24 23:55:04 +02008678 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08008679}
8680
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008681static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8682 struct intel_link_m_n *m_n)
Daniel Vetter72419202013-04-04 13:28:53 +02008683{
8684 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008685 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008686 enum pipe pipe = crtc->pipe;
Daniel Vetter72419202013-04-04 13:28:53 +02008687
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008688 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8689 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8690 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8691 & ~TU_SIZE_MASK;
8692 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8693 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8694 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8695}
8696
/*
 * Read back the link/data M/N values for a CPU transcoder.
 *
 * On gen5+ the registers are indexed by transcoder; older platforms use
 * the per-pipe G4X register layout. @m2_n2 (the second set used for DRRS)
 * is only filled in when non-NULL and the transcoder actually has M2/N2
 * registers.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		/* DATA_M1 packs the TU size (high bits) and data M value. */
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
8733
8734void intel_dp_get_m_n(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008735 struct intel_crtc_state *pipe_config)
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008736{
Ander Conselvan de Oliveira681a8502015-01-15 14:55:24 +02008737 if (pipe_config->has_pch_encoder)
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008738 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8739 else
8740 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
Vandana Kannanb95af8b2014-08-05 07:51:23 -07008741 &pipe_config->dp_m_n,
8742 &pipe_config->dp_m2_n2);
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008743}
8744
Daniel Vetter72419202013-04-04 13:28:53 +02008745static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008746 struct intel_crtc_state *pipe_config)
Daniel Vetter72419202013-04-04 13:28:53 +02008747{
Ville Syrjäläeb14cb72013-09-10 17:02:54 +03008748 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
Vandana Kannanb95af8b2014-08-05 07:51:23 -07008749 &pipe_config->fdi_m_n, NULL);
Daniel Vetter72419202013-04-04 13:28:53 +02008750}
8751
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008752static void skylake_get_pfit_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008753 struct intel_crtc_state *pipe_config)
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008754{
8755 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008756 struct drm_i915_private *dev_priv = to_i915(dev);
Chandra Kondurua1b22782015-04-07 15:28:45 -07008757 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
8758 uint32_t ps_ctrl = 0;
8759 int id = -1;
8760 int i;
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008761
Chandra Kondurua1b22782015-04-07 15:28:45 -07008762 /* find scaler attached to this pipe */
8763 for (i = 0; i < crtc->num_scalers; i++) {
8764 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
8765 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
8766 id = i;
8767 pipe_config->pch_pfit.enabled = true;
8768 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
8769 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
8770 break;
8771 }
8772 }
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008773
Chandra Kondurua1b22782015-04-07 15:28:45 -07008774 scaler_state->scaler_id = id;
8775 if (id >= 0) {
8776 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
8777 } else {
8778 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
Jesse Barnesbd2e2442014-11-13 17:51:47 +00008779 }
8780}
8781
Damien Lespiau5724dbd2015-01-20 12:51:52 +00008782static void
8783skylake_get_initial_plane_config(struct intel_crtc *crtc,
8784 struct intel_initial_plane_config *plane_config)
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008785{
8786 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008787 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008788 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8789 enum plane_id plane_id = plane->id;
Ville Syrjäläeade6c82018-01-30 22:38:03 +02008790 enum pipe pipe;
James Ausmus4036c782017-11-13 10:11:28 -08008791 u32 val, base, offset, stride_mult, tiling, alpha;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008792 int fourcc, pixel_format;
Tvrtko Ursulin6761dd32015-03-23 11:10:32 +00008793 unsigned int aligned_height;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008794 struct drm_framebuffer *fb;
Damien Lespiau1b842c82015-01-21 13:50:54 +00008795 struct intel_framebuffer *intel_fb;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008796
Ville Syrjäläeade6c82018-01-30 22:38:03 +02008797 if (!plane->get_hw_state(plane, &pipe))
Ville Syrjälä2924b8c2017-11-17 21:19:16 +02008798 return;
8799
Ville Syrjäläeade6c82018-01-30 22:38:03 +02008800 WARN_ON(pipe != crtc->pipe);
8801
Damien Lespiaud9806c92015-01-21 14:07:19 +00008802 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
Damien Lespiau1b842c82015-01-21 13:50:54 +00008803 if (!intel_fb) {
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008804 DRM_DEBUG_KMS("failed to alloc fb\n");
8805 return;
8806 }
8807
Damien Lespiau1b842c82015-01-21 13:50:54 +00008808 fb = &intel_fb->base;
8809
Ville Syrjäläd2e9f5f2016-11-18 21:52:53 +02008810 fb->dev = dev;
8811
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008812 val = I915_READ(PLANE_CTL(pipe, plane_id));
Damien Lespiau42a7b082015-02-05 19:35:13 +00008813
James Ausmusb5972772018-01-30 11:49:16 -02008814 if (INTEL_GEN(dev_priv) >= 11)
8815 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
8816 else
8817 pixel_format = val & PLANE_CTL_FORMAT_MASK;
James Ausmus4036c782017-11-13 10:11:28 -08008818
8819 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008820 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
James Ausmus4036c782017-11-13 10:11:28 -08008821 alpha &= PLANE_COLOR_ALPHA_MASK;
8822 } else {
8823 alpha = val & PLANE_CTL_ALPHA_MASK;
8824 }
8825
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008826 fourcc = skl_format_to_fourcc(pixel_format,
James Ausmus4036c782017-11-13 10:11:28 -08008827 val & PLANE_CTL_ORDER_RGBX, alpha);
Ville Syrjälä2f3f4762016-11-18 21:52:57 +02008828 fb->format = drm_format_info(fourcc);
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008829
Damien Lespiau40f46282015-02-27 11:15:21 +00008830 tiling = val & PLANE_CTL_TILED_MASK;
8831 switch (tiling) {
8832 case PLANE_CTL_TILED_LINEAR:
Ben Widawsky2f075562017-03-24 14:29:48 -07008833 fb->modifier = DRM_FORMAT_MOD_LINEAR;
Damien Lespiau40f46282015-02-27 11:15:21 +00008834 break;
8835 case PLANE_CTL_TILED_X:
8836 plane_config->tiling = I915_TILING_X;
Ville Syrjäläbae781b2016-11-16 13:33:16 +02008837 fb->modifier = I915_FORMAT_MOD_X_TILED;
Damien Lespiau40f46282015-02-27 11:15:21 +00008838 break;
8839 case PLANE_CTL_TILED_Y:
Dhinakaran Pandiyan53867b42018-08-21 18:50:53 -07008840 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07008841 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
8842 else
8843 fb->modifier = I915_FORMAT_MOD_Y_TILED;
Damien Lespiau40f46282015-02-27 11:15:21 +00008844 break;
8845 case PLANE_CTL_TILED_YF:
Dhinakaran Pandiyan53867b42018-08-21 18:50:53 -07008846 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
Ville Syrjälä2e2adb02017-08-01 09:58:13 -07008847 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
8848 else
8849 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
Damien Lespiau40f46282015-02-27 11:15:21 +00008850 break;
8851 default:
8852 MISSING_CASE(tiling);
8853 goto error;
8854 }
8855
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008856 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008857 plane_config->base = base;
8858
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008859 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008860
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008861 val = I915_READ(PLANE_SIZE(pipe, plane_id));
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008862 fb->height = ((val >> 16) & 0xfff) + 1;
8863 fb->width = ((val >> 0) & 0x1fff) + 1;
8864
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008865 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02008866 stride_mult = intel_fb_stride_alignment(fb, 0);
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008867 fb->pitches[0] = (val & 0x3ff) * stride_mult;
8868
Ville Syrjäläd88c4af2017-03-07 21:42:06 +02008869 aligned_height = intel_fb_align_height(fb, 0, fb->height);
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008870
Daniel Vetterf37b5c22015-02-10 23:12:27 +01008871 plane_config->size = fb->pitches[0] * aligned_height;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008872
Ville Syrjälä282e83e2017-11-17 21:19:12 +02008873 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8874 crtc->base.name, plane->base.name, fb->width, fb->height,
Ville Syrjälä272725c2016-12-14 23:32:20 +02008875 fb->format->cpp[0] * 8, base, fb->pitches[0],
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008876 plane_config->size);
8877
Damien Lespiau2d140302015-02-05 17:22:18 +00008878 plane_config->fb = intel_fb;
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008879 return;
8880
8881error:
Matthew Auldd1a3a032016-08-23 16:00:44 +01008882 kfree(intel_fb);
Damien Lespiaubc8d7df2015-01-20 12:51:51 +00008883}
8884
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02008885static void ironlake_get_pfit_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008886 struct intel_crtc_state *pipe_config)
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02008887{
8888 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008889 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02008890 uint32_t tmp;
8891
8892 tmp = I915_READ(PF_CTL(crtc->pipe));
8893
8894 if (tmp & PF_ENABLE) {
Chris Wilsonfd4daa92013-08-27 17:04:17 +01008895 pipe_config->pch_pfit.enabled = true;
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02008896 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
8897 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
Daniel Vettercb8b2a32013-06-01 17:16:23 +02008898
8899 /* We currently do not free assignements of panel fitters on
8900 * ivb/hsw (since we don't use the higher upscaling modes which
8901 * differentiates them) so just WARN about this case for now. */
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01008902 if (IS_GEN7(dev_priv)) {
Daniel Vettercb8b2a32013-06-01 17:16:23 +02008903 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
8904 PF_PIPE_SEL_IVB(crtc->pipe));
8905 }
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02008906 }
Jesse Barnes79e53942008-11-07 14:24:08 -08008907}
8908
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01008909static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +02008910 struct intel_crtc_state *pipe_config)
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01008911{
8912 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +01008913 struct drm_i915_private *dev_priv = to_i915(dev);
Imre Deak17290502016-02-12 18:55:11 +02008914 enum intel_display_power_domain power_domain;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01008915 uint32_t tmp;
Imre Deak17290502016-02-12 18:55:11 +02008916 bool ret;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01008917
Imre Deak17290502016-02-12 18:55:11 +02008918 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8919 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
Paulo Zanoni930e8c92014-07-04 13:38:34 -03008920 return false;
8921
Shashank Sharmad9facae2018-10-12 11:53:07 +05308922 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
Daniel Vettere143a212013-07-04 12:01:15 +02008923 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02008924 pipe_config->shared_dpll = NULL;
Daniel Vettereccb1402013-05-22 00:50:22 +02008925
Imre Deak17290502016-02-12 18:55:11 +02008926 ret = false;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01008927 tmp = I915_READ(PIPECONF(crtc->pipe));
8928 if (!(tmp & PIPECONF_ENABLE))
Imre Deak17290502016-02-12 18:55:11 +02008929 goto out;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01008930
Ville Syrjälä42571ae2013-09-06 23:29:00 +03008931 switch (tmp & PIPECONF_BPC_MASK) {
8932 case PIPECONF_6BPC:
8933 pipe_config->pipe_bpp = 18;
8934 break;
8935 case PIPECONF_8BPC:
8936 pipe_config->pipe_bpp = 24;
8937 break;
8938 case PIPECONF_10BPC:
8939 pipe_config->pipe_bpp = 30;
8940 break;
8941 case PIPECONF_12BPC:
8942 pipe_config->pipe_bpp = 36;
8943 break;
8944 default:
8945 break;
8946 }
8947
Daniel Vetterb5a9fa02014-04-24 23:54:49 +02008948 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
8949 pipe_config->limited_color_range = true;
8950
Daniel Vetterab9412b2013-05-03 11:49:46 +02008951 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
Daniel Vetter66e985c2013-06-05 13:34:20 +02008952 struct intel_shared_dpll *pll;
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02008953 enum intel_dpll_id pll_id;
Daniel Vetter66e985c2013-06-05 13:34:20 +02008954
Daniel Vetter88adfff2013-03-28 10:42:01 +01008955 pipe_config->has_pch_encoder = true;
8956
Daniel Vetter627eb5a2013-04-29 19:33:42 +02008957 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
8958 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8959 FDI_DP_PORT_WIDTH_SHIFT) + 1;
Daniel Vetter72419202013-04-04 13:28:53 +02008960
8961 ironlake_get_fdi_m_n_config(crtc, pipe_config);
Daniel Vetter6c49f242013-06-06 12:45:25 +02008962
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03008963 if (HAS_PCH_IBX(dev_priv)) {
Imre Deakd9a7bc62016-05-12 16:18:50 +03008964 /*
8965 * The pipe->pch transcoder and pch transcoder->pll
8966 * mapping is fixed.
8967 */
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02008968 pll_id = (enum intel_dpll_id) crtc->pipe;
Daniel Vetterc0d43d62013-06-07 23:11:08 +02008969 } else {
8970 tmp = I915_READ(PCH_DPLL_SEL);
8971 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02008972 pll_id = DPLL_ID_PCH_PLL_B;
Daniel Vetterc0d43d62013-06-07 23:11:08 +02008973 else
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02008974 pll_id= DPLL_ID_PCH_PLL_A;
Daniel Vetterc0d43d62013-06-07 23:11:08 +02008975 }
Daniel Vetter66e985c2013-06-05 13:34:20 +02008976
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02008977 pipe_config->shared_dpll =
8978 intel_get_shared_dpll_by_id(dev_priv, pll_id);
8979 pll = pipe_config->shared_dpll;
Daniel Vetter66e985c2013-06-05 13:34:20 +02008980
Lucas De Marchiee1398b2018-03-20 15:06:33 -07008981 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
8982 &pipe_config->dpll_hw_state));
Daniel Vetterc93f54c2013-06-27 19:47:19 +02008983
8984 tmp = pipe_config->dpll_hw_state.dpll;
8985 pipe_config->pixel_multiplier =
8986 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
8987 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
Ville Syrjälä18442d02013-09-13 16:00:08 +03008988
8989 ironlake_pch_clock_get(crtc, pipe_config);
Daniel Vetter6c49f242013-06-06 12:45:25 +02008990 } else {
8991 pipe_config->pixel_multiplier = 1;
Daniel Vetter627eb5a2013-04-29 19:33:42 +02008992 }
8993
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02008994 intel_get_pipe_timings(crtc, pipe_config);
Jani Nikulabc58be62016-03-18 17:05:39 +02008995 intel_get_pipe_src_size(crtc, pipe_config);
Daniel Vetter1bd1bd82013-04-29 21:56:12 +02008996
Daniel Vetter2fa2fe92013-05-07 23:34:16 +02008997 ironlake_get_pfit_config(crtc, pipe_config);
8998
Imre Deak17290502016-02-12 18:55:11 +02008999 ret = true;
9000
9001out:
9002 intel_display_power_put(dev_priv, power_domain);
9003
9004 return ret;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +01009005}
9006
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009007static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9008{
Chris Wilson91c8a322016-07-05 10:40:23 +01009009 struct drm_device *dev = &dev_priv->drm;
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009010 struct intel_crtc *crtc;
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009011
Damien Lespiaud3fcc802014-05-13 23:32:22 +01009012 for_each_intel_crtc(dev, crtc)
Rob Clarke2c719b2014-12-15 13:56:32 -05009013 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009014 pipe_name(crtc->pipe));
9015
Imre Deak75e39682018-08-06 12:58:39 +03009016 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
Imre Deak9c3a16c2017-08-14 18:15:30 +03009017 "Display power well on\n");
Rob Clarke2c719b2014-12-15 13:56:32 -05009018 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
Ville Syrjälä01403de2015-09-18 20:03:33 +03009019 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9020 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
Imre Deak44cb7342016-08-10 14:07:29 +03009021 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
Rob Clarke2c719b2014-12-15 13:56:32 -05009022 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009023 "CPU PWM1 enabled\n");
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01009024 if (IS_HASWELL(dev_priv))
Rob Clarke2c719b2014-12-15 13:56:32 -05009025 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
Paulo Zanonic5107b82014-07-04 11:50:30 -03009026 "CPU PWM2 enabled\n");
Rob Clarke2c719b2014-12-15 13:56:32 -05009027 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009028 "PCH PWM1 enabled\n");
Rob Clarke2c719b2014-12-15 13:56:32 -05009029 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009030 "Utility pin enabled\n");
Rob Clarke2c719b2014-12-15 13:56:32 -05009031 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009032
Paulo Zanoni9926ada2014-04-01 19:39:47 -03009033 /*
9034 * In theory we can still leave IRQs enabled, as long as only the HPD
9035 * interrupts remain enabled. We used to check for that, but since it's
9036 * gen-specific and since we only disable LCPLL after we fully disable
9037 * the interrupts, the check below should be enough.
9038 */
Rob Clarke2c719b2014-12-15 13:56:32 -05009039 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009040}
9041
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03009042static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9043{
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01009044 if (IS_HASWELL(dev_priv))
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03009045 return I915_READ(D_COMP_HSW);
9046 else
9047 return I915_READ(D_COMP_BDW);
9048}
9049
Paulo Zanoni3c4c9b82014-03-07 20:12:36 -03009050static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9051{
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01009052 if (IS_HASWELL(dev_priv)) {
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01009053 mutex_lock(&dev_priv->pcu_lock);
Paulo Zanoni3c4c9b82014-03-07 20:12:36 -03009054 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9055 val))
Chris Wilson79cf2192016-08-24 11:16:07 +01009056 DRM_DEBUG_KMS("Failed to write to D_COMP\n");
Sagar Arun Kamble9f817502017-10-10 22:30:05 +01009057 mutex_unlock(&dev_priv->pcu_lock);
Paulo Zanoni3c4c9b82014-03-07 20:12:36 -03009058 } else {
Paulo Zanoni9ccd5ae2014-07-04 11:59:58 -03009059 I915_WRITE(D_COMP_BDW, val);
9060 POSTING_READ(D_COMP_BDW);
Paulo Zanoni3c4c9b82014-03-07 20:12:36 -03009061 }
Paulo Zanonibe256dc2013-07-23 11:19:26 -03009062}
9063
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move the CD clock onto FCLK before taking the PLL down. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* Wait (up to 1us) for the PLL to report unlocked. */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable D_COMP and wait for any pending RCOMP cycle to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		/* Let the hardware power LCPLL down in deep package states. */
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9115
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source. Reverses hsw_disable_lcpll(); safe to call if LCPLL was never
 * disabled (returns early when the PLL is already locked and restored).
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is locked and fully restored already. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP, forcing a fresh compensation cycle. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	/* Wait (up to 5us) for the PLL to lock again. */
	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		/* Move the CD clock from FCLK back onto LCPLL. */
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/* The CD clock source changed: refresh and log the cdclk state. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9171
Paulo Zanoni765dab672014-03-07 20:08:18 -03009172/*
9173 * Package states C8 and deeper are really deep PC states that can only be
9174 * reached when all the devices on the system allow it, so even if the graphics
9175 * device allows PC8+, it doesn't mean the system will actually get to these
9176 * states. Our driver only allows PC8+ when going into runtime PM.
9177 *
9178 * The requirements for PC8+ are that all the outputs are disabled, the power
9179 * well is disabled and most interrupts are disabled, and these are also
9180 * requirements for runtime PM. When these conditions are met, we manually do
9181 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9182 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9183 * hang the machine.
9184 *
9185 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9186 * the state of some registers, so when we come back from PC8+ we need to
9187 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9188 * need to take care of the registers kept by RC6. Notice that this happens even
9189 * if we don't put the device in PCI D3 state (which is what currently happens
9190 * because of the runtime PM support).
9191 *
9192 * For more, read "Display Sequences for Package C8" on the hardware
9193 * documentation.
9194 */
/*
 * Allow package C8+ (see "Display Sequences for Package C8" comment
 * above): enable PCH clock partition shutdown on LPT-LP, disable the
 * CLKOUT_DP path and disable LCPLL (allowing power down).
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		/* Let the PCH clock partitions shut down on LPT-LP. */
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}
9210
/*
 * Disallow package C8+, undoing hsw_enable_pc8(): restore LCPLL,
 * re-init the PCH refclk and disable PCH clock partition shutdown
 * on LPT-LP.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		/* Keep the PCH clock partitions from shutting down. */
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
9226
/*
 * Compute the clock state for a HSW+ crtc: reserve a shared DPLL for the
 * new encoder, except for DSI outputs which are skipped here.
 *
 * Returns 0 on success, -EINVAL if no suitable shared DPLL was found.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
				      pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}
9246
/*
 * Read out which shared DPLL drives @port on Cannonlake (from the
 * per-port clock select field in DPCLKA_CFGCR0) and store it in
 * @pipe_config->shared_dpll.
 */
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
				   enum port port,
				   struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	/* Only DPLL0..DPLL2 are valid selections here. */
	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9262
/*
 * Read out which PLL drives @port on Icelake and store it in
 * @pipe_config->shared_dpll. Combo PHY ports use the DPLL selected in
 * DPCLKA_CFGCR0_ICL; Type-C ports map to their fixed MG PLL.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* TODO: TBT pll not implemented. */
	if (intel_port_is_combophy(dev_priv, port)) {
		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

		/* A combo PHY port must select a combo PHY DPLL. */
		if (WARN_ON(!intel_dpll_is_combophy(id)))
			return;
	} else if (intel_port_is_tc(dev_priv, port)) {
		id = icl_port_to_mg_pll_id(port);
	} else {
		WARN(1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9287
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309288static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9289 enum port port,
9290 struct intel_crtc_state *pipe_config)
9291{
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009292 enum intel_dpll_id id;
9293
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309294 switch (port) {
9295 case PORT_A:
Imre Deak08250c42016-03-14 19:55:34 +02009296 id = DPLL_ID_SKL_DPLL0;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309297 break;
9298 case PORT_B:
Imre Deak08250c42016-03-14 19:55:34 +02009299 id = DPLL_ID_SKL_DPLL1;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309300 break;
9301 case PORT_C:
Imre Deak08250c42016-03-14 19:55:34 +02009302 id = DPLL_ID_SKL_DPLL2;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309303 break;
9304 default:
9305 DRM_ERROR("Incorrect port type\n");
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009306 return;
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309307 }
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +02009308
9309 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
Satheeshakrishna M3760b592014-08-22 09:49:11 +05309310}
9311
/*
 * Read out which shared DPLL drives @port on Skylake (from the per-port
 * clock select field in DPLL_CTRL2) and store it in
 * @pipe_config->shared_dpll.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	/* Each port's select field starts at bit (port * 3 + 1). */
	id = temp >> (port * 3 + 1);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9327
/*
 * Read out which PLL drives @port on Haswell/Broadwell (from
 * PORT_CLK_SEL) and store it in @pipe_config->shared_dpll. If no clock
 * is selected (or the value is unknown), shared_dpll is left untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		/* No PLL readout in these cases. */
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9363
/*
 * Determine which CPU transcoder feeds @crtc (handling the shared eDP
 * transcoder), acquire a power reference for it — recorded in
 * @power_domain_mask for the caller to release — and report whether the
 * pipe is enabled. Returns false if the transcoder's power domain is
 * off (no reference is taken in that case).
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		/* The eDP transcoder drives this pipe: use it instead. */
		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9415
/*
 * Check whether one of the BXT DSI transcoders (DSI A/C) drives @crtc.
 * On a match, pipe_config->cpu_transcoder is set to the DSI transcoder
 * and the acquired power reference is recorded in @power_domain_mask
 * for the caller to release. Returns true iff a DSI transcoder is
 * driving this crtc.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* Port A uses transcoder DSI A, port C transcoder DSI C. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Only match the transcoder wired to this crtc's pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9463
/*
 * Read out the DDI port state for @crtc: which port the transcoder is
 * attached to, which shared DPLL drives that port (per-platform helper)
 * and, on HSW/BDW, whether the PCH transcoder/FDI is in use.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_ICELAKE(dev_priv))
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* If a DPLL was found, its hardware state is expected to be valid. */
	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
							&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9509
/*
 * Read out the full hardware pipe configuration for @crtc on HSW+:
 * transcoder (DDI or DSI), pipe timings/size, pfit, gamma mode, IPS and
 * pixel multiplier. Power references taken along the way are tracked in
 * power_domain_mask and released before returning.
 *
 * Returns true if the pipe is active, false otherwise (including when
 * the pipe's power domain is off).
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* On BXT-class hardware a DSI transcoder may drive the pipe instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	/* DSI transcoders have no DDI port and own their timing readout. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);
	intel_get_crtc_ycbcr_config(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	/* The panel fitter has its own power domain; read it out if powered. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power reference acquired above. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
9585
/*
 * Compute the base address to program for the cursor surface: either a
 * physical address (platforms with cursor_needs_physical) or the GGTT
 * offset, plus the plane 0 fb offset, plus a 180 degree rotation
 * adjustment on GMCH platforms.
 */
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 base;

	if (INTEL_INFO(dev_priv)->cursor_needs_physical)
		base = obj->phys_handle->busaddr;
	else
		base = intel_plane_ggtt_offset(plane_state);

	base += plane_state->color_plane[0].offset;

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
		/* Advance by (w*h - 1) pixels for the rotated scanout. */
		base += (plane_state->base.crtc_h *
			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];

	return base;
}
9609
Ville Syrjäläed270222017-03-27 21:55:36 +03009610static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9611{
9612 int x = plane_state->base.crtc_x;
9613 int y = plane_state->base.crtc_y;
9614 u32 pos = 0;
9615
9616 if (x < 0) {
9617 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9618 x = -x;
9619 }
9620 pos |= x << CURSOR_X_SHIFT;
9621
9622 if (y < 0) {
9623 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9624 y = -y;
9625 }
9626 pos |= y << CURSOR_Y_SHIFT;
9627
9628 return pos;
9629}
9630
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009631static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9632{
9633 const struct drm_mode_config *config =
9634 &plane_state->base.plane->dev->mode_config;
9635 int width = plane_state->base.crtc_w;
9636 int height = plane_state->base.crtc_h;
9637
9638 return width > 0 && width <= config->cursor_width &&
9639 height > 0 && height <= config->cursor_height;
9640}
9641
/*
 * Compute and validate the cursor's surface parameters: fill the GGTT
 * view, compute the plane 0 stride and aligned offset, and reject any
 * non-zero source origin (cursor panning is not supported).
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	intel_fill_fb_ggtt_view(&plane_state->view, fb, rotation);
	plane_state->color_plane[0].stride = intel_fb_pitch(fb, 0, rotation);

	ret = intel_plane_check_stride(plane_state);
	if (ret)
		return ret;

	/* src coordinates are in 16.16 fixed point; take the integer part. */
	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->color_plane[0].offset = offset;

	return 0;
}
9673
/*
 * Common atomic check for all cursor planes: reject tiled fbs, clip
 * the plane against the crtc with scaling disallowed, then validate
 * the source coordinates and surface parameters.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* cursor surfaces must be linear */
	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/* cursors can never be scaled */
	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* fully clipped/disabled cursor needs no further checks */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
9706
Ville Syrjäläddd57132018-09-07 18:24:02 +03009707static unsigned int
9708i845_cursor_max_stride(struct intel_plane *plane,
9709 u32 pixel_format, u64 modifier,
9710 unsigned int rotation)
9711{
9712 return 2048;
9713}
9714
Ville Syrjälä292889e2017-03-17 23:18:01 +02009715static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9716 const struct intel_plane_state *plane_state)
9717{
Ville Syrjälä292889e2017-03-17 23:18:01 +02009718 return CURSOR_ENABLE |
9719 CURSOR_GAMMA_ENABLE |
9720 CURSOR_FORMAT_ARGB |
Ville Syrjälädf79cf42018-09-11 18:01:39 +03009721 CURSOR_STRIDE(plane_state->color_plane[0].stride);
Ville Syrjälä292889e2017-03-17 23:18:01 +02009722}
9723
Ville Syrjälä659056f2017-03-27 21:55:39 +03009724static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9725{
Ville Syrjälä659056f2017-03-27 21:55:39 +03009726 int width = plane_state->base.crtc_w;
Ville Syrjälä659056f2017-03-27 21:55:39 +03009727
9728 /*
9729 * 845g/865g are only limited by the width of their cursors,
9730 * the height is arbitrary up to the precision of the register.
9731 */
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009732 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
Ville Syrjälä659056f2017-03-27 21:55:39 +03009733}
9734
Ville Syrjäläeb0f5042018-08-28 17:27:06 +03009735static int i845_check_cursor(struct intel_crtc_state *crtc_state,
Ville Syrjälä659056f2017-03-27 21:55:39 +03009736 struct intel_plane_state *plane_state)
9737{
9738 const struct drm_framebuffer *fb = plane_state->base.fb;
Ville Syrjälä659056f2017-03-27 21:55:39 +03009739 int ret;
9740
9741 ret = intel_check_cursor(crtc_state, plane_state);
9742 if (ret)
9743 return ret;
9744
9745 /* if we want to turn off the cursor ignore width and height */
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009746 if (!fb)
Ville Syrjälä659056f2017-03-27 21:55:39 +03009747 return 0;
9748
9749 /* Check for which cursor types we support */
9750 if (!i845_cursor_size_ok(plane_state)) {
9751 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
9752 plane_state->base.crtc_w,
9753 plane_state->base.crtc_h);
9754 return -EINVAL;
9755 }
9756
Ville Syrjälädf79cf42018-09-11 18:01:39 +03009757 WARN_ON(plane_state->base.visible &&
9758 plane_state->color_plane[0].stride != fb->pitches[0]);
9759
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009760 switch (fb->pitches[0]) {
Chris Wilson560b85b2010-08-07 11:01:38 +01009761 case 256:
9762 case 512:
9763 case 1024:
9764 case 2048:
Ville Syrjälädc41c152014-08-13 11:57:05 +03009765 break;
Ville Syrjälä1e1bb872017-03-27 21:55:41 +03009766 default:
9767 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
9768 fb->pitches[0]);
9769 return -EINVAL;
Chris Wilson560b85b2010-08-07 11:01:38 +01009770 }
Maarten Lankhorst55a08b3f2016-01-07 11:54:10 +01009771
Ville Syrjälä659056f2017-03-27 21:55:39 +03009772 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
9773
9774 return 0;
Jesse Barnes79e53942008-11-07 14:24:08 -08009775}
9776
/*
 * Program the 845g/865g cursor registers. A NULL or invisible
 * plane_state disables the cursor (all registers written as 0).
 *
 * Register writes are done under the uncore lock so the sequence
 * is not interleaved with other MMIO access.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* disable first, then reprogram, then re-enable */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* cache what we wrote so the fast path below can skip it */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* only the position changed; a single write suffices */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
9821
9822static void i845_disable_cursor(struct intel_plane *plane,
9823 struct intel_crtc *crtc)
9824{
9825 i845_update_cursor(plane, NULL, NULL);
Chris Wilson560b85b2010-08-07 11:01:38 +01009826}
9827
Ville Syrjäläeade6c82018-01-30 22:38:03 +02009828static bool i845_cursor_get_hw_state(struct intel_plane *plane,
9829 enum pipe *pipe)
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02009830{
9831 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9832 enum intel_display_power_domain power_domain;
9833 bool ret;
9834
9835 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
9836 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9837 return false;
9838
9839 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
9840
Ville Syrjäläeade6c82018-01-30 22:38:03 +02009841 *pipe = PIPE_A;
9842
Ville Syrjälä51f5a0962017-11-17 21:19:08 +02009843 intel_display_power_put(dev_priv, power_domain);
9844
9845 return ret;
9846}
9847
Ville Syrjäläddd57132018-09-07 18:24:02 +03009848static unsigned int
9849i9xx_cursor_max_stride(struct intel_plane *plane,
9850 u32 pixel_format, u64 modifier,
9851 unsigned int rotation)
9852{
9853 return plane->base.dev->mode_config.cursor_width * 4;
9854}
9855
Ville Syrjälä292889e2017-03-17 23:18:01 +02009856static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9857 const struct intel_plane_state *plane_state)
9858{
9859 struct drm_i915_private *dev_priv =
9860 to_i915(plane_state->base.plane->dev);
9861 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
José Roberto de Souzac894d632018-05-18 13:15:47 -07009862 u32 cntl = 0;
Ville Syrjälä292889e2017-03-17 23:18:01 +02009863
Ville Syrjäläe876b782018-01-30 22:38:05 +02009864 if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
9865 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
9866
José Roberto de Souzac894d632018-05-18 13:15:47 -07009867 if (INTEL_GEN(dev_priv) <= 10) {
9868 cntl |= MCURSOR_GAMMA_ENABLE;
Ville Syrjälä292889e2017-03-17 23:18:01 +02009869
José Roberto de Souzac894d632018-05-18 13:15:47 -07009870 if (HAS_DDI(dev_priv))
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +02009871 cntl |= MCURSOR_PIPE_CSC_ENABLE;
José Roberto de Souzac894d632018-05-18 13:15:47 -07009872 }
Ville Syrjälä292889e2017-03-17 23:18:01 +02009873
Ville Syrjälä32ea06b2018-01-30 22:38:01 +02009874 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
9875 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
Ville Syrjälä292889e2017-03-17 23:18:01 +02009876
9877 switch (plane_state->base.crtc_w) {
9878 case 64:
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +02009879 cntl |= MCURSOR_MODE_64_ARGB_AX;
Ville Syrjälä292889e2017-03-17 23:18:01 +02009880 break;
9881 case 128:
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +02009882 cntl |= MCURSOR_MODE_128_ARGB_AX;
Ville Syrjälä292889e2017-03-17 23:18:01 +02009883 break;
9884 case 256:
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +02009885 cntl |= MCURSOR_MODE_256_ARGB_AX;
Ville Syrjälä292889e2017-03-17 23:18:01 +02009886 break;
9887 default:
9888 MISSING_CASE(plane_state->base.crtc_w);
9889 return 0;
9890 }
9891
Robert Fossc2c446a2017-05-19 16:50:17 -04009892 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
Ville Syrjäläb99b9ec2018-01-31 16:37:09 +02009893 cntl |= MCURSOR_ROTATE_180;
Ville Syrjälä292889e2017-03-17 23:18:01 +02009894
9895 return cntl;
9896}
9897
Ville Syrjälä659056f2017-03-27 21:55:39 +03009898static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
Chris Wilson560b85b2010-08-07 11:01:38 +01009899{
Ville Syrjälä024faac2017-03-27 21:55:42 +03009900 struct drm_i915_private *dev_priv =
9901 to_i915(plane_state->base.plane->dev);
Ville Syrjälä659056f2017-03-27 21:55:39 +03009902 int width = plane_state->base.crtc_w;
9903 int height = plane_state->base.crtc_h;
Chris Wilson560b85b2010-08-07 11:01:38 +01009904
Ville Syrjälä3637ecf2017-03-27 21:55:40 +03009905 if (!intel_cursor_size_ok(plane_state))
Ville Syrjälädc41c152014-08-13 11:57:05 +03009906 return false;
9907
Ville Syrjälä024faac2017-03-27 21:55:42 +03009908 /* Cursor width is limited to a few power-of-two sizes */
9909 switch (width) {
Ville Syrjälä659056f2017-03-27 21:55:39 +03009910 case 256:
9911 case 128:
Ville Syrjälä659056f2017-03-27 21:55:39 +03009912 case 64:
9913 break;
9914 default:
9915 return false;
9916 }
9917
Ville Syrjälädc41c152014-08-13 11:57:05 +03009918 /*
Ville Syrjälä024faac2017-03-27 21:55:42 +03009919 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
9920 * height from 8 lines up to the cursor width, when the
9921 * cursor is not rotated. Everything else requires square
9922 * cursors.
Ville Syrjälädc41c152014-08-13 11:57:05 +03009923 */
Ville Syrjälä024faac2017-03-27 21:55:42 +03009924 if (HAS_CUR_FBC(dev_priv) &&
Dave Airliea82256b2017-05-30 15:25:28 +10009925 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
Ville Syrjälä024faac2017-03-27 21:55:42 +03009926 if (height < 8 || height > width)
Ville Syrjälädc41c152014-08-13 11:57:05 +03009927 return false;
9928 } else {
Ville Syrjälä024faac2017-03-27 21:55:42 +03009929 if (height != width)
Ville Syrjälädc41c152014-08-13 11:57:05 +03009930 return false;
Ville Syrjälädc41c152014-08-13 11:57:05 +03009931 }
9932
9933 return true;
9934}
9935
/*
 * i9xx+ cursor atomic check: run the common checks, validate the
 * cursor size and that the fb stride exactly matches the cursor
 * width in bytes, apply the CHV pipe C workaround, then precompute
 * the control register value.
 *
 * Returns 0 on success or a negative error code.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* the hw assumes the stride equals the cursor width in bytes */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9990
/*
 * Program the i9xx+ cursor plane registers. A NULL or invisible
 * plane_state disables the cursor (all registers written as 0).
 *
 * Register writes are done under the uncore lock; the exact write
 * order below is required for the hardware to arm the update
 * correctly (see the comment in the body).
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl;

		/* CUR_FBC_CTL is only programmed for non-square cursors */
		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always start the full update
	 * with a CURCNTR write.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * CURCNTR and CUR_FBC_CTL are always
	 * armed by the CURBASE write only.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* cache what we wrote so the fast path below can skip it */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	POSTING_READ_FW(CURBASE(pipe));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
Ville Syrjälä5efb3e22014-04-09 13:28:53 +030010051
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030010052static void i9xx_disable_cursor(struct intel_plane *plane,
10053 struct intel_crtc *crtc)
Chris Wilsoncda4b7d2010-07-09 08:45:04 +010010054{
Ville Syrjäläb2d03b02017-03-27 21:55:37 +030010055 i9xx_update_cursor(plane, NULL, NULL);
Chris Wilsoncda4b7d2010-07-09 08:45:04 +010010056}
Ville Syrjäläd6e4db12013-09-04 18:25:31 +030010057
/*
 * Read back whether the cursor plane is enabled in hardware, and
 * report which pipe it is attached to. Returns false (and leaves
 * *pipe untouched) when the pipe's power well is off.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	val = I915_READ(CURCNTR(plane->pipe));

	/* any non-zero cursor mode means the plane is enabled */
	ret = val & MCURSOR_MODE;

	/* gen2-3 (minus g4x) encode the pipe in the control register */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
Chris Wilsoncda4b7d2010-07-09 08:45:04 +010010089
/*
 * VESA 640x480@72Hz mode, programmed on the pipe during load
 * detection when the caller supplies no mode of its own.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10095
Daniel Vettera8bb6812014-02-10 18:00:39 +010010096struct drm_framebuffer *
Chris Wilson24dbf512017-02-15 10:59:18 +000010097intel_framebuffer_create(struct drm_i915_gem_object *obj,
10098 struct drm_mode_fb_cmd2 *mode_cmd)
Chris Wilsond2dff872011-04-19 08:36:26 +010010099{
10100 struct intel_framebuffer *intel_fb;
10101 int ret;
10102
10103 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020010104 if (!intel_fb)
Chris Wilsond2dff872011-04-19 08:36:26 +010010105 return ERR_PTR(-ENOMEM);
Chris Wilsond2dff872011-04-19 08:36:26 +010010106
Chris Wilson24dbf512017-02-15 10:59:18 +000010107 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
Daniel Vetterdd4916c2013-10-09 21:23:51 +020010108 if (ret)
10109 goto err;
Chris Wilsond2dff872011-04-19 08:36:26 +010010110
10111 return &intel_fb->base;
Daniel Vetterdd4916c2013-10-09 21:23:51 +020010112
Lukas Wunnerdcb13942015-07-04 11:50:58 +020010113err:
10114 kfree(intel_fb);
Daniel Vetterdd4916c2013-10-09 21:23:51 +020010115 return ERR_PTR(ret);
Chris Wilsond2dff872011-04-19 08:36:26 +010010116}
10117
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010118static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10119 struct drm_crtc *crtc)
Chris Wilsond2dff872011-04-19 08:36:26 +010010120{
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010121 struct drm_plane *plane;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010122 struct drm_plane_state *plane_state;
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010123 int ret, i;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010124
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010125 ret = drm_atomic_add_affected_planes(state, crtc);
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010126 if (ret)
10127 return ret;
Ville Syrjälä20bdc112017-12-20 10:35:45 +010010128
10129 for_each_new_plane_in_state(state, plane, plane_state, i) {
10130 if (plane_state->crtc != crtc)
10131 continue;
10132
10133 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10134 if (ret)
10135 return ret;
10136
10137 drm_atomic_set_fb_for_plane(plane_state, NULL);
10138 }
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010139
10140 return 0;
10141}
10142
/*
 * Force @connector onto a pipe so its output can be probed (load
 * detection). Commits a temporary modeset (using @mode, or the VESA
 * 640x480 fallback) and records the state needed to undo it in
 * @old->restore_state for intel_release_load_detect_pipe().
 *
 * NOTE(review): despite the int return type this returns true/false
 * for success/failure, or -EDEADLK when the caller must back off and
 * retry the locking — callers must handle all three.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* one state to apply the load-detect mode, one to restore afterwards */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* keep planes off so only the probe mode scans out */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* snapshot the pre-probe state of connector, crtc and planes */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* caller must drop locks and retry on deadlock */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
10300
Daniel Vetterd2434ab2012-08-12 21:20:10 +020010301void intel_release_load_detect_pipe(struct drm_connector *connector,
Ander Conselvan de Oliveira49172fe2015-03-20 16:18:02 +020010302 struct intel_load_detect_pipe *old,
10303 struct drm_modeset_acquire_ctx *ctx)
Jesse Barnes79e53942008-11-07 14:24:08 -080010304{
Daniel Vetterd2434ab2012-08-12 21:20:10 +020010305 struct intel_encoder *intel_encoder =
10306 intel_attached_encoder(connector);
Chris Wilson4ef69c72010-09-09 15:14:28 +010010307 struct drm_encoder *encoder = &intel_encoder->base;
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010308 struct drm_atomic_state *state = old->restore_state;
Ander Conselvan de Oliveirad3a40d12015-04-21 17:13:09 +030010309 int ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080010310
Chris Wilsond2dff872011-04-19 08:36:26 +010010311 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
Jani Nikulac23cc412014-06-03 14:56:17 +030010312 connector->base.id, connector->name,
Jani Nikula8e329a032014-06-03 14:56:21 +030010313 encoder->base.id, encoder->name);
Chris Wilsond2dff872011-04-19 08:36:26 +010010314
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010315 if (!state)
Chris Wilson0622a532011-04-21 09:32:11 +010010316 return;
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010317
Maarten Lankhorst581e49f2017-01-16 10:37:38 +010010318 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
Chris Wilson08536952016-10-14 13:18:18 +010010319 if (ret)
Maarten Lankhorstedde3612016-02-17 09:18:35 +010010320 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
Chris Wilson08536952016-10-14 13:18:18 +010010321 drm_atomic_state_put(state);
Jesse Barnes79e53942008-11-07 14:24:08 -080010322}
10323
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010324static int i9xx_pll_refclk(struct drm_device *dev,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010325 const struct intel_crtc_state *pipe_config)
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010326{
Chris Wilsonfac5e232016-07-04 11:34:36 +010010327 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010328 u32 dpll = pipe_config->dpll_hw_state.dpll;
10329
10330 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
Ville Syrjäläe91e9412013-12-09 18:54:16 +020010331 return dev_priv->vbt.lvds_ssc_freq;
Tvrtko Ursulin6e266952016-10-13 11:02:53 +010010332 else if (HAS_PCH_SPLIT(dev_priv))
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010333 return 120000;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010010334 else if (!IS_GEN2(dev_priv))
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010335 return 96000;
10336 else
10337 return 48000;
10338}
10339
Jesse Barnes79e53942008-11-07 14:24:08 -080010340/* Returns the clock of the currently programmed mode of the given pipe. */
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010341static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010342 struct intel_crtc_state *pipe_config)
Jesse Barnes79e53942008-11-07 14:24:08 -080010343{
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010344 struct drm_device *dev = crtc->base.dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +010010345 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010346 int pipe = pipe_config->cpu_transcoder;
Ville Syrjälä293623f2013-09-13 16:18:46 +030010347 u32 dpll = pipe_config->dpll_hw_state.dpll;
Jesse Barnes79e53942008-11-07 14:24:08 -080010348 u32 fp;
Ander Conselvan de Oliveira9e2c8472016-05-04 12:11:57 +030010349 struct dpll clock;
Imre Deakdccbea32015-06-22 23:35:51 +030010350 int port_clock;
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010351 int refclk = i9xx_pll_refclk(dev, pipe_config);
Jesse Barnes79e53942008-11-07 14:24:08 -080010352
10353 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
Ville Syrjälä293623f2013-09-13 16:18:46 +030010354 fp = pipe_config->dpll_hw_state.fp0;
Jesse Barnes79e53942008-11-07 14:24:08 -080010355 else
Ville Syrjälä293623f2013-09-13 16:18:46 +030010356 fp = pipe_config->dpll_hw_state.fp1;
Jesse Barnes79e53942008-11-07 14:24:08 -080010357
10358 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +020010359 if (IS_PINEVIEW(dev_priv)) {
Adam Jacksonf2b115e2009-12-03 17:14:42 -050010360 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10361 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
Shaohua Li21778322009-02-23 15:19:16 +080010362 } else {
10363 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10364 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10365 }
10366
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010010367 if (!IS_GEN2(dev_priv)) {
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +020010368 if (IS_PINEVIEW(dev_priv))
Adam Jacksonf2b115e2009-12-03 17:14:42 -050010369 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10370 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
Shaohua Li21778322009-02-23 15:19:16 +080010371 else
10372 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
Jesse Barnes79e53942008-11-07 14:24:08 -080010373 DPLL_FPA01_P1_POST_DIV_SHIFT);
10374
10375 switch (dpll & DPLL_MODE_MASK) {
10376 case DPLLB_MODE_DAC_SERIAL:
10377 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10378 5 : 10;
10379 break;
10380 case DPLLB_MODE_LVDS:
10381 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10382 7 : 14;
10383 break;
10384 default:
Zhao Yakui28c97732009-10-09 11:39:41 +080010385 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
Jesse Barnes79e53942008-11-07 14:24:08 -080010386 "mode\n", (int)(dpll & DPLL_MODE_MASK));
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010387 return;
Jesse Barnes79e53942008-11-07 14:24:08 -080010388 }
10389
Ville Syrjälä9b1e14f2016-10-31 22:37:15 +020010390 if (IS_PINEVIEW(dev_priv))
Imre Deakdccbea32015-06-22 23:35:51 +030010391 port_clock = pnv_calc_dpll_params(refclk, &clock);
Daniel Vetterac58c3f2013-06-01 17:16:17 +020010392 else
Imre Deakdccbea32015-06-22 23:35:51 +030010393 port_clock = i9xx_calc_dpll_params(refclk, &clock);
Jesse Barnes79e53942008-11-07 14:24:08 -080010394 } else {
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +010010395 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
Ville Syrjäläb1c560d2013-12-09 18:54:13 +020010396 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
Jesse Barnes79e53942008-11-07 14:24:08 -080010397
10398 if (is_lvds) {
10399 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10400 DPLL_FPA01_P1_POST_DIV_SHIFT);
Ville Syrjäläb1c560d2013-12-09 18:54:13 +020010401
10402 if (lvds & LVDS_CLKB_POWER_UP)
10403 clock.p2 = 7;
10404 else
10405 clock.p2 = 14;
Jesse Barnes79e53942008-11-07 14:24:08 -080010406 } else {
10407 if (dpll & PLL_P1_DIVIDE_BY_TWO)
10408 clock.p1 = 2;
10409 else {
10410 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10411 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10412 }
10413 if (dpll & PLL_P2_DIVIDE_BY_4)
10414 clock.p2 = 4;
10415 else
10416 clock.p2 = 2;
Jesse Barnes79e53942008-11-07 14:24:08 -080010417 }
Ville Syrjäläda4a1ef2013-09-09 14:06:37 +030010418
Imre Deakdccbea32015-06-22 23:35:51 +030010419 port_clock = i9xx_calc_dpll_params(refclk, &clock);
Jesse Barnes79e53942008-11-07 14:24:08 -080010420 }
10421
Ville Syrjälä18442d02013-09-13 16:00:08 +030010422 /*
10423 * This value includes pixel_multiplier. We will use
Damien Lespiau241bfc32013-09-25 16:45:37 +010010424 * port_clock to compute adjusted_mode.crtc_clock in the
Ville Syrjälä18442d02013-09-13 16:00:08 +030010425 * encoder's get_config() function.
10426 */
Imre Deakdccbea32015-06-22 23:35:51 +030010427 pipe_config->port_clock = port_clock;
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010428}
10429
Ville Syrjälä6878da02013-09-13 15:59:11 +030010430int intel_dotclock_calculate(int link_freq,
10431 const struct intel_link_m_n *m_n)
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010432{
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010433 /*
10434 * The calculation for the data clock is:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010435 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010436 * But we want to avoid losing precison if possible, so:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010437 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010438 *
10439 * and the link clock is simpler:
Ville Syrjälä1041a022013-09-06 23:28:58 +030010440 * link_clock = (m * link_clock) / n
Jesse Barnes79e53942008-11-07 14:24:08 -080010441 */
10442
Ville Syrjälä6878da02013-09-13 15:59:11 +030010443 if (!m_n->link_n)
10444 return 0;
10445
Chris Wilson31236982017-09-13 11:51:53 +010010446 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
Ville Syrjälä6878da02013-09-13 15:59:11 +030010447}
10448
Ville Syrjälä18442d02013-09-13 16:00:08 +030010449static void ironlake_pch_clock_get(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010450 struct intel_crtc_state *pipe_config)
Ville Syrjälä6878da02013-09-13 15:59:11 +030010451{
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020010452 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Ville Syrjälä18442d02013-09-13 16:00:08 +030010453
10454 /* read out port_clock from the DPLL */
10455 i9xx_crtc_clock_get(crtc, pipe_config);
Ville Syrjälä6878da02013-09-13 15:59:11 +030010456
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010457 /*
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020010458 * In case there is an active pipe without active ports,
10459 * we may need some idea for the dotclock anyway.
10460 * Calculate one based on the FDI configuration.
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010461 */
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020010462 pipe_config->base.adjusted_mode.crtc_clock =
Ville Syrjälä21a727b2016-02-17 21:41:10 +020010463 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
Ville Syrjälä18442d02013-09-13 16:00:08 +030010464 &pipe_config->fdi_m_n);
Jesse Barnes79e53942008-11-07 14:24:08 -080010465}
10466
Ville Syrjäläde330812017-10-09 19:19:50 +030010467/* Returns the currently programmed mode of the given encoder. */
10468struct drm_display_mode *
10469intel_encoder_current_mode(struct intel_encoder *encoder)
Jesse Barnes79e53942008-11-07 14:24:08 -080010470{
Ville Syrjäläde330812017-10-09 19:19:50 +030010471 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10472 struct intel_crtc_state *crtc_state;
Jesse Barnes79e53942008-11-07 14:24:08 -080010473 struct drm_display_mode *mode;
Ville Syrjäläde330812017-10-09 19:19:50 +030010474 struct intel_crtc *crtc;
10475 enum pipe pipe;
10476
10477 if (!encoder->get_hw_state(encoder, &pipe))
10478 return NULL;
10479
10480 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
Jesse Barnes79e53942008-11-07 14:24:08 -080010481
10482 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10483 if (!mode)
10484 return NULL;
10485
Ville Syrjäläde330812017-10-09 19:19:50 +030010486 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10487 if (!crtc_state) {
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +000010488 kfree(mode);
10489 return NULL;
10490 }
10491
Ville Syrjäläde330812017-10-09 19:19:50 +030010492 crtc_state->base.crtc = &crtc->base;
Jesse Barnesf1f644d2013-06-27 00:39:25 +030010493
Ville Syrjäläde330812017-10-09 19:19:50 +030010494 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10495 kfree(crtc_state);
10496 kfree(mode);
10497 return NULL;
10498 }
Ville Syrjäläe30a1542016-04-01 18:37:25 +030010499
Ville Syrjäläde330812017-10-09 19:19:50 +030010500 encoder->get_config(encoder, crtc_state);
Ville Syrjäläe30a1542016-04-01 18:37:25 +030010501
Ville Syrjäläde330812017-10-09 19:19:50 +030010502 intel_mode_from_pipe_config(mode, crtc_state);
Jesse Barnes79e53942008-11-07 14:24:08 -080010503
Ville Syrjäläde330812017-10-09 19:19:50 +030010504 kfree(crtc_state);
Tvrtko Ursulin3f36b932016-01-19 15:25:17 +000010505
Jesse Barnes79e53942008-11-07 14:24:08 -080010506 return mode;
10507}
10508
/* drm_crtc .destroy hook: unregister the CRTC and free its i915 wrapper. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(to_intel_crtc(crtc));
}
10516
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010517/**
10518 * intel_wm_need_update - Check whether watermarks need updating
10519 * @plane: drm plane
10520 * @state: new plane state
10521 *
10522 * Check current plane state versus the new one to determine whether
10523 * watermarks need to be recalculated.
10524 *
10525 * Returns true or false.
10526 */
10527static bool intel_wm_need_update(struct drm_plane *plane,
10528 struct drm_plane_state *state)
10529{
Matt Roperd21fbe82015-09-24 15:53:12 -070010530 struct intel_plane_state *new = to_intel_plane_state(state);
10531 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10532
10533 /* Update watermarks on tiling or size changes. */
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010534 if (new->base.visible != cur->base.visible)
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010535 return true;
10536
10537 if (!cur->base.fb || !new->base.fb)
10538 return false;
10539
Ville Syrjäläbae781b2016-11-16 13:33:16 +020010540 if (cur->base.fb->modifier != new->base.fb->modifier ||
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010541 cur->base.rotation != new->base.rotation ||
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010542 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10543 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10544 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10545 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010546 return true;
10547
10548 return false;
10549}
10550
Ville Syrjäläb2b55502017-08-23 18:22:23 +030010551static bool needs_scaling(const struct intel_plane_state *state)
Matt Roperd21fbe82015-09-24 15:53:12 -070010552{
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010553 int src_w = drm_rect_width(&state->base.src) >> 16;
10554 int src_h = drm_rect_height(&state->base.src) >> 16;
10555 int dst_w = drm_rect_width(&state->base.dst);
10556 int dst_h = drm_rect_height(&state->base.dst);
Matt Roperd21fbe82015-09-24 15:53:12 -070010557
10558 return (src_w != dst_w || src_h != dst_h);
10559}
10560
Ville Syrjäläb2b55502017-08-23 18:22:23 +030010561int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
10562 struct drm_crtc_state *crtc_state,
10563 const struct intel_plane_state *old_plane_state,
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010564 struct drm_plane_state *plane_state)
10565{
Maarten Lankhorstab1d3a02015-11-19 16:07:14 +010010566 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010567 struct drm_crtc *crtc = crtc_state->crtc;
10568 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010569 struct intel_plane *plane = to_intel_plane(plane_state->plane);
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010570 struct drm_device *dev = crtc->dev;
Matt Ropered4a6a72016-02-23 17:20:13 -080010571 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010572 bool mode_changed = needs_modeset(crtc_state);
Ville Syrjäläb2b55502017-08-23 18:22:23 +030010573 bool was_crtc_enabled = old_crtc_state->base.active;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010574 bool is_crtc_enabled = crtc_state->active;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010575 bool turn_off, turn_on, visible, was_visible;
10576 struct drm_framebuffer *fb = plane_state->fb;
Ville Syrjälä78108b72016-05-27 20:59:19 +030010577 int ret;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010578
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010579 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010580 ret = skl_update_scaler_plane(
10581 to_intel_crtc_state(crtc_state),
10582 to_intel_plane_state(plane_state));
10583 if (ret)
10584 return ret;
10585 }
10586
Ville Syrjälä936e71e2016-07-26 19:06:59 +030010587 was_visible = old_plane_state->base.visible;
Maarten Lankhorst1d4258d2017-01-12 10:43:45 +010010588 visible = plane_state->visible;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010589
10590 if (!was_crtc_enabled && WARN_ON(was_visible))
10591 was_visible = false;
10592
Maarten Lankhorst35c08f42015-12-03 14:31:07 +010010593 /*
10594 * Visibility is calculated as if the crtc was on, but
10595 * after scaler setup everything depends on it being off
10596 * when the crtc isn't active.
Ville Syrjäläf818ffe2016-04-29 17:31:18 +030010597 *
10598 * FIXME this is wrong for watermarks. Watermarks should also
10599 * be computed as if the pipe would be active. Perhaps move
10600 * per-plane wm computation to the .check_plane() hook, and
10601 * only combine the results from all planes in the current place?
Maarten Lankhorst35c08f42015-12-03 14:31:07 +010010602 */
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010603 if (!is_crtc_enabled) {
Maarten Lankhorst1d4258d2017-01-12 10:43:45 +010010604 plane_state->visible = visible = false;
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010605 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
10606 }
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010607
10608 if (!was_visible && !visible)
10609 return 0;
10610
Maarten Lankhorste8861672016-02-24 11:24:26 +010010611 if (fb != old_plane_state->base.fb)
10612 pipe_config->fb_changed = true;
10613
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010614 turn_off = was_visible && (!visible || mode_changed);
10615 turn_on = visible && (!was_visible || mode_changed);
10616
Ville Syrjälä72660ce2016-05-27 20:59:20 +030010617 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010618 intel_crtc->base.base.id, intel_crtc->base.name,
10619 plane->base.base.id, plane->base.name,
Ville Syrjälä72660ce2016-05-27 20:59:20 +030010620 fb ? fb->base.id : -1);
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010621
Ville Syrjälä72660ce2016-05-27 20:59:20 +030010622 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010623 plane->base.base.id, plane->base.name,
Ville Syrjälä72660ce2016-05-27 20:59:20 +030010624 was_visible, visible,
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010625 turn_off, turn_on, mode_changed);
10626
Ville Syrjäläcaed3612016-03-09 19:07:25 +020010627 if (turn_on) {
Ville Syrjälä04548cb2017-04-21 21:14:29 +030010628 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjäläb4ede6d2017-03-02 19:15:01 +020010629 pipe_config->update_wm_pre = true;
Ville Syrjäläcaed3612016-03-09 19:07:25 +020010630
10631 /* must disable cxsr around plane enable/disable */
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010632 if (plane->id != PLANE_CURSOR)
Ville Syrjäläcaed3612016-03-09 19:07:25 +020010633 pipe_config->disable_cxsr = true;
10634 } else if (turn_off) {
Ville Syrjälä04548cb2017-04-21 21:14:29 +030010635 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
Ville Syrjäläb4ede6d2017-03-02 19:15:01 +020010636 pipe_config->update_wm_post = true;
Maarten Lankhorst92826fc2015-12-03 13:49:13 +010010637
Ville Syrjälä852eb002015-06-24 22:00:07 +030010638 /* must disable cxsr around plane enable/disable */
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010639 if (plane->id != PLANE_CURSOR)
Maarten Lankhorstab1d3a02015-11-19 16:07:14 +010010640 pipe_config->disable_cxsr = true;
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010641 } else if (intel_wm_need_update(&plane->base, plane_state)) {
Ville Syrjälä04548cb2017-04-21 21:14:29 +030010642 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
Ville Syrjäläb4ede6d2017-03-02 19:15:01 +020010643 /* FIXME bollocks */
10644 pipe_config->update_wm_pre = true;
10645 pipe_config->update_wm_post = true;
10646 }
Ville Syrjälä852eb002015-06-24 22:00:07 +030010647 }
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010648
Rodrigo Vivi8be6ca82015-08-24 16:38:23 -070010649 if (visible || was_visible)
Ville Syrjäläe9728bd2017-03-02 19:14:51 +020010650 pipe_config->fb_bits |= plane->frontbuffer_bit;
Ville Syrjäläa9ff8712015-06-24 21:59:34 +030010651
Maarten Lankhorst31ae71f2016-03-09 10:35:45 +010010652 /*
Ville Syrjälä8e7a4422018-10-04 15:15:27 +030010653 * ILK/SNB DVSACNTR/Sprite Enable
10654 * IVB SPR_CTL/Sprite Enable
10655 * "When in Self Refresh Big FIFO mode, a write to enable the
10656 * plane will be internally buffered and delayed while Big FIFO
10657 * mode is exiting."
Maarten Lankhorst31ae71f2016-03-09 10:35:45 +010010658 *
Ville Syrjälä8e7a4422018-10-04 15:15:27 +030010659 * Which means that enabling the sprite can take an extra frame
10660 * when we start in big FIFO mode (LP1+). Thus we need to drop
10661 * down to LP0 and wait for vblank in order to make sure the
10662 * sprite gets enabled on the next vblank after the register write.
10663 * Doing otherwise would risk enabling the sprite one frame after
10664 * we've already signalled flip completion. We can resume LP1+
10665 * once the sprite has been enabled.
10666 *
10667 *
10668 * WaCxSRDisabledForSpriteScaling:ivb
10669 * IVB SPR_SCALE/Scaling Enable
10670 * "Low Power watermarks must be disabled for at least one
10671 * frame before enabling sprite scaling, and kept disabled
10672 * until sprite scaling is disabled."
10673 *
10674 * ILK/SNB DVSASCALE/Scaling Enable
10675 * "When in Self Refresh Big FIFO mode, scaling enable will be
10676 * masked off while Big FIFO mode is exiting."
10677 *
10678 * Despite the w/a only being listed for IVB we assume that
10679 * the ILK/SNB note has similar ramifications, hence we apply
10680 * the w/a on all three platforms.
Maarten Lankhorst31ae71f2016-03-09 10:35:45 +010010681 */
Ville Syrjälä8e7a4422018-10-04 15:15:27 +030010682 if (plane->id == PLANE_SPRITE0 &&
10683 (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
10684 IS_IVYBRIDGE(dev_priv)) &&
10685 (turn_on || (!needs_scaling(old_plane_state) &&
10686 needs_scaling(to_intel_plane_state(plane_state)))))
Maarten Lankhorst31ae71f2016-03-09 10:35:45 +010010687 pipe_config->disable_lp_wm = true;
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010688
Maarten Lankhorstda20eab2015-06-15 12:33:44 +020010689 return 0;
10690}
10691
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010692static bool encoders_cloneable(const struct intel_encoder *a,
10693 const struct intel_encoder *b)
10694{
10695 /* masks could be asymmetric, so check both ways */
10696 return a == b || (a->cloneable & (1 << b->type) &&
10697 b->cloneable & (1 << a->type));
10698}
10699
10700static bool check_single_encoder_cloning(struct drm_atomic_state *state,
10701 struct intel_crtc *crtc,
10702 struct intel_encoder *encoder)
10703{
10704 struct intel_encoder *source_encoder;
10705 struct drm_connector *connector;
10706 struct drm_connector_state *connector_state;
10707 int i;
10708
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010010709 for_each_new_connector_in_state(state, connector, connector_state, i) {
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010710 if (connector_state->crtc != &crtc->base)
10711 continue;
10712
10713 source_encoder =
10714 to_intel_encoder(connector_state->best_encoder);
10715 if (!encoders_cloneable(encoder, source_encoder))
10716 return false;
10717 }
10718
10719 return true;
10720}
10721
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010722static int intel_crtc_atomic_check(struct drm_crtc *crtc,
10723 struct drm_crtc_state *crtc_state)
10724{
Maarten Lankhorstcf5a15b2015-06-15 12:33:41 +020010725 struct drm_device *dev = crtc->dev;
Chris Wilsonfac5e232016-07-04 11:34:36 +010010726 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010727 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Maarten Lankhorstcf5a15b2015-06-15 12:33:41 +020010728 struct intel_crtc_state *pipe_config =
10729 to_intel_crtc_state(crtc_state);
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010730 struct drm_atomic_state *state = crtc_state->state;
Maarten Lankhorst4d20cd82015-08-05 12:37:05 +020010731 int ret;
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010732 bool mode_changed = needs_modeset(crtc_state);
10733
Ville Syrjälä852eb002015-06-24 22:00:07 +030010734 if (mode_changed && !crtc_state->active)
Ville Syrjäläcaed3612016-03-09 19:07:25 +020010735 pipe_config->update_wm_post = true;
Maarten Lankhorsteddfcbc2015-06-15 12:33:53 +020010736
Maarten Lankhorstad421372015-06-15 12:33:42 +020010737 if (mode_changed && crtc_state->enable &&
10738 dev_priv->display.crtc_compute_clock &&
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020010739 !WARN_ON(pipe_config->shared_dpll)) {
Maarten Lankhorstad421372015-06-15 12:33:42 +020010740 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
10741 pipe_config);
10742 if (ret)
10743 return ret;
10744 }
10745
Lionel Landwerlin82cf4352016-03-16 10:57:16 +000010746 if (crtc_state->color_mgmt_changed) {
10747 ret = intel_color_check(crtc, crtc_state);
10748 if (ret)
10749 return ret;
Lionel Landwerline7852a42016-05-25 14:30:41 +010010750
10751 /*
10752 * Changing color management on Intel hardware is
10753 * handled as part of planes update.
10754 */
10755 crtc_state->planes_changed = true;
Lionel Landwerlin82cf4352016-03-16 10:57:16 +000010756 }
10757
Maarten Lankhorste435d6e2015-07-13 16:30:15 +020010758 ret = 0;
Matt Roper86c8bbb2015-09-24 15:53:16 -070010759 if (dev_priv->display.compute_pipe_wm) {
Maarten Lankhorste3bddde2016-03-01 11:07:22 +010010760 ret = dev_priv->display.compute_pipe_wm(pipe_config);
Matt Ropered4a6a72016-02-23 17:20:13 -080010761 if (ret) {
10762 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
Matt Roper86c8bbb2015-09-24 15:53:16 -070010763 return ret;
Matt Ropered4a6a72016-02-23 17:20:13 -080010764 }
10765 }
10766
10767 if (dev_priv->display.compute_intermediate_wm &&
10768 !to_intel_atomic_state(state)->skip_intermediate_wm) {
10769 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
10770 return 0;
10771
10772 /*
10773 * Calculate 'intermediate' watermarks that satisfy both the
10774 * old state and the new state. We can program these
10775 * immediately.
10776 */
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000010777 ret = dev_priv->display.compute_intermediate_wm(dev,
Matt Ropered4a6a72016-02-23 17:20:13 -080010778 intel_crtc,
10779 pipe_config);
10780 if (ret) {
10781 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
10782 return ret;
10783 }
Ville Syrjäläe3d54572016-05-13 10:10:42 -070010784 } else if (dev_priv->display.compute_intermediate_wm) {
10785 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
10786 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
Matt Roper86c8bbb2015-09-24 15:53:16 -070010787 }
10788
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000010789 if (INTEL_GEN(dev_priv) >= 9) {
Maarten Lankhorste435d6e2015-07-13 16:30:15 +020010790 if (mode_changed)
10791 ret = skl_update_scaler_crtc(pipe_config);
10792
10793 if (!ret)
Mahesh Kumar73b0ca82017-05-26 20:45:46 +053010794 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
10795 pipe_config);
10796 if (!ret)
Ander Conselvan de Oliveira6ebc6922017-02-23 09:15:59 +020010797 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
Maarten Lankhorste435d6e2015-07-13 16:30:15 +020010798 pipe_config);
10799 }
10800
Maarten Lankhorst24f28452017-11-22 19:39:01 +010010801 if (HAS_IPS(dev_priv))
10802 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
10803
Maarten Lankhorste435d6e2015-07-13 16:30:15 +020010804 return ret;
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010805}
10806
Jani Nikula65b38e02015-04-13 11:26:56 +030010807static const struct drm_crtc_helper_funcs intel_helper_funcs = {
Maarten Lankhorst6d3a1ce2015-06-15 12:33:40 +020010808 .atomic_check = intel_crtc_atomic_check,
Chris Wilsonf6e5b162011-04-12 18:06:51 +010010809};
10810
Ander Conselvan de Oliveirad29b2f92015-03-20 16:18:05 +020010811static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
10812{
10813 struct intel_connector *connector;
Daniel Vetterf9e905c2017-03-01 10:52:25 +010010814 struct drm_connector_list_iter conn_iter;
Ander Conselvan de Oliveirad29b2f92015-03-20 16:18:05 +020010815
Daniel Vetterf9e905c2017-03-01 10:52:25 +010010816 drm_connector_list_iter_begin(dev, &conn_iter);
10817 for_each_intel_connector_iter(connector, &conn_iter) {
Daniel Vetter8863dc72016-05-06 15:39:03 +020010818 if (connector->base.state->crtc)
Thomas Zimmermannef196b52018-06-18 13:01:50 +020010819 drm_connector_put(&connector->base);
Daniel Vetter8863dc72016-05-06 15:39:03 +020010820
Ander Conselvan de Oliveirad29b2f92015-03-20 16:18:05 +020010821 if (connector->base.encoder) {
10822 connector->base.state->best_encoder =
10823 connector->base.encoder;
10824 connector->base.state->crtc =
10825 connector->base.encoder->crtc;
Daniel Vetter8863dc72016-05-06 15:39:03 +020010826
Thomas Zimmermannef196b52018-06-18 13:01:50 +020010827 drm_connector_get(&connector->base);
Ander Conselvan de Oliveirad29b2f92015-03-20 16:18:05 +020010828 } else {
10829 connector->base.state->best_encoder = NULL;
10830 connector->base.state->crtc = NULL;
10831 }
10832 }
Daniel Vetterf9e905c2017-03-01 10:52:25 +010010833 drm_connector_list_iter_end(&conn_iter);
Ander Conselvan de Oliveirad29b2f92015-03-20 16:18:05 +020010834}
10835
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010836static void
Robin Schroereba905b2014-05-18 02:24:50 +020010837connected_sink_compute_bpp(struct intel_connector *connector,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010838 struct intel_crtc_state *pipe_config)
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010010839{
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010840 const struct drm_display_info *info = &connector->base.display_info;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010841 int bpp = pipe_config->pipe_bpp;
10842
10843 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010844 connector->base.base.id,
10845 connector->base.name);
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010846
10847 /* Don't use an invalid EDID bpc value */
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010848 if (info->bpc != 0 && info->bpc * 3 < bpp) {
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010849 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010850 bpp, info->bpc * 3);
10851 pipe_config->pipe_bpp = info->bpc * 3;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010852 }
10853
Mario Kleiner196f9542016-07-06 12:05:45 +020010854 /* Clamp bpp to 8 on screens without EDID 1.4 */
Ville Syrjälä6a2a5c52016-09-28 16:51:42 +030010855 if (info->bpc == 0 && bpp > 24) {
Mario Kleiner196f9542016-07-06 12:05:45 +020010856 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
10857 bpp);
10858 pipe_config->pipe_bpp = 24;
Daniel Vetter050f7ae2013-06-02 13:26:23 +020010859 }
10860}
10861
/*
 * Pick the platform's maximum pipe bpp, store it in pipe_config, then
 * let each connected sink clamp pipe_config->pipe_bpp further.
 *
 * Note: the return value is the platform baseline bpp *before* sink
 * clamping; the clamped value lives in pipe_config->pipe_bpp.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	/* Platform ceiling: 30 bpp on G4X/VLV/CHV, 36 on gen5+, else 24. */
	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;


	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}
10896
Daniel Vetter644db712013-09-19 14:53:58 +020010897static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
10898{
10899 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
10900 "type: 0x%x flags: 0x%x\n",
Damien Lespiau13428302013-09-25 16:45:36 +010010901 mode->crtc_clock,
Daniel Vetter644db712013-09-19 14:53:58 +020010902 mode->crtc_hdisplay, mode->crtc_hsync_start,
10903 mode->crtc_hsync_end, mode->crtc_htotal,
10904 mode->crtc_vdisplay, mode->crtc_vsync_start,
10905 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
10906}
10907
Tvrtko Ursulinf6982332016-11-17 12:30:08 +000010908static inline void
10909intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
Tvrtko Ursulina4309652016-11-17 12:30:09 +000010910 unsigned int lane_count, struct intel_link_m_n *m_n)
Tvrtko Ursulinf6982332016-11-17 12:30:08 +000010911{
Tvrtko Ursulina4309652016-11-17 12:30:09 +000010912 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
10913 id, lane_count,
Tvrtko Ursulinf6982332016-11-17 12:30:08 +000010914 m_n->gmch_m, m_n->gmch_n,
10915 m_n->link_m, m_n->link_n, m_n->tu);
10916}
10917
Ville Syrjälä40b2be42017-10-10 15:11:59 +030010918#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
10919
10920static const char * const output_type_str[] = {
10921 OUTPUT_TYPE(UNUSED),
10922 OUTPUT_TYPE(ANALOG),
10923 OUTPUT_TYPE(DVO),
10924 OUTPUT_TYPE(SDVO),
10925 OUTPUT_TYPE(LVDS),
10926 OUTPUT_TYPE(TVOUT),
10927 OUTPUT_TYPE(HDMI),
10928 OUTPUT_TYPE(DP),
10929 OUTPUT_TYPE(EDP),
10930 OUTPUT_TYPE(DSI),
Ville Syrjälä7e732ca2017-10-27 22:31:24 +030010931 OUTPUT_TYPE(DDI),
Ville Syrjälä40b2be42017-10-10 15:11:59 +030010932 OUTPUT_TYPE(DP_MST),
10933};
10934
10935#undef OUTPUT_TYPE
10936
10937static void snprintf_output_types(char *buf, size_t len,
10938 unsigned int output_types)
10939{
10940 char *str = buf;
10941 int i;
10942
10943 str[0] = '\0';
10944
10945 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
10946 int r;
10947
10948 if ((output_types & BIT(i)) == 0)
10949 continue;
10950
10951 r = snprintf(str, len, "%s%s",
10952 str != buf ? "," : "", output_type_str[i]);
10953 if (r >= len)
10954 break;
10955 str += r;
10956 len -= r;
10957
10958 output_types &= ~BIT(i);
10959 }
10960
10961 WARN_ON_ONCE(output_types != 0);
10962}
10963
Shashank Sharmad9facae2018-10-12 11:53:07 +053010964static const char * const output_format_str[] = {
10965 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
10966 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
Shashank Sharma33b7f3e2018-10-12 11:53:08 +053010967 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
Shashank Sharma8c79f842018-10-12 11:53:09 +053010968 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
Shashank Sharmad9facae2018-10-12 11:53:07 +053010969};
10970
10971static const char *output_formats(enum intel_output_format format)
10972{
Shashank Sharma33b7f3e2018-10-12 11:53:08 +053010973 if (format >= ARRAY_SIZE(output_format_str))
Shashank Sharmad9facae2018-10-12 11:53:07 +053010974 format = INTEL_OUTPUT_FORMAT_INVALID;
10975 return output_format_str[format];
10976}
10977
Daniel Vetterc0b03412013-05-28 12:05:54 +020010978static void intel_dump_pipe_config(struct intel_crtc *crtc,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020010979 struct intel_crtc_state *pipe_config,
Daniel Vetterc0b03412013-05-28 12:05:54 +020010980 const char *context)
10981{
Chandra Konduru6a60cd82015-04-07 15:28:40 -070010982 struct drm_device *dev = crtc->base.dev;
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010010983 struct drm_i915_private *dev_priv = to_i915(dev);
Chandra Konduru6a60cd82015-04-07 15:28:40 -070010984 struct drm_plane *plane;
10985 struct intel_plane *intel_plane;
10986 struct intel_plane_state *state;
10987 struct drm_framebuffer *fb;
Ville Syrjälä40b2be42017-10-10 15:11:59 +030010988 char buf[64];
Chandra Konduru6a60cd82015-04-07 15:28:40 -070010989
Tvrtko Ursulin66766e42016-11-17 12:30:10 +000010990 DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
10991 crtc->base.base.id, crtc->base.name, context);
Daniel Vetterc0b03412013-05-28 12:05:54 +020010992
Ville Syrjälä40b2be42017-10-10 15:11:59 +030010993 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
10994 DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
10995 buf, pipe_config->output_types);
10996
Shashank Sharmad9facae2018-10-12 11:53:07 +053010997 DRM_DEBUG_KMS("output format: %s\n",
10998 output_formats(pipe_config->output_format));
10999
Tvrtko Ursulin2c894292016-11-17 12:30:11 +000011000 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11001 transcoder_name(pipe_config->cpu_transcoder),
Daniel Vetterc0b03412013-05-28 12:05:54 +020011002 pipe_config->pipe_bpp, pipe_config->dither);
Tvrtko Ursulina4309652016-11-17 12:30:09 +000011003
11004 if (pipe_config->has_pch_encoder)
11005 intel_dump_m_n_config(pipe_config, "fdi",
11006 pipe_config->fdi_lanes,
11007 &pipe_config->fdi_m_n);
Vandana Kannanb95af8b2014-08-05 07:51:23 -070011008
Tvrtko Ursulinf6982332016-11-17 12:30:08 +000011009 if (intel_crtc_has_dp_encoder(pipe_config)) {
Tvrtko Ursulina4309652016-11-17 12:30:09 +000011010 intel_dump_m_n_config(pipe_config, "dp m_n",
11011 pipe_config->lane_count, &pipe_config->dp_m_n);
Tvrtko Ursulind806e682016-11-17 15:44:09 +000011012 if (pipe_config->has_drrs)
11013 intel_dump_m_n_config(pipe_config, "dp m2_n2",
11014 pipe_config->lane_count,
11015 &pipe_config->dp_m2_n2);
Tvrtko Ursulinf6982332016-11-17 12:30:08 +000011016 }
Vandana Kannanb95af8b2014-08-05 07:51:23 -070011017
Daniel Vetter55072d12014-11-20 16:10:28 +010011018 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
Tvrtko Ursulin2c894292016-11-17 12:30:11 +000011019 pipe_config->has_audio, pipe_config->has_infoframe);
Daniel Vetter55072d12014-11-20 16:10:28 +010011020
Daniel Vetterc0b03412013-05-28 12:05:54 +020011021 DRM_DEBUG_KMS("requested mode:\n");
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011022 drm_mode_debug_printmodeline(&pipe_config->base.mode);
Daniel Vetterc0b03412013-05-28 12:05:54 +020011023 DRM_DEBUG_KMS("adjusted mode:\n");
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011024 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11025 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
Ville Syrjäläa7d1b3f2017-01-26 21:50:31 +020011026 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
Tvrtko Ursulin2c894292016-11-17 12:30:11 +000011027 pipe_config->port_clock,
Ville Syrjäläa7d1b3f2017-01-26 21:50:31 +020011028 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
11029 pipe_config->pixel_rate);
Tvrtko Ursulindd2f6162016-11-17 12:30:12 +000011030
11031 if (INTEL_GEN(dev_priv) >= 9)
11032 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11033 crtc->num_scalers,
11034 pipe_config->scaler_state.scaler_users,
11035 pipe_config->scaler_state.scaler_id);
Tvrtko Ursulina74f8372016-11-17 12:30:13 +000011036
11037 if (HAS_GMCH_DISPLAY(dev_priv))
11038 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11039 pipe_config->gmch_pfit.control,
11040 pipe_config->gmch_pfit.pgm_ratios,
11041 pipe_config->gmch_pfit.lvds_border_bits);
11042 else
11043 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
11044 pipe_config->pch_pfit.pos,
11045 pipe_config->pch_pfit.size,
Tvrtko Ursulin08c4d7f2016-11-17 12:30:14 +000011046 enableddisabled(pipe_config->pch_pfit.enabled));
Tvrtko Ursulina74f8372016-11-17 12:30:13 +000011047
Tvrtko Ursulin2c894292016-11-17 12:30:11 +000011048 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11049 pipe_config->ips_enabled, pipe_config->double_wide);
Chandra Konduru6a60cd82015-04-07 15:28:40 -070011050
Ander Conselvan de Oliveiraf50b79f2016-12-29 17:22:12 +020011051 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
Tvrtko Ursulin415ff0f2015-05-14 13:38:31 +010011052
Chandra Konduru6a60cd82015-04-07 15:28:40 -070011053 DRM_DEBUG_KMS("planes on this crtc\n");
11054 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
Eric Engestromb3c11ac2016-11-12 01:12:56 +000011055 struct drm_format_name_buf format_name;
Chandra Konduru6a60cd82015-04-07 15:28:40 -070011056 intel_plane = to_intel_plane(plane);
11057 if (intel_plane->pipe != crtc->pipe)
11058 continue;
11059
11060 state = to_intel_plane_state(plane->state);
11061 fb = state->base.fb;
11062 if (!fb) {
Ville Syrjälä1d577e02016-05-27 20:59:25 +030011063 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
11064 plane->base.id, plane->name, state->scaler_id);
Chandra Konduru6a60cd82015-04-07 15:28:40 -070011065 continue;
11066 }
11067
Tvrtko Ursulindd2f6162016-11-17 12:30:12 +000011068 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
11069 plane->base.id, plane->name,
Eric Engestromb3c11ac2016-11-12 01:12:56 +000011070 fb->base.id, fb->width, fb->height,
Ville Syrjälä438b74a2016-12-14 23:32:55 +020011071 drm_get_format_name(fb->format->format, &format_name));
Tvrtko Ursulindd2f6162016-11-17 12:30:12 +000011072 if (INTEL_GEN(dev_priv) >= 9)
11073 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
11074 state->scaler_id,
11075 state->base.src.x1 >> 16,
11076 state->base.src.y1 >> 16,
11077 drm_rect_width(&state->base.src) >> 16,
11078 drm_rect_height(&state->base.src) >> 16,
11079 state->base.dst.x1, state->base.dst.y1,
11080 drm_rect_width(&state->base.dst),
11081 drm_rect_height(&state->base.dst));
Chandra Konduru6a60cd82015-04-07 15:28:40 -070011082 }
Daniel Vetterc0b03412013-05-28 12:05:54 +020011083}
11084
Ander Conselvan de Oliveira5448a002015-04-02 14:47:59 +030011085static bool check_digital_port_conflicts(struct drm_atomic_state *state)
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011086{
Ander Conselvan de Oliveira5448a002015-04-02 14:47:59 +030011087 struct drm_device *dev = state->dev;
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030011088 struct drm_connector *connector;
Gustavo Padovan2fd96b42017-05-11 16:10:44 -030011089 struct drm_connector_list_iter conn_iter;
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011090 unsigned int used_ports = 0;
Ville Syrjälä477321e2016-07-28 17:50:40 +030011091 unsigned int used_mst_ports = 0;
Maarten Lankhorstbd67a8c2018-02-15 10:14:25 +010011092 bool ret = true;
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011093
11094 /*
11095 * Walk the connector list instead of the encoder
11096 * list to detect the problem on ddi platforms
11097 * where there's just one encoder per digital port.
11098 */
Gustavo Padovan2fd96b42017-05-11 16:10:44 -030011099 drm_connector_list_iter_begin(dev, &conn_iter);
11100 drm_for_each_connector_iter(connector, &conn_iter) {
Ville Syrjälä0bff4852015-12-10 18:22:31 +020011101 struct drm_connector_state *connector_state;
11102 struct intel_encoder *encoder;
11103
Maarten Lankhorst8b694492018-04-09 14:46:55 +020011104 connector_state = drm_atomic_get_new_connector_state(state, connector);
Ville Syrjälä0bff4852015-12-10 18:22:31 +020011105 if (!connector_state)
11106 connector_state = connector->state;
11107
Ander Conselvan de Oliveira5448a002015-04-02 14:47:59 +030011108 if (!connector_state->best_encoder)
11109 continue;
11110
11111 encoder = to_intel_encoder(connector_state->best_encoder);
11112
11113 WARN_ON(!connector_state->crtc);
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011114
11115 switch (encoder->type) {
11116 unsigned int port_mask;
Ville Syrjälä7e732ca2017-10-27 22:31:24 +030011117 case INTEL_OUTPUT_DDI:
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010011118 if (WARN_ON(!HAS_DDI(to_i915(dev))))
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011119 break;
Gustavo A. R. Silvaf0d759f2018-06-28 17:35:41 -050011120 /* else: fall through */
Ville Syrjäläcca05022016-06-22 21:57:06 +030011121 case INTEL_OUTPUT_DP:
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011122 case INTEL_OUTPUT_HDMI:
11123 case INTEL_OUTPUT_EDP:
Ville Syrjälä8f4f2792017-11-09 17:24:34 +020011124 port_mask = 1 << encoder->port;
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011125
11126 /* the same port mustn't appear more than once */
11127 if (used_ports & port_mask)
Maarten Lankhorstbd67a8c2018-02-15 10:14:25 +010011128 ret = false;
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011129
11130 used_ports |= port_mask;
Ville Syrjälä477321e2016-07-28 17:50:40 +030011131 break;
11132 case INTEL_OUTPUT_DP_MST:
11133 used_mst_ports |=
Ville Syrjälä8f4f2792017-11-09 17:24:34 +020011134 1 << encoder->port;
Ville Syrjälä477321e2016-07-28 17:50:40 +030011135 break;
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011136 default:
11137 break;
11138 }
11139 }
Gustavo Padovan2fd96b42017-05-11 16:10:44 -030011140 drm_connector_list_iter_end(&conn_iter);
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011141
Ville Syrjälä477321e2016-07-28 17:50:40 +030011142 /* can't mix MST and SST/HDMI on the same port */
11143 if (used_ports & used_mst_ports)
11144 return false;
11145
Maarten Lankhorstbd67a8c2018-02-15 10:14:25 +010011146 return ret;
Ville Syrjälä00f0b372014-12-02 14:10:46 +020011147}
11148
Ander Conselvan de Oliveira83a57152015-03-20 16:18:03 +020011149static void
11150clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
11151{
Ville Syrjäläff32c542017-03-02 19:14:57 +020011152 struct drm_i915_private *dev_priv =
11153 to_i915(crtc_state->base.crtc->dev);
Chandra Konduru663a3642015-04-07 15:28:41 -070011154 struct intel_crtc_scaler_state scaler_state;
Ander Conselvan de Oliveira4978cc92015-04-21 17:13:21 +030011155 struct intel_dpll_hw_state dpll_hw_state;
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020011156 struct intel_shared_dpll *shared_dpll;
Ville Syrjäläff32c542017-03-02 19:14:57 +020011157 struct intel_crtc_wm_state wm_state;
Ville Syrjälä6e644622017-08-17 17:55:09 +030011158 bool force_thru, ips_force_disable;
Ander Conselvan de Oliveira83a57152015-03-20 16:18:03 +020011159
Ander Conselvan de Oliveira7546a382015-05-20 09:03:27 +030011160 /* FIXME: before the switch to atomic started, a new pipe_config was
11161 * kzalloc'd. Code that depends on any field being zero should be
11162 * fixed, so that the crtc_state can be safely duplicated. For now,
11163 * only fields that are know to not cause problems are preserved. */
11164
Chandra Konduru663a3642015-04-07 15:28:41 -070011165 scaler_state = crtc_state->scaler_state;
Ander Conselvan de Oliveira4978cc92015-04-21 17:13:21 +030011166 shared_dpll = crtc_state->shared_dpll;
11167 dpll_hw_state = crtc_state->dpll_hw_state;
Maarten Lankhorstc4e2d042015-08-05 12:36:59 +020011168 force_thru = crtc_state->pch_pfit.force_thru;
Ville Syrjälä6e644622017-08-17 17:55:09 +030011169 ips_force_disable = crtc_state->ips_force_disable;
Ville Syrjälä04548cb2017-04-21 21:14:29 +030011170 if (IS_G4X(dev_priv) ||
11171 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Ville Syrjäläff32c542017-03-02 19:14:57 +020011172 wm_state = crtc_state->wm;
Ander Conselvan de Oliveira4978cc92015-04-21 17:13:21 +030011173
Chris Wilsond2fa80a2017-03-03 15:46:44 +000011174 /* Keep base drm_crtc_state intact, only clear our extended struct */
11175 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
11176 memset(&crtc_state->base + 1, 0,
11177 sizeof(*crtc_state) - sizeof(crtc_state->base));
Ander Conselvan de Oliveira4978cc92015-04-21 17:13:21 +030011178
Chandra Konduru663a3642015-04-07 15:28:41 -070011179 crtc_state->scaler_state = scaler_state;
Ander Conselvan de Oliveira4978cc92015-04-21 17:13:21 +030011180 crtc_state->shared_dpll = shared_dpll;
11181 crtc_state->dpll_hw_state = dpll_hw_state;
Maarten Lankhorstc4e2d042015-08-05 12:36:59 +020011182 crtc_state->pch_pfit.force_thru = force_thru;
Ville Syrjälä6e644622017-08-17 17:55:09 +030011183 crtc_state->ips_force_disable = ips_force_disable;
Ville Syrjälä04548cb2017-04-21 21:14:29 +030011184 if (IS_G4X(dev_priv) ||
11185 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Ville Syrjäläff32c542017-03-02 19:14:57 +020011186 crtc_state->wm = wm_state;
Ander Conselvan de Oliveira83a57152015-03-20 16:18:03 +020011187}
11188
Ander Conselvan de Oliveira548ee152015-04-21 17:13:02 +030011189static int
Daniel Vetterb8cecdf2013-03-27 00:44:50 +010011190intel_modeset_pipe_config(struct drm_crtc *crtc,
Maarten Lankhorstb3592832015-06-15 12:33:38 +020011191 struct intel_crtc_state *pipe_config)
Daniel Vetter7758a112012-07-08 19:40:39 +020011192{
Maarten Lankhorstb3592832015-06-15 12:33:38 +020011193 struct drm_atomic_state *state = pipe_config->base.state;
Daniel Vetter7758a112012-07-08 19:40:39 +020011194 struct intel_encoder *encoder;
Ander Conselvan de Oliveirada3ced2982015-04-21 17:12:59 +030011195 struct drm_connector *connector;
Ander Conselvan de Oliveira0b901872015-03-20 16:18:08 +020011196 struct drm_connector_state *connector_state;
Daniel Vetterd328c9d2015-04-10 16:22:37 +020011197 int base_bpp, ret = -EINVAL;
Ander Conselvan de Oliveira0b901872015-03-20 16:18:08 +020011198 int i;
Daniel Vettere29c22c2013-02-21 00:00:16 +010011199 bool retry = true;
Daniel Vetter7758a112012-07-08 19:40:39 +020011200
Ander Conselvan de Oliveira83a57152015-03-20 16:18:03 +020011201 clear_intel_crtc_state(pipe_config);
Daniel Vetter7758a112012-07-08 19:40:39 +020011202
Daniel Vettere143a212013-07-04 12:01:15 +020011203 pipe_config->cpu_transcoder =
11204 (enum transcoder) to_intel_crtc(crtc)->pipe;
Daniel Vetterb8cecdf2013-03-27 00:44:50 +010011205
Imre Deak2960bc92013-07-30 13:36:32 +030011206 /*
11207 * Sanitize sync polarity flags based on requested ones. If neither
11208 * positive or negative polarity is requested, treat this as meaning
11209 * negative polarity.
11210 */
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011211 if (!(pipe_config->base.adjusted_mode.flags &
Imre Deak2960bc92013-07-30 13:36:32 +030011212 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011213 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
Imre Deak2960bc92013-07-30 13:36:32 +030011214
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011215 if (!(pipe_config->base.adjusted_mode.flags &
Imre Deak2960bc92013-07-30 13:36:32 +030011216 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011217 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
Imre Deak2960bc92013-07-30 13:36:32 +030011218
Daniel Vetterd328c9d2015-04-10 16:22:37 +020011219 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
11220 pipe_config);
11221 if (base_bpp < 0)
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011222 goto fail;
11223
Ville Syrjäläe41a56b2013-10-01 22:52:14 +030011224 /*
11225 * Determine the real pipe dimensions. Note that stereo modes can
11226 * increase the actual pipe size due to the frame doubling and
11227 * insertion of additional space for blanks between the frame. This
11228 * is stored in the crtc timings. We use the requested mode to do this
11229 * computation to clearly distinguish it from the adjusted mode, which
11230 * can be changed by the connectors in the below retry loop.
11231 */
Daniel Vetter196cd5d2017-01-25 07:26:56 +010011232 drm_mode_get_hv_timing(&pipe_config->base.mode,
Gustavo Padovanecb7e162014-12-01 15:40:09 -080011233 &pipe_config->pipe_src_w,
11234 &pipe_config->pipe_src_h);
Ville Syrjäläe41a56b2013-10-01 22:52:14 +030011235
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011236 for_each_new_connector_in_state(state, connector, connector_state, i) {
Ville Syrjälä253c84c2016-06-22 21:57:01 +030011237 if (connector_state->crtc != crtc)
11238 continue;
11239
11240 encoder = to_intel_encoder(connector_state->best_encoder);
11241
Ville Syrjäläe25148d2016-06-22 21:57:09 +030011242 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
11243 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11244 goto fail;
11245 }
11246
Ville Syrjälä253c84c2016-06-22 21:57:01 +030011247 /*
11248 * Determine output_types before calling the .compute_config()
11249 * hooks so that the hooks can use this information safely.
11250 */
Ville Syrjälä7e732ca2017-10-27 22:31:24 +030011251 if (encoder->compute_output_type)
11252 pipe_config->output_types |=
11253 BIT(encoder->compute_output_type(encoder, pipe_config,
11254 connector_state));
11255 else
11256 pipe_config->output_types |= BIT(encoder->type);
Ville Syrjälä253c84c2016-06-22 21:57:01 +030011257 }
11258
Daniel Vettere29c22c2013-02-21 00:00:16 +010011259encoder_retry:
Daniel Vetteref1b4602013-06-01 17:17:04 +020011260 /* Ensure the port clock defaults are reset when retrying. */
Daniel Vetterff9a6752013-06-01 17:16:21 +020011261 pipe_config->port_clock = 0;
Daniel Vetteref1b4602013-06-01 17:17:04 +020011262 pipe_config->pixel_multiplier = 1;
Daniel Vetterff9a6752013-06-01 17:16:21 +020011263
Daniel Vetter135c81b2013-07-21 21:37:09 +020011264 /* Fill in default crtc timings, allow encoders to overwrite them. */
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011265 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
11266 CRTC_STEREO_DOUBLE);
Daniel Vetter135c81b2013-07-21 21:37:09 +020011267
Daniel Vetter7758a112012-07-08 19:40:39 +020011268 /* Pass our mode to the connectors and the CRTC to give them a chance to
11269 * adjust it according to limitations or connector properties, and also
11270 * a chance to reject the mode entirely.
11271 */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010011272 for_each_new_connector_in_state(state, connector, connector_state, i) {
Ander Conselvan de Oliveira0b901872015-03-20 16:18:08 +020011273 if (connector_state->crtc != crtc)
11274 continue;
11275
11276 encoder = to_intel_encoder(connector_state->best_encoder);
11277
Maarten Lankhorst0a478c22016-08-09 17:04:05 +020011278 if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
Daniel Vetterefea6e82013-07-21 21:36:59 +020011279 DRM_DEBUG_KMS("Encoder config failure\n");
Daniel Vetter7758a112012-07-08 19:40:39 +020011280 goto fail;
11281 }
11282 }
11283
Daniel Vetterff9a6752013-06-01 17:16:21 +020011284 /* Set default port clock if not overwritten by the encoder. Needs to be
11285 * done afterwards in case the encoder adjusts the mode. */
11286 if (!pipe_config->port_clock)
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011287 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
Damien Lespiau241bfc32013-09-25 16:45:37 +010011288 * pipe_config->pixel_multiplier;
Daniel Vetterff9a6752013-06-01 17:16:21 +020011289
Daniel Vettera43f6e02013-06-07 23:10:32 +020011290 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
Daniel Vettere29c22c2013-02-21 00:00:16 +010011291 if (ret < 0) {
Daniel Vetter7758a112012-07-08 19:40:39 +020011292 DRM_DEBUG_KMS("CRTC fixup failed\n");
11293 goto fail;
11294 }
Daniel Vettere29c22c2013-02-21 00:00:16 +010011295
11296 if (ret == RETRY) {
11297 if (WARN(!retry, "loop in pipe configuration computation\n")) {
11298 ret = -EINVAL;
11299 goto fail;
11300 }
11301
11302 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
11303 retry = false;
11304 goto encoder_retry;
11305 }
11306
Daniel Vettere8fa4272015-08-12 11:43:34 +020011307 /* Dithering seems to not pass-through bits correctly when it should, so
Manasi Navare611032b2017-01-24 08:21:49 -080011308 * only enable it on 6bpc panels and when its not a compliance
11309 * test requesting 6bpc video pattern.
11310 */
11311 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
11312 !pipe_config->dither_force_disable;
Daniel Vetter62f0ace2015-08-26 18:57:26 +020011313 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
Daniel Vetterd328c9d2015-04-10 16:22:37 +020011314 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
Daniel Vetter4e53c2e2013-03-27 00:44:58 +010011315
Daniel Vetter7758a112012-07-08 19:40:39 +020011316fail:
Ander Conselvan de Oliveira548ee152015-04-21 17:13:02 +030011317 return ret;
Daniel Vetter7758a112012-07-08 19:40:39 +020011318}
11319
Ville Syrjälä3bd26262013-09-06 23:29:02 +030011320static bool intel_fuzzy_clock_check(int clock1, int clock2)
Jesse Barnesf1f644d2013-06-27 00:39:25 +030011321{
Ville Syrjälä3bd26262013-09-06 23:29:02 +030011322 int diff;
Jesse Barnesf1f644d2013-06-27 00:39:25 +030011323
11324 if (clock1 == clock2)
11325 return true;
11326
11327 if (!clock1 || !clock2)
11328 return false;
11329
11330 diff = abs(clock1 - clock2);
11331
11332 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
11333 return true;
11334
11335 return false;
11336}
11337
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011338static bool
11339intel_compare_m_n(unsigned int m, unsigned int n,
11340 unsigned int m2, unsigned int n2,
11341 bool exact)
11342{
11343 if (m == m2 && n == n2)
11344 return true;
11345
11346 if (exact || !m || !n || !m2 || !n2)
11347 return false;
11348
11349 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11350
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011351 if (n > n2) {
11352 while (n > n2) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011353 m2 <<= 1;
11354 n2 <<= 1;
11355 }
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011356 } else if (n < n2) {
11357 while (n < n2) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011358 m <<= 1;
11359 n <<= 1;
11360 }
11361 }
11362
Maarten Lankhorst31d10b52016-01-06 13:54:43 +010011363 if (n != n2)
11364 return false;
11365
11366 return intel_fuzzy_clock_check(m, m2);
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011367}
11368
11369static bool
11370intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11371 struct intel_link_m_n *m2_n2,
11372 bool adjust)
11373{
11374 if (m_n->tu == m2_n2->tu &&
11375 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11376 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11377 intel_compare_m_n(m_n->link_m, m_n->link_n,
11378 m2_n2->link_m, m2_n2->link_n, !adjust)) {
11379 if (adjust)
11380 *m2_n2 = *m_n;
11381
11382 return true;
11383 }
11384
11385 return false;
11386}
11387
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011388static void __printf(3, 4)
11389pipe_config_err(bool adjust, const char *name, const char *format, ...)
11390{
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011391 struct va_format vaf;
11392 va_list args;
11393
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011394 va_start(args, format);
11395 vaf.fmt = format;
11396 vaf.va = &args;
11397
Joe Perches99a95482018-03-13 15:02:15 -070011398 if (adjust)
11399 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
11400 else
11401 drm_err("mismatch in %s %pV", name, &vaf);
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011402
11403 va_end(args);
11404}
11405
Daniel Vetter0e8ffe12013-03-28 10:42:00 +010011406static bool
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000011407intel_pipe_config_compare(struct drm_i915_private *dev_priv,
Ander Conselvan de Oliveira5cec2582015-01-15 14:55:21 +020011408 struct intel_crtc_state *current_config,
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011409 struct intel_crtc_state *pipe_config,
11410 bool adjust)
Daniel Vetter0e8ffe12013-03-28 10:42:00 +010011411{
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011412 bool ret = true;
Maarten Lankhorst4493e092017-11-10 12:34:56 +010011413 bool fixup_inherited = adjust &&
11414 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
11415 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011416
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011417#define PIPE_CONF_CHECK_X(name) do { \
Daniel Vetter66e985c2013-06-05 13:34:20 +020011418 if (current_config->name != pipe_config->name) { \
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011419 pipe_config_err(adjust, __stringify(name), \
Daniel Vetter66e985c2013-06-05 13:34:20 +020011420 "(expected 0x%08x, found 0x%08x)\n", \
11421 current_config->name, \
11422 pipe_config->name); \
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011423 ret = false; \
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011424 } \
11425} while (0)
Daniel Vetter66e985c2013-06-05 13:34:20 +020011426
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011427#define PIPE_CONF_CHECK_I(name) do { \
Daniel Vetter08a24032013-04-19 11:25:34 +020011428 if (current_config->name != pipe_config->name) { \
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011429 pipe_config_err(adjust, __stringify(name), \
Daniel Vetter08a24032013-04-19 11:25:34 +020011430 "(expected %i, found %i)\n", \
11431 current_config->name, \
11432 pipe_config->name); \
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011433 ret = false; \
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011434 } \
11435} while (0)
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011436
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011437#define PIPE_CONF_CHECK_BOOL(name) do { \
Maarten Lankhorstd640bf72017-11-10 12:34:55 +010011438 if (current_config->name != pipe_config->name) { \
11439 pipe_config_err(adjust, __stringify(name), \
11440 "(expected %s, found %s)\n", \
11441 yesno(current_config->name), \
11442 yesno(pipe_config->name)); \
11443 ret = false; \
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011444 } \
11445} while (0)
Maarten Lankhorstd640bf72017-11-10 12:34:55 +010011446
Maarten Lankhorst4493e092017-11-10 12:34:56 +010011447/*
11448 * Checks state where we only read out the enabling, but not the entire
11449 * state itself (like full infoframes or ELD for audio). These states
11450 * require a full modeset on bootup to fix up.
11451 */
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011452#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
Maarten Lankhorst4493e092017-11-10 12:34:56 +010011453 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
11454 PIPE_CONF_CHECK_BOOL(name); \
11455 } else { \
11456 pipe_config_err(adjust, __stringify(name), \
11457 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
11458 yesno(current_config->name), \
11459 yesno(pipe_config->name)); \
11460 ret = false; \
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011461 } \
11462} while (0)
Maarten Lankhorst4493e092017-11-10 12:34:56 +010011463
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011464#define PIPE_CONF_CHECK_P(name) do { \
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020011465 if (current_config->name != pipe_config->name) { \
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011466 pipe_config_err(adjust, __stringify(name), \
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020011467 "(expected %p, found %p)\n", \
11468 current_config->name, \
11469 pipe_config->name); \
11470 ret = false; \
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011471 } \
11472} while (0)
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020011473
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011474#define PIPE_CONF_CHECK_M_N(name) do { \
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011475 if (!intel_compare_link_m_n(&current_config->name, \
11476 &pipe_config->name,\
11477 adjust)) { \
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011478 pipe_config_err(adjust, __stringify(name), \
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011479 "(expected tu %i gmch %i/%i link %i/%i, " \
11480 "found tu %i, gmch %i/%i link %i/%i)\n", \
11481 current_config->name.tu, \
11482 current_config->name.gmch_m, \
11483 current_config->name.gmch_n, \
11484 current_config->name.link_m, \
11485 current_config->name.link_n, \
11486 pipe_config->name.tu, \
11487 pipe_config->name.gmch_m, \
11488 pipe_config->name.gmch_n, \
11489 pipe_config->name.link_m, \
11490 pipe_config->name.link_n); \
11491 ret = false; \
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011492 } \
11493} while (0)
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011494
Daniel Vetter55c561a2016-03-30 11:34:36 +020011495/* This is required for BDW+ where there is only one set of registers for
11496 * switching between high and low RR.
11497 * This macro can be used whenever a comparison has to be made between one
11498 * hw state and multiple sw state variables.
11499 */
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011500#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011501 if (!intel_compare_link_m_n(&current_config->name, \
11502 &pipe_config->name, adjust) && \
11503 !intel_compare_link_m_n(&current_config->alt_name, \
11504 &pipe_config->name, adjust)) { \
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011505 pipe_config_err(adjust, __stringify(name), \
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011506 "(expected tu %i gmch %i/%i link %i/%i, " \
11507 "or tu %i gmch %i/%i link %i/%i, " \
11508 "found tu %i, gmch %i/%i link %i/%i)\n", \
11509 current_config->name.tu, \
11510 current_config->name.gmch_m, \
11511 current_config->name.gmch_n, \
11512 current_config->name.link_m, \
11513 current_config->name.link_n, \
11514 current_config->alt_name.tu, \
11515 current_config->alt_name.gmch_m, \
11516 current_config->alt_name.gmch_n, \
11517 current_config->alt_name.link_m, \
11518 current_config->alt_name.link_n, \
11519 pipe_config->name.tu, \
11520 pipe_config->name.gmch_m, \
11521 pipe_config->name.gmch_n, \
11522 pipe_config->name.link_m, \
11523 pipe_config->name.link_n); \
11524 ret = false; \
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011525 } \
11526} while (0)
Daniel Vetter88adfff2013-03-28 10:42:01 +010011527
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011528#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
Daniel Vetter1bd1bd82013-04-29 21:56:12 +020011529 if ((current_config->name ^ pipe_config->name) & (mask)) { \
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011530 pipe_config_err(adjust, __stringify(name), \
11531 "(%x) (expected %i, found %i)\n", \
11532 (mask), \
Daniel Vetter1bd1bd82013-04-29 21:56:12 +020011533 current_config->name & (mask), \
11534 pipe_config->name & (mask)); \
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011535 ret = false; \
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011536 } \
11537} while (0)
Daniel Vetter1bd1bd82013-04-29 21:56:12 +020011538
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011539#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
Ville Syrjälä5e550652013-09-06 23:29:07 +030011540 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
Tvrtko Ursulin4e8048f2016-12-06 10:50:20 +000011541 pipe_config_err(adjust, __stringify(name), \
Ville Syrjälä5e550652013-09-06 23:29:07 +030011542 "(expected %i, found %i)\n", \
11543 current_config->name, \
11544 pipe_config->name); \
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011545 ret = false; \
Ville Syrjäläeadd2722018-03-16 20:36:25 +020011546 } \
11547} while (0)
Ville Syrjälä5e550652013-09-06 23:29:07 +030011548
Daniel Vetterbb760062013-06-06 14:55:52 +020011549#define PIPE_CONF_QUIRK(quirk) \
11550 ((current_config->quirks | pipe_config->quirks) & (quirk))
11551
Daniel Vettereccb1402013-05-22 00:50:22 +020011552 PIPE_CONF_CHECK_I(cpu_transcoder);
11553
Maarten Lankhorstd640bf72017-11-10 12:34:55 +010011554 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
Daniel Vetter08a24032013-04-19 11:25:34 +020011555 PIPE_CONF_CHECK_I(fdi_lanes);
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011556 PIPE_CONF_CHECK_M_N(fdi_m_n);
Daniel Vetter08a24032013-04-19 11:25:34 +020011557
Ville Syrjälä90a6b7b2015-07-06 16:39:15 +030011558 PIPE_CONF_CHECK_I(lane_count);
Imre Deak95a7a2a2016-06-13 16:44:35 +030011559 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
Vandana Kannanb95af8b2014-08-05 07:51:23 -070011560
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000011561 if (INTEL_GEN(dev_priv) < 8) {
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011562 PIPE_CONF_CHECK_M_N(dp_m_n);
Vandana Kannanb95af8b2014-08-05 07:51:23 -070011563
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011564 if (current_config->has_drrs)
11565 PIPE_CONF_CHECK_M_N(dp_m2_n2);
11566 } else
11567 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
Ville Syrjäläeb14cb72013-09-10 17:02:54 +030011568
Ville Syrjälä253c84c2016-06-22 21:57:01 +030011569 PIPE_CONF_CHECK_X(output_types);
Jani Nikulaa65347b2015-11-27 12:21:46 +020011570
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011571 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
11572 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
11573 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
11574 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
11575 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
11576 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
Daniel Vetter1bd1bd82013-04-29 21:56:12 +020011577
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011578 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
11579 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
11580 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
11581 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
11582 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
11583 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
Daniel Vetter1bd1bd82013-04-29 21:56:12 +020011584
Daniel Vetterc93f54c2013-06-27 19:47:19 +020011585 PIPE_CONF_CHECK_I(pixel_multiplier);
Shashank Sharmad9facae2018-10-12 11:53:07 +053011586 PIPE_CONF_CHECK_I(output_format);
Maarten Lankhorstd640bf72017-11-10 12:34:55 +010011587 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +010011588 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010011589 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
Maarten Lankhorstd640bf72017-11-10 12:34:55 +010011590 PIPE_CONF_CHECK_BOOL(limited_color_range);
Shashank Sharma15953632017-03-13 16:54:03 +053011591
Maarten Lankhorstd640bf72017-11-10 12:34:55 +010011592 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
11593 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
Maarten Lankhorst4493e092017-11-10 12:34:56 +010011594 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
Daniel Vetter6c49f242013-06-06 12:45:25 +020011595
Maarten Lankhorst4493e092017-11-10 12:34:56 +010011596 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
Daniel Vetter9ed109a2014-04-24 23:54:52 +020011597
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011598 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
Daniel Vetter1bd1bd82013-04-29 21:56:12 +020011599 DRM_MODE_FLAG_INTERLACE);
11600
Daniel Vetterbb760062013-06-06 14:55:52 +020011601 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011602 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
Daniel Vetterbb760062013-06-06 14:55:52 +020011603 DRM_MODE_FLAG_PHSYNC);
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011604 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
Daniel Vetterbb760062013-06-06 14:55:52 +020011605 DRM_MODE_FLAG_NHSYNC);
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011606 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
Daniel Vetterbb760062013-06-06 14:55:52 +020011607 DRM_MODE_FLAG_PVSYNC);
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011608 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
Daniel Vetterbb760062013-06-06 14:55:52 +020011609 DRM_MODE_FLAG_NVSYNC);
11610 }
Jesse Barnes045ac3b2013-05-14 17:08:26 -070011611
Ville Syrjälä333b8ca2015-09-03 21:50:16 +030011612 PIPE_CONF_CHECK_X(gmch_pfit.control);
Daniel Vettere2ff2d42015-07-15 14:15:50 +020011613 /* pfit ratios are autocomputed by the hw on gen4+ */
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000011614 if (INTEL_GEN(dev_priv) < 4)
Ville Syrjälä7f7d8dd2016-03-15 16:40:07 +020011615 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
Ville Syrjälä333b8ca2015-09-03 21:50:16 +030011616 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
Daniel Vetter99535992014-04-13 12:00:33 +020011617
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +020011618 if (!adjust) {
11619 PIPE_CONF_CHECK_I(pipe_src_w);
11620 PIPE_CONF_CHECK_I(pipe_src_h);
11621
Maarten Lankhorstd640bf72017-11-10 12:34:55 +010011622 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
Maarten Lankhorstbfd16b22015-08-27 15:44:05 +020011623 if (current_config->pch_pfit.enabled) {
11624 PIPE_CONF_CHECK_X(pch_pfit.pos);
11625 PIPE_CONF_CHECK_X(pch_pfit.size);
11626 }
Daniel Vetter2fa2fe92013-05-07 23:34:16 +020011627
Maarten Lankhorst7aefe2b2015-09-14 11:30:10 +020011628 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
Ville Syrjäläa7d1b3f2017-01-26 21:50:31 +020011629 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
Maarten Lankhorst7aefe2b2015-09-14 11:30:10 +020011630 }
Chandra Kondurua1b22782015-04-07 15:28:45 -070011631
Maarten Lankhorstd640bf72017-11-10 12:34:55 +010011632 PIPE_CONF_CHECK_BOOL(double_wide);
Ville Syrjälä282740f2013-09-04 18:30:03 +030011633
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020011634 PIPE_CONF_CHECK_P(shared_dpll);
Daniel Vetter66e985c2013-06-05 13:34:20 +020011635 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
Daniel Vetter8bcc2792013-06-05 13:34:28 +020011636 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
Daniel Vetter66e985c2013-06-05 13:34:20 +020011637 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
11638 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
Daniel Vetterd452c5b2014-07-04 11:27:39 -030011639 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
Maarten Lankhorst00490c22015-11-16 14:42:12 +010011640 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
Damien Lespiau3f4cd192014-11-13 14:55:21 +000011641 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
11642 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
11643 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
Paulo Zanoni2de38132017-09-22 17:53:42 -030011644 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
11645 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
11646 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
11647 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
11648 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
11649 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
11650 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
11651 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
11652 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
11653 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
11654 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
11655 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
Paulo Zanonic27e9172018-04-27 16:14:36 -070011656 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
11657 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
11658 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
11659 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
11660 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
11661 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
11662 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
11663 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
11664 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
11665 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
Daniel Vetterc0d43d62013-06-07 23:11:08 +020011666
Ville Syrjälä47eacba2016-04-12 22:14:35 +030011667 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
11668 PIPE_CONF_CHECK_X(dsi_pll.div);
11669
Tvrtko Ursulin9beb5fe2016-10-13 11:03:06 +010011670 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
Ville Syrjälä42571ae2013-09-06 23:29:00 +030011671 PIPE_CONF_CHECK_I(pipe_bpp);
11672
Ander Conselvan de Oliveira2d112de2015-01-15 14:55:22 +020011673 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
Jesse Barnesa9a7e982014-01-20 14:18:04 -080011674 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
Ville Syrjälä5e550652013-09-06 23:29:07 +030011675
Ville Syrjälä53e9bf52017-10-24 12:52:14 +030011676 PIPE_CONF_CHECK_I(min_voltage_level);
11677
Daniel Vetter66e985c2013-06-05 13:34:20 +020011678#undef PIPE_CONF_CHECK_X
Daniel Vetter08a24032013-04-19 11:25:34 +020011679#undef PIPE_CONF_CHECK_I
Maarten Lankhorstd640bf72017-11-10 12:34:55 +010011680#undef PIPE_CONF_CHECK_BOOL
Maarten Lankhorst4493e092017-11-10 12:34:56 +010011681#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
Ander Conselvan de Oliveira8106ddb2016-03-08 17:46:18 +020011682#undef PIPE_CONF_CHECK_P
Daniel Vetter1bd1bd82013-04-29 21:56:12 +020011683#undef PIPE_CONF_CHECK_FLAGS
Ville Syrjälä5e550652013-09-06 23:29:07 +030011684#undef PIPE_CONF_CHECK_CLOCK_FUZZY
Daniel Vetterbb760062013-06-06 14:55:52 +020011685#undef PIPE_CONF_QUIRK
Daniel Vetter627eb5a2013-04-29 19:33:42 +020011686
Maarten Lankhorstcfb23ed2015-07-14 12:17:40 +020011687 return ret;
Daniel Vetter0e8ffe12013-03-28 10:42:00 +010011688}
11689
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020011690static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11691 const struct intel_crtc_state *pipe_config)
11692{
11693 if (pipe_config->has_pch_encoder) {
Ville Syrjälä21a727b2016-02-17 21:41:10 +020011694 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
Ville Syrjäläe3b247d2016-02-17 21:41:09 +020011695 &pipe_config->fdi_m_n);
11696 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11697
11698 /*
11699 * FDI already provided one idea for the dotclock.
11700 * Yell if the encoder disagrees.
11701 */
11702 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11703 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11704 fdi_dotclock, dotclock);
11705 }
11706}
11707
/*
 * Cross-check the SKL+ watermark and DDB allocation state read back from
 * the hardware against the software state just committed for @crtc.
 * Mismatches are only reported via DRM_ERROR; nothing is corrected here.
 * No-op on pre-gen9 hardware or when the crtc is inactive.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	/* Read the watermarks the hardware is actually using right now. */
	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* Gen11+ has multiple DBUF slices; check the enabled-slice count. */
	if (INTEL_GEN(dev_priv) >= 11)
		if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
			DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
				  sw_ddb->enabled_slices,
				  hw_ddb.enabled_slices);
	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks: compare every level up to the platform max. */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark is tracked separately from the levels. */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
		sw_ddb_entry = &sw_ddb->plane[pipe][plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check.
	 * (The "if (1)" merely opens a scope so the cursor checks mirror
	 * the per-plane loop body above.)
	 */
	if (1) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}
11829
/*
 * For every connector in @state whose new state points at @crtc, verify
 * the connector's state (via intel_connector_verify_state()) and check
 * that the atomic best_encoder agrees with the legacy encoder pointer.
 * @crtc may be NULL, in which case the connectors being disabled
 * (new_conn_state->crtc == NULL) are checked without a crtc state.
 */
static void
verify_connector_state(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_crtc_state *crtc_state = NULL;

		if (new_conn_state->crtc != crtc)
			continue;

		if (crtc)
			crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
11855
/*
 * Walk all encoders and cross-check their software link state against the
 * connectors in @state and against the hardware:
 *  - an encoder must have a crtc iff some connector's new state uses it;
 *  - a connector using the encoder must point at the encoder's crtc;
 *  - an encoder without a crtc must actually be disabled in hardware.
 * Encoders not referenced by any connector in @state are skipped.
 */
static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
						   new_conn_state, i) {
			/* The encoder took part in this commit if either the
			 * old or the new connector state referenced it. */
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* Software says disabled; the hardware must agree. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
11904
/*
 * Read the pipe configuration back from the hardware and compare it
 * against the software state just committed for @crtc.
 *
 * Note the trick with @old_crtc_state: it is no longer needed after the
 * commit, so it is destroyed and its memory reused as scratch space for
 * the hardware readout (pipe_config).
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* Recycle the old crtc state as storage for the HW readout. */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		/* Every encoder on the crtc must agree on active and pipe. */
		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* An inactive crtc has nothing further to compare. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
11974
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020011975static void
Ville Syrjäläcff109f2017-11-17 21:19:17 +020011976intel_verify_planes(struct intel_atomic_state *state)
11977{
11978 struct intel_plane *plane;
11979 const struct intel_plane_state *plane_state;
11980 int i;
11981
11982 for_each_new_intel_plane_in_state(state, plane,
11983 plane_state, i)
11984 assert_plane(plane, plane_state->base.visible);
11985}
11986
/*
 * Verify one shared DPLL against its hardware state.
 *
 * Always checks the pll's global on/active_mask bookkeeping (unless the
 * pll is flagged INTEL_DPLL_ALWAYS_ON).  When @crtc is NULL, only checks
 * that there are no more active users than references.  Otherwise also
 * checks that @crtc's membership in the pll's active and enabled crtc
 * masks matches @new_state->active, and that the cached hw state equals
 * the state read back from hardware.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* Global check only: every active user must hold a reference. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* Cached hw state must match what's actually programmed. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
12041
12042static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012043verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12044 struct drm_crtc_state *old_crtc_state,
12045 struct drm_crtc_state *new_crtc_state)
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012046{
Chris Wilsonfac5e232016-07-04 11:34:36 +010012047 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012048 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12049 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12050
12051 if (new_state->shared_dpll)
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012052 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012053
12054 if (old_state->shared_dpll &&
12055 old_state->shared_dpll != new_state->shared_dpll) {
Ville Syrjälä40560e22018-06-26 22:47:11 +030012056 unsigned int crtc_mask = drm_crtc_mask(crtc);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012057 struct intel_shared_dpll *pll = old_state->shared_dpll;
12058
12059 I915_STATE_WARN(pll->active_mask & crtc_mask,
12060 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12061 pipe_name(drm_crtc_index(crtc)));
Ander Conselvan de Oliveira2c42e532016-12-29 17:22:09 +020012062 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012063 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12064 pipe_name(drm_crtc_index(crtc)));
12065 }
12066}
12067
12068static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012069intel_modeset_verify_crtc(struct drm_crtc *crtc,
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012070 struct drm_atomic_state *state,
12071 struct drm_crtc_state *old_state,
12072 struct drm_crtc_state *new_state)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012073{
Daniel Vetter5a21b662016-05-24 17:13:53 +020012074 if (!needs_modeset(new_state) &&
12075 !to_intel_crtc_state(new_state)->update_pipe)
12076 return;
12077
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012078 verify_wm_state(crtc, new_state);
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012079 verify_connector_state(crtc->dev, state, crtc);
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012080 verify_crtc_state(crtc, old_state, new_state);
12081 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012082}
12083
12084static void
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012085verify_disabled_dpll_state(struct drm_device *dev)
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012086{
Chris Wilsonfac5e232016-07-04 11:34:36 +010012087 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter91d1b4b2013-06-05 13:34:18 +020012088 int i;
Daniel Vetter53589012013-06-05 13:34:16 +020012089
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012090 for (i = 0; i < dev_priv->num_shared_dpll; i++)
Maarten Lankhorstc0ead702016-03-30 10:00:05 +020012091 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
Maarten Lankhorste7c84542016-03-23 14:58:06 +010012092}
Daniel Vetter53589012013-06-05 13:34:16 +020012093
/*
 * Verify software vs. hardware state for everything not tied to a
 * specific CRTC after a modeset: encoder state, connectors without a
 * CRTC, and all shared DPLLs checked individually.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}
12102
/*
 * Compute the per-CRTC correction (crtc->scanline_offset) that maps the
 * raw hardware scanline counter onto the value the vblank code expects;
 * the large comment below explains the platform-specific quirks.
 */
static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN2(dev_priv)) {
		const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}
12150
/*
 * Release the shared DPLL reference of every CRTC undergoing a full
 * modeset so a later compute step can (re)assign PLLs from scratch.
 * Only meaningful on platforms with a ->crtc_compute_clock hook.
 */
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(old_crtc_state)->shared_dpll;

		if (!needs_modeset(new_crtc_state))
			continue;

		/* New state starts without a PLL; one may be assigned later. */
		to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		/* Drop this CRTC's reference on the PLL it used to own. */
		intel_release_shared_dpll(old_dpll, intel_crtc, state);
	}
}
12178
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Returns 0 on success or a negative error code if pulling a CRTC's state
 * into the atomic transaction fails.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* Look at all crtc's that are going to be enabled during the modeset. */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		/* Remember the first two CRTCs being enabled. */
		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* Pulling the state in also takes the CRTC's lock. */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/*
	 * Record which pipe the newly enabled CRTC must wait on: an
	 * already-enabled pipe if one exists, otherwise the first of the
	 * two CRTCs being enabled together.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
12243
Ville Syrjälä8d965612016-11-14 18:35:10 +020012244static int intel_lock_all_pipes(struct drm_atomic_state *state)
12245{
12246 struct drm_crtc *crtc;
12247
12248 /* Add all pipes to the state */
12249 for_each_crtc(state->dev, crtc) {
12250 struct drm_crtc_state *crtc_state;
12251
12252 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12253 if (IS_ERR(crtc_state))
12254 return PTR_ERR(crtc_state);
12255 }
12256
12257 return 0;
12258}
12259
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012260static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12261{
12262 struct drm_crtc *crtc;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012263
Ville Syrjälä8d965612016-11-14 18:35:10 +020012264 /*
12265 * Add all pipes to the state, and force
12266 * a modeset on all the active ones.
12267 */
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012268 for_each_crtc(state->dev, crtc) {
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012269 struct drm_crtc_state *crtc_state;
12270 int ret;
12271
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012272 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12273 if (IS_ERR(crtc_state))
12274 return PTR_ERR(crtc_state);
12275
12276 if (!crtc_state->active || needs_modeset(crtc_state))
12277 continue;
12278
12279 crtc_state->mode_changed = true;
12280
12281 ret = drm_atomic_add_affected_connectors(state, crtc);
12282 if (ret)
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012283 return ret;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012284
12285 ret = drm_atomic_add_affected_planes(state, crtc);
12286 if (ret)
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012287 return ret;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012288 }
12289
Ville Syrjälä9780aad2016-11-14 18:35:11 +020012290 return 0;
Maarten Lankhorst27c329e2015-06-15 12:33:56 +020012291}
12292
/*
 * Global (non-per-CRTC) checks run when at least one CRTC needs a full
 * modeset: digital port conflicts, active-CRTC bookkeeping, cdclk
 * recalculation (taking extra pipe locks or forcing modesets as
 * needed), shared-PLL release, and the HSW planes workaround.
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Seed the state's bookkeeping from the current device state. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
					      &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	} else {
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
12370
Matt Roperaa363132015-09-24 15:53:18 -070012371/*
12372 * Handle calculation of various watermark data at the end of the atomic check
12373 * phase. The code here should be run after the per-crtc and per-plane 'check'
12374 * handlers to ensure that all derived state has been updated.
12375 */
Matt Roper55994c22016-05-12 07:06:08 -070012376static int calc_watermark_data(struct drm_atomic_state *state)
Matt Roperaa363132015-09-24 15:53:18 -070012377{
12378 struct drm_device *dev = state->dev;
Matt Roper98d39492016-05-12 07:06:03 -070012379 struct drm_i915_private *dev_priv = to_i915(dev);
Matt Roper98d39492016-05-12 07:06:03 -070012380
12381 /* Is there platform-specific watermark information to calculate? */
12382 if (dev_priv->display.compute_global_watermarks)
Matt Roper55994c22016-05-12 07:06:08 -070012383 return dev_priv->display.compute_global_watermarks(state);
12384
12385 return 0;
Matt Roperaa363132015-09-24 15:53:18 -070012386}
12387
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * Runs the DRM core modeset checks, computes the pipe config for every
 * CRTC needing a modeset (possibly downgrading it to a fastset when
 * fastboot finds the configs equivalent), then performs global modeset
 * checks, plane checks, FBC CRTC selection and watermark calculation.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *crtc_state;
	int ret, i;
	bool any_ms = false;

	/* Catch I915_MODE_FLAG_INHERITED */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      crtc_state, i) {
		if (crtc_state->mode.private_flags !=
		    old_crtc_state->mode.private_flags)
			crtc_state->mode_changed = true;
	}

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		if (!needs_modeset(crtc_state))
			continue;

		/* A CRTC being disabled still counts as a modeset. */
		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		/*
		 * Fastboot: if the new config matches the old one, skip
		 * the full modeset and do a fastset instead.
		 */
		if (i915_modparams.fastboot &&
		    intel_pipe_config_compare(dev_priv,
					to_intel_crtc_state(old_crtc_state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			pipe_config->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else {
		/* No modeset: carry the current logical cdclk forward. */
		intel_state->cdclk.logical = dev_priv->cdclk.logical;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, intel_state);
	return calc_watermark_data(state);
}
12466
/*
 * Thin wrapper around drm_atomic_helper_prepare_planes(); prepares all
 * planes in @state ahead of the commit. Returns 0 or a negative error
 * code from the helper.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
12472
Maarten Lankhorsta2991412016-05-17 15:07:48 +020012473u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12474{
12475 struct drm_device *dev = crtc->base.dev;
12476
12477 if (!dev->max_vblank_count)
Dhinakaran Pandiyan734cbbf2018-02-02 21:12:54 -080012478 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
Maarten Lankhorsta2991412016-05-17 15:07:48 +020012479
12480 return dev->driver->get_vblank_counter(dev, crtc->pipe);
12481}
12482
/*
 * Commit one CRTC's new state to the hardware: either a full enable
 * (modeset) or a pre-plane update (fastset), followed by FBC enabling
 * and the begin/update-planes/finish commit sequence.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);
	/* New state of the primary plane, if it is part of this commit. */
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
						 to_intel_plane(crtc->primary));

	if (modeset) {
		update_scanline_offset(pipe_config);
		dev_priv->display.crtc_enable(pipe_config, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(intel_crtc);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);
	}

	if (new_plane_state)
		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

	intel_begin_crtc_commit(crtc, old_crtc_state);

	intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc,
				    old_intel_cstate, pipe_config);

	intel_finish_crtc_commit(crtc, old_crtc_state);
}
12519
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012520static void intel_update_crtcs(struct drm_atomic_state *state)
Lyude896e5bb2016-08-24 07:48:09 +020012521{
12522 struct drm_crtc *crtc;
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012523 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Lyude896e5bb2016-08-24 07:48:09 +020012524 int i;
12525
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012526 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12527 if (!new_crtc_state->active)
Lyude896e5bb2016-08-24 07:48:09 +020012528 continue;
12529
12530 intel_update_crtc(crtc, state, old_crtc_state,
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012531 new_crtc_state);
Lyude896e5bb2016-08-24 07:48:09 +020012532 }
12533}
12534
/*
 * Skylake+ CRTC commit path. Unlike intel_update_crtcs(), the pipes
 * must be updated in an order that keeps their DDB (display buffer)
 * allocations from ever overlapping in between updates; the outer
 * do/while loop keeps picking pipes whose new allocation does not
 * collide with any not-yet-updated pipe until all are done. Also
 * handles enabling/disabling the second DBuf slice on gen11+.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;
	bool progress;
	enum pipe pipe;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;

	/* Per-pipe view of the DDB entries still occupying the buffer. */
	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(new_crtc_state);
			pipe = intel_crtc->pipe;

			/* Skip pipes already done or not active. */
			if (updated & cmask || !cstate->base.active)
				continue;

			/* Defer this pipe while its new DDB would collide. */
			if (skl_ddb_allocation_overlaps(dev_priv,
							entries,
							&cstate->wm.skl.ddb,
							i))
				continue;

			updated |= cmask;
			entries[i] = &cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
12616
Chris Wilsonba318c62017-02-02 20:47:41 +000012617static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12618{
12619 struct intel_atomic_state *state, *next;
12620 struct llist_node *freed;
12621
12622 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12623 llist_for_each_entry_safe(state, next, freed, freed)
12624 drm_atomic_state_put(&state->base);
12625}
12626
12627static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12628{
12629 struct drm_i915_private *dev_priv =
12630 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12631
12632 intel_atomic_helper_free_state(dev_priv);
12633}
12634
Daniel Vetter9db529a2017-08-08 10:08:28 +020012635static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
12636{
12637 struct wait_queue_entry wait_fence, wait_reset;
12638 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
12639
12640 init_wait_entry(&wait_fence, 0);
12641 init_wait_entry(&wait_reset, 0);
12642 for (;;) {
12643 prepare_to_wait(&intel_state->commit_ready.wait,
12644 &wait_fence, TASK_UNINTERRUPTIBLE);
12645 prepare_to_wait(&dev_priv->gpu_error.wait_queue,
12646 &wait_reset, TASK_UNINTERRUPTIBLE);
12647
12648
12649 if (i915_sw_fence_done(&intel_state->commit_ready)
12650 || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
12651 break;
12652
12653 schedule();
12654 }
12655 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
12656 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
12657}
12658
Chris Wilson8d52e442018-06-23 11:39:51 +010012659static void intel_atomic_cleanup_work(struct work_struct *work)
12660{
12661 struct drm_atomic_state *state =
12662 container_of(work, struct drm_atomic_state, commit_work);
12663 struct drm_i915_private *i915 = to_i915(state->dev);
12664
12665 drm_atomic_helper_cleanup_planes(&i915->drm, state);
12666 drm_atomic_helper_commit_cleanup_done(state);
12667 drm_atomic_state_put(state);
12668
12669 intel_atomic_helper_free_state(i915);
12670}
12671
Daniel Vetter94f05022016-06-14 18:01:00 +020012672static void intel_atomic_commit_tail(struct drm_atomic_state *state)
Daniel Vettera6778b32012-07-02 09:56:42 +020012673{
Daniel Vetter94f05022016-06-14 18:01:00 +020012674 struct drm_device *dev = state->dev;
Maarten Lankhorst565602d2015-12-10 12:33:57 +010012675 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
Chris Wilsonfac5e232016-07-04 11:34:36 +010012676 struct drm_i915_private *dev_priv = to_i915(dev);
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012677 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012678 struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
Maarten Lankhorst7580d772015-08-18 13:40:06 +020012679 struct drm_crtc *crtc;
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012680 struct intel_crtc *intel_crtc;
Ander Conselvan de Oliveirad8fc70b2017-02-09 11:31:21 +020012681 u64 put_domains[I915_MAX_PIPES] = {};
Chris Wilsone95433c2016-10-28 13:58:27 +010012682 int i;
Daniel Vettera6778b32012-07-02 09:56:42 +020012683
Daniel Vetter9db529a2017-08-08 10:08:28 +020012684 intel_atomic_commit_fence_wait(intel_state);
Daniel Vetter42b062b2017-08-08 10:08:27 +020012685
Daniel Vetterea0000f2016-06-13 16:13:46 +020012686 drm_atomic_helper_wait_for_dependencies(state);
12687
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010012688 if (intel_state->modeset)
Daniel Vetter5a21b662016-05-24 17:13:53 +020012689 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
Maarten Lankhorst565602d2015-12-10 12:33:57 +010012690
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012691 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012692 old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
12693 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
12694 intel_crtc = to_intel_crtc(crtc);
Maarten Lankhorsta5392052015-06-15 12:33:52 +020012695
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012696 if (needs_modeset(new_crtc_state) ||
12697 to_intel_crtc_state(new_crtc_state)->update_pipe) {
Daniel Vetter5a21b662016-05-24 17:13:53 +020012698
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012699 put_domains[intel_crtc->pipe] =
Daniel Vetter5a21b662016-05-24 17:13:53 +020012700 modeset_get_crtc_power_domains(crtc,
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012701 new_intel_crtc_state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012702 }
12703
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012704 if (!needs_modeset(new_crtc_state))
Maarten Lankhorst61333b62015-06-15 12:33:50 +020012705 continue;
12706
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012707 intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
Daniel Vetter460da9162013-03-27 00:44:51 +010012708
Ville Syrjälä29ceb0e2016-03-09 19:07:27 +020012709 if (old_crtc_state->active) {
Maarten Lankhorstf59e9702018-09-20 12:27:07 +020012710 intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes);
Maarten Lankhorst033b7a22018-03-08 13:02:02 +010012711
12712 /*
12713 * We need to disable pipe CRC before disabling the pipe,
12714 * or we race against vblank off.
12715 */
12716 intel_crtc_disable_pipe_crc(intel_crtc);
12717
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012718 dev_priv->display.crtc_disable(old_intel_crtc_state, state);
Maarten Lankhorsteddfcbc2015-06-15 12:33:53 +020012719 intel_crtc->active = false;
Paulo Zanoni58f9c0b2016-01-19 11:35:51 -020012720 intel_fbc_disable(intel_crtc);
Maarten Lankhorst65c307f2018-10-05 11:52:44 +020012721 intel_disable_shared_dpll(old_intel_crtc_state);
Ville Syrjälä9bbc8258a2015-11-20 22:09:20 +020012722
12723 /*
12724 * Underruns don't always raise
12725 * interrupts, so check manually.
12726 */
12727 intel_check_cpu_fifo_underruns(dev_priv);
12728 intel_check_pch_fifo_underruns(dev_priv);
Maarten Lankhorstb9001112015-11-19 16:07:16 +010012729
Ville Syrjälä21794812017-08-23 18:22:26 +030012730 if (!new_crtc_state->active) {
Maarten Lankhorste62929b2016-11-08 13:55:33 +010012731 /*
12732 * Make sure we don't call initial_watermarks
12733 * for ILK-style watermark updates.
Ville Syrjäläff32c542017-03-02 19:14:57 +020012734 *
12735 * No clue what this is supposed to achieve.
Maarten Lankhorste62929b2016-11-08 13:55:33 +010012736 */
Ville Syrjäläff32c542017-03-02 19:14:57 +020012737 if (INTEL_GEN(dev_priv) >= 9)
Maarten Lankhorste62929b2016-11-08 13:55:33 +010012738 dev_priv->display.initial_watermarks(intel_state,
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012739 new_intel_crtc_state);
Maarten Lankhorste62929b2016-11-08 13:55:33 +010012740 }
Maarten Lankhorsta5392052015-06-15 12:33:52 +020012741 }
Daniel Vetterb8cecdf2013-03-27 00:44:50 +010012742 }
Daniel Vetter7758a112012-07-08 19:40:39 +020012743
Daniel Vetter7a1530d72017-12-07 15:32:02 +010012744 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
12745 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
12746 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
Daniel Vetterea9d7582012-07-10 10:42:52 +020012747
Maarten Lankhorst565602d2015-12-10 12:33:57 +010012748 if (intel_state->modeset) {
Maarten Lankhorst4740b0f2015-08-05 12:37:10 +020012749 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
Maarten Lankhorst33c8df892016-02-10 13:49:37 +010012750
Ville Syrjäläb0587e42017-01-26 21:52:01 +020012751 intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
Maarten Lankhorstf6d19732016-03-23 14:58:07 +010012752
Lyude656d1b82016-08-17 15:55:54 -040012753 /*
12754 * SKL workaround: bspec recommends we disable the SAGV when we
12755 * have more then one pipe enabled
12756 */
Paulo Zanoni56feca92016-09-22 18:00:28 -030012757 if (!intel_can_enable_sagv(state))
Paulo Zanoni16dcdc42016-09-22 18:00:27 -030012758 intel_disable_sagv(dev_priv);
Lyude656d1b82016-08-17 15:55:54 -040012759
Maarten Lankhorst677100c2016-11-08 13:55:41 +010012760 intel_modeset_verify_disabled(dev, state);
Maarten Lankhorst4740b0f2015-08-05 12:37:10 +020012761 }
Daniel Vetter47fab732012-10-26 10:58:18 +020012762
Lyude896e5bb2016-08-24 07:48:09 +020012763 /* Complete the events for pipes that have now been disabled */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012764 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
12765 bool modeset = needs_modeset(new_crtc_state);
Maarten Lankhorsta5392052015-06-15 12:33:52 +020012766
Daniel Vetter1f7528c2016-06-13 16:13:45 +020012767 /* Complete events for now disable pipes here. */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012768 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
Daniel Vetter1f7528c2016-06-13 16:13:45 +020012769 spin_lock_irq(&dev->event_lock);
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012770 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
Daniel Vetter1f7528c2016-06-13 16:13:45 +020012771 spin_unlock_irq(&dev->event_lock);
12772
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012773 new_crtc_state->event = NULL;
Daniel Vetter1f7528c2016-06-13 16:13:45 +020012774 }
Matt Ropered4a6a72016-02-23 17:20:13 -080012775 }
12776
Lyude896e5bb2016-08-24 07:48:09 +020012777 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012778 dev_priv->display.update_crtcs(state);
Lyude896e5bb2016-08-24 07:48:09 +020012779
Daniel Vetter94f05022016-06-14 18:01:00 +020012780 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
12781 * already, but still need the state for the delayed optimization. To
12782 * fix this:
12783 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
12784 * - schedule that vblank worker _before_ calling hw_done
12785 * - at the start of commit_tail, cancel it _synchrously
12786 * - switch over to the vblank wait helper in the core after that since
12787 * we don't need out special handling any more.
12788 */
Maarten Lankhorstb44d5c02017-09-04 12:48:33 +020012789 drm_atomic_helper_wait_for_flip_done(dev, state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012790
12791 /*
12792 * Now that the vblank has passed, we can go ahead and program the
12793 * optimal watermarks on platforms that need two-step watermark
12794 * programming.
12795 *
12796 * TODO: Move this (and other cleanup) to an async worker eventually.
12797 */
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012798 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012799 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012800
12801 if (dev_priv->display.optimize_watermarks)
Maarten Lankhorstccf010f2016-11-08 13:55:32 +010012802 dev_priv->display.optimize_watermarks(intel_state,
Maarten Lankhorsta1cccdc2018-09-20 12:27:04 +020012803 new_intel_crtc_state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012804 }
12805
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012806 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
Daniel Vetter5a21b662016-05-24 17:13:53 +020012807 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
12808
12809 if (put_domains[i])
12810 modeset_put_power_domains(dev_priv, put_domains[i]);
12811
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012812 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012813 }
12814
Ville Syrjäläcff109f2017-11-17 21:19:17 +020012815 if (intel_state->modeset)
12816 intel_verify_planes(intel_state);
12817
Paulo Zanoni56feca92016-09-22 18:00:28 -030012818 if (intel_state->modeset && intel_can_enable_sagv(state))
Paulo Zanoni16dcdc42016-09-22 18:00:27 -030012819 intel_enable_sagv(dev_priv);
Lyude656d1b82016-08-17 15:55:54 -040012820
Daniel Vetter94f05022016-06-14 18:01:00 +020012821 drm_atomic_helper_commit_hw_done(state);
12822
Chris Wilsond5553c02017-05-04 12:55:08 +010012823 if (intel_state->modeset) {
12824 /* As one of the primary mmio accessors, KMS has a high
12825 * likelihood of triggering bugs in unclaimed access. After we
12826 * finish modesetting, see if an error has been flagged, and if
12827 * so enable debugging for the next modeset - and hope we catch
12828 * the culprit.
12829 */
12830 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
Daniel Vetter5a21b662016-05-24 17:13:53 +020012831 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
Chris Wilsond5553c02017-05-04 12:55:08 +010012832 }
Daniel Vetter5a21b662016-05-24 17:13:53 +020012833
Chris Wilson8d52e442018-06-23 11:39:51 +010012834 /*
12835 * Defer the cleanup of the old state to a separate worker to not
12836 * impede the current task (userspace for blocking modesets) that
12837 * are executed inline. For out-of-line asynchronous modesets/flips,
12838 * deferring to a new worker seems overkill, but we would place a
12839 * schedule point (cond_resched()) here anyway to keep latencies
12840 * down.
12841 */
12842 INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
Chris Wilson41db6452018-07-12 12:57:29 +010012843 queue_work(system_highpri_wq, &state->commit_work);
Daniel Vetter94f05022016-06-14 18:01:00 +020012844}
12845
12846static void intel_atomic_commit_work(struct work_struct *work)
12847{
Chris Wilsonc004a902016-10-28 13:58:45 +010012848 struct drm_atomic_state *state =
12849 container_of(work, struct drm_atomic_state, commit_work);
12850
Daniel Vetter94f05022016-06-14 18:01:00 +020012851 intel_atomic_commit_tail(state);
12852}
12853
Chris Wilsonc004a902016-10-28 13:58:45 +010012854static int __i915_sw_fence_call
12855intel_atomic_commit_ready(struct i915_sw_fence *fence,
12856 enum i915_sw_fence_notify notify)
12857{
12858 struct intel_atomic_state *state =
12859 container_of(fence, struct intel_atomic_state, commit_ready);
12860
12861 switch (notify) {
12862 case FENCE_COMPLETE:
Daniel Vetter42b062b2017-08-08 10:08:27 +020012863 /* we do blocking waits in the worker, nothing to do here */
Chris Wilsonc004a902016-10-28 13:58:45 +010012864 break;
Chris Wilsonc004a902016-10-28 13:58:45 +010012865 case FENCE_FREE:
Chris Wilsoneb955ee2017-01-23 21:29:39 +000012866 {
12867 struct intel_atomic_helper *helper =
12868 &to_i915(state->base.dev)->atomic_helper;
12869
12870 if (llist_add(&state->freed, &helper->free_list))
12871 schedule_work(&helper->free_work);
12872 break;
12873 }
Chris Wilsonc004a902016-10-28 13:58:45 +010012874 }
12875
12876 return NOTIFY_DONE;
12877}
12878
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012879static void intel_atomic_track_fbs(struct drm_atomic_state *state)
12880{
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012881 struct drm_plane_state *old_plane_state, *new_plane_state;
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012882 struct drm_plane *plane;
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012883 int i;
12884
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012885 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
Chris Wilsonfaf5bf02016-08-04 16:32:37 +010012886 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
Maarten Lankhorstaa5e9b42017-03-09 15:52:04 +010012887 intel_fb_obj(new_plane_state->fb),
Chris Wilsonfaf5bf02016-08-04 16:32:37 +010012888 to_intel_plane(plane)->frontbuffer_bit);
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012889}
12890
Daniel Vetter94f05022016-06-14 18:01:00 +020012891/**
12892 * intel_atomic_commit - commit validated state object
12893 * @dev: DRM device
12894 * @state: the top-level driver state object
12895 * @nonblock: nonblocking commit
12896 *
12897 * This function commits a top-level state object that has been validated
12898 * with drm_atomic_helper_check().
12899 *
Daniel Vetter94f05022016-06-14 18:01:00 +020012900 * RETURNS
12901 * Zero for success or -errno.
12902 */
12903static int intel_atomic_commit(struct drm_device *dev,
12904 struct drm_atomic_state *state,
12905 bool nonblock)
12906{
12907 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
Chris Wilsonfac5e232016-07-04 11:34:36 +010012908 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter94f05022016-06-14 18:01:00 +020012909 int ret = 0;
12910
Chris Wilsonc004a902016-10-28 13:58:45 +010012911 drm_atomic_state_get(state);
12912 i915_sw_fence_init(&intel_state->commit_ready,
12913 intel_atomic_commit_ready);
Daniel Vetter94f05022016-06-14 18:01:00 +020012914
Ville Syrjälä440df932017-03-29 17:21:23 +030012915 /*
12916 * The intel_legacy_cursor_update() fast path takes care
12917 * of avoiding the vblank waits for simple cursor
12918 * movement and flips. For cursor on/off and size changes,
12919 * we want to perform the vblank waits so that watermark
12920 * updates happen during the correct frames. Gen9+ have
12921 * double buffered watermarks and so shouldn't need this.
12922 *
Maarten Lankhorst3cf50c62017-09-19 14:14:18 +020012923 * Unset state->legacy_cursor_update before the call to
12924 * drm_atomic_helper_setup_commit() because otherwise
12925 * drm_atomic_helper_wait_for_flip_done() is a noop and
12926 * we get FIFO underruns because we didn't wait
12927 * for vblank.
Ville Syrjälä440df932017-03-29 17:21:23 +030012928 *
12929 * FIXME doing watermarks and fb cleanup from a vblank worker
12930 * (assuming we had any) would solve these problems.
12931 */
Maarten Lankhorst213f1bd2017-09-19 14:14:19 +020012932 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
12933 struct intel_crtc_state *new_crtc_state;
12934 struct intel_crtc *crtc;
12935 int i;
12936
12937 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
12938 if (new_crtc_state->wm.need_postvbl_update ||
12939 new_crtc_state->update_wm_post)
12940 state->legacy_cursor_update = false;
12941 }
Ville Syrjälä440df932017-03-29 17:21:23 +030012942
Maarten Lankhorst3cf50c62017-09-19 14:14:18 +020012943 ret = intel_atomic_prepare_commit(dev, state);
12944 if (ret) {
12945 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
12946 i915_sw_fence_commit(&intel_state->commit_ready);
12947 return ret;
12948 }
12949
12950 ret = drm_atomic_helper_setup_commit(state, nonblock);
12951 if (!ret)
12952 ret = drm_atomic_helper_swap_state(state, true);
12953
Maarten Lankhorst0806f4e2017-07-11 16:33:07 +020012954 if (ret) {
12955 i915_sw_fence_commit(&intel_state->commit_ready);
12956
Maarten Lankhorst0806f4e2017-07-11 16:33:07 +020012957 drm_atomic_helper_cleanup_planes(dev, state);
Maarten Lankhorst0806f4e2017-07-11 16:33:07 +020012958 return ret;
12959 }
Daniel Vetter94f05022016-06-14 18:01:00 +020012960 dev_priv->wm.distrust_bios_wm = false;
Ander Conselvan de Oliveira3c0fb582016-12-29 17:22:08 +020012961 intel_shared_dpll_swap_state(state);
Daniel Vetter6c9c1b32016-06-13 16:13:48 +020012962 intel_atomic_track_fbs(state);
Daniel Vetter94f05022016-06-14 18:01:00 +020012963
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010012964 if (intel_state->modeset) {
Ville Syrjäläd305e062017-08-30 21:57:03 +030012965 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
12966 sizeof(intel_state->min_cdclk));
Ville Syrjälä53e9bf52017-10-24 12:52:14 +030012967 memcpy(dev_priv->min_voltage_level,
12968 intel_state->min_voltage_level,
12969 sizeof(intel_state->min_voltage_level));
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010012970 dev_priv->active_crtcs = intel_state->active_crtcs;
Ville Syrjäläbb0f4aa2017-01-20 20:21:59 +020012971 dev_priv->cdclk.logical = intel_state->cdclk.logical;
12972 dev_priv->cdclk.actual = intel_state->cdclk.actual;
Maarten Lankhorstc3b32652016-11-08 13:55:40 +010012973 }
12974
Chris Wilson08536952016-10-14 13:18:18 +010012975 drm_atomic_state_get(state);
Daniel Vetter42b062b2017-08-08 10:08:27 +020012976 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
Chris Wilsonc004a902016-10-28 13:58:45 +010012977
12978 i915_sw_fence_commit(&intel_state->commit_ready);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020012979 if (nonblock && intel_state->modeset) {
12980 queue_work(dev_priv->modeset_wq, &state->commit_work);
12981 } else if (nonblock) {
Daniel Vetter42b062b2017-08-08 10:08:27 +020012982 queue_work(system_unbound_wq, &state->commit_work);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020012983 } else {
12984 if (intel_state->modeset)
12985 flush_workqueue(dev_priv->modeset_wq);
Daniel Vetter94f05022016-06-14 18:01:00 +020012986 intel_atomic_commit_tail(state);
Ville Syrjälä757fffc2017-11-13 15:36:22 +020012987 }
Mika Kuoppala75714942015-12-16 09:26:48 +020012988
Maarten Lankhorst74c090b2015-07-13 16:30:30 +020012989 return 0;
Daniel Vetterf30da182013-04-11 20:22:50 +020012990}
12991
Chris Wilsonf6e5b162011-04-12 18:06:51 +010012992static const struct drm_crtc_funcs intel_crtc_funcs = {
Daniel Vetter3fab2f02017-04-03 10:32:57 +020012993 .gamma_set = drm_atomic_helper_legacy_gamma_set,
Maarten Lankhorst74c090b2015-07-13 16:30:30 +020012994 .set_config = drm_atomic_helper_set_config,
Chris Wilsonf6e5b162011-04-12 18:06:51 +010012995 .destroy = intel_crtc_destroy,
Maarten Lankhorst4c01ded2016-12-22 11:33:23 +010012996 .page_flip = drm_atomic_helper_page_flip,
Matt Roper13568372015-01-21 16:35:47 -080012997 .atomic_duplicate_state = intel_crtc_duplicate_state,
12998 .atomic_destroy_state = intel_crtc_destroy_state,
Tomeu Vizoso8c6b7092017-01-10 14:43:04 +010012999 .set_crc_source = intel_crtc_set_crc_source,
Mahesh Kumara8c20832018-07-13 19:29:38 +053013000 .verify_crc_source = intel_crtc_verify_crc_source,
Mahesh Kumar260bc552018-07-13 19:29:39 +053013001 .get_crc_sources = intel_crtc_get_crc_sources,
Chris Wilsonf6e5b162011-04-12 18:06:51 +010013002};
13003
Chris Wilson74d290f2017-08-17 13:37:06 +010013004struct wait_rps_boost {
13005 struct wait_queue_entry wait;
13006
13007 struct drm_crtc *crtc;
Chris Wilsone61e0f52018-02-21 09:56:36 +000013008 struct i915_request *request;
Chris Wilson74d290f2017-08-17 13:37:06 +010013009};
13010
13011static int do_rps_boost(struct wait_queue_entry *_wait,
13012 unsigned mode, int sync, void *key)
13013{
13014 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
Chris Wilsone61e0f52018-02-21 09:56:36 +000013015 struct i915_request *rq = wait->request;
Chris Wilson74d290f2017-08-17 13:37:06 +010013016
Chris Wilsone9af4ea2018-01-18 13:16:09 +000013017 /*
13018 * If we missed the vblank, but the request is already running it
13019 * is reasonable to assume that it will complete before the next
13020 * vblank without our intervention, so leave RPS alone.
13021 */
Chris Wilsone61e0f52018-02-21 09:56:36 +000013022 if (!i915_request_started(rq))
Chris Wilsone9af4ea2018-01-18 13:16:09 +000013023 gen6_rps_boost(rq, NULL);
Chris Wilsone61e0f52018-02-21 09:56:36 +000013024 i915_request_put(rq);
Chris Wilson74d290f2017-08-17 13:37:06 +010013025
13026 drm_crtc_vblank_put(wait->crtc);
13027
13028 list_del(&wait->wait.entry);
13029 kfree(wait);
13030 return 1;
13031}
13032
13033static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
13034 struct dma_fence *fence)
13035{
13036 struct wait_rps_boost *wait;
13037
13038 if (!dma_fence_is_i915(fence))
13039 return;
13040
13041 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
13042 return;
13043
13044 if (drm_crtc_vblank_get(crtc))
13045 return;
13046
13047 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
13048 if (!wait) {
13049 drm_crtc_vblank_put(crtc);
13050 return;
13051 }
13052
13053 wait->request = to_request(dma_fence_get(fence));
13054 wait->crtc = crtc;
13055
13056 wait->wait.func = do_rps_boost;
13057 wait->wait.flags = 0;
13058
13059 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
13060}
13061
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013062static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
13063{
13064 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
13065 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13066 struct drm_framebuffer *fb = plane_state->base.fb;
13067 struct i915_vma *vma;
13068
13069 if (plane->id == PLANE_CURSOR &&
13070 INTEL_INFO(dev_priv)->cursor_needs_physical) {
13071 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13072 const int align = intel_cursor_alignment(dev_priv);
Chris Wilson4a477652018-08-17 09:24:05 +010013073 int err;
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013074
Chris Wilson4a477652018-08-17 09:24:05 +010013075 err = i915_gem_object_attach_phys(obj, align);
13076 if (err)
13077 return err;
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013078 }
13079
13080 vma = intel_pin_and_fence_fb_obj(fb,
Ville Syrjäläf5929c52018-09-07 18:24:06 +030013081 &plane_state->view,
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013082 intel_plane_uses_fence(plane_state),
13083 &plane_state->flags);
13084 if (IS_ERR(vma))
13085 return PTR_ERR(vma);
13086
13087 plane_state->vma = vma;
13088
13089 return 0;
13090}
13091
13092static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
13093{
13094 struct i915_vma *vma;
13095
13096 vma = fetch_and_zero(&old_plane_state->vma);
13097 if (vma)
13098 intel_unpin_fb_vma(vma, old_plane_state->flags);
13099}
13100
Chris Wilsonb7268c52018-04-18 19:40:52 +010013101static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
13102{
13103 struct i915_sched_attr attr = {
13104 .priority = I915_PRIORITY_DISPLAY,
13105 };
13106
13107 i915_gem_object_wait_priority(obj, 0, &attr);
13108}
13109
Matt Roper6beb8c232014-12-01 15:40:14 -080013110/**
13111 * intel_prepare_plane_fb - Prepare fb for usage on plane
13112 * @plane: drm plane to prepare for
Chris Wilsonc38c1452018-02-14 13:49:22 +000013113 * @new_state: the plane state being prepared
Matt Roper6beb8c232014-12-01 15:40:14 -080013114 *
13115 * Prepares a framebuffer for usage on a display plane. Generally this
13116 * involves pinning the underlying object and updating the frontbuffer tracking
13117 * bits. Some older platforms need special physical address handling for
13118 * cursor planes.
13119 *
Maarten Lankhorstf9356752015-08-18 13:40:05 +020013120 * Must be called with struct_mutex held.
13121 *
Matt Roper6beb8c232014-12-01 15:40:14 -080013122 * Returns 0 on success, negative error code on failure.
13123 */
13124int
13125intel_prepare_plane_fb(struct drm_plane *plane,
Chris Wilson18320402016-08-18 19:00:16 +010013126 struct drm_plane_state *new_state)
Matt Roper465c1202014-05-29 08:06:54 -070013127{
Chris Wilsonc004a902016-10-28 13:58:45 +010013128 struct intel_atomic_state *intel_state =
13129 to_intel_atomic_state(new_state->state);
Tvrtko Ursulinb7f05d42016-11-09 11:30:45 +000013130 struct drm_i915_private *dev_priv = to_i915(plane->dev);
Maarten Lankhorst844f9112015-09-02 10:42:40 +020013131 struct drm_framebuffer *fb = new_state->fb;
Matt Roper6beb8c232014-12-01 15:40:14 -080013132 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Maarten Lankhorst1ee49392015-09-23 13:27:08 +020013133 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
Chris Wilsonc004a902016-10-28 13:58:45 +010013134 int ret;
Matt Roper465c1202014-05-29 08:06:54 -070013135
Maarten Lankhorst5008e872015-08-18 13:40:05 +020013136 if (old_obj) {
13137 struct drm_crtc_state *crtc_state =
Maarten Lankhorst8b694492018-04-09 14:46:55 +020013138 drm_atomic_get_new_crtc_state(new_state->state,
13139 plane->state->crtc);
Maarten Lankhorst5008e872015-08-18 13:40:05 +020013140
13141 /* Big Hammer, we also need to ensure that any pending
13142 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13143 * current scanout is retired before unpinning the old
13144 * framebuffer. Note that we rely on userspace rendering
13145 * into the buffer attached to the pipe they are waiting
13146 * on. If not, userspace generates a GPU hang with IPEHR
13147 * point to the MI_WAIT_FOR_EVENT.
13148 *
13149 * This should only fail upon a hung GPU, in which case we
13150 * can safely continue.
13151 */
Chris Wilsonc004a902016-10-28 13:58:45 +010013152 if (needs_modeset(crtc_state)) {
13153 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13154 old_obj->resv, NULL,
13155 false, 0,
13156 GFP_KERNEL);
13157 if (ret < 0)
13158 return ret;
Chris Wilsonf4457ae2016-04-13 17:35:08 +010013159 }
Maarten Lankhorst5008e872015-08-18 13:40:05 +020013160 }
13161
Chris Wilsonc004a902016-10-28 13:58:45 +010013162 if (new_state->fence) { /* explicit fencing */
13163 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13164 new_state->fence,
13165 I915_FENCE_TIMEOUT,
13166 GFP_KERNEL);
13167 if (ret < 0)
13168 return ret;
13169 }
13170
Chris Wilsonc37efb92016-06-17 08:28:47 +010013171 if (!obj)
13172 return 0;
13173
Chris Wilson4d3088c2017-07-26 17:00:38 +010013174 ret = i915_gem_object_pin_pages(obj);
Chris Wilsonfd700752017-07-26 17:00:36 +010013175 if (ret)
13176 return ret;
13177
Chris Wilson4d3088c2017-07-26 17:00:38 +010013178 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13179 if (ret) {
13180 i915_gem_object_unpin_pages(obj);
13181 return ret;
13182 }
13183
Ville Syrjäläef1a1912018-02-21 18:02:34 +020013184 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
Chris Wilsonfd700752017-07-26 17:00:36 +010013185
Chris Wilsonfd700752017-07-26 17:00:36 +010013186 mutex_unlock(&dev_priv->drm.struct_mutex);
Chris Wilson4d3088c2017-07-26 17:00:38 +010013187 i915_gem_object_unpin_pages(obj);
Chris Wilsonfd700752017-07-26 17:00:36 +010013188 if (ret)
13189 return ret;
13190
Chris Wilsone2f34962018-10-01 15:47:54 +010013191 fb_obj_bump_render_priority(obj);
Dhinakaran Pandiyan07bcd992018-03-06 19:34:18 -080013192 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13193
Chris Wilsonc004a902016-10-28 13:58:45 +010013194 if (!new_state->fence) { /* implicit fencing */
Chris Wilson74d290f2017-08-17 13:37:06 +010013195 struct dma_fence *fence;
13196
Chris Wilsonc004a902016-10-28 13:58:45 +010013197 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13198 obj->resv, NULL,
13199 false, I915_FENCE_TIMEOUT,
13200 GFP_KERNEL);
13201 if (ret < 0)
13202 return ret;
Chris Wilson74d290f2017-08-17 13:37:06 +010013203
13204 fence = reservation_object_get_excl_rcu(obj->resv);
13205 if (fence) {
13206 add_rps_boost_after_vblank(new_state->crtc, fence);
13207 dma_fence_put(fence);
13208 }
13209 } else {
13210 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
Chris Wilsonc004a902016-10-28 13:58:45 +010013211 }
Daniel Vetter5a21b662016-05-24 17:13:53 +020013212
Chris Wilson60548c52018-07-31 14:26:29 +010013213 /*
13214 * We declare pageflips to be interactive and so merit a small bias
13215 * towards upclocking to deliver the frame on time. By only changing
13216 * the RPS thresholds to sample more regularly and aim for higher
13217 * clocks we can hopefully deliver low power workloads (like kodi)
13218 * that are not quite steady state without resorting to forcing
13219 * maximum clocks following a vblank miss (see do_rps_boost()).
13220 */
13221 if (!intel_state->rps_interactive) {
13222 intel_rps_mark_interactive(dev_priv, true);
13223 intel_state->rps_interactive = true;
13224 }
13225
Chris Wilsond07f0e52016-10-28 13:58:44 +010013226 return 0;
Matt Roper6beb8c232014-12-01 15:40:14 -080013227}
13228
/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Takes struct_mutex internally to unpin the old framebuffer; the
 * caller must not already hold it.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(old_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);

	/* Drop the interactive-RPS boost requested in the prepare phase. */
	if (intel_state->rps_interactive) {
		intel_rps_mark_interactive(dev_priv, false);
		intel_state->rps_interactive = false;
	}

	/* Should only be called after a successful intel_prepare_plane_fb()! */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_plane_unpin_fb(to_intel_plane_state(old_state));
	mutex_unlock(&dev_priv->drm.struct_mutex);
}
13256
Chandra Konduru6156a452015-04-27 13:48:39 -070013257int
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +030013258skl_max_scale(const struct intel_crtc_state *crtc_state,
13259 u32 pixel_format)
Chandra Konduru6156a452015-04-27 13:48:39 -070013260{
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +030013261 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13262 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Chandra Konduru77224cd2018-04-09 09:11:13 +053013263 int max_scale, mult;
13264 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
Chandra Konduru6156a452015-04-27 13:48:39 -070013265
Ville Syrjälä4e0b83a2018-09-07 18:24:09 +030013266 if (!crtc_state->base.enable)
Chandra Konduru6156a452015-04-27 13:48:39 -070013267 return DRM_PLANE_HELPER_NO_SCALING;
13268
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013269 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13270 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13271
Rodrigo Vivi43037c82017-10-03 15:31:42 -070013272 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
Ander Conselvan de Oliveira5b7280f2017-02-23 09:15:58 +020013273 max_dotclk *= 2;
13274
13275 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
Chandra Konduru6156a452015-04-27 13:48:39 -070013276 return DRM_PLANE_HELPER_NO_SCALING;
13277
13278 /*
13279 * skl max scale is lower of:
13280 * close to 3 but not 3, -1 is for that purpose
13281 * or
13282 * cdclk/crtc_clock
13283 */
Chandra Konduru77224cd2018-04-09 09:11:13 +053013284 mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13285 tmpclk1 = (1 << 16) * mult - 1;
13286 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13287 max_scale = min(tmpclk1, tmpclk2);
Chandra Konduru6156a452015-04-27 13:48:39 -070013288
13289 return max_scale;
13290}
13291
/*
 * intel_begin_crtc_commit - crtc hook run just before the plane updates
 * of an atomic commit.
 *
 * Reprograms pipe level state (CSC/gamma LUTs, pipe config, scaler
 * detach) where needed and opens the vblank evasion window with
 * intel_pipe_update_start(); intel_finish_crtc_commit() closes it.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/*
	 * Fastset path: reload CSC/LUTs when color management or the
	 * pipe config changed without a full modeset.
	 */
	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(&intel_cstate->base);
		intel_color_load_luts(&intel_cstate->base);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	/* A full modeset skips the fastset pipe/scaler updates below. */
	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_cstate);

out:
	/* Platform-specific watermark programming, when the hook exists. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13329
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +020013330void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13331 struct intel_crtc_state *crtc_state)
13332{
13333 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13334
13335 if (!IS_GEN2(dev_priv))
13336 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13337
13338 if (crtc_state->has_pch_encoder) {
13339 enum pipe pch_transcoder =
13340 intel_crtc_pch_transcoder(crtc);
13341
13342 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13343 }
13344}
13345
Daniel Vetter5a21b662016-05-24 17:13:53 +020013346static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13347 struct drm_crtc_state *old_crtc_state)
13348{
13349 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Ville Syrjäläd3a8fb32017-08-23 18:22:21 +030013350 struct intel_atomic_state *old_intel_state =
13351 to_intel_atomic_state(old_crtc_state->state);
13352 struct intel_crtc_state *new_crtc_state =
13353 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013354
Ville Syrjäläd3a8fb32017-08-23 18:22:21 +030013355 intel_pipe_update_end(new_crtc_state);
Maarten Lankhorst33a49862017-11-13 15:40:43 +010013356
13357 if (new_crtc_state->update_pipe &&
13358 !needs_modeset(&new_crtc_state->base) &&
Maarten Lankhorstd52ad9c2018-03-28 12:05:26 +020013359 old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
13360 intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
Daniel Vetter5a21b662016-05-24 17:13:53 +020013361}
13362
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite). Unregisters the plane from DRM and frees its memory.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
13375
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013376static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13377 u32 format, u64 modifier)
Ben Widawsky714244e2017-08-01 09:58:16 -070013378{
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013379 switch (modifier) {
13380 case DRM_FORMAT_MOD_LINEAR:
13381 case I915_FORMAT_MOD_X_TILED:
13382 break;
13383 default:
13384 return false;
13385 }
13386
Ben Widawsky714244e2017-08-01 09:58:16 -070013387 switch (format) {
13388 case DRM_FORMAT_C8:
13389 case DRM_FORMAT_RGB565:
13390 case DRM_FORMAT_XRGB1555:
13391 case DRM_FORMAT_XRGB8888:
13392 return modifier == DRM_FORMAT_MOD_LINEAR ||
13393 modifier == I915_FORMAT_MOD_X_TILED;
13394 default:
13395 return false;
13396 }
13397}
13398
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013399static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13400 u32 format, u64 modifier)
Ben Widawsky714244e2017-08-01 09:58:16 -070013401{
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013402 switch (modifier) {
13403 case DRM_FORMAT_MOD_LINEAR:
13404 case I915_FORMAT_MOD_X_TILED:
13405 break;
13406 default:
13407 return false;
13408 }
13409
Ben Widawsky714244e2017-08-01 09:58:16 -070013410 switch (format) {
13411 case DRM_FORMAT_C8:
13412 case DRM_FORMAT_RGB565:
13413 case DRM_FORMAT_XRGB8888:
13414 case DRM_FORMAT_XBGR8888:
13415 case DRM_FORMAT_XRGB2101010:
13416 case DRM_FORMAT_XBGR2101010:
13417 return modifier == DRM_FORMAT_MOD_LINEAR ||
13418 modifier == I915_FORMAT_MOD_X_TILED;
13419 default:
13420 return false;
13421 }
13422}
13423
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013424static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13425 u32 format, u64 modifier)
Ben Widawsky714244e2017-08-01 09:58:16 -070013426{
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013427 return modifier == DRM_FORMAT_MOD_LINEAR &&
13428 format == DRM_FORMAT_ARGB8888;
Ben Widawsky714244e2017-08-01 09:58:16 -070013429}
13430
/*
 * Plane funcs for gen4+ (pre-gen9) primary planes. Fully atomic-helper
 * based; only .format_mod_supported is plane-generation specific.
 */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
13441
/*
 * Plane funcs for gen2/3 primary planes. Identical to the i965 variant
 * except for the gen-specific .format_mod_supported hook.
 */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
13452
/*
 * intel_legacy_cursor_update - fastpath for legacy cursor ioctls
 *
 * Updates the cursor plane without a full atomic commit when only the
 * fb or position changed: duplicates the plane and crtc states, runs
 * the plane check, pins the new fb under struct_mutex and writes the
 * plane registers directly. Any case that could affect watermarks,
 * needs a modeset, or races with an outstanding commit falls back to
 * drm_atomic_helper_update_plane() via the "slow" label.
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   uint32_t src_x, uint32_t src_y,
			   uint32_t src_w, uint32_t src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *new_crtc_state;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
	    crtc_state->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	/* Validate the duplicated state exactly like a real commit would. */
	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  to_intel_plane_state(old_plane_state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

	/* Move frontbuffer tracking from the old fb's object to the new. */
	old_fb = old_plane_state->fb;
	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(intel_plane, crtc_state,
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
	}

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/* The duplicated crtc state is never published; always free it. */
	if (new_crtc_state)
		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
	/* On failure free the unused new plane state, on success the old. */
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
13586
/*
 * Cursor plane funcs: .update_plane goes through the legacy-ioctl
 * fastpath (intel_legacy_cursor_update); the rest uses the common
 * atomic state handling.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
13597
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013598static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13599 enum i9xx_plane_id i9xx_plane)
13600{
13601 if (!HAS_FBC(dev_priv))
13602 return false;
13603
13604 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13605 return i9xx_plane == PLANE_A; /* tied to pipe A */
13606 else if (IS_IVYBRIDGE(dev_priv))
13607 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13608 i9xx_plane == PLANE_C;
13609 else if (INTEL_GEN(dev_priv) >= 4)
13610 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13611 else
13612 return i9xx_plane == PLANE_A;
13613}
13614
Ville Syrjäläb079bd172016-10-25 18:58:02 +030013615static struct intel_plane *
Ville Syrjälä580503c2016-10-31 22:37:00 +020013616intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
Matt Roper465c1202014-05-29 08:06:54 -070013617{
Ville Syrjälä881440a2018-10-05 15:58:17 +030013618 struct intel_plane *plane;
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013619 const struct drm_plane_funcs *plane_funcs;
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013620 unsigned int supported_rotations;
Ville Syrjälädeb19682018-10-05 15:58:08 +030013621 unsigned int possible_crtcs;
Ville Syrjälä881440a2018-10-05 15:58:17 +030013622 const u64 *modifiers;
13623 const u32 *formats;
13624 int num_formats;
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013625 int ret;
Matt Roper465c1202014-05-29 08:06:54 -070013626
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013627 if (INTEL_GEN(dev_priv) >= 9)
13628 return skl_universal_plane_create(dev_priv, pipe,
13629 PLANE_PRIMARY);
13630
Ville Syrjälä881440a2018-10-05 15:58:17 +030013631 plane = intel_plane_alloc();
13632 if (IS_ERR(plane))
13633 return plane;
Matt Roperea2c67b2014-12-23 10:41:52 -080013634
Ville Syrjälä881440a2018-10-05 15:58:17 +030013635 plane->pipe = pipe;
Ville Syrjäläe3c566d2016-11-08 16:47:11 +020013636 /*
13637 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
13638 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
13639 */
13640 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
Ville Syrjälä881440a2018-10-05 15:58:17 +030013641 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
Ville Syrjäläe3c566d2016-11-08 16:47:11 +020013642 else
Ville Syrjälä881440a2018-10-05 15:58:17 +030013643 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
13644 plane->id = PLANE_PRIMARY;
13645 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013646
Ville Syrjälä881440a2018-10-05 15:58:17 +030013647 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
13648 if (plane->has_fbc) {
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013649 struct intel_fbc *fbc = &dev_priv->fbc;
13650
Ville Syrjälä881440a2018-10-05 15:58:17 +030013651 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
Ville Syrjäläcf1805e2018-02-21 19:31:01 +020013652 }
13653
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013654 if (INTEL_GEN(dev_priv) >= 4) {
Ville Syrjälä881440a2018-10-05 15:58:17 +030013655 formats = i965_primary_formats;
Damien Lespiau568db4f2015-05-12 16:13:18 +010013656 num_formats = ARRAY_SIZE(i965_primary_formats);
Ben Widawsky714244e2017-08-01 09:58:16 -070013657 modifiers = i9xx_format_modifiers;
Maarten Lankhorsta8d201a2016-01-07 11:54:11 +010013658
Ville Syrjälä881440a2018-10-05 15:58:17 +030013659 plane->max_stride = i9xx_plane_max_stride;
13660 plane->update_plane = i9xx_update_plane;
13661 plane->disable_plane = i9xx_disable_plane;
13662 plane->get_hw_state = i9xx_plane_get_hw_state;
13663 plane->check_plane = i9xx_plane_check;
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013664
13665 plane_funcs = &i965_plane_funcs;
Damien Lespiau6c0fd452015-05-19 12:29:16 +010013666 } else {
Ville Syrjälä881440a2018-10-05 15:58:17 +030013667 formats = i8xx_primary_formats;
Damien Lespiau6c0fd452015-05-19 12:29:16 +010013668 num_formats = ARRAY_SIZE(i8xx_primary_formats);
Ben Widawsky714244e2017-08-01 09:58:16 -070013669 modifiers = i9xx_format_modifiers;
Maarten Lankhorsta8d201a2016-01-07 11:54:11 +010013670
Ville Syrjälä881440a2018-10-05 15:58:17 +030013671 plane->max_stride = i9xx_plane_max_stride;
13672 plane->update_plane = i9xx_update_plane;
13673 plane->disable_plane = i9xx_disable_plane;
13674 plane->get_hw_state = i9xx_plane_get_hw_state;
13675 plane->check_plane = i9xx_plane_check;
Ville Syrjäläa38189c2018-05-18 19:21:59 +030013676
13677 plane_funcs = &i8xx_plane_funcs;
Matt Roper465c1202014-05-29 08:06:54 -070013678 }
13679
Ville Syrjälädeb19682018-10-05 15:58:08 +030013680 possible_crtcs = BIT(pipe);
13681
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013682 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
Ville Syrjälä881440a2018-10-05 15:58:17 +030013683 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
Ville Syrjälädeb19682018-10-05 15:58:08 +030013684 possible_crtcs, plane_funcs,
Ville Syrjälä881440a2018-10-05 15:58:17 +030013685 formats, num_formats, modifiers,
Ville Syrjälä38573dc2016-05-27 20:59:23 +030013686 DRM_PLANE_TYPE_PRIMARY,
13687 "primary %c", pipe_name(pipe));
13688 else
Ville Syrjälä881440a2018-10-05 15:58:17 +030013689 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
Ville Syrjälädeb19682018-10-05 15:58:08 +030013690 possible_crtcs, plane_funcs,
Ville Syrjälä881440a2018-10-05 15:58:17 +030013691 formats, num_formats, modifiers,
Ville Syrjälä38573dc2016-05-27 20:59:23 +030013692 DRM_PLANE_TYPE_PRIMARY,
Ville Syrjäläed150302017-11-17 21:19:10 +020013693 "plane %c",
Ville Syrjälä881440a2018-10-05 15:58:17 +030013694 plane_name(plane->i9xx_plane));
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013695 if (ret)
13696 goto fail;
Sonika Jindal48404c12014-08-22 14:06:04 +053013697
Ville Syrjäläb7c80602018-10-05 15:58:15 +030013698 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
Ville Syrjälä4ea7be22016-11-14 18:54:00 +020013699 supported_rotations =
Robert Fossc2c446a2017-05-19 16:50:17 -040013700 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
13701 DRM_MODE_REFLECT_X;
Dave Airlie5481e272016-10-25 16:36:13 +100013702 } else if (INTEL_GEN(dev_priv) >= 4) {
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013703 supported_rotations =
Robert Fossc2c446a2017-05-19 16:50:17 -040013704 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013705 } else {
Robert Fossc2c446a2017-05-19 16:50:17 -040013706 supported_rotations = DRM_MODE_ROTATE_0;
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013707 }
13708
Dave Airlie5481e272016-10-25 16:36:13 +100013709 if (INTEL_GEN(dev_priv) >= 4)
Ville Syrjälä881440a2018-10-05 15:58:17 +030013710 drm_plane_create_rotation_property(&plane->base,
Robert Fossc2c446a2017-05-19 16:50:17 -040013711 DRM_MODE_ROTATE_0,
Ville Syrjälä93ca7e02016-09-26 19:30:56 +030013712 supported_rotations);
Sonika Jindal48404c12014-08-22 14:06:04 +053013713
Ville Syrjälä881440a2018-10-05 15:58:17 +030013714 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
Matt Roperea2c67b2014-12-23 10:41:52 -080013715
Ville Syrjälä881440a2018-10-05 15:58:17 +030013716 return plane;
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013717
13718fail:
Ville Syrjälä881440a2018-10-05 15:58:17 +030013719 intel_plane_free(plane);
Ville Syrjäläfca0ce22016-03-21 14:43:22 +000013720
Ville Syrjäläb079bd172016-10-25 18:58:02 +030013721 return ERR_PTR(ret);
Matt Roper465c1202014-05-29 08:06:54 -070013722}
13723
/*
 * intel_cursor_plane_create - allocate and register the cursor plane
 * for @pipe.
 *
 * Selects the i845 or i9xx cursor vfuncs based on platform, registers
 * the plane as DRM_PLANE_TYPE_CURSOR and, on gen4+, adds a 0/180
 * degree rotation property.
 *
 * Returns the new plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	/* i845/i865 have their own cursor hardware; everything else is i9xx. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* Reset the cached cursor register state to sentinel values. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
13788
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053013789static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13790 struct intel_crtc_state *crtc_state)
Chandra Konduru549e2bf2015-04-07 15:28:38 -070013791{
Ville Syrjälä65edccc2016-10-31 22:37:01 +020013792 struct intel_crtc_scaler_state *scaler_state =
13793 &crtc_state->scaler_state;
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053013794 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
Chandra Konduru549e2bf2015-04-07 15:28:38 -070013795 int i;
Chandra Konduru549e2bf2015-04-07 15:28:38 -070013796
Nabendu Maiti1c74eea2016-11-29 11:23:14 +053013797 crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
13798 if (!crtc->num_scalers)
13799 return;
13800
Ville Syrjälä65edccc2016-10-31 22:37:01 +020013801 for (i = 0; i < crtc->num_scalers; i++) {
13802 struct intel_scaler *scaler = &scaler_state->scalers[i];
13803
13804 scaler->in_use = 0;
Maarten Lankhorst0aaf29b2018-09-21 16:44:37 +020013805 scaler->mode = 0;
Chandra Konduru549e2bf2015-04-07 15:28:38 -070013806 }
13807
13808 scaler_state->scaler_id = -1;
13809}
13810
/*
 * Allocate and register one CRTC for @pipe, together with its primary,
 * sprite and cursor planes.
 *
 * Returns 0 on success or a negative errno. On failure only the
 * allocations made directly here are freed; any planes already
 * registered with the DRM core are left for drm_mode_config_cleanup()
 * to release (see the comment at the fail label).
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	/* Link the software state to the CRTC in both directions. */
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe must map to exactly one CRTC, exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		/*
		 * Pre-gen9 hardware has a separate plane namespace; keep the
		 * legacy plane -> CRTC mapping in sync as well.
		 */
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	/* The rest of the driver assumes drm_crtc_index() == pipe. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
13899
Ville Syrjälä6a20fe72018-02-07 18:48:41 +020013900int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
13901 struct drm_file *file)
Carl Worth08d7b3d2009-04-29 14:43:54 -070013902{
Carl Worth08d7b3d2009-04-29 14:43:54 -070013903 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
Rob Clark7707e652014-07-17 23:30:04 -040013904 struct drm_crtc *drmmode_crtc;
Daniel Vetterc05422d2009-08-11 16:05:30 +020013905 struct intel_crtc *crtc;
Carl Worth08d7b3d2009-04-29 14:43:54 -070013906
Keith Packard418da172017-03-14 23:25:07 -070013907 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
Chris Wilson71240ed2016-06-24 14:00:24 +010013908 if (!drmmode_crtc)
Ville Syrjälä3f2c2052013-10-17 13:35:03 +030013909 return -ENOENT;
Carl Worth08d7b3d2009-04-29 14:43:54 -070013910
Rob Clark7707e652014-07-17 23:30:04 -040013911 crtc = to_intel_crtc(drmmode_crtc);
Daniel Vetterc05422d2009-08-11 16:05:30 +020013912 pipe_from_crtc_id->pipe = crtc->pipe;
Carl Worth08d7b3d2009-04-29 14:43:54 -070013913
Daniel Vetterc05422d2009-08-11 16:05:30 +020013914 return 0;
Carl Worth08d7b3d2009-04-29 14:43:54 -070013915}
13916
Daniel Vetter66a92782012-07-12 20:08:18 +020013917static int intel_encoder_clones(struct intel_encoder *encoder)
Jesse Barnes79e53942008-11-07 14:24:08 -080013918{
Daniel Vetter66a92782012-07-12 20:08:18 +020013919 struct drm_device *dev = encoder->base.dev;
13920 struct intel_encoder *source_encoder;
Jesse Barnes79e53942008-11-07 14:24:08 -080013921 int index_mask = 0;
Jesse Barnes79e53942008-11-07 14:24:08 -080013922 int entry = 0;
13923
Damien Lespiaub2784e12014-08-05 11:29:37 +010013924 for_each_intel_encoder(dev, source_encoder) {
Ville Syrjäläbc079e82014-03-03 16:15:28 +020013925 if (encoders_cloneable(encoder, source_encoder))
Daniel Vetter66a92782012-07-12 20:08:18 +020013926 index_mask |= (1 << entry);
13927
Jesse Barnes79e53942008-11-07 14:24:08 -080013928 entry++;
13929 }
Chris Wilson4ef69c72010-09-09 15:14:28 +010013930
Jesse Barnes79e53942008-11-07 14:24:08 -080013931 return index_mask;
13932}
13933
Ville Syrjälä646d5772016-10-31 22:37:14 +020013934static bool has_edp_a(struct drm_i915_private *dev_priv)
Chris Wilson4d302442010-12-14 19:21:29 +000013935{
Ville Syrjälä646d5772016-10-31 22:37:14 +020013936 if (!IS_MOBILE(dev_priv))
Chris Wilson4d302442010-12-14 19:21:29 +000013937 return false;
13938
13939 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
13940 return false;
13941
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010013942 if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
Chris Wilson4d302442010-12-14 19:21:29 +000013943 return false;
13944
13945 return true;
13946}
13947
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000013948static bool intel_crt_present(struct drm_i915_private *dev_priv)
Jesse Barnes84b4e042014-06-25 08:24:29 -070013949{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000013950 if (INTEL_GEN(dev_priv) >= 9)
Damien Lespiau884497e2013-12-03 13:56:23 +000013951 return false;
13952
Tvrtko Ursulin50a0bc92016-10-13 11:02:58 +010013953 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
Jesse Barnes84b4e042014-06-25 08:24:29 -070013954 return false;
13955
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010013956 if (IS_CHERRYVIEW(dev_priv))
Jesse Barnes84b4e042014-06-25 08:24:29 -070013957 return false;
13958
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010013959 if (HAS_PCH_LPT_H(dev_priv) &&
13960 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
Ville Syrjälä65e472e2015-12-01 23:28:55 +020013961 return false;
13962
Ville Syrjälä70ac54d2015-12-01 23:29:56 +020013963 /* DDI E can't be used if DDI A requires 4 lanes */
Tvrtko Ursulin4f8036a2016-10-13 11:02:52 +010013964 if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
Ville Syrjälä70ac54d2015-12-01 23:29:56 +020013965 return false;
13966
Ville Syrjäläe4abb732015-12-01 23:31:33 +020013967 if (!dev_priv->vbt.int_crt_support)
Jesse Barnes84b4e042014-06-25 08:24:29 -070013968 return false;
13969
13970 return true;
13971}
13972
Imre Deak8090ba82016-08-10 14:07:33 +030013973void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
13974{
13975 int pps_num;
13976 int pps_idx;
13977
13978 if (HAS_DDI(dev_priv))
13979 return;
13980 /*
13981 * This w/a is needed at least on CPT/PPT, but to be sure apply it
13982 * everywhere where registers can be write protected.
13983 */
13984 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13985 pps_num = 2;
13986 else
13987 pps_num = 1;
13988
13989 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
13990 u32 val = I915_READ(PP_CONTROL(pps_idx));
13991
13992 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
13993 I915_WRITE(PP_CONTROL(pps_idx), val);
13994 }
13995}
13996
Imre Deak44cb7342016-08-10 14:07:29 +030013997static void intel_pps_init(struct drm_i915_private *dev_priv)
13998{
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +020013999 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
Imre Deak44cb7342016-08-10 14:07:29 +030014000 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14001 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14002 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14003 else
14004 dev_priv->pps_mmio_base = PPS_BASE;
Imre Deak8090ba82016-08-10 14:07:33 +030014005
14006 intel_pps_unlock_regs_wa(dev_priv);
Imre Deak44cb7342016-08-10 14:07:29 +030014007}
14008
/*
 * Probe and register all display outputs (encoders/connectors) for this
 * device. Port presence is decided per platform from hardware detect
 * bits, fuse straps and/or the VBT.
 *
 * NOTE(review): the probe order below is load-bearing in several places
 * (LVDS before eDP, DP-before-HDMI on VLV/CHV ports) — do not reorder.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	/* Nothing to register on display-less SKUs. */
	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return;

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registeration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev_priv);

	if (intel_crt_present(dev_priv))
		intel_crt_init(dev_priv);

	if (IS_ICELAKE(dev_priv)) {
		/* ICL: register all six DDIs unconditionally. */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;
		/* Port D may carry eDP; if so, skip registering HDMI-D. */
		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		/* Only register HDMI-B if DP init did not claim it as eDP. */
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);
	} else if (IS_GEN2(dev_priv))
		intel_dvo_init(dev_priv);

	if (SUPPORTS_TV(dev_priv))
		intel_tv_init(dev_priv);

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, fill in their cloning/crtc masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
14205
14206static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14207{
14208 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
Daniel Stonea5ff7a42018-05-18 15:30:07 +010014209 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Jesse Barnes79e53942008-11-07 14:24:08 -080014210
Daniel Vetteref2d6332014-02-10 18:00:38 +010014211 drm_framebuffer_cleanup(fb);
Chris Wilson70001cd2017-02-16 09:46:21 +000014212
Daniel Stonea5ff7a42018-05-18 15:30:07 +010014213 i915_gem_object_lock(obj);
14214 WARN_ON(!obj->framebuffer_references--);
14215 i915_gem_object_unlock(obj);
Chris Wilsondd689282017-03-01 15:41:28 +000014216
Daniel Stonea5ff7a42018-05-18 15:30:07 +010014217 i915_gem_object_put(obj);
Chris Wilson70001cd2017-02-16 09:46:21 +000014218
Jesse Barnes79e53942008-11-07 14:24:08 -080014219 kfree(intel_fb);
14220}
14221
14222static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
Chris Wilson05394f32010-11-08 19:18:58 +000014223 struct drm_file *file,
Jesse Barnes79e53942008-11-07 14:24:08 -080014224 unsigned int *handle)
14225{
Daniel Stonea5ff7a42018-05-18 15:30:07 +010014226 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Jesse Barnes79e53942008-11-07 14:24:08 -080014227
Chris Wilsoncc917ab2015-10-13 14:22:26 +010014228 if (obj->userptr.mm) {
14229 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14230 return -EINVAL;
14231 }
14232
Chris Wilson05394f32010-11-08 19:18:58 +000014233 return drm_gem_handle_create(file, &obj->base, handle);
Jesse Barnes79e53942008-11-07 14:24:08 -080014234}
14235
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014236static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14237 struct drm_file *file,
14238 unsigned flags, unsigned color,
14239 struct drm_clip_rect *clips,
14240 unsigned num_clips)
14241{
Chris Wilson5a97bcc2017-02-22 11:40:46 +000014242 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014243
Chris Wilson5a97bcc2017-02-22 11:40:46 +000014244 i915_gem_object_flush_if_display(obj);
Chris Wilsond59b21e2017-02-22 11:40:49 +000014245 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
Rodrigo Vivi86c98582015-07-08 16:22:45 -070014246
14247 return 0;
14248}
14249
/* Framebuffer vtable for userspace-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14255
Damien Lespiaub3218032015-02-27 11:15:18 +000014256static
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014257u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14258 uint64_t fb_modifier, uint32_t pixel_format)
Damien Lespiaub3218032015-02-27 11:15:18 +000014259{
Ville Syrjälä645d91f2018-09-07 18:24:03 +030014260 struct intel_crtc *crtc;
14261 struct intel_plane *plane;
Damien Lespiaub3218032015-02-27 11:15:18 +000014262
Ville Syrjälä645d91f2018-09-07 18:24:03 +030014263 /*
14264 * We assume the primary plane for pipe A has
14265 * the highest stride limits of them all.
14266 */
14267 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
14268 plane = to_intel_plane(crtc->base.primary);
Ville Syrjäläac484962016-01-20 21:05:26 +020014269
Ville Syrjälä645d91f2018-09-07 18:24:03 +030014270 return plane->max_stride(plane, pixel_format, fb_modifier,
14271 DRM_MODE_ROTATE_0);
Damien Lespiaub3218032015-02-27 11:15:18 +000014272}
14273
Chris Wilson24dbf512017-02-15 10:59:18 +000014274static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14275 struct drm_i915_gem_object *obj,
14276 struct drm_mode_fb_cmd2 *mode_cmd)
Jesse Barnes79e53942008-11-07 14:24:08 -080014277{
Chris Wilson24dbf512017-02-15 10:59:18 +000014278 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014279 struct drm_framebuffer *fb = &intel_fb->base;
Eric Engestromb3c11ac2016-11-12 01:12:56 +000014280 struct drm_format_name_buf format_name;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014281 u32 pitch_limit;
Chris Wilsondd689282017-03-01 15:41:28 +000014282 unsigned int tiling, stride;
Chris Wilson24dbf512017-02-15 10:59:18 +000014283 int ret = -EINVAL;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014284 int i;
Jesse Barnes79e53942008-11-07 14:24:08 -080014285
Chris Wilsondd689282017-03-01 15:41:28 +000014286 i915_gem_object_lock(obj);
14287 obj->framebuffer_references++;
14288 tiling = i915_gem_object_get_tiling(obj);
14289 stride = i915_gem_object_get_stride(obj);
14290 i915_gem_object_unlock(obj);
Daniel Vetterdd4916c2013-10-09 21:23:51 +020014291
Daniel Vetter2a80ead2015-02-10 17:16:06 +000014292 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014293 /*
14294 * If there's a fence, enforce that
14295 * the fb modifier and tiling mode match.
14296 */
14297 if (tiling != I915_TILING_NONE &&
14298 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014299 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
Chris Wilson24dbf512017-02-15 10:59:18 +000014300 goto err;
Daniel Vetter2a80ead2015-02-10 17:16:06 +000014301 }
14302 } else {
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014303 if (tiling == I915_TILING_X) {
Daniel Vetter2a80ead2015-02-10 17:16:06 +000014304 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014305 } else if (tiling == I915_TILING_Y) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014306 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
Chris Wilson24dbf512017-02-15 10:59:18 +000014307 goto err;
Daniel Vetter2a80ead2015-02-10 17:16:06 +000014308 }
14309 }
14310
Tvrtko Ursulin9a8f0a12015-02-27 11:15:24 +000014311 /* Passed in modifier sanity checking. */
14312 switch (mode_cmd->modifier[0]) {
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014313 case I915_FORMAT_MOD_Y_TILED_CCS:
14314 case I915_FORMAT_MOD_Yf_TILED_CCS:
14315 switch (mode_cmd->pixel_format) {
14316 case DRM_FORMAT_XBGR8888:
14317 case DRM_FORMAT_ABGR8888:
14318 case DRM_FORMAT_XRGB8888:
14319 case DRM_FORMAT_ARGB8888:
14320 break;
14321 default:
14322 DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
14323 goto err;
14324 }
14325 /* fall through */
Tvrtko Ursulin9a8f0a12015-02-27 11:15:24 +000014326 case I915_FORMAT_MOD_Yf_TILED:
Paulo Zanonief51e0a2018-09-24 17:19:11 -070014327 if (mode_cmd->pixel_format == DRM_FORMAT_C8) {
14328 DRM_DEBUG_KMS("Indexed format does not support Yf tiling\n");
14329 goto err;
14330 }
14331 /* fall through */
14332 case I915_FORMAT_MOD_Y_TILED:
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014333 if (INTEL_GEN(dev_priv) < 9) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014334 DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14335 mode_cmd->modifier[0]);
Chris Wilson24dbf512017-02-15 10:59:18 +000014336 goto err;
Tvrtko Ursulin9a8f0a12015-02-27 11:15:24 +000014337 }
Paulo Zanonief51e0a2018-09-24 17:19:11 -070014338 break;
Ben Widawsky2f075562017-03-24 14:29:48 -070014339 case DRM_FORMAT_MOD_LINEAR:
Tvrtko Ursulin9a8f0a12015-02-27 11:15:24 +000014340 case I915_FORMAT_MOD_X_TILED:
14341 break;
14342 default:
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014343 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
14344 mode_cmd->modifier[0]);
Chris Wilson24dbf512017-02-15 10:59:18 +000014345 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014346 }
Chris Wilson57cd6502010-08-08 12:34:44 +010014347
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014348 /*
14349 * gen2/3 display engine uses the fence if present,
14350 * so the tiling mode must match the fb modifier exactly.
14351 */
Tvrtko Ursulinc56b89f2018-02-09 21:58:46 +000014352 if (INTEL_GEN(dev_priv) < 4 &&
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014353 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014354 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014355 goto err;
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014356 }
14357
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014358 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
Damien Lespiaub3218032015-02-27 11:15:18 +000014359 mode_cmd->pixel_format);
Chris Wilsona35cdaa2013-06-25 17:26:45 +010014360 if (mode_cmd->pitches[0] > pitch_limit) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014361 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
Ben Widawsky2f075562017-03-24 14:29:48 -070014362 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014363 "tiled" : "linear",
14364 mode_cmd->pitches[0], pitch_limit);
Chris Wilson24dbf512017-02-15 10:59:18 +000014365 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014366 }
Ville Syrjälä5d7bd702012-10-31 17:50:18 +020014367
Ville Syrjäläc2ff7372016-02-11 19:16:37 +020014368 /*
14369 * If there's a fence, enforce that
14370 * the fb pitch and fence stride match.
14371 */
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014372 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
14373 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
14374 mode_cmd->pitches[0], stride);
Chris Wilson24dbf512017-02-15 10:59:18 +000014375 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014376 }
Ville Syrjälä5d7bd702012-10-31 17:50:18 +020014377
Ville Syrjälä57779d02012-10-31 17:50:14 +020014378 /* Reject formats not supported by any plane early. */
Jesse Barnes308e5bc2011-11-14 14:51:28 -080014379 switch (mode_cmd->pixel_format) {
Ville Syrjälä57779d02012-10-31 17:50:14 +020014380 case DRM_FORMAT_C8:
Ville Syrjälä04b39242011-11-17 18:05:13 +020014381 case DRM_FORMAT_RGB565:
14382 case DRM_FORMAT_XRGB8888:
14383 case DRM_FORMAT_ARGB8888:
Ville Syrjälä57779d02012-10-31 17:50:14 +020014384 break;
14385 case DRM_FORMAT_XRGB1555:
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014386 if (INTEL_GEN(dev_priv) > 3) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014387 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14388 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014389 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014390 }
Ville Syrjälä57779d02012-10-31 17:50:14 +020014391 break;
Ville Syrjälä57779d02012-10-31 17:50:14 +020014392 case DRM_FORMAT_ABGR8888:
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014393 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014394 INTEL_GEN(dev_priv) < 9) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014395 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14396 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014397 goto err;
Damien Lespiau6c0fd452015-05-19 12:29:16 +010014398 }
14399 break;
14400 case DRM_FORMAT_XBGR8888:
Ville Syrjälä04b39242011-11-17 18:05:13 +020014401 case DRM_FORMAT_XRGB2101010:
Ville Syrjälä57779d02012-10-31 17:50:14 +020014402 case DRM_FORMAT_XBGR2101010:
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000014403 if (INTEL_GEN(dev_priv) < 4) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014404 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14405 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014406 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014407 }
Jesse Barnesb5626742011-06-24 12:19:27 -070014408 break;
Damien Lespiau75312082015-05-15 19:06:01 +010014409 case DRM_FORMAT_ABGR2101010:
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010014410 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014411 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14412 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014413 goto err;
Damien Lespiau75312082015-05-15 19:06:01 +010014414 }
14415 break;
Ville Syrjälä04b39242011-11-17 18:05:13 +020014416 case DRM_FORMAT_YUYV:
14417 case DRM_FORMAT_UYVY:
14418 case DRM_FORMAT_YVYU:
14419 case DRM_FORMAT_VYUY:
Ville Syrjäläab330812017-04-21 21:14:32 +030014420 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014421 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14422 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014423 goto err;
Chris Wilsonc16ed4b2012-12-18 22:13:14 +000014424 }
Chris Wilson57cd6502010-08-08 12:34:44 +010014425 break;
Chandra Kondurue44134f2018-05-12 03:03:15 +053014426 case DRM_FORMAT_NV12:
Chandra Kondurue44134f2018-05-12 03:03:15 +053014427 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
Dhinakaran Pandiyanb45649f2018-08-24 13:38:56 -070014428 IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
Chandra Kondurue44134f2018-05-12 03:03:15 +053014429 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14430 drm_get_format_name(mode_cmd->pixel_format,
14431 &format_name));
14432 goto err;
14433 }
14434 break;
Chris Wilson57cd6502010-08-08 12:34:44 +010014435 default:
Ville Syrjälä144cc1432017-03-07 21:42:10 +020014436 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14437 drm_get_format_name(mode_cmd->pixel_format, &format_name));
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014438 goto err;
Chris Wilson57cd6502010-08-08 12:34:44 +010014439 }
14440
Ville Syrjälä90f9a332012-10-31 17:50:19 +020014441 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14442 if (mode_cmd->offsets[0] != 0)
Chris Wilson24dbf512017-02-15 10:59:18 +000014443 goto err;
Ville Syrjälä90f9a332012-10-31 17:50:19 +020014444
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014445 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
Ville Syrjäläd88c4af2017-03-07 21:42:06 +020014446
Chandra Kondurue44134f2018-05-12 03:03:15 +053014447 if (fb->format->format == DRM_FORMAT_NV12 &&
14448 (fb->width < SKL_MIN_YUV_420_SRC_W ||
14449 fb->height < SKL_MIN_YUV_420_SRC_H ||
14450 (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
14451 DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
14452 return -EINVAL;
14453 }
14454
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014455 for (i = 0; i < fb->format->num_planes; i++) {
14456 u32 stride_alignment;
14457
14458 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
14459 DRM_DEBUG_KMS("bad plane %d handle\n", i);
Christophe JAILLET37875d62017-09-10 10:56:42 +020014460 goto err;
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014461 }
14462
14463 stride_alignment = intel_fb_stride_alignment(fb, i);
14464
14465 /*
14466 * Display WA #0531: skl,bxt,kbl,glk
14467 *
14468 * Render decompression and plane width > 3840
14469 * combined with horizontal panning requires the
14470 * plane stride to be a multiple of 4. We'll just
14471 * require the entire fb to accommodate that to avoid
14472 * potential runtime errors at plane configuration time.
14473 */
14474 if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
Dhinakaran Pandiyan63eaf9a2018-08-22 12:38:27 -070014475 is_ccs_modifier(fb->modifier))
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014476 stride_alignment *= 4;
14477
14478 if (fb->pitches[i] & (stride_alignment - 1)) {
14479 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
14480 i, fb->pitches[i], stride_alignment);
14481 goto err;
14482 }
Ville Syrjäläd88c4af2017-03-07 21:42:06 +020014483
Daniel Stonea268bcd2018-05-18 15:30:08 +010014484 fb->obj[i] = &obj->base;
14485 }
Daniel Vetterc7d73f62012-12-13 23:38:38 +010014486
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014487 ret = intel_fill_fb_info(dev_priv, fb);
Ville Syrjälä6687c902015-09-15 13:16:41 +030014488 if (ret)
Chris Wilson9aceb5c12017-03-01 15:41:27 +000014489 goto err;
Ville Syrjälä2d7a2152016-02-15 22:54:47 +020014490
Ville Syrjälä2e2adb02017-08-01 09:58:13 -070014491 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
Jesse Barnes79e53942008-11-07 14:24:08 -080014492 if (ret) {
14493 DRM_ERROR("framebuffer init failed %d\n", ret);
Chris Wilson24dbf512017-02-15 10:59:18 +000014494 goto err;
Jesse Barnes79e53942008-11-07 14:24:08 -080014495 }
14496
Jesse Barnes79e53942008-11-07 14:24:08 -080014497 return 0;
Chris Wilson24dbf512017-02-15 10:59:18 +000014498
14499err:
Chris Wilsondd689282017-03-01 15:41:28 +000014500 i915_gem_object_lock(obj);
14501 obj->framebuffer_references--;
14502 i915_gem_object_unlock(obj);
Chris Wilson24dbf512017-02-15 10:59:18 +000014503 return ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080014504}
14505
Jesse Barnes79e53942008-11-07 14:24:08 -080014506static struct drm_framebuffer *
14507intel_user_framebuffer_create(struct drm_device *dev,
14508 struct drm_file *filp,
Ville Syrjälä1eb834512015-11-11 19:11:29 +020014509 const struct drm_mode_fb_cmd2 *user_mode_cmd)
Jesse Barnes79e53942008-11-07 14:24:08 -080014510{
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014511 struct drm_framebuffer *fb;
Chris Wilson05394f32010-11-08 19:18:58 +000014512 struct drm_i915_gem_object *obj;
Ville Syrjälä76dc3762015-11-11 19:11:28 +020014513 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
Jesse Barnes79e53942008-11-07 14:24:08 -080014514
Chris Wilson03ac0642016-07-20 13:31:51 +010014515 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14516 if (!obj)
Chris Wilsoncce13ff2010-08-08 13:36:38 +010014517 return ERR_PTR(-ENOENT);
Jesse Barnes79e53942008-11-07 14:24:08 -080014518
Chris Wilson24dbf512017-02-15 10:59:18 +000014519 fb = intel_framebuffer_create(obj, &mode_cmd);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014520 if (IS_ERR(fb))
Chris Wilsonf0cd5182016-10-28 13:58:43 +010014521 i915_gem_object_put(obj);
Lukas Wunnerdcb13942015-07-04 11:50:58 +020014522
14523 return fb;
Jesse Barnes79e53942008-11-07 14:24:08 -080014524}
14525
Chris Wilson778e23a2016-12-05 14:29:39 +000014526static void intel_atomic_state_free(struct drm_atomic_state *state)
14527{
14528 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14529
14530 drm_atomic_state_default_release(state);
14531
14532 i915_sw_fence_fini(&intel_state->commit_ready);
14533
14534 kfree(state);
14535}
14536
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014537static enum drm_mode_status
14538intel_mode_valid(struct drm_device *dev,
14539 const struct drm_display_mode *mode)
14540{
Ville Syrjäläad77c532018-06-15 20:44:05 +030014541 struct drm_i915_private *dev_priv = to_i915(dev);
14542 int hdisplay_max, htotal_max;
14543 int vdisplay_max, vtotal_max;
14544
Ville Syrjäläe4dd27a2018-05-24 15:54:03 +030014545 /*
14546 * Can't reject DBLSCAN here because Xorg ddxen can add piles
14547 * of DBLSCAN modes to the output's mode list when they detect
14548 * the scaling mode property on the connector. And they don't
14549 * ask the kernel to validate those modes in any way until
14550 * modeset time at which point the client gets a protocol error.
14551 * So in order to not upset those clients we silently ignore the
14552 * DBLSCAN flag on such connectors. For other connectors we will
14553 * reject modes with the DBLSCAN flag in encoder->compute_config().
14554 * And we always reject DBLSCAN modes in connector->mode_valid()
14555 * as we never want such modes on the connector's mode list.
14556 */
14557
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014558 if (mode->vscan > 1)
14559 return MODE_NO_VSCAN;
14560
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014561 if (mode->flags & DRM_MODE_FLAG_HSKEW)
14562 return MODE_H_ILLEGAL;
14563
14564 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14565 DRM_MODE_FLAG_NCSYNC |
14566 DRM_MODE_FLAG_PCSYNC))
14567 return MODE_HSYNC;
14568
14569 if (mode->flags & (DRM_MODE_FLAG_BCAST |
14570 DRM_MODE_FLAG_PIXMUX |
14571 DRM_MODE_FLAG_CLKDIV2))
14572 return MODE_BAD;
14573
Ville Syrjäläad77c532018-06-15 20:44:05 +030014574 if (INTEL_GEN(dev_priv) >= 9 ||
14575 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14576 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
14577 vdisplay_max = 4096;
14578 htotal_max = 8192;
14579 vtotal_max = 8192;
14580 } else if (INTEL_GEN(dev_priv) >= 3) {
14581 hdisplay_max = 4096;
14582 vdisplay_max = 4096;
14583 htotal_max = 8192;
14584 vtotal_max = 8192;
14585 } else {
14586 hdisplay_max = 2048;
14587 vdisplay_max = 2048;
14588 htotal_max = 4096;
14589 vtotal_max = 4096;
14590 }
14591
14592 if (mode->hdisplay > hdisplay_max ||
14593 mode->hsync_start > htotal_max ||
14594 mode->hsync_end > htotal_max ||
14595 mode->htotal > htotal_max)
14596 return MODE_H_ILLEGAL;
14597
14598 if (mode->vdisplay > vdisplay_max ||
14599 mode->vsync_start > vtotal_max ||
14600 mode->vsync_end > vtotal_max ||
14601 mode->vtotal > vtotal_max)
14602 return MODE_V_ILLEGAL;
14603
Ville Syrjäläe995ca0b2017-11-14 20:32:58 +020014604 return MODE_OK;
14605}
14606
/*
 * i915 implementation of the top-level KMS mode_config hooks:
 * user framebuffer creation, per-format layout queries, fbdev
 * repolling, mode validation, the atomic check/commit entry points,
 * and i915-sized atomic state alloc/clear/free.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	/* state alloc/free are overridden to handle struct intel_atomic_state */
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14618
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	/*
	 * Platform dispatch: branches are ordered newest/most-specific
	 * first and the first match wins — e.g. gen9+ must be tested
	 * before the generic HAS_DDI check, and CHV before VLV.
	 */
	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/*
	 * FDI link training hook; only set for the platforms matched
	 * below, left NULL otherwise.
	 */
	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	/* gen9+ gets the skl-specific crtc update path. */
	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;
}
14711
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* Read-modify-write SR01, setting bit 5 (VGA screen off). */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	/* Let the screen-off take effect before killing the VGA plane. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* posting read flushes the write */
}
14730
/*
 * Read out the current cdclk hardware state and seed both the logical
 * and actual software cdclk state from it, so subsequent modesets start
 * from what the hardware is really running.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
14739
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Standard acquire-ctx deadlock backoff, then try again. */
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Propagate the computed wm into the crtc's live state too. */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

	/* Drop the duplicated state before releasing the modeset locks. */
put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
14826
Chris Wilson58ecd9d2017-11-05 13:49:05 +000014827static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
14828{
14829 if (IS_GEN5(dev_priv)) {
14830 u32 fdi_pll_clk =
14831 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
14832
14833 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
14834 } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
14835 dev_priv->fdi_pll_freq = 270000;
14836 } else {
14837 return;
14838 }
14839
14840 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
14841}
14842
/*
 * Commit the initial (BIOS-inherited) display state once during init:
 * pull every CRTC — and the planes of the active ones — into a fresh
 * atomic state and commit it, retrying on modeset-lock deadlocks.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		/*
		 * Deadlock on a modeset lock: wipe the accumulated state,
		 * back off, and rebuild it from scratch. The same state
		 * object is reused across retries.
		 */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* Single put on every exit path balances the alloc above. */
	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
14890
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014891int intel_modeset_init(struct drm_device *dev)
Jesse Barnes79e53942008-11-07 14:24:08 -080014892{
Joonas Lahtinen72e96d62016-03-30 16:57:10 +030014893 struct drm_i915_private *dev_priv = to_i915(dev);
14894 struct i915_ggtt *ggtt = &dev_priv->ggtt;
Damien Lespiau8cc87b72014-03-03 17:31:44 +000014895 enum pipe pipe;
Jesse Barnes46f297f2014-03-07 08:57:48 -080014896 struct intel_crtc *crtc;
Azhar Shaikh516a49c2018-07-06 11:37:30 -070014897 int ret;
Jesse Barnes79e53942008-11-07 14:24:08 -080014898
Ville Syrjälä757fffc2017-11-13 15:36:22 +020014899 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
14900
Jesse Barnes79e53942008-11-07 14:24:08 -080014901 drm_mode_config_init(dev);
14902
14903 dev->mode_config.min_width = 0;
14904 dev->mode_config.min_height = 0;
14905
Dave Airlie019d96c2011-09-29 16:20:42 +010014906 dev->mode_config.preferred_depth = 24;
14907 dev->mode_config.prefer_shadow = 1;
14908
Tvrtko Ursulin25bab382015-02-10 17:16:16 +000014909 dev->mode_config.allow_fb_modifiers = true;
14910
Laurent Pincharte6ecefa2012-05-17 13:27:23 +020014911 dev->mode_config.funcs = &intel_mode_funcs;
Jesse Barnes79e53942008-11-07 14:24:08 -080014912
Andrea Arcangeli400c19d2017-04-07 01:23:45 +020014913 init_llist_head(&dev_priv->atomic_helper.free_list);
Chris Wilsoneb955ee2017-01-23 21:29:39 +000014914 INIT_WORK(&dev_priv->atomic_helper.free_work,
Chris Wilsonba318c62017-02-02 20:47:41 +000014915 intel_atomic_helper_free_state_worker);
Chris Wilsoneb955ee2017-01-23 21:29:39 +000014916
Jesse Barnesb690e962010-07-19 13:53:12 -070014917 intel_init_quirks(dev);
14918
Ville Syrjälä62d75df2016-10-31 22:37:25 +020014919 intel_init_pm(dev_priv);
Eugeni Dodonov1fa61102012-04-18 15:29:26 -030014920
Lukas Wunner69f92f62015-07-15 13:57:35 +020014921 /*
14922 * There may be no VBT; and if the BIOS enabled SSC we can
14923 * just keep using it to avoid unnecessary flicker. Whereas if the
14924 * BIOS isn't using it, don't assume it will work even if the VBT
14925 * indicates as much.
14926 */
Tvrtko Ursulin6e266952016-10-13 11:02:53 +010014927 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
Lukas Wunner69f92f62015-07-15 13:57:35 +020014928 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
14929 DREF_SSC1_ENABLE);
14930
14931 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
14932 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
14933 bios_lvds_use_ssc ? "en" : "dis",
14934 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
14935 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
14936 }
14937 }
14938
Ville Syrjäläad77c532018-06-15 20:44:05 +030014939 /* maximum framebuffer dimensions */
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010014940 if (IS_GEN2(dev_priv)) {
Chris Wilsona6c45cf2010-09-17 00:32:17 +010014941 dev->mode_config.max_width = 2048;
14942 dev->mode_config.max_height = 2048;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010014943 } else if (IS_GEN3(dev_priv)) {
Keith Packard5e4d6fa2009-07-12 23:53:17 -070014944 dev->mode_config.max_width = 4096;
14945 dev->mode_config.max_height = 4096;
Jesse Barnes79e53942008-11-07 14:24:08 -080014946 } else {
Chris Wilsona6c45cf2010-09-17 00:32:17 +010014947 dev->mode_config.max_width = 8192;
14948 dev->mode_config.max_height = 8192;
Jesse Barnes79e53942008-11-07 14:24:08 -080014949 }
Damien Lespiau068be562014-03-28 14:17:49 +000014950
Jani Nikula2a307c22016-11-30 17:43:04 +020014951 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14952 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
Ville Syrjälädc41c152014-08-13 11:57:05 +030014953 dev->mode_config.cursor_height = 1023;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +010014954 } else if (IS_GEN2(dev_priv)) {
Ville Syrjälä98fac1d2018-06-15 20:44:04 +030014955 dev->mode_config.cursor_width = 64;
14956 dev->mode_config.cursor_height = 64;
Damien Lespiau068be562014-03-28 14:17:49 +000014957 } else {
Ville Syrjälä98fac1d2018-06-15 20:44:04 +030014958 dev->mode_config.cursor_width = 256;
14959 dev->mode_config.cursor_height = 256;
Damien Lespiau068be562014-03-28 14:17:49 +000014960 }
14961
Matthew Auld73ebd502017-12-11 15:18:20 +000014962 dev->mode_config.fb_base = ggtt->gmadr.start;
Jesse Barnes79e53942008-11-07 14:24:08 -080014963
Zhao Yakui28c97732009-10-09 11:39:41 +080014964 DRM_DEBUG_KMS("%d display pipe%s available.\n",
Tvrtko Ursulinb7f05d42016-11-09 11:30:45 +000014965 INTEL_INFO(dev_priv)->num_pipes,
14966 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
Jesse Barnes79e53942008-11-07 14:24:08 -080014967
Damien Lespiau055e3932014-08-18 13:49:10 +010014968 for_each_pipe(dev_priv, pipe) {
Ville Syrjälä5ab0d852016-10-31 22:37:11 +020014969 ret = intel_crtc_init(dev_priv, pipe);
Ville Syrjäläb079bd172016-10-25 18:58:02 +030014970 if (ret) {
14971 drm_mode_config_cleanup(dev);
14972 return ret;
14973 }
Jesse Barnes79e53942008-11-07 14:24:08 -080014974 }
14975
Daniel Vettere72f9fb2013-06-05 13:34:06 +020014976 intel_shared_dpll_init(dev);
Chris Wilson58ecd9d2017-11-05 13:49:05 +000014977 intel_update_fdi_pll_freq(dev_priv);
Jesse Barnesee7b9f92012-04-20 17:11:53 +010014978
Ville Syrjälä5be6e332017-02-20 16:04:43 +020014979 intel_update_czclk(dev_priv);
14980 intel_modeset_init_hw(dev);
14981
Ville Syrjäläb2045352016-05-13 23:41:27 +030014982 if (dev_priv->max_cdclk_freq == 0)
Ville Syrjälä4c75b942016-10-31 22:37:12 +020014983 intel_update_max_cdclk(dev_priv);
Ville Syrjäläb2045352016-05-13 23:41:27 +030014984
Jesse Barnes9cce37f2010-08-13 15:11:26 -070014985 /* Just disable it once at startup */
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000014986 i915_disable_vga(dev_priv);
Ander Conselvan de Oliveirac39055b2016-11-23 16:21:44 +020014987 intel_setup_outputs(dev_priv);
Chris Wilson11be49e2012-11-15 11:32:20 +000014988
Daniel Vetter6e9f7982014-05-29 23:54:47 +020014989 drm_modeset_lock_all(dev);
Ville Syrjäläaecd36b2017-06-01 17:36:13 +030014990 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
Daniel Vetter6e9f7982014-05-29 23:54:47 +020014991 drm_modeset_unlock_all(dev);
Jesse Barnes46f297f2014-03-07 08:57:48 -080014992
Damien Lespiaud3fcc802014-05-13 23:32:22 +010014993 for_each_intel_crtc(dev, crtc) {
Maarten Lankhorsteeebeac2015-07-14 12:33:29 +020014994 struct intel_initial_plane_config plane_config = {};
14995
Jesse Barnes46f297f2014-03-07 08:57:48 -080014996 if (!crtc->active)
14997 continue;
14998
Jesse Barnes46f297f2014-03-07 08:57:48 -080014999 /*
Jesse Barnes46f297f2014-03-07 08:57:48 -080015000 * Note that reserving the BIOS fb up front prevents us
15001 * from stuffing other stolen allocations like the ring
15002 * on top. This prevents some ugliness at boot time, and
15003 * can even allow for smooth boot transitions if the BIOS
15004 * fb is large enough for the active pipe configuration.
15005 */
Maarten Lankhorsteeebeac2015-07-14 12:33:29 +020015006 dev_priv->display.get_initial_plane_config(crtc,
15007 &plane_config);
15008
15009 /*
15010 * If the fb is shared between multiple heads, we'll
15011 * just get the first one.
15012 */
15013 intel_find_initial_plane_obj(crtc, &plane_config);
Jesse Barnes46f297f2014-03-07 08:57:48 -080015014 }
Matt Roperd93c0372015-12-03 11:37:41 -080015015
15016 /*
15017 * Make sure hardware watermarks really match the state we read out.
15018 * Note that we need to do this after reconstructing the BIOS fb's
15019 * since the watermark calculation done here will use pstate->fb.
15020 */
Ville Syrjälä602ae832017-03-02 19:15:02 +020015021 if (!HAS_GMCH_DISPLAY(dev_priv))
15022 sanitize_watermarks(dev);
Ville Syrjäläb079bd172016-10-25 18:58:02 +030015023
Azhar Shaikh516a49c2018-07-06 11:37:30 -070015024 /*
15025 * Force all active planes to recompute their states. So that on
15026 * mode_setcrtc after probe, all the intel_plane_state variables
15027 * are already calculated and there is no assert_plane warnings
15028 * during bootup.
15029 */
15030 ret = intel_initial_commit(dev);
15031 if (ret)
15032 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15033
Ville Syrjäläb079bd172016-10-25 18:58:02 +030015034 return 0;
Chris Wilson2c7111d2011-03-29 10:40:27 +010015035}
Jesse Barnesd5bb0812011-01-05 12:01:26 -080015036
/*
 * Force pipe @pipe on with a fixed 640x480@60Hz VGA mode. Used by the
 * i830 "force pipe" quirk where a pipe must be kept running even though
 * no real mode is being set on it.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* The dividers above, with a 48 MHz reference, must produce
	 * (approximately) the VGA dot clock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve any DVO 2x bit the BIOS may have left set; replace
	 * everything else with our own divider/reference configuration. */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Fixed 640x480 timings; the registers hold each value minus one. */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Don't return until the pipe has demonstrably started scanning. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15106
/*
 * Counterpart to i830_enable_pipe(): shut down a pipe that was force
 * enabled by the i830 quirk, then disable its DPLL (leaving VGA mode
 * disabled). All planes and cursors feeding the pipe must already be off.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* Catch callers that didn't disable the display/cursor planes first. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* The pipe must have stopped scanning before we touch the DPLL. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15128
/*
 * On gen2/gen3 hardware the primary plane can be assigned to either pipe
 * (there is no fixed plane->pipe mapping), and the BIOS may leave a plane
 * attached to a pipe other than the one its crtc owns. Detect that case
 * via the plane's hw state readout and force such planes off so software
 * and hardware state agree. No-op on gen4+ where the mapping is fixed.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) >= 4)
		return;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_crtc *plane_crtc;
		enum pipe pipe;

		/* get_hw_state() reports whether the plane is enabled and
		 * which pipe it is currently feeding. */
		if (!plane->get_hw_state(plane, &pipe))
			continue;

		if (pipe == crtc->pipe)
			continue;

		DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
			      plane->base.base.id, plane->base.name);

		/* Disable it on the pipe it is actually attached to. */
		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		intel_plane_disable_noatomic(plane_crtc, plane);
	}
}
15156
Ville Syrjälä02e93c32015-08-26 19:39:19 +030015157static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15158{
15159 struct drm_device *dev = crtc->base.dev;
15160 struct intel_encoder *encoder;
15161
15162 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15163 return true;
15164
15165 return false;
15166}
15167
Maarten Lankhorst496b0fc2016-08-23 16:18:07 +020015168static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15169{
15170 struct drm_device *dev = encoder->base.dev;
15171 struct intel_connector *connector;
15172
15173 for_each_connector_on_encoder(dev, &encoder->base, connector)
15174 return connector;
15175
15176 return NULL;
15177}
15178
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015179static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
Ville Syrjäläecf837d92017-10-10 15:55:56 +030015180 enum pipe pch_transcoder)
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015181{
15182 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
Ville Syrjäläecf837d92017-10-10 15:55:56 +030015183 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
Ville Syrjäläa168f5b2016-08-05 20:00:17 +030015184}
15185
/*
 * Bring a crtc's hardware and bookkeeping state into a consistent,
 * known-good configuration after hw state readout: clear BIOS debug
 * leftovers, disable stray non-primary planes, turn the pipe off if it
 * has no encoders, and initialize FIFO underrun reporting state.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	if (crtc_state->base.active) {
		struct intel_plane *plane;

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15249
/*
 * Fix up an encoder whose readout state is inconsistent: if it has an
 * active connector but no active pipe (typically fallout from resume
 * register restore), manually run its disable hooks and clamp the
 * associated connector to off. Finally tell the ACPI opregion about the
 * encoder's sanitized on/off state.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			/* Mirror the normal disable sequence: disable first,
			 * then the optional post_disable hook. */
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
15293
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015294void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
Krzysztof Mazur0fde9012012-12-19 11:03:41 +010015295{
Tvrtko Ursulin920a14b2016-10-14 10:13:44 +010015296 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
Krzysztof Mazur0fde9012012-12-19 11:03:41 +010015297
Imre Deak04098752014-02-18 00:02:16 +020015298 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15299 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
Tvrtko Ursulin29b74b72016-11-16 08:55:39 +000015300 i915_disable_vga(dev_priv);
Imre Deak04098752014-02-18 00:02:16 +020015301 }
15302}
15303
/*
 * Power-well-safe wrapper around i915_redisable_vga_power_on(): only
 * touches the VGA register if the VGA power domain is already enabled,
 * grabbing a reference for the duration of the access.
 */
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15320
/* FIXME read out full plane state for all planes */
/*
 * Read each plane's enable state (and the pipe it is attached to) from
 * the hardware, record the visibility in the owning crtc's state, then
 * fix up the per-crtc active-plane bookkeeping.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* Default pipe if the plane is disabled and get_hw_state()
		 * doesn't report one. */
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		/* Attribute the plane to the crtc of the pipe it is
		 * actually feeding, which may differ from plane->pipe
		 * on old hardware. */
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			      plane->base.base.id, plane->base.name,
			      enableddisabled(visible), pipe_name(pipe));
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
15353
/*
 * Read the current display hardware state into the software state
 * structures: crtcs first, then planes, shared DPLLs, encoders and
 * connectors, and finally the per-crtc derived state (modes, pixel rate,
 * cdclk/voltage minimums). Called with the BIOS/boot configuration still
 * live so we can take over without an intermediate modeset.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	/* Phase 1: per-crtc pipe config readout. The old crtc state is
	 * destroyed and rebuilt from scratch based on the hardware. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	/* Phase 2: plane visibility (needs crtc states from phase 1). */
	readout_plane_state(dev_priv);

	/* Phase 3: shared DPLL state and which crtcs use each PLL. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							 &pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Phase 4: encoder state, linking each active encoder to the crtc
	 * of the pipe it drives and reading back its config. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Phase 5: connector state and connector/encoder masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Phase 6: derived per-crtc state (mode structs, pixel rate,
	 * minimum cdclk/voltage, timestamping constants). */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc_state);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
15506
/*
 * Acquire the power-domain references that active encoders report via
 * their get_power_domains() hook, so software refcounts match the
 * domains the hardware is already using after state readout.
 */
static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		u64 get_domains;
		enum intel_display_power_domain domain;
		struct intel_crtc_state *crtc_state;

		/* Only some encoder types implement this hook. */
		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these require any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		get_domains = encoder->get_power_domains(encoder, crtc_state);
		/* get_domains is a bitmask; grab a reference per set bit. */
		for_each_power_domain(domain, get_domains)
			intel_display_power_get(dev_priv, domain);
	}
}
15533
/*
 * Apply display workarounds ("WAs") that must be in place before any
 * other display initialization or plane shutdown happens.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
15550
/*
 * Scan out the current hw modeset state and sanitize it to match the
 * driver's notion of the current state: turn off unused PLLs, fix up
 * plane mappings, and dump each pipe's resulting config.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder;
	int i;

	/* Hold INIT power so the readout/sanitize below can touch everything. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		drm_crtc_vblank_reset(&crtc->base);

		if (crtc->base.state->active)
			drm_crtc_vblank_on(&crtc->base);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		crtc_state = to_intel_crtc_state(crtc->base.state);
		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc_state,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Disable shared DPLLs the BIOS left enabled but no CRTC is using. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and where supported, sanitize) watermarks per platform. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev);
	}

	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		/*
		 * After sanitizing, no CRTC should still need extra power
		 * domains; if one does, warn and drop them.
		 */
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	intel_fbc_init_pipe_state(dev_priv);
}
Ville Syrjälä7d0bc1e2013-09-16 17:38:33 +030015635
/*
 * intel_display_resume - replay the atomic state stashed in
 * dev_priv->modeset_restore_state (if any), re-taking all modeset
 * locks and backing off on deadlock.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Claim ownership of the stashed state; we put it at the end. */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard w/w locking dance: retry lock_all until no deadlock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}
15669
/*
 * Cancel all per-connector work (modeset retry, HDCP check/prop work)
 * that hotplug handling may have queued. Must run after hpd interrupt
 * handling has been shut down so nothing can re-queue it.
 */
static void intel_hpd_poll_fini(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Only connectors that initialized the work have a func set. */
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp_shim) {
			cancel_delayed_work_sync(&connector->hdcp_check_work);
			cancel_work_sync(&connector->hdcp_prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
15687
/*
 * Tear down modeset state on driver unload. The ordering below is
 * load-bearing: interrupts first, then polling, then fbdev, then the
 * rest of the display machinery.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_workqueue(dev_priv->modeset_wq);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_teardown_gmbus(dev_priv);

	/* Last user of the workqueue flushed above; safe to destroy now. */
	destroy_workqueue(dev_priv->modeset_wq);
}
15728
Dave Airlie28d52042009-09-21 14:33:58 +100015729/*
15730 * set vga decode state - true == enable VGA decode
15731 */
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000015732int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
Dave Airlie28d52042009-09-21 14:33:58 +100015733{
Tvrtko Ursulin6315b5d2016-11-16 12:32:42 +000015734 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
Dave Airlie28d52042009-09-21 14:33:58 +100015735 u16 gmch_ctrl;
15736
Chris Wilson75fa0412014-02-07 18:37:02 -020015737 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15738 DRM_ERROR("failed to read control word\n");
15739 return -EIO;
15740 }
15741
Chris Wilsonc0cc8a52014-02-07 18:37:03 -020015742 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15743 return 0;
15744
Dave Airlie28d52042009-09-21 14:33:58 +100015745 if (state)
15746 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
15747 else
15748 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
Chris Wilson75fa0412014-02-07 18:37:02 -020015749
15750 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
15751 DRM_ERROR("failed to write control word\n");
15752 return -EIO;
15753 }
15754
Dave Airlie28d52042009-09-21 14:33:58 +100015755 return 0;
15756}
Chris Wilsonc4a1d9e2010-11-21 13:12:35 +000015757
Chris Wilson98a2f412016-10-12 10:05:18 +010015758#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
15759
/* Snapshot of display registers captured at GPU error time. */
struct intel_display_error_state {

	u32 power_well_driver;

	/* Number of valid entries in transcoder[] below. */
	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* False if the pipe's power domain was off at capture time;
		 * the remaining fields are then not filled in. */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* As above: registers only captured when the domain was on. */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		/* Timing registers. */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
15803
/*
 * Capture a snapshot of the display registers for the error state.
 * May be called from atomic context (uses GFP_ATOMIC); returns NULL if
 * there are no pipes or the allocation fails. Registers behind a
 * powered-down domain are skipped (see power_domain_on flags).
 * Caller owns the returned allocation.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		/* Don't touch registers whose power domain is off. */
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		/* DSPSIZE/DSPPOS only exist on gen2/3. */
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
15883
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Pretty-print a previously captured intel_display_error_state into the
 * error state buffer. Tolerates a NULL @error (no-op). Mirrors the
 * gen-dependent capture conditions so only captured fields are printed.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, " Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, " SRC: %08x\n", error->pipe[i].source);
		err_printf(m, " STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->plane[i].control);
		err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, " SIZE: %08x\n", error->plane[i].size);
			err_printf(m, " POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, " SURF: %08x\n", error->plane[i].surface);
			err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, " POS: %08x\n", error->cursor[i].position);
		err_printf(m, " BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, " Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
Chris Wilson98a2f412016-10-12 10:05:18 +010015941
15942#endif