/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

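/*
 * Illustrative expansion (not part of the driver): GEN5_IRQ_RESET(DE)
 * would first mask everything in DEIMR, then zero DEIER so nothing new
 * can be asserted, and finally write-clear DEIIR twice, since the IIR
 * can hold two queued events (see the comment above). Each write is
 * followed by a posting read to flush it to the hardware.
 */
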
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent the read-modify-write cycles
 * from interfering with each other, these bits are protected by a
 * spinlock. Since this function is usually not called from a context
 * where the lock is held already, this function acquires the lock
 * itself. A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

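/*
 * Illustrative sketch (not part of the driver): a caller in process
 * context that wants to unmask the CRT hotplug interrupt while leaving
 * the other HPD enable bits untouched could do
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * and later mask it again by passing 0 as the @bits argument:
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN, 0);
 */
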
/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

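/*
 * Illustrative sketch (not part of the driver): the enable/disable pair
 * above reduces to "update these bits, enable them all" versus "update
 * these bits, enable none". Unmasking render user interrupts under the
 * irq spinlock could look like
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	gen5_enable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 *
 * GT_RENDER_USER_INTERRUPT is assumed here as a typical GT interrupt
 * bit; it is defined in i915_reg.h, not in this file.
 */
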
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	/* IIR can queue up to two identical events, so clear it twice. */
	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* though a barrier is missing here, we don't really need one */
}

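/*
 * Illustrative sketch (not part of the driver): a typical lifecycle for
 * a PM event bit, mirroring how the RPS code below uses these helpers:
 * clear stale IIR bits, enable in IER and unmask in IMR, and later mask
 * and disable again. PM_EVENT_BIT is a hypothetical placeholder for one
 * of the GEN6_PM_* interrupt bits.
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	gen6_reset_pm_iir(dev_priv, PM_EVENT_BIT);
 *	gen6_enable_pm_irq(dev_priv, PM_EVENT_BIT);
 *	...
 *	gen6_disable_pm_irq(dev_priv, PM_EVENT_BIT);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */
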
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

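/*
 * Illustrative sketch (not part of the driver): these IMR updaters all
 * share the same mask/enable convention. For example, unmasking the PCH
 * port B hotplug interrupt via SDEIMR could look like
 *
 *	ibx_display_interrupt_update(dev_priv, SDE_PORTB_HOTPLUG,
 *				     SDE_PORTB_HOTPLUG);
 *
 * while passing 0 as the last argument would mask the same bit again.
 * The caller must hold dev_priv->irq_lock in both cases.
 */
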
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipes B and C the same bit is MBZ (must be zero).
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipes B and C we don't support the PSR interrupt yet; on pipe
	 * A the same bit is used for perf counters, which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

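/*
 * Worked example (illustrative): for the sprite 0 flip done event the
 * enable bit is not simply the status bit shifted by 16, which is why
 * the helper above special-cases it. Starting from
 * status_mask = SPRITE0_FLIP_DONE_INT_STATUS_VLV, the initial
 * "status_mask << 16" guess is masked off again and the VLV-specific
 * SPRITE0_FLIP_DONE_INT_EN_VLV bit is OR'd back in, so the returned
 * enable_mask carries the correct enable bit for this platform.
 */
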
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

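/*
 * Worked example (illustrative numbers, not from real hardware): with
 * htotal = 100, hsync_start = 92 and crtc_vblank_start = 90 lines, the
 * code above computes vbl_start = 90 * 100 - (100 - 92) = 8992 pixels.
 * If the latched pixel counter reads >= 8992, the hardware frame counter
 * (which only increments at the start of active) is considered one frame
 * behind, and the "+ (pixel >= vbl_start)" term corrects for that.
 */
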
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to keep
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

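/*
 * Worked example (illustrative numbers): with vbl_start = 720,
 * vbl_end = 725 and vtotal = 726 lines, a scanline position of 722
 * lies inside vblank and is reported as *vpos = 722 - 725 = -3,
 * counting up towards 0 at vbl_end. A position of 10, outside vblank,
 * is reported as 10 + 726 - 725 = 11, i.e. lines elapsed since vbl_end.
 */
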
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *rq = NULL;
	struct intel_wait *wait;

	atomic_inc(&engine->irq_count);
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/* We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(intel_engine_get_seqno(engine),
				      wait->seqno) &&
		    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			      &wait->request->fence.flags))
			rq = i915_gem_request_get(wait->request);

		wake_up_process(wait->tsk);
	} else {
		__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		dma_fence_signal(&rq->fence);
		i915_gem_request_put(rq);
	}

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * dev_priv->rps.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * dev_priv->rps.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	dev_priv->rps.ei = now;
	return events;
}

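/*
 * Illustrative reading of the comparison above (not a hardware-verified
 * derivation): both sides are scaled into the same fixed-point unit, so
 * "c0 > time * up_threshold" effectively asks whether the busiest
 * engine's C0 residency over the evaluation interval exceeded
 * up_threshold percent. E.g. with up_threshold = 95, the up event fires
 * only when the GPU was busier than 95% of the elapsed time.
 */
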
Ben Widawsky4912d042011-04-25 11:25:20 -07001094static void gen6_pm_rps_work(struct work_struct *work)
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001095{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001096 struct drm_i915_private *dev_priv =
1097 container_of(work, struct drm_i915_private, rps.work);
Chris Wilson7c0a16a2017-03-09 21:12:32 +00001098 bool client_boost = false;
Chris Wilson8d3afd72015-05-21 21:01:47 +01001099 int new_delay, adj, min, max;
Chris Wilson7c0a16a2017-03-09 21:12:32 +00001100 u32 pm_iir = 0;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001101
Daniel Vetter59cdb632013-07-04 23:35:28 +02001102 spin_lock_irq(&dev_priv->irq_lock);
Chris Wilson7c0a16a2017-03-09 21:12:32 +00001103 if (dev_priv->rps.interrupts_enabled) {
1104 pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
Chris Wilson7b92c1b2017-06-28 13:35:48 +01001105 client_boost = atomic_read(&dev_priv->rps.num_waiters);
Imre Deakd4d70aa2014-11-19 15:30:04 +02001106 }
Daniel Vetter59cdb632013-07-04 23:35:28 +02001107 spin_unlock_irq(&dev_priv->irq_lock);
Ben Widawsky4912d042011-04-25 11:25:20 -07001108
Paulo Zanoni60611c12013-08-15 11:50:01 -03001109 /* Make sure we didn't queue anything we're not going to process. */
Deepak Sa6706b42014-03-15 20:23:22 +05301110 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
Chris Wilson8d3afd72015-05-21 21:01:47 +01001111 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
Chris Wilson7c0a16a2017-03-09 21:12:32 +00001112 goto out;
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001113
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001114 mutex_lock(&dev_priv->rps.hw_lock);
Chris Wilson7b9e0ae2012-04-28 08:56:39 +01001115
Chris Wilson43cf3bf2015-03-18 09:48:22 +00001116 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1117
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001118 adj = dev_priv->rps.last_adj;
Chris Wilsonedcf2842015-04-07 16:20:29 +01001119 new_delay = dev_priv->rps.cur_freq;
Chris Wilson8d3afd72015-05-21 21:01:47 +01001120 min = dev_priv->rps.min_freq_softlimit;
1121 max = dev_priv->rps.max_freq_softlimit;
Chris Wilson7b92c1b2017-06-28 13:35:48 +01001122 if (client_boost)
Chris Wilson29ecd78d2016-07-13 09:10:35 +01001123 max = dev_priv->rps.max_freq;
1124 if (client_boost && new_delay < dev_priv->rps.boost_freq) {
1125 new_delay = dev_priv->rps.boost_freq;
Chris Wilson8d3afd72015-05-21 21:01:47 +01001126 adj = 0;
1127 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001128 if (adj > 0)
1129 adj *= 2;
Chris Wilsonedcf2842015-04-07 16:20:29 +01001130 else /* CHV needs even encode values */
1131 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
Sagar Arun Kamble7e79a682017-01-20 09:18:24 +05301132
1133 if (new_delay >= dev_priv->rps.max_freq_softlimit)
1134 adj = 0;
Chris Wilson7b92c1b2017-06-28 13:35:48 +01001135 } else if (client_boost) {
Chris Wilsonf5a4c672015-04-27 13:41:23 +01001136 adj = 0;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001137 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
Ben Widawskyb39fb292014-03-19 18:31:11 -07001138 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1139 new_delay = dev_priv->rps.efficient_freq;
Chris Wilson17136d52017-02-10 15:03:47 +00001140 else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
Ben Widawskyb39fb292014-03-19 18:31:11 -07001141 new_delay = dev_priv->rps.min_freq_softlimit;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001142 adj = 0;
1143 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1144 if (adj < 0)
1145 adj *= 2;
Chris Wilsonedcf2842015-04-07 16:20:29 +01001146 else /* CHV needs even encode values */
1147 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
Sagar Arun Kamble7e79a682017-01-20 09:18:24 +05301148
1149 if (new_delay <= dev_priv->rps.min_freq_softlimit)
1150 adj = 0;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001151 } else { /* unknown event */
Chris Wilsonedcf2842015-04-07 16:20:29 +01001152 adj = 0;
Chris Wilsondd75fdc2013-09-25 17:34:57 +01001153 }
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001154
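	/*
	 * Annotation (not in the original source): last_adj gives the ramp
	 * a simple momentum term. E.g. three consecutive up-threshold
	 * interrupts step the frequency by +1, +2, +4 (+2, +4, +8 on CHV,
	 * which needs even encodings), while a boost, a down-timeout or an
	 * unknown event resets the step back to 0.
	 */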
Chris Wilsonedcf2842015-04-07 16:20:29 +01001155 dev_priv->rps.last_adj = adj;
1156
Ben Widawsky79249632012-09-07 19:43:42 -07001157 /* sysfs frequency interfaces may have snuck in while servicing the
1158 * interrupt
1159 */
Chris Wilsonedcf2842015-04-07 16:20:29 +01001160 new_delay += adj;
Chris Wilson8d3afd72015-05-21 21:01:47 +01001161 new_delay = clamp_t(int, new_delay, min, max);
Deepak S27544362014-01-27 21:35:05 +05301162
Chris Wilson9fcee2f2017-01-26 10:19:19 +00001163 if (intel_set_rps(dev_priv, new_delay)) {
1164 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
1165 dev_priv->rps.last_adj = 0;
1166 }
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001167
Jesse Barnes4fc688c2012-11-02 11:14:01 -07001168 mutex_unlock(&dev_priv->rps.hw_lock);
Chris Wilson7c0a16a2017-03-09 21:12:32 +00001169
1170out:
1171 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1172 spin_lock_irq(&dev_priv->irq_lock);
1173 if (dev_priv->rps.interrupts_enabled)
1174 gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
1175 spin_unlock_irq(&dev_priv->irq_lock);
Jesse Barnes3b8d8d92010-12-17 14:19:02 -08001176}
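/*
 * Annotation (not in the original source): this work is queued from
 * gen6_rps_irq_handler() below, which first masks the RPS bits in PMIMR;
 * they stay masked until the unmask at the tail of gen6_pm_rps_work(), so
 * a fresh RPS interrupt cannot fire while the work is still running.
 */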
1177
Ben Widawskye3689192012-05-25 16:56:22 -07001178
1179/**
1180 * ivybridge_parity_work - Workqueue function run when a parity error
1181 * interrupt occurs.
1182 * @work: workqueue struct
1183 *
1184 * Doesn't actually do anything except notify userspace. As a consequence of
1185 * this event, userspace should try to remap the bad rows, since
1186 * statistically the same row is more likely to go bad again.
1187 */
1188static void ivybridge_parity_work(struct work_struct *work)
1189{
Jani Nikula2d1013d2014-03-31 14:27:17 +03001190 struct drm_i915_private *dev_priv =
Joonas Lahtinencefcff82017-04-28 10:58:39 +03001191 container_of(work, typeof(*dev_priv), l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001192 u32 error_status, row, bank, subbank;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001193 char *parity_event[6];
Ben Widawskye3689192012-05-25 16:56:22 -07001194 uint32_t misccpctl;
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001195 uint8_t slice = 0;
Ben Widawskye3689192012-05-25 16:56:22 -07001196
1197 /* We must turn off DOP level clock gating to access the L3 registers.
1198 * In order to prevent a get/put style interface, acquire struct mutex
1199 * any time we access those registers.
1200 */
Chris Wilson91c8a322016-07-05 10:40:23 +01001201 mutex_lock(&dev_priv->drm.struct_mutex);
Ben Widawskye3689192012-05-25 16:56:22 -07001202
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001203 /* If we've screwed up tracking, just let the interrupt fire again */
1204 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1205 goto out;
1206
Ben Widawskye3689192012-05-25 16:56:22 -07001207 misccpctl = I915_READ(GEN7_MISCCPCTL);
1208 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1209 POSTING_READ(GEN7_MISCCPCTL);
1210
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001211 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001212 i915_reg_t reg;
Ben Widawskye3689192012-05-25 16:56:22 -07001213
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001214 slice--;
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001215 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001216 break;
1217
1218 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1219
Ville Syrjälä6fa1c5f2015-11-04 23:20:02 +02001220 reg = GEN7_L3CDERRST1(slice);
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001221
1222 error_status = I915_READ(reg);
1223 row = GEN7_PARITY_ERROR_ROW(error_status);
1224 bank = GEN7_PARITY_ERROR_BANK(error_status);
1225 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1226
1227 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1228 POSTING_READ(reg);
1229
1230 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1231 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1232 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1233 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1234 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1235 parity_event[5] = NULL;
1236
Chris Wilson91c8a322016-07-05 10:40:23 +01001237 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001238 KOBJ_CHANGE, parity_event);
1239
1240 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1241 slice, row, bank, subbank);
1242
1243 kfree(parity_event[4]);
1244 kfree(parity_event[3]);
1245 kfree(parity_event[2]);
1246 kfree(parity_event[1]);
1247 }
Ben Widawskye3689192012-05-25 16:56:22 -07001248
1249 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1250
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001251out:
1252 WARN_ON(dev_priv->l3_parity.which_slice);
Daniel Vetter4cb21832014-09-15 14:55:26 +02001253 spin_lock_irq(&dev_priv->irq_lock);
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001254 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
Daniel Vetter4cb21832014-09-15 14:55:26 +02001255 spin_unlock_irq(&dev_priv->irq_lock);
Ben Widawskye3689192012-05-25 16:56:22 -07001256
Chris Wilson91c8a322016-07-05 10:40:23 +01001257 mutex_unlock(&dev_priv->drm.struct_mutex);
Ben Widawskye3689192012-05-25 16:56:22 -07001258}
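/*
 * Annotation (not in the original source): the uevent emitted above
 * carries the I915_L3_PARITY_UEVENT key plus the ROW=, BANK=, SUBBANK=
 * and SLICE= values; userspace can feed those coordinates back through
 * the driver's l3_parity sysfs interface to remap the faulty row.
 */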
1259
Ville Syrjälä261e40b2016-04-13 21:19:57 +03001260static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1261 u32 iir)
Ben Widawskye3689192012-05-25 16:56:22 -07001262{
Ville Syrjälä261e40b2016-04-13 21:19:57 +03001263 if (!HAS_L3_DPF(dev_priv))
Ben Widawskye3689192012-05-25 16:56:22 -07001264 return;
1265
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001266 spin_lock(&dev_priv->irq_lock);
Ville Syrjälä261e40b2016-04-13 21:19:57 +03001267 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
Daniel Vetterd0ecd7e2013-07-04 23:35:25 +02001268 spin_unlock(&dev_priv->irq_lock);
Ben Widawskye3689192012-05-25 16:56:22 -07001269
Ville Syrjälä261e40b2016-04-13 21:19:57 +03001270 iir &= GT_PARITY_ERROR(dev_priv);
Ben Widawsky35a85ac2013-09-19 11:13:41 -07001271 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1272 dev_priv->l3_parity.which_slice |= 1 << 1;
1273
1274 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1275 dev_priv->l3_parity.which_slice |= 1 << 0;
1276
Daniel Vettera4da4fa2012-11-02 19:55:07 +01001277 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
Ben Widawskye3689192012-05-25 16:56:22 -07001278}
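/*
 * Annotation (not in the original source): parity interrupts are disabled
 * here and only re-enabled at the end of ivybridge_parity_work(), so the
 * comparatively slow uevent path never races with a fresh parity
 * interrupt.
 */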
1279
Ville Syrjälä261e40b2016-04-13 21:19:57 +03001280static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001281 u32 gt_iir)
1282{
Chris Wilsonf8973c22016-07-01 17:23:21 +01001283 if (gt_iir & GT_RENDER_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05301284 notify_ring(dev_priv->engine[RCS]);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001285 if (gt_iir & ILK_BSD_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05301286 notify_ring(dev_priv->engine[VCS]);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03001287}
1288
Ville Syrjälä261e40b2016-04-13 21:19:57 +03001289static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001290 u32 gt_iir)
1291{
Chris Wilsonf8973c22016-07-01 17:23:21 +01001292 if (gt_iir & GT_RENDER_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05301293 notify_ring(dev_priv->engine[RCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001294 if (gt_iir & GT_BSD_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05301295 notify_ring(dev_priv->engine[VCS]);
Ben Widawskycc609d52013-05-28 19:22:29 -07001296 if (gt_iir & GT_BLT_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05301297 notify_ring(dev_priv->engine[BCS]);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001298
Ben Widawskycc609d52013-05-28 19:22:29 -07001299 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1300 GT_BSD_CS_ERROR_INTERRUPT |
Daniel Vetteraaecdf62014-11-04 15:52:22 +01001301 GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1302 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
Ben Widawskye3689192012-05-25 16:56:22 -07001303
Ville Syrjälä261e40b2016-04-13 21:19:57 +03001304 if (gt_iir & GT_PARITY_ERROR(dev_priv))
1305 ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
Daniel Vettere7b4c6b2012-03-30 20:24:35 +02001306}
1307
Chris Wilson5d3d69d2017-05-17 13:10:06 +01001308static void
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001309gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
Nick Hoathfbcc1a02015-10-20 10:23:52 +01001310{
Chris Wilson31de7352017-03-16 12:56:18 +00001311 bool tasklet = false;
Chris Wilsonf7470262017-01-24 15:20:21 +00001312
1313 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
Chris Wilsona4b2b012017-05-17 13:10:01 +01001314 if (port_count(&engine->execlist_port[0])) {
Chris Wilson955a4b82017-05-17 13:10:07 +01001315 __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
Chris Wilsona4b2b012017-05-17 13:10:01 +01001316 tasklet = true;
1317 }
Chris Wilsonf7470262017-01-24 15:20:21 +00001318 }
Chris Wilson31de7352017-03-16 12:56:18 +00001319
1320 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
1321 notify_ring(engine);
1322 tasklet |= i915.enable_guc_submission;
1323 }
1324
1325 if (tasklet)
1326 tasklet_hi_schedule(&engine->irq_tasklet);
Nick Hoathfbcc1a02015-10-20 10:23:52 +01001327}
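/*
 * Annotation (not in the original source): the tasklet is kicked for two
 * reasons: a context-switch interrupt while requests are still queued in
 * the execlist ports, and user interrupts when GuC submission is enabled,
 * since the GuC submission path is driven from the same irq_tasklet.
 */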
1328
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001329static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
1330 u32 master_ctl,
1331 u32 gt_iir[4])
Ben Widawskyabd58f02013-11-02 21:07:09 -07001332{
Ben Widawskyabd58f02013-11-02 21:07:09 -07001333 irqreturn_t ret = IRQ_NONE;
1334
1335 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001336 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
1337 if (gt_iir[0]) {
1338 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001339 ret = IRQ_HANDLED;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001340 } else
1341 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1342 }
1343
Zhao Yakui85f9b5f2014-04-17 10:37:38 +08001344 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001345 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
1346 if (gt_iir[1]) {
1347 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
Ben Widawskyabd58f02013-11-02 21:07:09 -07001348 ret = IRQ_HANDLED;
Ben Widawskyabd58f02013-11-02 21:07:09 -07001349 } else
1350 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1351 }
1352
Chris Wilson74cdb332015-04-07 16:21:05 +01001353 if (master_ctl & GEN8_GT_VECS_IRQ) {
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001354 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
1355 if (gt_iir[3]) {
1356 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
Chris Wilson74cdb332015-04-07 16:21:05 +01001357 ret = IRQ_HANDLED;
Chris Wilson74cdb332015-04-07 16:21:05 +01001358 } else
1359 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1360 }
1361
Sagar Arun Kamble26705e22016-10-12 21:54:31 +05301362 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001363 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
Sagar Arun Kamble26705e22016-10-12 21:54:31 +05301364 if (gt_iir[2] & (dev_priv->pm_rps_events |
1365 dev_priv->pm_guc_events)) {
Chris Wilsoncb0d2052015-04-07 16:21:04 +01001366 I915_WRITE_FW(GEN8_GT_IIR(2),
Sagar Arun Kamble26705e22016-10-12 21:54:31 +05301367 gt_iir[2] & (dev_priv->pm_rps_events |
1368 dev_priv->pm_guc_events));
Oscar Mateo38cc46d2014-06-16 16:10:59 +01001369 ret = IRQ_HANDLED;
Ben Widawsky09610212014-05-15 20:58:08 +03001370 } else
1371 DRM_ERROR("The master control interrupt lied (PM)!\n");
1372 }
1373
Ben Widawskyabd58f02013-11-02 21:07:09 -07001374 return ret;
1375}
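/*
 * Annotation (not in the original source): the ack step deliberately uses
 * the raw _FW register accessors to keep the hot interrupt path cheap;
 * the IIR values sampled and cleared here are acted upon afterwards by
 * gen8_gt_irq_handler().
 */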
1376
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001377static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1378 u32 gt_iir[4])
1379{
1380 if (gt_iir[0]) {
Akash Goel3b3f1652016-10-13 22:44:48 +05301381 gen8_cs_irq_handler(dev_priv->engine[RCS],
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001382 gt_iir[0], GEN8_RCS_IRQ_SHIFT);
Akash Goel3b3f1652016-10-13 22:44:48 +05301383 gen8_cs_irq_handler(dev_priv->engine[BCS],
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001384 gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1385 }
1386
1387 if (gt_iir[1]) {
Akash Goel3b3f1652016-10-13 22:44:48 +05301388 gen8_cs_irq_handler(dev_priv->engine[VCS],
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001389 gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
Akash Goel3b3f1652016-10-13 22:44:48 +05301390 gen8_cs_irq_handler(dev_priv->engine[VCS2],
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001391 gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1392 }
1393
1394 if (gt_iir[3])
Akash Goel3b3f1652016-10-13 22:44:48 +05301395 gen8_cs_irq_handler(dev_priv->engine[VECS],
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001396 gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1397
1398 if (gt_iir[2] & dev_priv->pm_rps_events)
1399 gen6_rps_irq_handler(dev_priv, gt_iir[2]);
Sagar Arun Kamble26705e22016-10-12 21:54:31 +05301400
1401 if (gt_iir[2] & dev_priv->pm_guc_events)
1402 gen9_guc_irq_handler(dev_priv, gt_iir[2]);
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001403}
1404
Imre Deak63c88d22015-07-20 14:43:39 -07001405static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1406{
1407 switch (port) {
1408 case PORT_A:
Ville Syrjälä195baa02015-08-27 23:56:00 +03001409 return val & PORTA_HOTPLUG_LONG_DETECT;
Imre Deak63c88d22015-07-20 14:43:39 -07001410 case PORT_B:
1411 return val & PORTB_HOTPLUG_LONG_DETECT;
1412 case PORT_C:
1413 return val & PORTC_HOTPLUG_LONG_DETECT;
Imre Deak63c88d22015-07-20 14:43:39 -07001414 default:
1415 return false;
1416 }
1417}
1418
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03001419static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1420{
1421 switch (port) {
1422 case PORT_E:
1423 return val & PORTE_HOTPLUG_LONG_DETECT;
1424 default:
1425 return false;
1426 }
1427}
1428
Ville Syrjälä74c0b392015-08-27 23:56:07 +03001429static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1430{
1431 switch (port) {
1432 case PORT_A:
1433 return val & PORTA_HOTPLUG_LONG_DETECT;
1434 case PORT_B:
1435 return val & PORTB_HOTPLUG_LONG_DETECT;
1436 case PORT_C:
1437 return val & PORTC_HOTPLUG_LONG_DETECT;
1438 case PORT_D:
1439 return val & PORTD_HOTPLUG_LONG_DETECT;
1440 default:
1441 return false;
1442 }
1443}
1444
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03001445static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1446{
1447 switch (port) {
1448 case PORT_A:
1449 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1450 default:
1451 return false;
1452 }
1453}
1454
Jani Nikula676574d2015-05-28 15:43:53 +03001455static bool pch_port_hotplug_long_detect(enum port port, u32 val)
Dave Airlie13cf5502014-06-18 11:29:35 +10001456{
1457 switch (port) {
Dave Airlie13cf5502014-06-18 11:29:35 +10001458 case PORT_B:
Jani Nikula676574d2015-05-28 15:43:53 +03001459 return val & PORTB_HOTPLUG_LONG_DETECT;
Dave Airlie13cf5502014-06-18 11:29:35 +10001460 case PORT_C:
Jani Nikula676574d2015-05-28 15:43:53 +03001461 return val & PORTC_HOTPLUG_LONG_DETECT;
Dave Airlie13cf5502014-06-18 11:29:35 +10001462 case PORT_D:
Jani Nikula676574d2015-05-28 15:43:53 +03001463 return val & PORTD_HOTPLUG_LONG_DETECT;
1464 default:
1465 return false;
Dave Airlie13cf5502014-06-18 11:29:35 +10001466 }
1467}
1468
Jani Nikula676574d2015-05-28 15:43:53 +03001469static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
Dave Airlie13cf5502014-06-18 11:29:35 +10001470{
1471 switch (port) {
Dave Airlie13cf5502014-06-18 11:29:35 +10001472 case PORT_B:
Jani Nikula676574d2015-05-28 15:43:53 +03001473 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
Dave Airlie13cf5502014-06-18 11:29:35 +10001474 case PORT_C:
Jani Nikula676574d2015-05-28 15:43:53 +03001475 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
Dave Airlie13cf5502014-06-18 11:29:35 +10001476 case PORT_D:
Jani Nikula676574d2015-05-28 15:43:53 +03001477 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1478 default:
1479 return false;
Dave Airlie13cf5502014-06-18 11:29:35 +10001480 }
1481}
1482
Ville Syrjälä42db67d2015-08-28 21:26:27 +03001483/*
1484 * Get a bit mask of pins that have triggered, and which ones may be long.
1485 * This can be called multiple times with the same masks to accumulate
1486 * hotplug detection results from several registers.
1487 *
1488 * Note that the caller is expected to zero out the masks initially.
1489 */
Imre Deakfd63e2a2015-07-21 15:32:44 -07001490static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
Jani Nikula8c841e52015-06-18 13:06:17 +03001491 u32 hotplug_trigger, u32 dig_hotplug_reg,
Imre Deakfd63e2a2015-07-21 15:32:44 -07001492 const u32 hpd[HPD_NUM_PINS],
1493 bool long_pulse_detect(enum port port, u32 val))
Jani Nikula676574d2015-05-28 15:43:53 +03001494{
Jani Nikula8c841e52015-06-18 13:06:17 +03001495 enum port port;
Jani Nikula676574d2015-05-28 15:43:53 +03001496 int i;
1497
Jani Nikula676574d2015-05-28 15:43:53 +03001498 for_each_hpd_pin(i) {
Jani Nikula8c841e52015-06-18 13:06:17 +03001499 if ((hpd[i] & hotplug_trigger) == 0)
1500 continue;
Jani Nikula676574d2015-05-28 15:43:53 +03001501
Jani Nikula8c841e52015-06-18 13:06:17 +03001502 *pin_mask |= BIT(i);
1503
Imre Deakcc24fcd2015-07-21 15:32:45 -07001504 if (!intel_hpd_pin_to_port(i, &port))
1505 continue;
1506
Imre Deakfd63e2a2015-07-21 15:32:44 -07001507 if (long_pulse_detect(port, dig_hotplug_reg))
Jani Nikula8c841e52015-06-18 13:06:17 +03001508 *long_mask |= BIT(i);
Jani Nikula676574d2015-05-28 15:43:53 +03001509 }
1510
1511 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1512 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1513
1514}
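/*
 * Annotation (not in the original source): a sketch of the accumulation
 * pattern, mirroring spt_irq_handler() further below (the trigger and
 * dig_reg names are placeholders):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trigger1, dig_reg1,
 *			   hpd_spt, spt_port_hotplug_long_detect);
 *	intel_get_hpd_pins(&pin_mask, &long_mask, trigger2, dig_reg2,
 *			   hpd_spt, spt_port_hotplug2_long_detect);
 *	if (pin_mask)
 *		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */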
1515
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001516static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001517{
Daniel Vetter28c70f12012-12-01 13:53:45 +01001518 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetter515ac2b2012-12-01 13:53:44 +01001519}
1520
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001521static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
Daniel Vetterce99c252012-12-01 13:53:47 +01001522{
Daniel Vetter9ee32fea2012-12-01 13:53:48 +01001523 wake_up_all(&dev_priv->gmbus_wait_queue);
Daniel Vetterce99c252012-12-01 13:53:47 +01001524}
1525
Shuang He8bf1e9f2013-10-15 18:55:27 +01001526#if defined(CONFIG_DEBUG_FS)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001527static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1528 enum pipe pipe,
Daniel Vetter277de952013-10-18 16:37:07 +02001529 uint32_t crc0, uint32_t crc1,
1530 uint32_t crc2, uint32_t crc3,
1531 uint32_t crc4)
Shuang He8bf1e9f2013-10-15 18:55:27 +01001532{
Shuang He8bf1e9f2013-10-15 18:55:27 +01001533 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1534 struct intel_pipe_crc_entry *entry;
Tomeu Vizoso8c6b7092017-01-10 14:43:04 +01001535 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1536 struct drm_driver *driver = dev_priv->drm.driver;
1537 uint32_t crcs[5];
Damien Lespiauac2300d2013-10-15 18:55:30 +01001538 int head, tail;
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001539
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001540 spin_lock(&pipe_crc->lock);
Tomeu Vizoso8c6b7092017-01-10 14:43:04 +01001541 if (pipe_crc->source) {
1542 if (!pipe_crc->entries) {
1543 spin_unlock(&pipe_crc->lock);
1544 DRM_DEBUG_KMS("spurious interrupt\n");
1545 return;
1546 }
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001547
Tomeu Vizoso8c6b7092017-01-10 14:43:04 +01001548 head = pipe_crc->head;
1549 tail = pipe_crc->tail;
1550
1551 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1552 spin_unlock(&pipe_crc->lock);
1553 DRM_ERROR("CRC buffer overflowing\n");
1554 return;
1555 }
1556
1557 entry = &pipe_crc->entries[head];
1558
1559 entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
1560 entry->crc[0] = crc0;
1561 entry->crc[1] = crc1;
1562 entry->crc[2] = crc2;
1563 entry->crc[3] = crc3;
1564 entry->crc[4] = crc4;
1565
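		/*
		 * Annotation (not in the original source): the masked
		 * increment below relies on INTEL_PIPE_CRC_ENTRIES_NR being
		 * a power of two.
		 */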
1566 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1567 pipe_crc->head = head;
1568
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001569 spin_unlock(&pipe_crc->lock);
Damien Lespiau0c912c72013-10-15 18:55:37 +01001570
Tomeu Vizoso8c6b7092017-01-10 14:43:04 +01001571 wake_up_interruptible(&pipe_crc->wq);
1572 } else {
1573 /*
1574 * For some not yet identified reason, the first CRC is
1575 * bonkers. So let's just wait for the next vblank and read
1576 * out the buggy result.
1577 *
1578 * On CHV sometimes the second CRC is bonkers as well, so
1579 * don't trust that one either.
1580 */
1581 if (pipe_crc->skipped == 0 ||
1582 (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
1583 pipe_crc->skipped++;
1584 spin_unlock(&pipe_crc->lock);
1585 return;
1586 }
Damien Lespiaud538bbd2013-10-21 14:29:30 +01001587 spin_unlock(&pipe_crc->lock);
Tomeu Vizoso8c6b7092017-01-10 14:43:04 +01001588 crcs[0] = crc0;
1589 crcs[1] = crc1;
1590 crcs[2] = crc2;
1591 crcs[3] = crc3;
1592 crcs[4] = crc4;
Tomeu Vizoso246ee522017-01-10 14:43:05 +01001593 drm_crtc_add_crc_entry(&crtc->base, true,
Daniel Vetterca814b22017-05-24 16:51:47 +02001594 drm_crtc_accurate_vblank_count(&crtc->base),
Tomeu Vizoso246ee522017-01-10 14:43:05 +01001595 crcs);
Damien Lespiaub2c88f52013-10-15 18:55:29 +01001596 }
Shuang He8bf1e9f2013-10-15 18:55:27 +01001597}
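/*
 * Annotation (not in the original source): two consumers share this
 * function: the legacy i915-specific CRC ring buffer when pipe_crc->source
 * is set, and the generic DRM CRC ABI via drm_crtc_add_crc_entry()
 * otherwise, where the first (and on CHV possibly also the second) bogus
 * CRC after enabling is skipped.
 */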
Daniel Vetter277de952013-10-18 16:37:07 +02001598#else
1599static inline void
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001600display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1601 enum pipe pipe,
Daniel Vetter277de952013-10-18 16:37:07 +02001602 uint32_t crc0, uint32_t crc1,
1603 uint32_t crc2, uint32_t crc3,
1604 uint32_t crc4) {}
1605#endif
Daniel Vettereba94eb2013-10-16 22:55:46 +02001606
Daniel Vetter277de952013-10-18 16:37:07 +02001607
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001608static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1609 enum pipe pipe)
Daniel Vetter5a69b892013-10-16 22:55:52 +02001610{
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001611 display_pipe_crc_irq_handler(dev_priv, pipe,
Daniel Vetter277de952013-10-18 16:37:07 +02001612 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1613 0, 0, 0, 0);
Daniel Vetter5a69b892013-10-16 22:55:52 +02001614}
1615
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001616static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1617 enum pipe pipe)
Daniel Vettereba94eb2013-10-16 22:55:46 +02001618{
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001619 display_pipe_crc_irq_handler(dev_priv, pipe,
Daniel Vetter277de952013-10-18 16:37:07 +02001620 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1621 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1622 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1623 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1624 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
Daniel Vettereba94eb2013-10-16 22:55:46 +02001625}
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001626
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001627static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1628 enum pipe pipe)
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001629{
Daniel Vetter0b5c5ed2013-10-16 22:55:53 +02001630 uint32_t res1, res2;
1631
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001632 if (INTEL_GEN(dev_priv) >= 3)
Daniel Vetter0b5c5ed2013-10-16 22:55:53 +02001633 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1634 else
1635 res1 = 0;
1636
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001637 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
Daniel Vetter0b5c5ed2013-10-16 22:55:53 +02001638 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1639 else
1640 res2 = 0;
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001641
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001642 display_pipe_crc_irq_handler(dev_priv, pipe,
Daniel Vetter277de952013-10-18 16:37:07 +02001643 I915_READ(PIPE_CRC_RES_RED(pipe)),
1644 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1645 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1646 res1, res2);
Daniel Vetter5b3a8562013-10-16 22:55:48 +02001647}
Shuang He8bf1e9f2013-10-15 18:55:27 +01001648
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001649/* The RPS events need forcewake, so we add them to a work queue and mask their
1650 * IMR bits until the work is done. Other interrupts can be processed without
1651 * the work queue. */
1652static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
Ben Widawskybaf02a12013-05-28 19:22:24 -07001653{
Deepak Sa6706b42014-03-15 20:23:22 +05301654 if (pm_iir & dev_priv->pm_rps_events) {
Daniel Vetter59cdb632013-07-04 23:35:28 +02001655 spin_lock(&dev_priv->irq_lock);
Akash Goelf4e9af42016-10-12 21:54:30 +05301656 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
Imre Deakd4d70aa2014-11-19 15:30:04 +02001657 if (dev_priv->rps.interrupts_enabled) {
1658 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
Chris Wilsonc33d2472016-07-04 08:08:36 +01001659 schedule_work(&dev_priv->rps.work);
Imre Deakd4d70aa2014-11-19 15:30:04 +02001660 }
Daniel Vetter59cdb632013-07-04 23:35:28 +02001661 spin_unlock(&dev_priv->irq_lock);
Ben Widawskybaf02a12013-05-28 19:22:24 -07001662 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001663
Pandiyan, Dhinakaranbca2bf22017-07-18 11:28:00 -07001664 if (INTEL_GEN(dev_priv) >= 8)
Imre Deakc9a9a262014-11-05 20:48:37 +02001665 return;
1666
Joonas Lahtinen2d1fe072016-04-07 11:08:05 +03001667 if (HAS_VEBOX(dev_priv)) {
Paulo Zanoni1403c0d2013-08-15 11:51:32 -03001668 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
Akash Goel3b3f1652016-10-13 22:44:48 +05301669 notify_ring(dev_priv->engine[VECS]);
Ben Widawsky12638c52013-05-28 19:22:31 -07001670
Daniel Vetteraaecdf62014-11-04 15:52:22 +01001671 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1672 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
Ben Widawsky12638c52013-05-28 19:22:31 -07001673 }
Ben Widawskybaf02a12013-05-28 19:22:24 -07001674}
1675
Sagar Arun Kamble26705e22016-10-12 21:54:31 +05301676static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
1677{
1678 if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
Sagar Arun Kamble4100b2a2016-10-12 21:54:32 +05301679		/* Sample the log buffer flush related bits & clear them out
1680		 * from the message identity register right away, to minimize
1681		 * the probability of losing a flush interrupt when there are
1682		 * back-to-back flush interrupts.
1683		 * A new flush interrupt can arrive for a different log buffer
1684		 * type (e.g. for ISR) whilst the host is handling one (for
1685		 * DPC). Since the same bit is used in the message register for
1686		 * ISR & DPC, the GuC could set the bit for the 2nd interrupt
1687		 * while the host clears it when handling the 1st interrupt.
1688		 */
1689 u32 msg, flush;
1690
1691 msg = I915_READ(SOFT_SCRATCH(15));
Arkadiusz Hilera80bc452016-11-25 18:59:34 +01001692 flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
1693 INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
Sagar Arun Kamble4100b2a2016-10-12 21:54:32 +05301694 if (flush) {
1695 /* Clear the message bits that are handled */
1696 I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
1697
1698 /* Handle flush interrupt in bottom half */
Oscar Mateoe7465472017-03-22 10:39:48 -07001699 queue_work(dev_priv->guc.log.runtime.flush_wq,
1700 &dev_priv->guc.log.runtime.flush_work);
Akash Goel5aa1ee42016-10-12 21:54:36 +05301701
1702 dev_priv->guc.log.flush_interrupt_count++;
Sagar Arun Kamble4100b2a2016-10-12 21:54:32 +05301703 } else {
1704			/* Leaving the unhandled event bits set does not
1705			 * re-trigger the interrupt.
1706			 */
1707 }
Sagar Arun Kamble26705e22016-10-12 21:54:31 +05301708 }
1709}
1710
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001711static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1712 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
Imre Deakc1874ed2014-02-04 21:35:46 +02001713{
Imre Deakc1874ed2014-02-04 21:35:46 +02001714 int pipe;
1715
Imre Deak58ead0d2014-02-04 21:35:47 +02001716 spin_lock(&dev_priv->irq_lock);
Ville Syrjälä1ca993d2016-02-18 21:54:26 +02001717
1718 if (!dev_priv->display_irqs_enabled) {
1719 spin_unlock(&dev_priv->irq_lock);
1720 return;
1721 }
1722
Damien Lespiau055e3932014-08-18 13:49:10 +01001723 for_each_pipe(dev_priv, pipe) {
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001724 i915_reg_t reg;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001725 u32 mask, iir_bit = 0;
Imre Deak91d181d2014-02-10 18:42:49 +02001726
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001727 /*
1728 * PIPESTAT bits get signalled even when the interrupt is
1729 * disabled with the mask bits, and some of the status bits do
1730 * not generate interrupts at all (like the underrun bit). Hence
1731 * we need to be careful that we only handle what we want to
1732 * handle.
1733 */
Daniel Vetter0f239f42014-09-30 10:56:49 +02001734
1735		/* FIFO underruns are filtered in the underrun handler. */
1736 mask = PIPE_FIFO_UNDERRUN_STATUS;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001737
1738 switch (pipe) {
1739 case PIPE_A:
1740 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1741 break;
1742 case PIPE_B:
1743 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1744 break;
Ville Syrjälä3278f672014-04-09 13:28:49 +03001745 case PIPE_C:
1746 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1747 break;
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001748 }
1749 if (iir & iir_bit)
1750 mask |= dev_priv->pipestat_irq_mask[pipe];
1751
1752 if (!mask)
Imre Deak91d181d2014-02-10 18:42:49 +02001753 continue;
1754
1755 reg = PIPESTAT(pipe);
Daniel Vetterbbb5eeb2014-02-12 17:55:36 +01001756 mask |= PIPESTAT_INT_ENABLE_MASK;
1757 pipe_stats[pipe] = I915_READ(reg) & mask;
Imre Deakc1874ed2014-02-04 21:35:46 +02001758
1759 /*
1760 * Clear the PIPE*STAT regs before the IIR
1761 */
Imre Deak91d181d2014-02-10 18:42:49 +02001762 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1763 PIPESTAT_INT_STATUS_MASK))
Imre Deakc1874ed2014-02-04 21:35:46 +02001764 I915_WRITE(reg, pipe_stats[pipe]);
1765 }
Imre Deak58ead0d2014-02-04 21:35:47 +02001766 spin_unlock(&dev_priv->irq_lock);
Ville Syrjälä2ecb8ca2016-04-13 21:19:55 +03001767}
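/*
 * Annotation (not in the original source): PIPESTAT handling is split into
 * an ack phase (sample and clear the registers under irq_lock, above) and
 * a handler phase acting on the sampled pipe_stats[] (below), mirroring
 * the GT ack/handle split.
 */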
1768
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001769static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
Ville Syrjälä2ecb8ca2016-04-13 21:19:55 +03001770 u32 pipe_stats[I915_MAX_PIPES])
1771{
Ville Syrjälä2ecb8ca2016-04-13 21:19:55 +03001772 enum pipe pipe;
Imre Deakc1874ed2014-02-04 21:35:46 +02001773
Damien Lespiau055e3932014-08-18 13:49:10 +01001774 for_each_pipe(dev_priv, pipe) {
Daniel Vetterfd3a4022017-07-20 19:57:51 +02001775 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1776 drm_handle_vblank(&dev_priv->drm, pipe);
Imre Deakc1874ed2014-02-04 21:35:46 +02001777
1778 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001779 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
Imre Deakc1874ed2014-02-04 21:35:46 +02001780
Daniel Vetter1f7247c2014-09-30 10:56:48 +02001781 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1782 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Imre Deakc1874ed2014-02-04 21:35:46 +02001783 }
1784
1785 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001786 gmbus_irq_handler(dev_priv);
Imre Deakc1874ed2014-02-04 21:35:46 +02001787}
1788
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001789static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001790{
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001791 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001792
1793 if (hotplug_status)
1794 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1795
1796 return hotplug_status;
1797}
1798
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001799static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001800 u32 hotplug_status)
1801{
Ville Syrjälä42db67d2015-08-28 21:26:27 +03001802 u32 pin_mask = 0, long_mask = 0;
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001803
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001804 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1805 IS_CHERRYVIEW(dev_priv)) {
Jani Nikula0d2e4292015-05-27 15:03:39 +03001806 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001807
Ville Syrjälä58f2cf22015-08-28 22:59:08 +03001808 if (hotplug_trigger) {
1809 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1810 hotplug_trigger, hpd_status_g4x,
1811 i9xx_port_hotplug_long_detect);
1812
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001813 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Ville Syrjälä58f2cf22015-08-28 22:59:08 +03001814 }
Jani Nikula369712e2015-05-27 15:03:40 +03001815
1816 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001817 dp_aux_irq_handler(dev_priv);
Jani Nikula0d2e4292015-05-27 15:03:39 +03001818 } else {
1819 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001820
Ville Syrjälä58f2cf22015-08-28 22:59:08 +03001821 if (hotplug_trigger) {
1822 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
Daniel Vetter44cc6c02015-09-30 08:47:41 +02001823 hotplug_trigger, hpd_status_i915,
Ville Syrjälä58f2cf22015-08-28 22:59:08 +03001824 i9xx_port_hotplug_long_detect);
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001825 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Ville Syrjälä58f2cf22015-08-28 22:59:08 +03001826 }
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001827 }
Ville Syrjälä16c6c562014-04-01 10:54:36 +03001828}
1829
Daniel Vetterff1f5252012-10-02 15:10:55 +02001830static irqreturn_t valleyview_irq_handler(int irq, void *arg)
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001831{
Daniel Vetter45a83f82014-05-12 19:17:55 +02001832 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01001833 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001834 irqreturn_t ret = IRQ_NONE;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001835
Imre Deak2dd2a882015-02-24 11:14:30 +02001836 if (!intel_irqs_enabled(dev_priv))
1837 return IRQ_NONE;
1838
Imre Deak1f814da2015-12-16 02:52:19 +02001839 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1840 disable_rpm_wakeref_asserts(dev_priv);
1841
Ville Syrjälä1e1cace2016-04-13 21:19:52 +03001842 do {
Ville Syrjälä6e814802016-04-13 21:19:53 +03001843 u32 iir, gt_iir, pm_iir;
Ville Syrjälä2ecb8ca2016-04-13 21:19:55 +03001844 u32 pipe_stats[I915_MAX_PIPES] = {};
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001845 u32 hotplug_status = 0;
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03001846 u32 ier = 0;
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001847
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001848 gt_iir = I915_READ(GTIIR);
1849 pm_iir = I915_READ(GEN6_PMIIR);
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001850 iir = I915_READ(VLV_IIR);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001851
1852 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
Ville Syrjälä1e1cace2016-04-13 21:19:52 +03001853 break;
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001854
1855 ret = IRQ_HANDLED;
1856
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03001857 /*
1858 * Theory on interrupt generation, based on empirical evidence:
1859 *
1860 * x = ((VLV_IIR & VLV_IER) ||
1861 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1862 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1863 *
1864 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1865 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1866 * guarantee the CPU interrupt will be raised again even if we
1867 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1868 * bits this time around.
1869 */
Ville Syrjälä4a0a0202016-04-13 21:19:50 +03001870 I915_WRITE(VLV_MASTER_IER, 0);
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03001871 ier = I915_READ(VLV_IER);
1872 I915_WRITE(VLV_IER, 0);
Ville Syrjälä4a0a0202016-04-13 21:19:50 +03001873
1874 if (gt_iir)
1875 I915_WRITE(GTIIR, gt_iir);
1876 if (pm_iir)
1877 I915_WRITE(GEN6_PMIIR, pm_iir);
1878
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03001879 if (iir & I915_DISPLAY_PORT_INTERRUPT)
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001880 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03001881
Oscar Mateo3ff60f82014-06-16 16:10:58 +01001882 /* Call regardless, as some status bits might not be
1883 * signalled in iir */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001884 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03001885
Jerome Anandeef57322017-01-25 04:27:49 +05301886 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1887 I915_LPE_PIPE_B_INTERRUPT))
1888 intel_lpe_audio_irq_handler(dev_priv);
1889
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03001890 /*
1891 * VLV_IIR is single buffered, and reflects the level
1892 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1893 */
1894 if (iir)
1895 I915_WRITE(VLV_IIR, iir);
Ville Syrjälä4a0a0202016-04-13 21:19:50 +03001896
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03001897 I915_WRITE(VLV_IER, ier);
Ville Syrjälä4a0a0202016-04-13 21:19:50 +03001898 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1899 POSTING_READ(VLV_MASTER_IER);
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001900
Ville Syrjälä52894872016-04-13 21:19:56 +03001901 if (gt_iir)
Ville Syrjälä261e40b2016-04-13 21:19:57 +03001902 snb_gt_irq_handler(dev_priv, gt_iir);
Ville Syrjälä52894872016-04-13 21:19:56 +03001903 if (pm_iir)
1904 gen6_rps_irq_handler(dev_priv, pm_iir);
1905
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001906 if (hotplug_status)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001907 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
Ville Syrjälä2ecb8ca2016-04-13 21:19:55 +03001908
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001909 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
Ville Syrjälä1e1cace2016-04-13 21:19:52 +03001910 } while (0);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001911
Imre Deak1f814da2015-12-16 02:52:19 +02001912 enable_rpm_wakeref_asserts(dev_priv);
1913
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07001914 return ret;
1915}
1916
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001917static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1918{
Daniel Vetter45a83f82014-05-12 19:17:55 +02001919 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01001920 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001921 irqreturn_t ret = IRQ_NONE;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001922
Imre Deak2dd2a882015-02-24 11:14:30 +02001923 if (!intel_irqs_enabled(dev_priv))
1924 return IRQ_NONE;
1925
Imre Deak1f814da2015-12-16 02:52:19 +02001926 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1927 disable_rpm_wakeref_asserts(dev_priv);
1928
Chris Wilson579de732016-03-14 09:01:57 +00001929 do {
Ville Syrjälä6e814802016-04-13 21:19:53 +03001930 u32 master_ctl, iir;
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001931 u32 gt_iir[4] = {};
Ville Syrjälä2ecb8ca2016-04-13 21:19:55 +03001932 u32 pipe_stats[I915_MAX_PIPES] = {};
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001933 u32 hotplug_status = 0;
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03001934 u32 ier = 0;
1935
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001936 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1937 iir = I915_READ(VLV_IIR);
Ville Syrjälä3278f672014-04-09 13:28:49 +03001938
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001939 if (master_ctl == 0 && iir == 0)
1940 break;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001941
Oscar Mateo27b6c122014-06-16 16:11:00 +01001942 ret = IRQ_HANDLED;
1943
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03001944 /*
1945 * Theory on interrupt generation, based on empirical evidence:
1946 *
1947 * x = ((VLV_IIR & VLV_IER) ||
1948 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1949 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1950 *
1951 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1952 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1953 * guarantee the CPU interrupt will be raised again even if we
1954 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1955 * bits this time around.
1956 */
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001957 I915_WRITE(GEN8_MASTER_IRQ, 0);
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03001958 ier = I915_READ(VLV_IER);
1959 I915_WRITE(VLV_IER, 0);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001960
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001961 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001962
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03001963 if (iir & I915_DISPLAY_PORT_INTERRUPT)
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001964 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03001965
Oscar Mateo27b6c122014-06-16 16:11:00 +01001966 /* Call regardless, as some status bits might not be
1967 * signalled in iir */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001968 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001969
Jerome Anandeef57322017-01-25 04:27:49 +05301970 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1971 I915_LPE_PIPE_B_INTERRUPT |
1972 I915_LPE_PIPE_C_INTERRUPT))
1973 intel_lpe_audio_irq_handler(dev_priv);
1974
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03001975 /*
1976 * VLV_IIR is single buffered, and reflects the level
1977 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1978 */
1979 if (iir)
1980 I915_WRITE(VLV_IIR, iir);
1981
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03001982 I915_WRITE(VLV_IER, ier);
Ville Syrjäläe5328c42016-04-13 21:19:47 +03001983 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03001984 POSTING_READ(GEN8_MASTER_IRQ);
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001985
Ville Syrjäläe30e2512016-04-13 21:19:58 +03001986 gen8_gt_irq_handler(dev_priv, gt_iir);
1987
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03001988 if (hotplug_status)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001989 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
Ville Syrjälä2ecb8ca2016-04-13 21:19:55 +03001990
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001991 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
Chris Wilson579de732016-03-14 09:01:57 +00001992 } while (0);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001993
Imre Deak1f814da2015-12-16 02:52:19 +02001994 enable_rpm_wakeref_asserts(dev_priv);
1995
Ville Syrjälä43f328d2014-04-09 20:40:52 +03001996 return ret;
1997}
1998
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01001999static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2000 u32 hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002001 const u32 hpd[HPD_NUM_PINS])
2002{
Ville Syrjälä40e56412015-08-27 23:56:10 +03002003 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2004
Jani Nikula6a39d7c2015-11-25 16:47:22 +02002005 /*
2006 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2007 * unless we touch the hotplug register, even if hotplug_trigger is
2008 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2009 * errors.
2010 */
Ville Syrjälä40e56412015-08-27 23:56:10 +03002011 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
Jani Nikula6a39d7c2015-11-25 16:47:22 +02002012 if (!hotplug_trigger) {
2013 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2014 PORTD_HOTPLUG_STATUS_MASK |
2015 PORTC_HOTPLUG_STATUS_MASK |
2016 PORTB_HOTPLUG_STATUS_MASK;
2017 dig_hotplug_reg &= ~mask;
2018 }
2019
Ville Syrjälä40e56412015-08-27 23:56:10 +03002020 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
Jani Nikula6a39d7c2015-11-25 16:47:22 +02002021 if (!hotplug_trigger)
2022 return;
Ville Syrjälä40e56412015-08-27 23:56:10 +03002023
2024 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2025 dig_hotplug_reg, hpd,
2026 pch_port_hotplug_long_detect);
2027
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002028 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Ville Syrjälä40e56412015-08-27 23:56:10 +03002029}
2030
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002031static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
Jesse Barnes776ad802011-01-04 15:09:39 -08002032{
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002033 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02002034 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
Jesse Barnes776ad802011-01-04 15:09:39 -08002035
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002036 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
Daniel Vetter91d131d2013-06-27 17:52:14 +02002037
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03002038 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2039 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2040 SDE_AUDIO_POWER_SHIFT);
Jesse Barnes776ad802011-01-04 15:09:39 -08002041 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03002042 port_name(port));
2043 }
Jesse Barnes776ad802011-01-04 15:09:39 -08002044
Daniel Vetterce99c252012-12-01 13:53:47 +01002045 if (pch_iir & SDE_AUX_MASK)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002046 dp_aux_irq_handler(dev_priv);
Daniel Vetterce99c252012-12-01 13:53:47 +01002047
Jesse Barnes776ad802011-01-04 15:09:39 -08002048 if (pch_iir & SDE_GMBUS)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002049 gmbus_irq_handler(dev_priv);
Jesse Barnes776ad802011-01-04 15:09:39 -08002050
2051 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2052 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2053
2054 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2055 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2056
2057 if (pch_iir & SDE_POISON)
2058 DRM_ERROR("PCH poison interrupt\n");
2059
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002060 if (pch_iir & SDE_FDI_MASK)
Damien Lespiau055e3932014-08-18 13:49:10 +01002061 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002062 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2063 pipe_name(pipe),
2064 I915_READ(FDI_RX_IIR(pipe)));
Jesse Barnes776ad802011-01-04 15:09:39 -08002065
2066 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2067 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2068
2069 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2070 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2071
Jesse Barnes776ad802011-01-04 15:09:39 -08002072 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
Matthias Kaehlckea2196032017-07-17 11:14:03 -07002073 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
Paulo Zanoni86642812013-04-12 17:57:57 -03002074
2075 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
Matthias Kaehlckea2196032017-07-17 11:14:03 -07002076 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
Paulo Zanoni86642812013-04-12 17:57:57 -03002077}
2078
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002079static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
Paulo Zanoni86642812013-04-12 17:57:57 -03002080{
Paulo Zanoni86642812013-04-12 17:57:57 -03002081 u32 err_int = I915_READ(GEN7_ERR_INT);
Daniel Vetter5a69b892013-10-16 22:55:52 +02002082 enum pipe pipe;
Paulo Zanoni86642812013-04-12 17:57:57 -03002083
Paulo Zanonide032bf2013-04-12 17:57:58 -03002084 if (err_int & ERR_INT_POISON)
2085 DRM_ERROR("Poison interrupt\n");
2086
Damien Lespiau055e3932014-08-18 13:49:10 +01002087 for_each_pipe(dev_priv, pipe) {
Daniel Vetter1f7247c2014-09-30 10:56:48 +02002088 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2089 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanoni86642812013-04-12 17:57:57 -03002090
Daniel Vetter5a69b892013-10-16 22:55:52 +02002091 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002092 if (IS_IVYBRIDGE(dev_priv))
2093 ivb_pipe_crc_irq_handler(dev_priv, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02002094 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002095 hsw_pipe_crc_irq_handler(dev_priv, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02002096 }
2097 }
Shuang He8bf1e9f2013-10-15 18:55:27 +01002098
Paulo Zanoni86642812013-04-12 17:57:57 -03002099 I915_WRITE(GEN7_ERR_INT, err_int);
2100}
2101
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002102static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
Paulo Zanoni86642812013-04-12 17:57:57 -03002103{
Paulo Zanoni86642812013-04-12 17:57:57 -03002104 u32 serr_int = I915_READ(SERR_INT);
2105
Paulo Zanonide032bf2013-04-12 17:57:58 -03002106 if (serr_int & SERR_INT_POISON)
2107 DRM_ERROR("PCH poison interrupt\n");
2108
Paulo Zanoni86642812013-04-12 17:57:57 -03002109 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
Matthias Kaehlckea2196032017-07-17 11:14:03 -07002110 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
Paulo Zanoni86642812013-04-12 17:57:57 -03002111
2112 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
Matthias Kaehlckea2196032017-07-17 11:14:03 -07002113 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
Paulo Zanoni86642812013-04-12 17:57:57 -03002114
2115 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
Matthias Kaehlckea2196032017-07-17 11:14:03 -07002116 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C);
Paulo Zanoni86642812013-04-12 17:57:57 -03002117
2118 I915_WRITE(SERR_INT, serr_int);
Jesse Barnes776ad802011-01-04 15:09:39 -08002119}
2120
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002121static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
Adam Jackson23e81d62012-06-06 15:45:44 -04002122{
Adam Jackson23e81d62012-06-06 15:45:44 -04002123 int pipe;
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002124 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
Adam Jackson23e81d62012-06-06 15:45:44 -04002125
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002126 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
Daniel Vetter91d131d2013-06-27 17:52:14 +02002127
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03002128 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2129 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2130 SDE_AUDIO_POWER_SHIFT_CPT);
2131 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2132 port_name(port));
2133 }
Adam Jackson23e81d62012-06-06 15:45:44 -04002134
2135 if (pch_iir & SDE_AUX_MASK_CPT)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002136 dp_aux_irq_handler(dev_priv);
Adam Jackson23e81d62012-06-06 15:45:44 -04002137
2138 if (pch_iir & SDE_GMBUS_CPT)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002139 gmbus_irq_handler(dev_priv);
Adam Jackson23e81d62012-06-06 15:45:44 -04002140
2141 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2142 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2143
2144 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2145 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2146
2147 if (pch_iir & SDE_FDI_MASK_CPT)
Damien Lespiau055e3932014-08-18 13:49:10 +01002148 for_each_pipe(dev_priv, pipe)
Adam Jackson23e81d62012-06-06 15:45:44 -04002149 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2150 pipe_name(pipe),
2151 I915_READ(FDI_RX_IIR(pipe)));
Paulo Zanoni86642812013-04-12 17:57:57 -03002152
2153 if (pch_iir & SDE_ERROR_CPT)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002154 cpt_serr_int_handler(dev_priv);
Adam Jackson23e81d62012-06-06 15:45:44 -04002155}
2156
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002157static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002158{
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002159 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2160 ~SDE_PORTE_HOTPLUG_SPT;
2161 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2162 u32 pin_mask = 0, long_mask = 0;
2163
2164 if (hotplug_trigger) {
2165 u32 dig_hotplug_reg;
2166
2167 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2168 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2169
2170 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2171 dig_hotplug_reg, hpd_spt,
Ville Syrjälä74c0b392015-08-27 23:56:07 +03002172 spt_port_hotplug_long_detect);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002173 }
2174
2175 if (hotplug2_trigger) {
2176 u32 dig_hotplug_reg;
2177
2178 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2179 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2180
2181 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2182 dig_hotplug_reg, hpd_spt,
2183 spt_port_hotplug2_long_detect);
2184 }
2185
2186 if (pin_mask)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002187 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002188
2189 if (pch_iir & SDE_GMBUS_CPT)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002190 gmbus_irq_handler(dev_priv);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002191}
2192
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002193static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2194 u32 hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002195 const u32 hpd[HPD_NUM_PINS])
2196{
Ville Syrjälä40e56412015-08-27 23:56:10 +03002197 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2198
2199 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2200 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2201
2202 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2203 dig_hotplug_reg, hpd,
2204 ilk_port_hotplug_long_detect);
2205
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002206 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Ville Syrjälä40e56412015-08-27 23:56:10 +03002207}
2208
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002209static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2210 u32 de_iir)
Paulo Zanonic008bc62013-07-12 16:35:10 -03002211{
Daniel Vetter40da17c22013-10-21 18:04:36 +02002212 enum pipe pipe;
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03002213 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2214
Ville Syrjälä40e56412015-08-27 23:56:10 +03002215 if (hotplug_trigger)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002216 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002217
2218 if (de_iir & DE_AUX_CHANNEL_A)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002219 dp_aux_irq_handler(dev_priv);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002220
2221 if (de_iir & DE_GSE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002222 intel_opregion_asle_intr(dev_priv);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002223
Paulo Zanonic008bc62013-07-12 16:35:10 -03002224 if (de_iir & DE_POISON)
2225 DRM_ERROR("Poison interrupt\n");
2226
Damien Lespiau055e3932014-08-18 13:49:10 +01002227 for_each_pipe(dev_priv, pipe) {
Daniel Vetterfd3a4022017-07-20 19:57:51 +02002228 if (de_iir & DE_PIPE_VBLANK(pipe))
2229 drm_handle_vblank(&dev_priv->drm, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002230
Daniel Vetter40da17c22013-10-21 18:04:36 +02002231 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
Daniel Vetter1f7247c2014-09-30 10:56:48 +02002232 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002233
Daniel Vetter40da17c22013-10-21 18:04:36 +02002234 if (de_iir & DE_PIPE_CRC_DONE(pipe))
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002235 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002236 }
2237
2238 /* check event from PCH */
2239 if (de_iir & DE_PCH_EVENT) {
2240 u32 pch_iir = I915_READ(SDEIIR);
2241
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002242 if (HAS_PCH_CPT(dev_priv))
2243 cpt_irq_handler(dev_priv, pch_iir);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002244 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002245 ibx_irq_handler(dev_priv, pch_iir);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002246
2247		/* clear the PCH hotplug event before clearing the CPU irq */
2248 I915_WRITE(SDEIIR, pch_iir);
2249 }
2250
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002251 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2252 ironlake_rps_change_irq_handler(dev_priv);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002253}
2254
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002255static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2256 u32 de_iir)
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002257{
Damien Lespiau07d27e22014-03-03 17:31:46 +00002258 enum pipe pipe;
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +03002259 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2260
Ville Syrjälä40e56412015-08-27 23:56:10 +03002261 if (hotplug_trigger)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002262 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002263
2264 if (de_iir & DE_ERR_INT_IVB)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002265 ivb_err_int_handler(dev_priv);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002266
2267 if (de_iir & DE_AUX_CHANNEL_A_IVB)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002268 dp_aux_irq_handler(dev_priv);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002269
2270 if (de_iir & DE_GSE_IVB)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002271 intel_opregion_asle_intr(dev_priv);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002272
Damien Lespiau055e3932014-08-18 13:49:10 +01002273 for_each_pipe(dev_priv, pipe) {
Daniel Vetterfd3a4022017-07-20 19:57:51 +02002274 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2275 drm_handle_vblank(&dev_priv->drm, pipe);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002276 }
2277
2278 /* check event from PCH */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002279 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002280 u32 pch_iir = I915_READ(SDEIIR);
2281
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002282 cpt_irq_handler(dev_priv, pch_iir);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002283
2284	/* clear PCH hotplug event before clearing CPU irq */
2285 I915_WRITE(SDEIIR, pch_iir);
2286 }
2287}
2288
Oscar Mateo72c90f62014-06-16 16:10:57 +01002289/*
2290 * To handle irqs with the minimum potential races with fresh interrupts, we:
2291 * 1 - Disable Master Interrupt Control.
2292 * 2 - Find the source(s) of the interrupt.
2293 * 3 - Clear the Interrupt Identity bits (IIR).
2294 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2295 * 5 - Re-enable Master Interrupt Control.
2296 */
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002297static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002298{
Daniel Vetter45a83f82014-05-12 19:17:55 +02002299 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01002300 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002301 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01002302 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002303
Imre Deak2dd2a882015-02-24 11:14:30 +02002304 if (!intel_irqs_enabled(dev_priv))
2305 return IRQ_NONE;
2306
Imre Deak1f814da2015-12-16 02:52:19 +02002307 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2308 disable_rpm_wakeref_asserts(dev_priv);
2309
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002310 /* disable master interrupt before clearing iir */
2311 de_ier = I915_READ(DEIER);
2312 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Paulo Zanoni23a78512013-07-12 16:35:14 -03002313 POSTING_READ(DEIER);
Chris Wilson0e434062012-05-09 21:45:44 +01002314
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002315 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2316	 * interrupts will be stored on its back queue, and then we'll be
2317 * able to process them after we restore SDEIER (as soon as we restore
2318 * it, we'll get an interrupt if SDEIIR still has something to process
2319 * due to its back queue). */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002320 if (!HAS_PCH_NOP(dev_priv)) {
Ben Widawskyab5c6082013-04-05 13:12:41 -07002321 sde_ier = I915_READ(SDEIER);
2322 I915_WRITE(SDEIER, 0);
2323 POSTING_READ(SDEIER);
2324 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002325
Oscar Mateo72c90f62014-06-16 16:10:57 +01002326 /* Find, clear, then process each source of interrupt */
2327
Chris Wilson0e434062012-05-09 21:45:44 +01002328 gt_iir = I915_READ(GTIIR);
2329 if (gt_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002330 I915_WRITE(GTIIR, gt_iir);
2331 ret = IRQ_HANDLED;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002332 if (INTEL_GEN(dev_priv) >= 6)
Ville Syrjälä261e40b2016-04-13 21:19:57 +03002333 snb_gt_irq_handler(dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03002334 else
Ville Syrjälä261e40b2016-04-13 21:19:57 +03002335 ilk_gt_irq_handler(dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002336 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002337
2338 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01002339 if (de_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002340 I915_WRITE(DEIIR, de_iir);
2341 ret = IRQ_HANDLED;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002342 if (INTEL_GEN(dev_priv) >= 7)
2343 ivb_display_irq_handler(dev_priv, de_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002344 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002345 ilk_display_irq_handler(dev_priv, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002346 }
2347
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002348 if (INTEL_GEN(dev_priv) >= 6) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002349 u32 pm_iir = I915_READ(GEN6_PMIIR);
2350 if (pm_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002351 I915_WRITE(GEN6_PMIIR, pm_iir);
2352 ret = IRQ_HANDLED;
Oscar Mateo72c90f62014-06-16 16:10:57 +01002353 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002354 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002355 }
2356
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002357 I915_WRITE(DEIER, de_ier);
2358 POSTING_READ(DEIER);
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002359 if (!HAS_PCH_NOP(dev_priv)) {
Ben Widawskyab5c6082013-04-05 13:12:41 -07002360 I915_WRITE(SDEIER, sde_ier);
2361 POSTING_READ(SDEIER);
2362 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002363
Imre Deak1f814da2015-12-16 02:52:19 +02002364 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2365 enable_rpm_wakeref_asserts(dev_priv);
2366
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002367 return ret;
2368}
2369
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002370static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2371 u32 hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002372 const u32 hpd[HPD_NUM_PINS])
Shashank Sharmad04a4922014-08-22 17:40:41 +05302373{
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002374 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
Shashank Sharmad04a4922014-08-22 17:40:41 +05302375
Ville Syrjäläa52bb152015-08-27 23:56:11 +03002376 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2377 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302378
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002379 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002380 dig_hotplug_reg, hpd,
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002381 bxt_port_hotplug_long_detect);
Ville Syrjälä40e56412015-08-27 23:56:10 +03002382
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002383 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302384}
2385
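/*
 * Gen8+ display engine (DE) interrupts are spread over several IIR
 * registers: MISC, PORT, one per pipe, plus the south display engine
 * (SDEIIR) behind GEN8_DE_PCH_IRQ. Each source is acked by writing its
 * IIR back before being processed, following the find/clear/process
 * scheme described above.
 */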
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002386static irqreturn_t
2387gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002388{
Ben Widawskyabd58f02013-11-02 21:07:09 -07002389 irqreturn_t ret = IRQ_NONE;
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002390 u32 iir;
Daniel Vetterc42664c2013-11-07 11:05:40 +01002391 enum pipe pipe;
Jesse Barnes88e04702014-11-13 17:51:48 +00002392
Ben Widawskyabd58f02013-11-02 21:07:09 -07002393 if (master_ctl & GEN8_DE_MISC_IRQ) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002394 iir = I915_READ(GEN8_DE_MISC_IIR);
2395 if (iir) {
2396 I915_WRITE(GEN8_DE_MISC_IIR, iir);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002397 ret = IRQ_HANDLED;
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002398 if (iir & GEN8_DE_MISC_GSE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002399 intel_opregion_asle_intr(dev_priv);
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002400 else
2401 DRM_ERROR("Unexpected DE Misc interrupt\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002402		} else
2404 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002405 }
2406
Daniel Vetter6d766f02013-11-07 14:49:55 +01002407 if (master_ctl & GEN8_DE_PORT_IRQ) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002408 iir = I915_READ(GEN8_DE_PORT_IIR);
2409 if (iir) {
2410 u32 tmp_mask;
Shashank Sharmad04a4922014-08-22 17:40:41 +05302411 bool found = false;
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002412
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002413 I915_WRITE(GEN8_DE_PORT_IIR, iir);
Daniel Vetter6d766f02013-11-07 14:49:55 +01002414 ret = IRQ_HANDLED;
Jesse Barnes88e04702014-11-13 17:51:48 +00002415
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002416 tmp_mask = GEN8_AUX_CHANNEL_A;
Pandiyan, Dhinakaranbca2bf22017-07-18 11:28:00 -07002417 if (INTEL_GEN(dev_priv) >= 9)
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002418 tmp_mask |= GEN9_AUX_CHANNEL_B |
2419 GEN9_AUX_CHANNEL_C |
2420 GEN9_AUX_CHANNEL_D;
2421
2422 if (iir & tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002423 dp_aux_irq_handler(dev_priv);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302424 found = true;
2425 }
2426
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002427 if (IS_GEN9_LP(dev_priv)) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002428 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2429 if (tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002430 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2431 hpd_bxt);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002432 found = true;
2433 }
2434 } else if (IS_BROADWELL(dev_priv)) {
2435 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2436 if (tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002437 ilk_hpd_irq_handler(dev_priv,
2438 tmp_mask, hpd_bdw);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002439 found = true;
2440 }
Shashank Sharmad04a4922014-08-22 17:40:41 +05302441 }
2442
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002443 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002444 gmbus_irq_handler(dev_priv);
Shashank Sharma9e637432014-08-22 17:40:43 +05302445 found = true;
2446 }
2447
Shashank Sharmad04a4922014-08-22 17:40:41 +05302448 if (!found)
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002449 DRM_ERROR("Unexpected DE Port interrupt\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002450		} else
2452 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002453 }
2454
Damien Lespiau055e3932014-08-18 13:49:10 +01002455 for_each_pipe(dev_priv, pipe) {
Daniel Vetterfd3a4022017-07-20 19:57:51 +02002456 u32 fault_errors;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002457
Daniel Vetterc42664c2013-11-07 11:05:40 +01002458 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2459 continue;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002460
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002461 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2462 if (!iir) {
Ben Widawskyabd58f02013-11-02 21:07:09 -07002463 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002464 continue;
2465 }
2466
2467 ret = IRQ_HANDLED;
2468 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2469
Daniel Vetterfd3a4022017-07-20 19:57:51 +02002470 if (iir & GEN8_PIPE_VBLANK)
2471 drm_handle_vblank(&dev_priv->drm, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002472
2473 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002474 hsw_pipe_crc_irq_handler(dev_priv, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002475
2476 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2477 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2478
2479 fault_errors = iir;
Pandiyan, Dhinakaranbca2bf22017-07-18 11:28:00 -07002480 if (INTEL_GEN(dev_priv) >= 9)
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002481 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2482 else
2483 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2484
2485 if (fault_errors)
Tvrtko Ursulin1353ec32016-10-27 13:48:32 +01002486 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002487 pipe_name(pipe),
2488 fault_errors);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002489 }
2490
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002491 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
Shashank Sharma266ea3d2014-08-22 17:40:42 +05302492 master_ctl & GEN8_DE_PCH_IRQ) {
Daniel Vetter92d03a82013-11-07 11:05:43 +01002493 /*
2494 * FIXME(BDW): Assume for now that the new interrupt handling
2495 * scheme also closed the SDE interrupt handling race we've seen
2496 * on older pch-split platforms. But this needs testing.
2497 */
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002498 iir = I915_READ(SDEIIR);
2499 if (iir) {
2500 I915_WRITE(SDEIIR, iir);
Daniel Vetter92d03a82013-11-07 11:05:43 +01002501 ret = IRQ_HANDLED;
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002502
Rodrigo Vivi7b22b8c2017-06-02 13:06:39 -07002503 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
2504 HAS_PCH_CNP(dev_priv))
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002505 spt_irq_handler(dev_priv, iir);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002506 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002507 cpt_irq_handler(dev_priv, iir);
Jani Nikula2dfb0b82016-01-07 10:29:10 +02002508 } else {
2509 /*
2510 * Like on previous PCH there seems to be something
2511 * fishy going on with forwarding PCH interrupts.
2512 */
2513 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2514 }
Daniel Vetter92d03a82013-11-07 11:05:43 +01002515 }
2516
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002517 return ret;
2518}
2519
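/*
 * Top-level gen8 handler: master interrupt control is written to 0
 * while the GT and DE sources are acked and processed, then restored.
 * The raw _FW register accessors are used here, presumably to keep
 * this hot path cheap.
 */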
2520static irqreturn_t gen8_irq_handler(int irq, void *arg)
2521{
2522 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01002523 struct drm_i915_private *dev_priv = to_i915(dev);
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002524 u32 master_ctl;
Ville Syrjäläe30e2512016-04-13 21:19:58 +03002525 u32 gt_iir[4] = {};
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002526 irqreturn_t ret;
2527
2528 if (!intel_irqs_enabled(dev_priv))
2529 return IRQ_NONE;
2530
2531 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2532 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2533 if (!master_ctl)
2534 return IRQ_NONE;
2535
2536 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2537
2538 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2539 disable_rpm_wakeref_asserts(dev_priv);
2540
2541 /* Find, clear, then process each source of interrupt */
Ville Syrjäläe30e2512016-04-13 21:19:58 +03002542 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2543 gen8_gt_irq_handler(dev_priv, gt_iir);
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002544 ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2545
Chris Wilsoncb0d2052015-04-07 16:21:04 +01002546 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2547 POSTING_READ_FW(GEN8_MASTER_IRQ);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002548
Imre Deak1f814da2015-12-16 02:52:19 +02002549 enable_rpm_wakeref_asserts(dev_priv);
2550
Ben Widawskyabd58f02013-11-02 21:07:09 -07002551 return ret;
2552}
2553
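/*
 * wedge_me is an on-stack watchdog for the reset path: if the delayed
 * work fires before it is cancelled, the reset is considered stuck and
 * the GPU is declared wedged, cancelling all in-flight rendering.
 */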
Chris Wilson36703e72017-06-22 11:56:25 +01002554struct wedge_me {
2555 struct delayed_work work;
2556 struct drm_i915_private *i915;
2557 const char *name;
2558};
2559
2560static void wedge_me(struct work_struct *work)
2561{
2562 struct wedge_me *w = container_of(work, typeof(*w), work.work);
2563
2564 dev_err(w->i915->drm.dev,
2565 "%s timed out, cancelling all in-flight rendering.\n",
2566 w->name);
2567 i915_gem_set_wedged(w->i915);
2568}
2569
2570static void __init_wedge(struct wedge_me *w,
2571 struct drm_i915_private *i915,
2572 long timeout,
2573 const char *name)
2574{
2575 w->i915 = i915;
2576 w->name = name;
2577
2578 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
2579 schedule_delayed_work(&w->work, timeout);
2580}
2581
2582static void __fini_wedge(struct wedge_me *w)
2583{
2584 cancel_delayed_work_sync(&w->work);
2585 destroy_delayed_work_on_stack(&w->work);
2586 w->i915 = NULL;
2587}
2588
2589#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
2590 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
2591 (W)->i915; \
2592 __fini_wedge((W)))
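/*
 * The for(;;) construction above runs the body exactly once: the
 * condition (W)->i915 is non-NULL after __init_wedge() has armed the
 * watchdog, and __fini_wedge() both cancels the work and NULLs
 * (W)->i915, terminating the loop. A minimal usage sketch (with a
 * hypothetical reset step):
 *
 *	struct wedge_me w;
 *
 *	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
 *		do_reset_work(dev_priv);	<- hypothetical step
 *	}
 */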
2593
Jesse Barnes8a905232009-07-11 16:48:03 -04002594/**
Chris Wilsond5367302017-06-20 10:57:43 +01002595 * i915_reset_device - do process context error handling work
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002596 * @dev_priv: i915 device private
Jesse Barnes8a905232009-07-11 16:48:03 -04002597 *
2598 * Fire an error uevent so userspace can see that a hang or error
2599 * was detected.
2600 */
Chris Wilsond5367302017-06-20 10:57:43 +01002601static void i915_reset_device(struct drm_i915_private *dev_priv)
Jesse Barnes8a905232009-07-11 16:48:03 -04002602{
Chris Wilson91c8a322016-07-05 10:40:23 +01002603 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
Ben Widawskycce723e2013-07-19 09:16:42 -07002604 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2605 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2606 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Chris Wilson36703e72017-06-22 11:56:25 +01002607 struct wedge_me w;
Jesse Barnes8a905232009-07-11 16:48:03 -04002608
Chris Wilsonc0336662016-05-06 15:40:21 +01002609 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04002610
Chris Wilson8af29b02016-09-09 14:11:47 +01002611 DRM_DEBUG_DRIVER("resetting chip\n");
2612 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2613
Chris Wilson36703e72017-06-22 11:56:25 +01002614 /* Use a watchdog to ensure that our reset completes */
2615 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
2616 intel_prepare_reset(dev_priv);
Ville Syrjälä75147472014-11-24 18:28:11 +02002617
Chris Wilson36703e72017-06-22 11:56:25 +01002618 /* Signal that locked waiters should reset the GPU */
2619 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
2620 wake_up_all(&dev_priv->gpu_error.wait_queue);
Chris Wilson8c185ec2017-03-16 17:13:02 +00002621
Chris Wilson36703e72017-06-22 11:56:25 +01002622		/* Wait for anyone holding the lock to wake up, without
2623 * blocking indefinitely on struct_mutex.
Chris Wilson780f2622016-09-09 14:11:52 +01002624 */
Chris Wilson36703e72017-06-22 11:56:25 +01002625 do {
2626 if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
Chris Wilson535275d2017-07-21 13:32:37 +01002627 i915_reset(dev_priv, 0);
Chris Wilson36703e72017-06-22 11:56:25 +01002628 mutex_unlock(&dev_priv->drm.struct_mutex);
2629 }
2630 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
2631 I915_RESET_HANDOFF,
2632 TASK_UNINTERRUPTIBLE,
2633 1));
Chris Wilson780f2622016-09-09 14:11:52 +01002634
Chris Wilson36703e72017-06-22 11:56:25 +01002635 intel_finish_reset(dev_priv);
2636 }
Daniel Vetter17e1df02013-09-08 21:57:13 +02002637
Chris Wilson780f2622016-09-09 14:11:52 +01002638 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
Chris Wilson8af29b02016-09-09 14:11:47 +01002639 kobject_uevent_env(kobj,
2640 KOBJ_CHANGE, reset_done_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04002641}
2642
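/*
 * Dump the INSTDONE values captured at hang time; SC_INSTDONE exists
 * from gen4 onwards, and on gen7+ the sampler and row INSTDONE are
 * reported per slice/subslice.
 */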
Ben Widawskyd6369512016-09-20 16:54:32 +03002643static inline void
2644i915_err_print_instdone(struct drm_i915_private *dev_priv,
2645 struct intel_instdone *instdone)
2646{
Ben Widawskyf9e61372016-09-20 16:54:33 +03002647 int slice;
2648 int subslice;
2649
Ben Widawskyd6369512016-09-20 16:54:32 +03002650 pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
2651
2652 if (INTEL_GEN(dev_priv) <= 3)
2653 return;
2654
2655 pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
2656
2657 if (INTEL_GEN(dev_priv) <= 6)
2658 return;
2659
Ben Widawskyf9e61372016-09-20 16:54:33 +03002660 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2661 pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
2662 slice, subslice, instdone->sampler[slice][subslice]);
2663
2664 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
2665 pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
2666 slice, subslice, instdone->row[slice][subslice]);
Ben Widawskyd6369512016-09-20 16:54:32 +03002667}
2668
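/*
 * Write the sticky error registers (PGTBL_ER, IPEIR, EIR) back to
 * themselves to ack them; any EIR bits that remain set are masked via
 * EMR so they cannot keep re-triggering the error interrupt.
 */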
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002669static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
Jesse Barnes8a905232009-07-11 16:48:03 -04002670{
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002671 u32 eir;
Jesse Barnes8a905232009-07-11 16:48:03 -04002672
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002673 if (!IS_GEN2(dev_priv))
2674 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
Jesse Barnes8a905232009-07-11 16:48:03 -04002675
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002676 if (INTEL_GEN(dev_priv) < 4)
2677 I915_WRITE(IPEIR, I915_READ(IPEIR));
2678 else
2679 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04002680
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002681 I915_WRITE(EIR, I915_READ(EIR));
Jesse Barnes8a905232009-07-11 16:48:03 -04002682 eir = I915_READ(EIR);
2683 if (eir) {
2684 /*
2685 * some errors might have become stuck,
2686 * mask them.
2687 */
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002688 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04002689 I915_WRITE(EMR, I915_READ(EMR) | eir);
2690 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2691 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01002692}
2693
2694/**
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02002695 * i915_handle_error - handle a gpu error
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01002696 * @dev_priv: i915 device private
arun.siluvery@linux.intel.com14b730f2016-03-18 20:07:55 +00002697 * @engine_mask: mask representing engines that are hung
Michel Thierry87c390b2017-01-11 20:18:08 -08002698 * @fmt: Error message format string
2699 *
Javier Martinez Canillasaafd8582015-10-08 09:57:49 +02002700 * Do some basic checking of register state at error time and
Chris Wilson35aed2e2010-05-27 13:18:12 +01002701 * dump it to the syslog. Also call i915_capture_error_state() to make
2702 * sure we get a record and make it available in debugfs. Fire a uevent
2703 * so userspace knows something bad happened (should trigger collection
2704 * of a ring dump etc.).
2705 */
Chris Wilsonc0336662016-05-06 15:40:21 +01002706void i915_handle_error(struct drm_i915_private *dev_priv,
2707 u32 engine_mask,
Mika Kuoppala58174462014-02-25 17:11:26 +02002708 const char *fmt, ...)
Chris Wilson35aed2e2010-05-27 13:18:12 +01002709{
Michel Thierry142bc7d2017-06-20 10:57:46 +01002710 struct intel_engine_cs *engine;
2711 unsigned int tmp;
Mika Kuoppala58174462014-02-25 17:11:26 +02002712 va_list args;
2713 char error_msg[80];
Chris Wilson35aed2e2010-05-27 13:18:12 +01002714
Mika Kuoppala58174462014-02-25 17:11:26 +02002715 va_start(args, fmt);
2716 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2717 va_end(args);
2718
Chris Wilson1604a862017-03-14 17:18:40 +00002719 /*
2720 * In most cases it's guaranteed that we get here with an RPM
2721 * reference held, for example because there is a pending GPU
2722 * request that won't finish until the reset is done. This
2723 * isn't the case at least when we get here by doing a
2724 * simulated reset via debugfs, so get an RPM reference.
2725 */
2726 intel_runtime_pm_get(dev_priv);
2727
Chris Wilsonc0336662016-05-06 15:40:21 +01002728 i915_capture_error_state(dev_priv, engine_mask, error_msg);
Chris Wilsoneaa14c22016-10-19 13:52:03 +01002729 i915_clear_error_registers(dev_priv);
Jesse Barnes8a905232009-07-11 16:48:03 -04002730
Michel Thierry142bc7d2017-06-20 10:57:46 +01002731 /*
2732 * Try engine reset when available. We fall back to full reset if
2733 * single reset fails.
2734 */
2735 if (intel_has_reset_engine(dev_priv)) {
2736 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
2737 BUILD_BUG_ON(I915_RESET_HANDOFF >= I915_RESET_ENGINE);
2738 if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
2739 &dev_priv->gpu_error.flags))
2740 continue;
2741
Chris Wilson535275d2017-07-21 13:32:37 +01002742 if (i915_reset_engine(engine, 0) == 0)
Michel Thierry142bc7d2017-06-20 10:57:46 +01002743 engine_mask &= ~intel_engine_flag(engine);
2744
2745 clear_bit(I915_RESET_ENGINE + engine->id,
2746 &dev_priv->gpu_error.flags);
2747 wake_up_bit(&dev_priv->gpu_error.flags,
2748 I915_RESET_ENGINE + engine->id);
2749 }
2750 }
2751
Chris Wilson8af29b02016-09-09 14:11:47 +01002752 if (!engine_mask)
Chris Wilson1604a862017-03-14 17:18:40 +00002753 goto out;
Ben Gamariba1234d2009-09-14 17:48:47 -04002754
Michel Thierry142bc7d2017-06-20 10:57:46 +01002755 /* Full reset needs the mutex, stop any other user trying to do so. */
Chris Wilsond5367302017-06-20 10:57:43 +01002756 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
2757 wait_event(dev_priv->gpu_error.reset_queue,
2758 !test_bit(I915_RESET_BACKOFF,
2759 &dev_priv->gpu_error.flags));
Chris Wilson1604a862017-03-14 17:18:40 +00002760 goto out;
Chris Wilsond5367302017-06-20 10:57:43 +01002761 }
Chris Wilson8af29b02016-09-09 14:11:47 +01002762
Michel Thierry142bc7d2017-06-20 10:57:46 +01002763 /* Prevent any other reset-engine attempt. */
2764 for_each_engine(engine, dev_priv, tmp) {
2765 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
2766 &dev_priv->gpu_error.flags))
2767 wait_on_bit(&dev_priv->gpu_error.flags,
2768 I915_RESET_ENGINE + engine->id,
2769 TASK_UNINTERRUPTIBLE);
2770 }
2771
Chris Wilsond5367302017-06-20 10:57:43 +01002772 i915_reset_device(dev_priv);
2773
Michel Thierry142bc7d2017-06-20 10:57:46 +01002774 for_each_engine(engine, dev_priv, tmp) {
2775 clear_bit(I915_RESET_ENGINE + engine->id,
2776 &dev_priv->gpu_error.flags);
2777 }
2778
Chris Wilsond5367302017-06-20 10:57:43 +01002779 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
2780 wake_up_all(&dev_priv->gpu_error.reset_queue);
Chris Wilson1604a862017-03-14 17:18:40 +00002781
2782out:
2783 intel_runtime_pm_put(dev_priv);
Jesse Barnes8a905232009-07-11 16:48:03 -04002784}
2785
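/*
 * The vblank enable/disable hooks below differ only in which register
 * and bit they toggle: the PIPESTAT registers on the old platforms,
 * the display IMR on ILK-IVB, and the per-pipe DE IMR on gen8+. All
 * of them nest under irq_lock.
 */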
Keith Packard42f52ef2008-10-18 19:39:29 -07002786/* Called from drm generic code, passed 'crtc' which
2787 * we use as a pipe index
2788 */
Chris Wilson86e83e32016-10-07 20:49:52 +01002789static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002790{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002791 struct drm_i915_private *dev_priv = to_i915(dev);
Keith Packarde9d21d72008-10-16 11:31:38 -07002792 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08002793
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002794 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilson86e83e32016-10-07 20:49:52 +01002795 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2796 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2797
2798 return 0;
2799}
2800
2801static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
2802{
2803 struct drm_i915_private *dev_priv = to_i915(dev);
2804 unsigned long irqflags;
2805
2806 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2807 i915_enable_pipestat(dev_priv, pipe,
2808 PIPE_START_VBLANK_INTERRUPT_STATUS);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002809 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00002810
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002811 return 0;
2812}
2813
Thierry Reding88e72712015-09-24 18:35:31 +02002814static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002815{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002816 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002817 unsigned long irqflags;
Tvrtko Ursulin55b8f2a2016-10-14 09:17:22 +01002818 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
Chris Wilson86e83e32016-10-07 20:49:52 +01002819 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002820
Jesse Barnesf796cf82011-04-07 13:58:17 -07002821 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02002822 ilk_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002823 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2824
2825 return 0;
2826}
2827
Thierry Reding88e72712015-09-24 18:35:31 +02002828static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002829{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002830 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002831 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002832
Ben Widawskyabd58f02013-11-02 21:07:09 -07002833 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02002834 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002835 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02002836
Ben Widawskyabd58f02013-11-02 21:07:09 -07002837 return 0;
2838}
2839
Keith Packard42f52ef2008-10-18 19:39:29 -07002840/* Called from drm generic code, passed 'crtc' which
2841 * we use as a pipe index
2842 */
Chris Wilson86e83e32016-10-07 20:49:52 +01002843static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
2844{
2845 struct drm_i915_private *dev_priv = to_i915(dev);
2846 unsigned long irqflags;
2847
2848 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2849 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2850 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2851}
2852
2853static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002854{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002855 struct drm_i915_private *dev_priv = to_i915(dev);
Keith Packarde9d21d72008-10-16 11:31:38 -07002856 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07002857
Chris Wilson1ec14ad2010-12-04 11:30:53 +00002858 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002859 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02002860 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002861 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2862}
2863
Thierry Reding88e72712015-09-24 18:35:31 +02002864static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07002865{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002866 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002867 unsigned long irqflags;
Tvrtko Ursulin55b8f2a2016-10-14 09:17:22 +01002868 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
Chris Wilson86e83e32016-10-07 20:49:52 +01002869 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07002870
2871 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02002872 ilk_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002873 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2874}
2875
Thierry Reding88e72712015-09-24 18:35:31 +02002876static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002877{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002878 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002879 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002880
Ben Widawskyabd58f02013-11-02 21:07:09 -07002881 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02002882 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002883 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2884}
2885
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002886static void ibx_irq_reset(struct drm_i915_private *dev_priv)
Paulo Zanoni91738a92013-06-05 14:21:51 -03002887{
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01002888 if (HAS_PCH_NOP(dev_priv))
Paulo Zanoni91738a92013-06-05 14:21:51 -03002889 return;
2890
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002891 GEN5_IRQ_RESET(SDE);
Paulo Zanoni105b1222014-04-01 15:37:17 -03002892
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01002893 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
Paulo Zanoni105b1222014-04-01 15:37:17 -03002894 I915_WRITE(SERR_INT, 0xffffffff);
Paulo Zanoni622364b2014-04-01 15:37:22 -03002895}
Paulo Zanoni105b1222014-04-01 15:37:17 -03002896
Paulo Zanoni622364b2014-04-01 15:37:22 -03002897/*
2898 * SDEIER is also touched by the interrupt handler to work around missed PCH
2899 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2900 * instead we unconditionally enable all PCH interrupt sources here, but then
2901 * only unmask them as needed with SDEIMR.
2902 *
2903 * This function needs to be called before interrupts are enabled.
2904 */
2905static void ibx_irq_pre_postinstall(struct drm_device *dev)
2906{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002907 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanoni622364b2014-04-01 15:37:22 -03002908
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01002909 if (HAS_PCH_NOP(dev_priv))
Paulo Zanoni622364b2014-04-01 15:37:22 -03002910 return;
2911
2912 WARN_ON(I915_READ(SDEIER) != 0);
Paulo Zanoni91738a92013-06-05 14:21:51 -03002913 I915_WRITE(SDEIER, 0xffffffff);
2914 POSTING_READ(SDEIER);
2915}
2916
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002917static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002918{
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002919 GEN5_IRQ_RESET(GT);
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002920 if (INTEL_GEN(dev_priv) >= 6)
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03002921 GEN5_IRQ_RESET(GEN6_PM);
Daniel Vetterd18ea1b2013-07-12 22:43:25 +02002922}
2923
Ville Syrjälä70591a42014-10-30 19:42:58 +02002924static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2925{
2926 enum pipe pipe;
2927
Ville Syrjälä71b8b412016-04-11 16:56:31 +03002928 if (IS_CHERRYVIEW(dev_priv))
2929 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2930 else
2931 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2932
Ville Syrjäläad22d102016-04-12 18:56:14 +03002933 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
Ville Syrjälä70591a42014-10-30 19:42:58 +02002934 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2935
Ville Syrjäläad22d102016-04-12 18:56:14 +03002936 for_each_pipe(dev_priv, pipe) {
2937 I915_WRITE(PIPESTAT(pipe),
2938 PIPE_FIFO_UNDERRUN_STATUS |
2939 PIPESTAT_INT_STATUS_MASK);
2940 dev_priv->pipestat_irq_mask[pipe] = 0;
2941 }
Ville Syrjälä70591a42014-10-30 19:42:58 +02002942
2943 GEN5_IRQ_RESET(VLV_);
Ville Syrjäläad22d102016-04-12 18:56:14 +03002944 dev_priv->irq_mask = ~0;
Ville Syrjälä70591a42014-10-30 19:42:58 +02002945}
2946
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002947static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2948{
2949 u32 pipestat_mask;
Ville Syrjälä9ab981f2016-04-11 16:56:28 +03002950 u32 enable_mask;
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002951 enum pipe pipe;
2952
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002953 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
2954 PIPE_CRC_DONE_INTERRUPT_STATUS;
2955
2956 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2957 for_each_pipe(dev_priv, pipe)
2958 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2959
Ville Syrjälä9ab981f2016-04-11 16:56:28 +03002960 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2961 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
Ville Syrjäläebf5f922017-04-27 19:02:22 +03002962 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2963 I915_LPE_PIPE_A_INTERRUPT |
2964 I915_LPE_PIPE_B_INTERRUPT;
2965
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002966 if (IS_CHERRYVIEW(dev_priv))
Ville Syrjäläebf5f922017-04-27 19:02:22 +03002967 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2968 I915_LPE_PIPE_C_INTERRUPT;
Ville Syrjälä6b7eafc2016-04-11 16:56:29 +03002969
2970 WARN_ON(dev_priv->irq_mask != ~0);
2971
Ville Syrjälä9ab981f2016-04-11 16:56:28 +03002972 dev_priv->irq_mask = ~enable_mask;
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002973
Ville Syrjälä9ab981f2016-04-11 16:56:28 +03002974 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002975}
2976
2977/* drm_dma.h hooks
2978*/
2979static void ironlake_irq_reset(struct drm_device *dev)
2980{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002981 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002982
2983 I915_WRITE(HWSTAM, 0xffffffff);
2984
2985 GEN5_IRQ_RESET(DE);
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01002986 if (IS_GEN7(dev_priv))
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002987 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2988
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002989 gen5_gt_irq_reset(dev_priv);
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002990
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00002991 ibx_irq_reset(dev_priv);
Ville Syrjälä8bb61302016-04-12 18:56:44 +03002992}
2993
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002994static void valleyview_irq_preinstall(struct drm_device *dev)
2995{
Chris Wilsonfac5e232016-07-04 11:34:36 +01002996 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07002997
Ville Syrjälä34c7b8a2016-04-13 21:19:48 +03002998 I915_WRITE(VLV_MASTER_IER, 0);
2999 POSTING_READ(VLV_MASTER_IER);
3000
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00003001 gen5_gt_irq_reset(dev_priv);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003002
Ville Syrjäläad22d102016-04-12 18:56:14 +03003003 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä99182712016-04-11 16:56:25 +03003004 if (dev_priv->display_irqs_enabled)
3005 vlv_display_irq_reset(dev_priv);
Ville Syrjäläad22d102016-04-12 18:56:14 +03003006 spin_unlock_irq(&dev_priv->irq_lock);
Jesse Barnes7e231dbe2012-03-28 13:39:38 -07003007}
3008
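/*
 * The gen8 GT interrupts are grouped into four banks; each
 * GEN8_IRQ_RESET_NDX(GT, n) is expected to clear the IMR/IER/IIR
 * triplet for one bank.
 */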
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02003009static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3010{
3011 GEN8_IRQ_RESET_NDX(GT, 0);
3012 GEN8_IRQ_RESET_NDX(GT, 1);
3013 GEN8_IRQ_RESET_NDX(GT, 2);
3014 GEN8_IRQ_RESET_NDX(GT, 3);
3015}
3016
Paulo Zanoni823f6b32014-04-01 15:37:26 -03003017static void gen8_irq_reset(struct drm_device *dev)
Ben Widawskyabd58f02013-11-02 21:07:09 -07003018{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003019 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003020 int pipe;
3021
Ben Widawskyabd58f02013-11-02 21:07:09 -07003022 I915_WRITE(GEN8_MASTER_IRQ, 0);
3023 POSTING_READ(GEN8_MASTER_IRQ);
3024
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02003025 gen8_gt_irq_reset(dev_priv);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003026
Damien Lespiau055e3932014-08-18 13:49:10 +01003027 for_each_pipe(dev_priv, pipe)
Daniel Vetterf458ebb2014-09-30 10:56:39 +02003028 if (intel_display_power_is_enabled(dev_priv,
3029 POWER_DOMAIN_PIPE(pipe)))
Paulo Zanoni813bde42014-07-04 11:50:29 -03003030 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003031
Paulo Zanonif86f3fb2014-04-01 15:37:14 -03003032 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3033 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3034 GEN5_IRQ_RESET(GEN8_PCU_);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003035
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003036 if (HAS_PCH_SPLIT(dev_priv))
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00003037 ibx_irq_reset(dev_priv);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003038}
Ben Widawskyabd58f02013-11-02 21:07:09 -07003039
Damien Lespiau4c6c03b2015-03-06 18:50:48 +00003040void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
Imre Deak001bd2c2017-07-12 18:54:13 +03003041 u8 pipe_mask)
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003042{
Paulo Zanoni1180e202014-10-07 18:02:52 -03003043 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
Ville Syrjälä6831f3e2016-02-19 20:47:31 +02003044 enum pipe pipe;
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003045
Daniel Vetter13321782014-09-15 14:55:29 +02003046 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä6831f3e2016-02-19 20:47:31 +02003047 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3048 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3049 dev_priv->de_irq_mask[pipe],
3050 ~dev_priv->de_irq_mask[pipe] | extra_ier);
Daniel Vetter13321782014-09-15 14:55:29 +02003051 spin_unlock_irq(&dev_priv->irq_lock);
Paulo Zanonid49bdb02014-07-04 11:50:31 -03003052}
3053
Ville Syrjäläaae8ba82016-02-19 20:47:30 +02003054void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
Imre Deak001bd2c2017-07-12 18:54:13 +03003055 u8 pipe_mask)
Ville Syrjäläaae8ba82016-02-19 20:47:30 +02003056{
Ville Syrjälä6831f3e2016-02-19 20:47:31 +02003057 enum pipe pipe;
3058
Ville Syrjäläaae8ba82016-02-19 20:47:30 +02003059 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä6831f3e2016-02-19 20:47:31 +02003060 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3061 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
Ville Syrjäläaae8ba82016-02-19 20:47:30 +02003062 spin_unlock_irq(&dev_priv->irq_lock);
3063
3064 /* make sure we're done processing display irqs */
Chris Wilson91c8a322016-07-05 10:40:23 +01003065 synchronize_irq(dev_priv->drm.irq);
Ville Syrjäläaae8ba82016-02-19 20:47:30 +02003066}
3067
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003068static void cherryview_irq_preinstall(struct drm_device *dev)
3069{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003070 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003071
3072 I915_WRITE(GEN8_MASTER_IRQ, 0);
3073 POSTING_READ(GEN8_MASTER_IRQ);
3074
Daniel Vetterd6e3cca2014-05-22 22:18:22 +02003075 gen8_gt_irq_reset(dev_priv);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003076
3077 GEN5_IRQ_RESET(GEN8_PCU_);
3078
Ville Syrjäläad22d102016-04-12 18:56:14 +03003079 spin_lock_irq(&dev_priv->irq_lock);
Ville Syrjälä99182712016-04-11 16:56:25 +03003080 if (dev_priv->display_irqs_enabled)
3081 vlv_display_irq_reset(dev_priv);
Ville Syrjäläad22d102016-04-12 18:56:14 +03003082 spin_unlock_irq(&dev_priv->irq_lock);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03003083}
3084
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003085static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
Ville Syrjälä87a02102015-08-27 23:55:57 +03003086 const u32 hpd[HPD_NUM_PINS])
3087{
Ville Syrjälä87a02102015-08-27 23:55:57 +03003088 struct intel_encoder *encoder;
3089 u32 enabled_irqs = 0;
3090
Chris Wilson91c8a322016-07-05 10:40:23 +01003091 for_each_intel_encoder(&dev_priv->drm, encoder)
Ville Syrjälä87a02102015-08-27 23:55:57 +03003092 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3093 enabled_irqs |= hpd[encoder->hpd_pin];
3094
3095 return enabled_irqs;
3096}
3097
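/*
 * The *_hpd_detection_setup() helpers below program the hotplug enable
 * and pulse-duration bits, while the corresponding *_hpd_irq_setup()
 * entry points additionally unmask the matching interrupt sources.
 */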
Imre Deak1a56b1a2017-01-27 11:39:21 +02003098static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3099{
3100 u32 hotplug;
3101
3102 /*
3103 * Enable digital hotplug on the PCH, and configure the DP short pulse
3104 * duration to 2ms (which is the minimum in the Display Port spec).
3105 * The pulse duration bits are reserved on LPT+.
3106 */
3107 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3108 hotplug &= ~(PORTB_PULSE_DURATION_MASK |
3109 PORTC_PULSE_DURATION_MASK |
3110 PORTD_PULSE_DURATION_MASK);
3111 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3112 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3113 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3114 /*
3115 * When CPU and PCH are on the same package, port A
3116 * HPD must be enabled in both north and south.
3117 */
3118 if (HAS_PCH_LPT_LP(dev_priv))
3119 hotplug |= PORTA_HOTPLUG_ENABLE;
3120 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3121}
3122
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003123static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
Keith Packard7fe0b972011-09-19 13:31:02 -07003124{
Imre Deak1a56b1a2017-01-27 11:39:21 +02003125 u32 hotplug_irqs, enabled_irqs;
Keith Packard7fe0b972011-09-19 13:31:02 -07003126
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003127 if (HAS_PCH_IBX(dev_priv)) {
Daniel Vetterfee884e2013-07-04 23:35:21 +02003128 hotplug_irqs = SDE_HOTPLUG_MASK;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003129 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003130 } else {
Daniel Vetterfee884e2013-07-04 23:35:21 +02003131 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003132 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003133 }
3134
Daniel Vetterfee884e2013-07-04 23:35:21 +02003135 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003136
Imre Deak1a56b1a2017-01-27 11:39:21 +02003137 ibx_hpd_detection_setup(dev_priv);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03003138}
Xiong Zhang26951ca2015-08-17 15:55:50 +08003139
Imre Deak2a57d9c2017-01-27 11:39:18 +02003140static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3141{
3142 u32 hotplug;
3143
3144 /* Enable digital hotplug on the PCH */
3145 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3146 hotplug |= PORTA_HOTPLUG_ENABLE |
3147 PORTB_HOTPLUG_ENABLE |
3148 PORTC_HOTPLUG_ENABLE |
3149 PORTD_HOTPLUG_ENABLE;
3150 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3151
3152 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3153 hotplug |= PORTE_HOTPLUG_ENABLE;
3154 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3155}
3156
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003157static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03003158{
Imre Deak2a57d9c2017-01-27 11:39:18 +02003159 u32 hotplug_irqs, enabled_irqs;
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03003160
3161 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003162 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03003163
3164 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3165
Imre Deak2a57d9c2017-01-27 11:39:18 +02003166 spt_hpd_detection_setup(dev_priv);
Keith Packard7fe0b972011-09-19 13:31:02 -07003167}
3168
Imre Deak1a56b1a2017-01-27 11:39:21 +02003169static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3170{
3171 u32 hotplug;
3172
3173 /*
3174 * Enable digital hotplug on the CPU, and configure the DP short pulse
3175	 * duration to 2ms (which is the minimum in the Display Port spec).
3176 * The pulse duration bits are reserved on HSW+.
3177 */
3178 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3179 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3180 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3181 DIGITAL_PORTA_PULSE_DURATION_2ms;
3182 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3183}
3184
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003185static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003186{
Imre Deak1a56b1a2017-01-27 11:39:21 +02003187 u32 hotplug_irqs, enabled_irqs;
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003188
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003189 if (INTEL_GEN(dev_priv) >= 8) {
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003190 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003191 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003192
3193 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003194 } else if (INTEL_GEN(dev_priv) >= 7) {
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +03003195 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003196 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003197
3198 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +03003199 } else {
3200 hotplug_irqs = DE_DP_A_HOTPLUG;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003201 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003202
Ville Syrjälä3a3b3c72015-08-27 23:56:06 +03003203 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3204 }
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003205
Imre Deak1a56b1a2017-01-27 11:39:21 +02003206 ilk_hpd_detection_setup(dev_priv);
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003207
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01003208 ibx_hpd_irq_setup(dev_priv);
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03003209}
3210
Imre Deak2a57d9c2017-01-27 11:39:18 +02003211static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3212 u32 enabled_irqs)
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003213{
Imre Deak2a57d9c2017-01-27 11:39:18 +02003214 u32 hotplug;
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003215
Ville Syrjäläa52bb152015-08-27 23:56:11 +03003216 hotplug = I915_READ(PCH_PORT_HOTPLUG);
Imre Deak2a57d9c2017-01-27 11:39:18 +02003217 hotplug |= PORTA_HOTPLUG_ENABLE |
3218 PORTB_HOTPLUG_ENABLE |
3219 PORTC_HOTPLUG_ENABLE;
Shubhangi Shrivastavad252bf62016-03-31 16:11:47 +05303220
3221 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3222 hotplug, enabled_irqs);
3223 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3224
3225 /*
3226	 * For BXT the invert bit has to be set based on the AOB design
3227	 * for the HPD detection logic; update it based on the VBT fields.
3228 */
Shubhangi Shrivastavad252bf62016-03-31 16:11:47 +05303229 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3230 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3231 hotplug |= BXT_DDIA_HPD_INVERT;
3232 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3233 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3234 hotplug |= BXT_DDIB_HPD_INVERT;
3235 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3236 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3237 hotplug |= BXT_DDIC_HPD_INVERT;
3238
Ville Syrjäläa52bb152015-08-27 23:56:11 +03003239 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
Shashank Sharmae0a20ad2015-03-27 14:54:14 +02003240}
3241
Imre Deak2a57d9c2017-01-27 11:39:18 +02003242static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3243{
3244 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3245}
3246
3247static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3248{
3249 u32 hotplug_irqs, enabled_irqs;
3250
3251 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3252 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3253
3254 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3255
3256 __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3257}
3258
Paulo Zanonid46da432013-02-08 17:35:15 -02003259static void ibx_irq_postinstall(struct drm_device *dev)
3260{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003261 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter82a28bc2013-03-27 15:55:01 +01003262 u32 mask;
Paulo Zanonid46da432013-02-08 17:35:15 -02003263
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003264 if (HAS_PCH_NOP(dev_priv))
Daniel Vetter692a04c2013-05-29 21:43:05 +02003265 return;
3266
Tvrtko Ursulin6e266952016-10-13 11:02:53 +01003267 if (HAS_PCH_IBX(dev_priv))
Daniel Vetter5c673b62014-03-07 20:34:46 +01003268 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
Paulo Zanoni105b1222014-04-01 15:37:17 -03003269 else
Daniel Vetter5c673b62014-03-07 20:34:46 +01003270 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
Paulo Zanoni86642812013-04-12 17:57:57 -03003271
Ville Syrjäläb51a2842015-09-18 20:03:41 +03003272 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
Paulo Zanonid46da432013-02-08 17:35:15 -02003273 I915_WRITE(SDEIMR, ~mask);
Imre Deak2a57d9c2017-01-27 11:39:18 +02003274
3275 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3276 HAS_PCH_LPT(dev_priv))
Imre Deak1a56b1a2017-01-27 11:39:21 +02003277 ibx_hpd_detection_setup(dev_priv);
Imre Deak2a57d9c2017-01-27 11:39:18 +02003278 else
3279 spt_hpd_detection_setup(dev_priv);
Paulo Zanonid46da432013-02-08 17:35:15 -02003280}
3281
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003282static void gen5_gt_irq_postinstall(struct drm_device *dev)
3283{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003284 struct drm_i915_private *dev_priv = to_i915(dev);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003285 u32 pm_irqs, gt_irqs;
3286
3287 pm_irqs = gt_irqs = 0;
3288
3289 dev_priv->gt_irq_mask = ~0;
Tvrtko Ursulin3c9192b2016-10-13 11:03:05 +01003290 if (HAS_L3_DPF(dev_priv)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003291 /* L3 parity interrupt is always unmasked. */
Tvrtko Ursulin772c2a52016-10-13 11:03:01 +01003292 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
3293 gt_irqs |= GT_PARITY_ERROR(dev_priv);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003294 }
3295
3296 gt_irqs |= GT_RENDER_USER_INTERRUPT;
Tvrtko Ursulin5db94012016-10-13 11:03:10 +01003297 if (IS_GEN5(dev_priv)) {
Chris Wilsonf8973c22016-07-01 17:23:21 +01003298 gt_irqs |= ILK_BSD_USER_INTERRUPT;
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003299 } else {
3300 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3301 }
3302
Paulo Zanoni35079892014-04-01 15:37:15 -03003303 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003304
Tvrtko Ursulinb243f532016-11-16 08:55:38 +00003305 if (INTEL_GEN(dev_priv) >= 6) {
Imre Deak78e68d32014-12-15 18:59:27 +02003306 /*
3307 * RPS interrupts will get enabled/disabled on demand when RPS
3308 * itself is enabled/disabled.
3309 */
Akash Goelf4e9af42016-10-12 21:54:30 +05303310 if (HAS_VEBOX(dev_priv)) {
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003311 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
Akash Goelf4e9af42016-10-12 21:54:30 +05303312 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
3313 }
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003314
Akash Goelf4e9af42016-10-12 21:54:30 +05303315 dev_priv->pm_imr = 0xffffffff;
3316 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
Daniel Vetter0a9a8c92013-07-12 22:43:26 +02003317 }
3318}
3319
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

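/*
 * A note on the display_mask/extra_mask split in ironlake_irq_postinstall():
 * both masks are enabled in DEIER, but only display_mask is also unmasked in
 * DEIMR. An event is reported to the CPU only when it is enabled and not
 * masked, so conceptually (illustrative only):
 *
 *	reported = I915_READ(DEIIR) & display_mask;
 *
 * The extra_mask bits (vblank, underruns, ...) still latch in DEIIR but stay
 * masked until something like ilk_enable_display_irq() unmasks them on demand.
 */
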
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

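/*
 * GEN8_IRQ_INIT_NDX() plays the same role as GEN5_IRQ_INIT() but takes a
 * bank index, since gen8 splits the GT interrupts across four IMR/IIR/IER
 * register triplets. A rough sketch of the idea (simplified; the register
 * accessors here are illustrative, not necessarily the exact macro body):
 *
 *	#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
 *		gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
 *		I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
 *		I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
 *		POSTING_READ(GEN8_##type##_IMR(which)); \
 *	} while (0)
 */
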
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_MISC_GSE;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_GEN9_LP(dev_priv))
		bxt_hpd_detection_setup(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		ilk_hpd_detection_setup(dev_priv);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

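/*
 * Note the ordering in gen8_irq_postinstall() above: every sub-block
 * (PCH, GT, DE) is programmed first and GEN8_MASTER_IRQ_CONTROL is written
 * last. The master bit gates all other sources, so enabling it only after
 * the sub-blocks are configured avoids servicing a half-initialized
 * interrupt tree; the POSTING_READ() then makes sure the enable has reached
 * the hardware before anyone relies on interrupts.
 */
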
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

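/*
 * PIPESTAT registers, programmed via i915_enable_pipestat() above, pack the
 * enable bits in the high 16 bits and the matching status bits (write-1-to-
 * clear) in the low 16 bits of one register. Enabling an event is roughly
 * (illustrative sketch; the real helper also caches the mask in dev_priv and
 * asserts that irq_lock is held):
 *
 *	u32 pipestat = I915_READ(PIPESTAT(pipe));
 *	// set the enable bit and write the status bit to clear stale state
 *	pipestat |= PIPE_CRC_DONE_ENABLE | PIPE_CRC_DONE_INTERRUPT_STATUS;
 *	I915_WRITE(PIPESTAT(pipe), pipestat);
 *	POSTING_READ(PIPESTAT(pipe));
 */
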
/*
 * Interrupt handler for gen2; note that gen2 uses 16 bit IIR/IMR/IER,
 * hence the I915_{READ,WRITE}16() accessors.
 */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	irqreturn_t ret;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	ret = IRQ_NONE;
	iir = I915_READ16(IIR);
	if (iir == 0)
		goto out;

	while (iir) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(&dev_priv->drm, pipe);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}
	ret = IRQ_HANDLED;

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev_priv);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);
	do {
		bool irq_received = iir != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev_priv))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(&dev_priv->drm, pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later. So just do it once.
	*/
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

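/*
 * i915_hotplug_interrupt_update_locked(), used by i915_hpd_irq_setup() and
 * friends, is a plain read-modify-write of PORT_HOTPLUG_EN: only the bits
 * named in the mask argument are touched, and they take the new values.
 * Roughly (illustrative sketch):
 *
 *	u32 val = I915_READ(PORT_HOTPLUG_EN);
 *	val &= ~mask;
 *	val |= bits;
 *	I915_WRITE(PORT_HOTPLUG_EN, val);
 *
 * Passing mask == 0xffffffff with bits == 0, as the preinstall/uninstall
 * hooks do, therefore clears every hotplug enable bit in one go.
 */
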
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = iir != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			i915_reg_t reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
			if (hotplug_status)
				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(&dev_priv->drm, pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev_priv, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev_priv);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev_priv);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang (and VLV/CHV may as well) on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

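/*
 * After intel_irq_init() the drm_driver vtable is fully wired up and the DRM
 * core drives the hooks in a fixed order once the interrupt line is
 * requested; schematically (illustrative only, not the literal core code):
 *
 *	dev->driver->irq_preinstall(dev);   // mask/reset everything
 *	request_irq(irq, dev->driver->irq_handler, ...);
 *	dev->driver->irq_postinstall(dev);  // unmask what we want
 */
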
/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

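/*
 * Typical ordering of the entry points in this file over a driver's
 * lifetime, per the kernel-doc above (sketch):
 *
 *	intel_irq_init(dev_priv);           // work items, vtables
 *	intel_irq_install(dev_priv);        // arm the hardware interrupt
 *	...
 *	intel_irq_uninstall(dev_priv);      // quiesce and unregister
 *	intel_irq_fini(dev_priv);           // free leftover allocations
 */
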
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
Paulo Zanonic67a4702013-08-19 13:18:09 -03004369}