/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)

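/*
 * Illustrative expansion (editor's sketch, not driver code): a reset
 * hook invoking GEN3_IRQ_RESET(DE) would effectively perform
 *
 *	I915_WRITE(DEIMR, 0xffffffff);
 *	POSTING_READ(DEIMR);
 *	I915_WRITE(DEIER, 0);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *
 * i.e. mask everything, disable delivery, then clear IIR twice because
 * a second event may already be queued behind the one being cleared.
 */
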
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid interference between the
 * read-modify-write cycles, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where the
 * lock is held already, this function acquires the lock itself. A
 * non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

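/*
 * Usage sketch (illustrative only): to enable CRT hotplug detection
 * without disturbing the other enable bits, and later disable it again:
 *
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *	...
 *	i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN, 0);
 *
 * Only the bits in @mask are touched; everything else in
 * PORT_HOTPLUG_EN is left as-is.
 */
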
static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

bool gen11_reset_one_iir(struct drm_i915_private * const i915,
			 const unsigned int bank,
			 const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

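/*
 * Calling convention sketch (illustrative): callers typically unmask a
 * set of DEIMR bits by passing the same value as both arguments, and
 * mask it again by passing 0 as the enabled mask:
 *
 *	ilk_update_display_irq(dev_priv, bits, bits);	- unmask "bits"
 *	ilk_update_display_irq(dev_priv, bits, 0);	- mask "bits"
 */
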
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_MASK;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IMR(2);
	else
		return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IER(2);
	else
		return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* a barrier is missing here, but we don't really need one */
}

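/*
 * Quick reference (editor's summary, not from the original file):
 * gen6_enable_pm_irq() sets the IER bits and then unmasks IMR, while
 * gen6_mask_pm_irq()/gen6_unmask_pm_irq() only flip IMR bits and leave
 * IER alone, so an event source stays armed but silenced. E.g. to
 * silence the RPS events temporarily:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	gen6_mask_pm_irq(dev_priv, dev_priv->pm_rps_events);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */
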
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

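/*
 * Register layout note (illustrative, using values from i915_reg.h):
 * PIPESTAT packs the enable bits in the upper half and the status bits
 * in the lower half of the same register, so for most bits the
 * "status_mask << 16" above lands exactly on the matching enable bit,
 * e.g. PIPE_VBLANK_INTERRUPT_STATUS (bit 1) pairs with
 * PIPE_VBLANK_INTERRUPT_ENABLE (bit 17); the VLV/CHV special cases are
 * patched up by hand afterwards.
 */
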
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

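/*
 * Worked example (made-up numbers): say vbl_start works out to
 * 799 * htotal pixels and the sampled pixel counter reads anywhere at
 * or beyond that, then (pixel >= vbl_start) contributes +1 and the
 * cooked counter already accounts for the frame whose vblank we are
 * currently in, even though the hardware frame count in
 * ((high1 << 8) | low) only increments at the start of active.
 */
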
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe
 * scanline register will not work to get the scanline,
 * either because the timings are driven from the PORT or
 * because of issues with scanline register updates.
 * This function will use the Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

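/*
 * Units sketch for the math above (editor's note, assuming the
 * timestamp counter ticks in microseconds): the delta between the
 * current timestamp and the frame timestamp (us) times the pixel
 * clock (kHz) gives pixels scanned out since the last vblank scaled
 * by 1000, hence the division by 1000 * htotal to land on whole
 * scanlines before clamping and rotating by vblank_start.
 */
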
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

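/*
 * Example of the returned convention (illustrative numbers, scanline
 * based path): with vbl_start = 2400, vbl_end = 2500 and vtotal = 2525,
 * a raw position of 2450 (inside vblank) becomes *vpos = 2450 - 2500 =
 * -50, while a raw position of 100 (in the active area) becomes
 * *vpos = 100 + 2525 - 2500 = 125, counting up from vbl_end in both
 * cases.
 */
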
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	const u32 seqno = intel_engine_get_seqno(engine);
	struct i915_request *rq = NULL;
	struct task_struct *tsk = NULL;
	struct intel_wait *wait;

	if (unlikely(!engine->breadcrumbs.irq_armed))
		return;

	rcu_read_lock();

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/*
		 * We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(seqno, wait->seqno)) {
			struct i915_request *waiter = wait->request;

			if (waiter &&
			    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &waiter->fence.flags) &&
			    intel_wait_check_request(wait, waiter))
				rq = i915_request_get(waiter);

			tsk = wait->tsk;
		} else {
			if (engine->irq_seqno_barrier &&
			    i915_seqno_passed(seqno, wait->seqno - 1)) {
				set_bit(ENGINE_IRQ_BREADCRUMB,
					&engine->irq_posted);
				tsk = wait->tsk;
			}
		}

		engine->breadcrumbs.irq_count++;
	} else {
		if (engine->breadcrumbs.irq_armed)
			__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		spin_lock(&rq->lock);
		dma_fence_signal_locked(&rq->fence);
		GEM_BUG_ON(!i915_request_completed(rq));
		spin_unlock(&rq->lock);

		i915_request_put(rq);
	}

	if (tsk && tsk->state & TASK_NORMAL)
		wake_up_process(tsk);

	rcu_read_unlock();

	trace_intel_engine_notify(engine, wait);
}

Chris Wilson43cf3bf2015-03-18 09:48:22 +00001225static void vlv_c0_read(struct drm_i915_private *dev_priv,
1226 struct intel_rps_ei *ei)
Deepak S31685c22014-07-03 17:33:01 -04001227{
Mika Kuoppala679cb6c2017-03-15 17:43:03 +02001228 ei->ktime = ktime_get_raw();
Chris Wilson43cf3bf2015-03-18 09:48:22 +00001229 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1230 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
Deepak S31685c22014-07-03 17:33:01 -04001231}
1232
Chris Wilson43cf3bf2015-03-18 09:48:22 +00001233void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1234{
Sagar Arun Kamble562d9ba2017-10-10 22:30:06 +01001235 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
Chris Wilson43cf3bf2015-03-18 09:48:22 +00001236}
1237
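/*
 * VLV/CHV workaround: the up/down threshold events are synthesized in
 * software. On each EI-expired interrupt we sample the render/media C0
 * residency counters, compare busyness against the RPS thresholds, and
 * return the equivalent UP/DOWN_THRESHOLD event bits.
 */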
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

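		/*
		 * Unit check (sketch): "time" is us scaled by czclk_freq,
		 * while c0 counts C0 residency ticks. The 1000 reconciles
		 * the us vs kHz time bases, the 100 lets up_threshold and
		 * down_threshold be plain percentages, and the << 8 is
		 * assumed to match the C0 counter granularity, so that the
		 * comparisons below reduce to busy% vs threshold%.
		 */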
		if (c0 > time * rps->up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

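/*
 * gen6_pm_rps_work - deferred RPS reclocking. The interrupt path only
 * latches and masks the PM IIR bits; this work item folds in any client
 * boost, picks a new frequency within the soft limits, programs it via
 * intel_set_rps() and finally unmasks the RPS interrupts.
 */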
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->pcu_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

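/*
 * Top half for L3 parity errors: record which slice(s) tripped, mask the
 * parity interrupt so it cannot storm, and defer the register reads and
 * uevent to ivybridge_parity_work() above.
 */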
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

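/*
 * gen8_cs_irq_handler - per-engine dispatch for gen8+. A context-switch
 * interrupt kicks the execlists tasklet; a user interrupt wakes waiters
 * via notify_ring() and, under GuC submission, also needs the tasklet.
 */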
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		notify_ring(engine);
		tasklet |= USES_GUC_SUBMISSION(engine->i915);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}

static void gen8_gt_irq_ack(struct drm_i915_private *i915,
			    u32 master_ctl, u32 gt_iir[4])
{
	void __iomem * const regs = i915->regs;

#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
		      GEN8_GT_BCS_IRQ | \
		      GEN8_GT_VCS1_IRQ | \
		      GEN8_GT_VCS2_IRQ | \
		      GEN8_GT_VECS_IRQ | \
		      GEN8_GT_PM_IRQ | \
		      GEN8_GT_GUC_IRQ)

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(gt_iir[2] & (i915->pm_rps_events |
					i915->pm_guc_events)))
			raw_reg_write(regs, GEN8_GT_IIR(2),
				      gt_iir[2] & (i915->pm_rps_events |
						   i915->pm_guc_events));
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
	}
}

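/*
 * Note the ack/handler split: gen8_gt_irq_ack() runs in the hard irq with
 * the master interrupt disabled, latching each pending GEN8_GT_IIR bank
 * into gt_iir[] and clearing it in hardware; gen8_gt_irq_handler() below
 * then dispatches from that snapshot, possibly after the master interrupt
 * has been re-enabled (see the VLV/CHV handlers further down).
 */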
static void gen8_gt_irq_handler(struct drm_i915_private *i915,
				u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gen8_cs_irq_handler(i915->engine[RCS],
				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[BCS],
				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gen8_cs_irq_handler(i915->engine[VCS],
				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[VCS2],
				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gen8_cs_irq_handler(i915->engine[VECS],
				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(i915, gt_iir[2]);
		gen9_guc_irq_handler(i915, gt_iir[2]);
	}
}

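/*
 * The *_port_hotplug_long_detect() helpers below share one contract:
 * given an hpd_pin and the raw hotplug register value for the platform,
 * report whether that pin saw a long pulse. They are consumed as the
 * long_pulse_detect callback of intel_get_hpd_pins().
 */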
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & ICP_DDIA_HPD_LONG_DETECT;
	case HPD_PORT_B:
		return val & ICP_DDIB_HPD_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
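
/*
 * A typical call sequence (sketch, mirroring ibx_hpd_irq_handler() below):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
 *			   hotplug_trigger, dig_hotplug_reg,
 *			   hpd_ibx, pch_port_hotplug_long_detect);
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */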

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	uint32_t crcs[5];

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	crcs[0] = crc0;
	crcs[1] = crc1;
	crcs[2] = crc2;
	crcs[3] = crc3;
	crcs[4] = crc4;
	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
		intel_guc_to_host_event_handler(&dev_priv->guc);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

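/*
 * Pipestat handling mirrors the GT ack/handler split:
 * i9xx_pipestat_irq_ack() runs in the hard irq, reading and clearing the
 * enabled PIPESTAT bits into pipe_stats[], and the per-platform
 * *_pipestat_irq_handler() functions below act on that snapshot
 * (vblank, CRC, FIFO underruns, GMBUS).
 */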
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = I915_READ(reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			I915_WRITE(reg, pipe_stats[pipe]);
			I915_WRITE(reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(&dev_priv->drm, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
	u32 hotplug_status = 0, hotplug_status_mask;
	int i;

	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
	else
		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;

	/*
	 * We absolutely have to clear all the pending interrupt
	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
	 * interrupt bit won't have an edge, and the i965/g4x
	 * edge triggered IIR will not notice that an interrupt
	 * is still pending. We can't use PORT_HOTPLUG_EN to
	 * guarantee the edge as the act of toggling the enable
	 * bits can itself generate a new hotplug interrupt :(
	 */
	for (i = 0; i < 10; i++) {
		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;

		if (tmp == 0)
			return hotplug_status;

		hotplug_status |= tmp;
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	}

	WARN_ONCE(1,
		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
		  I915_READ(PORT_HOTPLUG_STAT));

	return hotplug_status;
}

static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
				 u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_g4x,
					   i9xx_port_hotplug_long_detect);

			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev_priv);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		if (hotplug_trigger) {
			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
					   hotplug_trigger, hotplug_trigger,
					   hpd_status_i915,
					   i9xx_port_hotplug_long_detect);
			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);
		iir = I915_READ(VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		I915_WRITE(VLV_MASTER_IER, 0);
		ier = I915_READ(VLV_IER);
		I915_WRITE(VLV_IER, 0);

		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			I915_WRITE(VLV_IIR, iir);

		I915_WRITE(VLV_IER, ier);
		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			snb_gt_irq_handler(dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

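/*
 * The CHV handler below follows the same recipe as
 * valleyview_irq_handler() above, but drives the gen8 master control and
 * GT IIR banks: disable GEN8_MASTER_IRQ_CONTROL and VLV_IER to force a
 * fresh 0->1 edge, ack the GT banks into gt_iir[], clear VLV_IIR last,
 * then dispatch.
 */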
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002187static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2188{
Daniel Vetter45a83f82014-05-12 19:17:55 +02002189 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01002190 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002191 irqreturn_t ret = IRQ_NONE;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002192
Imre Deak2dd2a882015-02-24 11:14:30 +02002193 if (!intel_irqs_enabled(dev_priv))
2194 return IRQ_NONE;
2195
Imre Deak1f814da2015-12-16 02:52:19 +02002196 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2197 disable_rpm_wakeref_asserts(dev_priv);
2198
Chris Wilson579de732016-03-14 09:01:57 +00002199 do {
Ville Syrjälä6e814802016-04-13 21:19:53 +03002200 u32 master_ctl, iir;
Ville Syrjälä2ecb8ca2016-04-13 21:19:55 +03002201 u32 pipe_stats[I915_MAX_PIPES] = {};
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03002202 u32 hotplug_status = 0;
Chris Wilsonf0fd96f2018-02-15 07:37:12 +00002203 u32 gt_iir[4];
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03002204 u32 ier = 0;
2205
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03002206 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2207 iir = I915_READ(VLV_IIR);
Ville Syrjälä3278f672014-04-09 13:28:49 +03002208
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03002209 if (master_ctl == 0 && iir == 0)
2210 break;
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002211
Oscar Mateo27b6c122014-06-16 16:11:00 +01002212 ret = IRQ_HANDLED;
2213
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03002214 /*
2215 * Theory on interrupt generation, based on empirical evidence:
2216 *
2217 * x = ((VLV_IIR & VLV_IER) ||
2218 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
2219 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
2220 *
2221 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
2222 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
2223 * guarantee the CPU interrupt will be raised again even if we
2224 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
2225 * bits this time around.
2226 */
Ville Syrjälä8e5fd592014-04-09 13:28:50 +03002227 I915_WRITE(GEN8_MASTER_IRQ, 0);
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03002228 ier = I915_READ(VLV_IER);
2229 I915_WRITE(VLV_IER, 0);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002230
Ville Syrjäläe30e2512016-04-13 21:19:58 +03002231 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002232
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03002233 if (iir & I915_DISPLAY_PORT_INTERRUPT)
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03002234 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03002235
Oscar Mateo27b6c122014-06-16 16:11:00 +01002236 /* Call regardless, as some status bits might not be
2237 * signalled in iir */
Ville Syrjäläeb643432017-08-18 21:36:59 +03002238 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002239
Jerome Anandeef57322017-01-25 04:27:49 +05302240 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
2241 I915_LPE_PIPE_B_INTERRUPT |
2242 I915_LPE_PIPE_C_INTERRUPT))
2243 intel_lpe_audio_irq_handler(dev_priv);
2244
Ville Syrjälä7ce4d1f2016-04-13 21:19:49 +03002245 /*
2246 * VLV_IIR is single buffered, and reflects the level
2247 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
2248 */
2249 if (iir)
2250 I915_WRITE(VLV_IIR, iir);
2251
Ville Syrjäläa5e485a2016-04-13 21:19:51 +03002252 I915_WRITE(VLV_IER, ier);
Ville Syrjäläe5328c42016-04-13 21:19:47 +03002253 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03002254
Chris Wilsonf0fd96f2018-02-15 07:37:12 +00002255 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
Ville Syrjäläe30e2512016-04-13 21:19:58 +03002256
Ville Syrjälä1ae3c342016-04-13 21:19:54 +03002257 if (hotplug_status)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002258 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
Ville Syrjälä2ecb8ca2016-04-13 21:19:55 +03002259
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002260 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
Chris Wilson579de732016-03-14 09:01:57 +00002261 } while (0);
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002262
Imre Deak1f814da2015-12-16 02:52:19 +02002263 enable_rpm_wakeref_asserts(dev_priv);
2264
Ville Syrjälä43f328d2014-04-09 20:40:52 +03002265 return ret;
2266}
2267
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002268static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
2269 u32 hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002270 const u32 hpd[HPD_NUM_PINS])
2271{
Ville Syrjälä40e56412015-08-27 23:56:10 +03002272 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2273
Jani Nikula6a39d7c2015-11-25 16:47:22 +02002274 /*
2275 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
2276 * unless we touch the hotplug register, even if hotplug_trigger is
2277 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
2278 * errors.
2279 */
Ville Syrjälä40e56412015-08-27 23:56:10 +03002280 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
Jani Nikula6a39d7c2015-11-25 16:47:22 +02002281 if (!hotplug_trigger) {
2282 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
2283 PORTD_HOTPLUG_STATUS_MASK |
2284 PORTC_HOTPLUG_STATUS_MASK |
2285 PORTB_HOTPLUG_STATUS_MASK;
2286 dig_hotplug_reg &= ~mask;
2287 }
2288
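	/*
	 * Writing the latched value back acks the hotplug event: the
	 * sticky status bits clear when a 1 is written back, while the
	 * enable bits are simply rewritten unchanged.
	 */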
Ville Syrjälä40e56412015-08-27 23:56:10 +03002289 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
Jani Nikula6a39d7c2015-11-25 16:47:22 +02002290 if (!hotplug_trigger)
2291 return;
Ville Syrjälä40e56412015-08-27 23:56:10 +03002292
Rodrigo Vivicf539022018-01-29 15:22:21 -08002293 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002294 dig_hotplug_reg, hpd,
2295 pch_port_hotplug_long_detect);
2296
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002297 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Ville Syrjälä40e56412015-08-27 23:56:10 +03002298}
2299
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002300static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
Jesse Barnes776ad802011-01-04 15:09:39 -08002301{
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002302 int pipe;
Egbert Eichb543fb02013-04-16 13:36:54 +02002303 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
Jesse Barnes776ad802011-01-04 15:09:39 -08002304
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002305 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
Daniel Vetter91d131d2013-06-27 17:52:14 +02002306
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03002307 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2308 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2309 SDE_AUDIO_POWER_SHIFT);
Jesse Barnes776ad802011-01-04 15:09:39 -08002310 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03002311 port_name(port));
2312 }
Jesse Barnes776ad802011-01-04 15:09:39 -08002313
Daniel Vetterce99c252012-12-01 13:53:47 +01002314 if (pch_iir & SDE_AUX_MASK)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002315 dp_aux_irq_handler(dev_priv);
Daniel Vetterce99c252012-12-01 13:53:47 +01002316
Jesse Barnes776ad802011-01-04 15:09:39 -08002317 if (pch_iir & SDE_GMBUS)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002318 gmbus_irq_handler(dev_priv);
Jesse Barnes776ad802011-01-04 15:09:39 -08002319
2320 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2321 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2322
2323 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2324 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2325
2326 if (pch_iir & SDE_POISON)
2327 DRM_ERROR("PCH poison interrupt\n");
2328
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002329 if (pch_iir & SDE_FDI_MASK)
Damien Lespiau055e3932014-08-18 13:49:10 +01002330 for_each_pipe(dev_priv, pipe)
Jesse Barnes9db4a9c2011-02-07 12:26:52 -08002331 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2332 pipe_name(pipe),
2333 I915_READ(FDI_RX_IIR(pipe)));
Jesse Barnes776ad802011-01-04 15:09:39 -08002334
2335 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2336 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2337
2338 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2339 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2340
Jesse Barnes776ad802011-01-04 15:09:39 -08002341 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
Matthias Kaehlckea2196032017-07-17 11:14:03 -07002342 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
Paulo Zanoni86642812013-04-12 17:57:57 -03002343
2344 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
Matthias Kaehlckea2196032017-07-17 11:14:03 -07002345 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
Paulo Zanoni86642812013-04-12 17:57:57 -03002346}
2347
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002348static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
Paulo Zanoni86642812013-04-12 17:57:57 -03002349{
Paulo Zanoni86642812013-04-12 17:57:57 -03002350 u32 err_int = I915_READ(GEN7_ERR_INT);
Daniel Vetter5a69b892013-10-16 22:55:52 +02002351 enum pipe pipe;
Paulo Zanoni86642812013-04-12 17:57:57 -03002352
Paulo Zanonide032bf2013-04-12 17:57:58 -03002353 if (err_int & ERR_INT_POISON)
2354 DRM_ERROR("Poison interrupt\n");
2355
Damien Lespiau055e3932014-08-18 13:49:10 +01002356 for_each_pipe(dev_priv, pipe) {
Daniel Vetter1f7247c2014-09-30 10:56:48 +02002357 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2358 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanoni86642812013-04-12 17:57:57 -03002359
Daniel Vetter5a69b892013-10-16 22:55:52 +02002360 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002361 if (IS_IVYBRIDGE(dev_priv))
2362 ivb_pipe_crc_irq_handler(dev_priv, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02002363 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002364 hsw_pipe_crc_irq_handler(dev_priv, pipe);
Daniel Vetter5a69b892013-10-16 22:55:52 +02002365 }
2366 }
Shuang He8bf1e9f2013-10-15 18:55:27 +01002367
Paulo Zanoni86642812013-04-12 17:57:57 -03002368 I915_WRITE(GEN7_ERR_INT, err_int);
2369}
2370
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002371static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
Paulo Zanoni86642812013-04-12 17:57:57 -03002372{
Paulo Zanoni86642812013-04-12 17:57:57 -03002373 u32 serr_int = I915_READ(SERR_INT);
Mika Kahola45c1cd82017-10-10 13:17:06 +03002374 enum pipe pipe;
Paulo Zanoni86642812013-04-12 17:57:57 -03002375
Paulo Zanonide032bf2013-04-12 17:57:58 -03002376 if (serr_int & SERR_INT_POISON)
2377 DRM_ERROR("PCH poison interrupt\n");
2378
Mika Kahola45c1cd82017-10-10 13:17:06 +03002379 for_each_pipe(dev_priv, pipe)
2380 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
2381 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanoni86642812013-04-12 17:57:57 -03002382
2383 I915_WRITE(SERR_INT, serr_int);
Jesse Barnes776ad802011-01-04 15:09:39 -08002384}
2385
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002386static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
Adam Jackson23e81d62012-06-06 15:45:44 -04002387{
Adam Jackson23e81d62012-06-06 15:45:44 -04002388 int pipe;
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002389 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
Adam Jackson23e81d62012-06-06 15:45:44 -04002390
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002391 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
Daniel Vetter91d131d2013-06-27 17:52:14 +02002392
Ville Syrjäläcfc33bf2013-04-17 17:48:48 +03002393 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2394 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2395 SDE_AUDIO_POWER_SHIFT_CPT);
2396 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2397 port_name(port));
2398 }
Adam Jackson23e81d62012-06-06 15:45:44 -04002399
2400 if (pch_iir & SDE_AUX_MASK_CPT)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002401 dp_aux_irq_handler(dev_priv);
Adam Jackson23e81d62012-06-06 15:45:44 -04002402
2403 if (pch_iir & SDE_GMBUS_CPT)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002404 gmbus_irq_handler(dev_priv);
Adam Jackson23e81d62012-06-06 15:45:44 -04002405
2406 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2407 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2408
2409 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2410 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2411
2412 if (pch_iir & SDE_FDI_MASK_CPT)
Damien Lespiau055e3932014-08-18 13:49:10 +01002413 for_each_pipe(dev_priv, pipe)
Adam Jackson23e81d62012-06-06 15:45:44 -04002414 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2415 pipe_name(pipe),
2416 I915_READ(FDI_RX_IIR(pipe)));
Paulo Zanoni86642812013-04-12 17:57:57 -03002417
2418 if (pch_iir & SDE_ERROR_CPT)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002419 cpt_serr_int_handler(dev_priv);
Adam Jackson23e81d62012-06-06 15:45:44 -04002420}
2421
Anusha Srivatsa31604222018-06-26 13:52:23 -07002422static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2423{
2424 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
2425 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
2426 u32 pin_mask = 0, long_mask = 0;
2427
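	/*
	 * On ICP the DDI and TC ports latch hotplug status in separate
	 * SHOTPLUG_CTL registers; each is read and written back to ack
	 * before the long/short pulses are decoded.
	 */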
2428 if (ddi_hotplug_trigger) {
2429 u32 dig_hotplug_reg;
2430
2431 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
2432 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
2433
2434 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2435 ddi_hotplug_trigger,
2436 dig_hotplug_reg, hpd_icp,
2437 icp_ddi_port_hotplug_long_detect);
2438 }
2439
2440 if (tc_hotplug_trigger) {
2441 u32 dig_hotplug_reg;
2442
2443 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
2444 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
2445
2446 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2447 tc_hotplug_trigger,
2448 dig_hotplug_reg, hpd_icp,
2449 icp_tc_port_hotplug_long_detect);
2450 }
2451
2452 if (pin_mask)
2453 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2454
2455 if (pch_iir & SDE_GMBUS_ICP)
2456 gmbus_irq_handler(dev_priv);
2457}
2458
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002459static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002460{
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002461 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2462 ~SDE_PORTE_HOTPLUG_SPT;
2463 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2464 u32 pin_mask = 0, long_mask = 0;
2465
2466 if (hotplug_trigger) {
2467 u32 dig_hotplug_reg;
2468
2469 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2470 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2471
Rodrigo Vivicf539022018-01-29 15:22:21 -08002472 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2473 hotplug_trigger, dig_hotplug_reg, hpd_spt,
Ville Syrjälä74c0b392015-08-27 23:56:07 +03002474 spt_port_hotplug_long_detect);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002475 }
2476
2477 if (hotplug2_trigger) {
2478 u32 dig_hotplug_reg;
2479
2480 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2481 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2482
Rodrigo Vivicf539022018-01-29 15:22:21 -08002483 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2484 hotplug2_trigger, dig_hotplug_reg, hpd_spt,
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002485 spt_port_hotplug2_long_detect);
2486 }
2487
2488 if (pin_mask)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002489 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002490
2491 if (pch_iir & SDE_GMBUS_CPT)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002492 gmbus_irq_handler(dev_priv);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002493}
2494
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002495static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2496 u32 hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002497 const u32 hpd[HPD_NUM_PINS])
2498{
Ville Syrjälä40e56412015-08-27 23:56:10 +03002499 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2500
2501 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2502 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2503
Rodrigo Vivicf539022018-01-29 15:22:21 -08002504 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002505 dig_hotplug_reg, hpd,
2506 ilk_port_hotplug_long_detect);
2507
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002508 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Ville Syrjälä40e56412015-08-27 23:56:10 +03002509}
2510
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002511static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2512 u32 de_iir)
Paulo Zanonic008bc62013-07-12 16:35:10 -03002513{
Daniel Vetter40da17c22013-10-21 18:04:36 +02002514 enum pipe pipe;
Ville Syrjäläe4ce95a2015-08-27 23:56:03 +03002515 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2516
Ville Syrjälä40e56412015-08-27 23:56:10 +03002517 if (hotplug_trigger)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002518 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002519
2520 if (de_iir & DE_AUX_CHANNEL_A)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002521 dp_aux_irq_handler(dev_priv);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002522
2523 if (de_iir & DE_GSE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002524 intel_opregion_asle_intr(dev_priv);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002525
Paulo Zanonic008bc62013-07-12 16:35:10 -03002526 if (de_iir & DE_POISON)
2527 DRM_ERROR("Poison interrupt\n");
2528
Damien Lespiau055e3932014-08-18 13:49:10 +01002529 for_each_pipe(dev_priv, pipe) {
Daniel Vetterfd3a4022017-07-20 19:57:51 +02002530 if (de_iir & DE_PIPE_VBLANK(pipe))
2531 drm_handle_vblank(&dev_priv->drm, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002532
Daniel Vetter40da17c22013-10-21 18:04:36 +02002533 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
Daniel Vetter1f7247c2014-09-30 10:56:48 +02002534 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002535
Daniel Vetter40da17c22013-10-21 18:04:36 +02002536 if (de_iir & DE_PIPE_CRC_DONE(pipe))
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002537 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002538 }
2539
2540 /* check event from PCH */
2541 if (de_iir & DE_PCH_EVENT) {
2542 u32 pch_iir = I915_READ(SDEIIR);
2543
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002544 if (HAS_PCH_CPT(dev_priv))
2545 cpt_irq_handler(dev_priv, pch_iir);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002546 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002547 ibx_irq_handler(dev_priv, pch_iir);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002548
2549 /* should clear PCH hotplug event before clear CPU irq */
2550 I915_WRITE(SDEIIR, pch_iir);
2551 }
2552
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002553 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2554 ironlake_rps_change_irq_handler(dev_priv);
Paulo Zanonic008bc62013-07-12 16:35:10 -03002555}
2556
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002557static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2558 u32 de_iir)
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002559{
Damien Lespiau07d27e22014-03-03 17:31:46 +00002560 enum pipe pipe;
Ville Syrjälä23bb4cb2015-08-27 23:56:04 +03002561 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2562
Ville Syrjälä40e56412015-08-27 23:56:10 +03002563 if (hotplug_trigger)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002564 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002565
2566 if (de_iir & DE_ERR_INT_IVB)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002567 ivb_err_int_handler(dev_priv);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002568
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002569 if (de_iir & DE_EDP_PSR_INT_HSW) {
2570 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2571
2572 intel_psr_irq_handler(dev_priv, psr_iir);
2573 I915_WRITE(EDP_PSR_IIR, psr_iir);
2574 }
Daniel Vetterfc340442018-04-05 15:00:23 -07002575
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002576 if (de_iir & DE_AUX_CHANNEL_A_IVB)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002577 dp_aux_irq_handler(dev_priv);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002578
2579 if (de_iir & DE_GSE_IVB)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002580 intel_opregion_asle_intr(dev_priv);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002581
Damien Lespiau055e3932014-08-18 13:49:10 +01002582 for_each_pipe(dev_priv, pipe) {
Daniel Vetterfd3a4022017-07-20 19:57:51 +02002583 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2584 drm_handle_vblank(&dev_priv->drm, pipe);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002585 }
2586
2587 /* check event from PCH */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002588 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002589 u32 pch_iir = I915_READ(SDEIIR);
2590
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002591 cpt_irq_handler(dev_priv, pch_iir);
Paulo Zanoni9719fb92013-07-12 16:35:11 -03002592
2593 /* clear PCH hotplug event before clear CPU irq */
2594 I915_WRITE(SDEIIR, pch_iir);
2595 }
2596}
2597
Oscar Mateo72c90f62014-06-16 16:10:57 +01002598/*
 2599 * To handle irqs with a minimum of races against fresh interrupts, we:
2600 * 1 - Disable Master Interrupt Control.
2601 * 2 - Find the source(s) of the interrupt.
2602 * 3 - Clear the Interrupt Identity bits (IIR).
2603 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2604 * 5 - Re-enable Master Interrupt Control.
2605 */
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002606static irqreturn_t ironlake_irq_handler(int irq, void *arg)
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002607{
Daniel Vetter45a83f82014-05-12 19:17:55 +02002608 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01002609 struct drm_i915_private *dev_priv = to_i915(dev);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002610 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
Chris Wilson0e434062012-05-09 21:45:44 +01002611 irqreturn_t ret = IRQ_NONE;
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002612
Imre Deak2dd2a882015-02-24 11:14:30 +02002613 if (!intel_irqs_enabled(dev_priv))
2614 return IRQ_NONE;
2615
Imre Deak1f814da2015-12-16 02:52:19 +02002616 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2617 disable_rpm_wakeref_asserts(dev_priv);
2618
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002619 /* disable master interrupt before clearing iir */
2620 de_ier = I915_READ(DEIER);
2621 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
Chris Wilson0e434062012-05-09 21:45:44 +01002622
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002623 /* Disable south interrupts. We'll only write to SDEIIR once, so further
 2624 * interrupts will be stored on its back queue, and then we'll be
2625 * able to process them after we restore SDEIER (as soon as we restore
2626 * it, we'll get an interrupt if SDEIIR still has something to process
2627 * due to its back queue). */
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002628 if (!HAS_PCH_NOP(dev_priv)) {
Ben Widawskyab5c6082013-04-05 13:12:41 -07002629 sde_ier = I915_READ(SDEIER);
2630 I915_WRITE(SDEIER, 0);
Ben Widawskyab5c6082013-04-05 13:12:41 -07002631 }
Paulo Zanoni44498ae2013-02-22 17:05:28 -03002632
Oscar Mateo72c90f62014-06-16 16:10:57 +01002633 /* Find, clear, then process each source of interrupt */
2634
Chris Wilson0e434062012-05-09 21:45:44 +01002635 gt_iir = I915_READ(GTIIR);
2636 if (gt_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002637 I915_WRITE(GTIIR, gt_iir);
2638 ret = IRQ_HANDLED;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002639 if (INTEL_GEN(dev_priv) >= 6)
Ville Syrjälä261e40b2016-04-13 21:19:57 +03002640 snb_gt_irq_handler(dev_priv, gt_iir);
Paulo Zanonid8fc8a42013-07-19 18:57:55 -03002641 else
Ville Syrjälä261e40b2016-04-13 21:19:57 +03002642 ilk_gt_irq_handler(dev_priv, gt_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002643 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002644
2645 de_iir = I915_READ(DEIIR);
Chris Wilson0e434062012-05-09 21:45:44 +01002646 if (de_iir) {
Oscar Mateo72c90f62014-06-16 16:10:57 +01002647 I915_WRITE(DEIIR, de_iir);
2648 ret = IRQ_HANDLED;
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002649 if (INTEL_GEN(dev_priv) >= 7)
2650 ivb_display_irq_handler(dev_priv, de_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002651 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002652 ilk_display_irq_handler(dev_priv, de_iir);
Chris Wilson0e434062012-05-09 21:45:44 +01002653 }
2654
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002655 if (INTEL_GEN(dev_priv) >= 6) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002656 u32 pm_iir = I915_READ(GEN6_PMIIR);
2657 if (pm_iir) {
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002658 I915_WRITE(GEN6_PMIIR, pm_iir);
2659 ret = IRQ_HANDLED;
Oscar Mateo72c90f62014-06-16 16:10:57 +01002660 gen6_rps_irq_handler(dev_priv, pm_iir);
Paulo Zanonif1af8fc2013-07-12 19:56:30 -03002661 }
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002662 }
2663
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002664 I915_WRITE(DEIER, de_ier);
Chris Wilson74093f32018-06-28 21:12:03 +01002665 if (!HAS_PCH_NOP(dev_priv))
Ben Widawskyab5c6082013-04-05 13:12:41 -07002666 I915_WRITE(SDEIER, sde_ier);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002667
Imre Deak1f814da2015-12-16 02:52:19 +02002668 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2669 enable_rpm_wakeref_asserts(dev_priv);
2670
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07002671 return ret;
2672}
2673
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002674static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2675 u32 hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002676 const u32 hpd[HPD_NUM_PINS])
Shashank Sharmad04a4922014-08-22 17:40:41 +05302677{
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002678 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
Shashank Sharmad04a4922014-08-22 17:40:41 +05302679
Ville Syrjäläa52bb152015-08-27 23:56:11 +03002680 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2681 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302682
Rodrigo Vivicf539022018-01-29 15:22:21 -08002683 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
Ville Syrjälä40e56412015-08-27 23:56:10 +03002684 dig_hotplug_reg, hpd,
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002685 bxt_port_hotplug_long_detect);
Ville Syrjälä40e56412015-08-27 23:56:10 +03002686
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002687 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302688}
2689
Dhinakaran Pandiyan121e7582018-06-15 17:05:29 -07002690static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2691{
2692 u32 pin_mask = 0, long_mask = 0;
Dhinakaran Pandiyanb796b972018-06-15 17:05:30 -07002693 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2694 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
Dhinakaran Pandiyan121e7582018-06-15 17:05:29 -07002695
Dhinakaran Pandiyan121e7582018-06-15 17:05:29 -07002696 if (trigger_tc) {
Dhinakaran Pandiyanb796b972018-06-15 17:05:30 -07002697 u32 dig_hotplug_reg;
2698
Dhinakaran Pandiyan121e7582018-06-15 17:05:29 -07002699 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2700 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2701
2702 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
Dhinakaran Pandiyanb796b972018-06-15 17:05:30 -07002703 dig_hotplug_reg, hpd_gen11,
Dhinakaran Pandiyan121e7582018-06-15 17:05:29 -07002704 gen11_port_hotplug_long_detect);
Dhinakaran Pandiyan121e7582018-06-15 17:05:29 -07002705 }
Dhinakaran Pandiyanb796b972018-06-15 17:05:30 -07002706
2707 if (trigger_tbt) {
2708 u32 dig_hotplug_reg;
2709
2710 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2711 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2712
2713 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2714 dig_hotplug_reg, hpd_gen11,
2715 gen11_port_hotplug_long_detect);
2716 }
2717
2718 if (pin_mask)
2719 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2720 else
2721 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
Dhinakaran Pandiyan121e7582018-06-15 17:05:29 -07002722}
2723
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002724static irqreturn_t
2725gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
Ben Widawskyabd58f02013-11-02 21:07:09 -07002726{
Ben Widawskyabd58f02013-11-02 21:07:09 -07002727 irqreturn_t ret = IRQ_NONE;
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002728 u32 iir;
Daniel Vetterc42664c2013-11-07 11:05:40 +01002729 enum pipe pipe;
Jesse Barnes88e04702014-11-13 17:51:48 +00002730
Ben Widawskyabd58f02013-11-02 21:07:09 -07002731 if (master_ctl & GEN8_DE_MISC_IRQ) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002732 iir = I915_READ(GEN8_DE_MISC_IIR);
2733 if (iir) {
Ville Syrjäläe04f7ec2018-04-03 14:24:18 -07002734 bool found = false;
2735
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002736 I915_WRITE(GEN8_DE_MISC_IIR, iir);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002737 ret = IRQ_HANDLED;
Ville Syrjäläe04f7ec2018-04-03 14:24:18 -07002738
2739 if (iir & GEN8_DE_MISC_GSE) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002740 intel_opregion_asle_intr(dev_priv);
Ville Syrjäläe04f7ec2018-04-03 14:24:18 -07002741 found = true;
2742 }
2743
2744 if (iir & GEN8_DE_EDP_PSR) {
Dhinakaran Pandiyan54fd3142018-04-04 18:37:17 -07002745 u32 psr_iir = I915_READ(EDP_PSR_IIR);
2746
2747 intel_psr_irq_handler(dev_priv, psr_iir);
2748 I915_WRITE(EDP_PSR_IIR, psr_iir);
Ville Syrjäläe04f7ec2018-04-03 14:24:18 -07002749 found = true;
2750 }
2751
2752 if (!found)
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002753 DRM_ERROR("Unexpected DE Misc interrupt\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002754 }
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002755 else
2756 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
Ben Widawskyabd58f02013-11-02 21:07:09 -07002757 }
2758
Dhinakaran Pandiyan121e7582018-06-15 17:05:29 -07002759 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2760 iir = I915_READ(GEN11_DE_HPD_IIR);
2761 if (iir) {
2762 I915_WRITE(GEN11_DE_HPD_IIR, iir);
2763 ret = IRQ_HANDLED;
2764 gen11_hpd_irq_handler(dev_priv, iir);
2765 } else {
 2766 DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2767 }
2768 }
2769
Daniel Vetter6d766f02013-11-07 14:49:55 +01002770 if (master_ctl & GEN8_DE_PORT_IRQ) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002771 iir = I915_READ(GEN8_DE_PORT_IIR);
2772 if (iir) {
2773 u32 tmp_mask;
Shashank Sharmad04a4922014-08-22 17:40:41 +05302774 bool found = false;
Ville Syrjäläcebd87a2015-08-27 23:56:09 +03002775
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002776 I915_WRITE(GEN8_DE_PORT_IIR, iir);
Daniel Vetter6d766f02013-11-07 14:49:55 +01002777 ret = IRQ_HANDLED;
Jesse Barnes88e04702014-11-13 17:51:48 +00002778
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002779 tmp_mask = GEN8_AUX_CHANNEL_A;
Pandiyan, Dhinakaranbca2bf22017-07-18 11:28:00 -07002780 if (INTEL_GEN(dev_priv) >= 9)
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002781 tmp_mask |= GEN9_AUX_CHANNEL_B |
2782 GEN9_AUX_CHANNEL_C |
2783 GEN9_AUX_CHANNEL_D;
2784
James Ausmusbb187e92018-06-11 17:25:12 -07002785 if (INTEL_GEN(dev_priv) >= 11)
2786 tmp_mask |= ICL_AUX_CHANNEL_E;
2787
Dhinakaran Pandiyan9bb635d2018-05-21 17:25:35 -07002788 if (IS_CNL_WITH_PORT_F(dev_priv) ||
2789 INTEL_GEN(dev_priv) >= 11)
Rodrigo Vivia324fca2018-01-29 15:22:15 -08002790 tmp_mask |= CNL_AUX_CHANNEL_F;
2791
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002792 if (iir & tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002793 dp_aux_irq_handler(dev_priv);
Shashank Sharmad04a4922014-08-22 17:40:41 +05302794 found = true;
2795 }
2796
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002797 if (IS_GEN9_LP(dev_priv)) {
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002798 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2799 if (tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002800 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2801 hpd_bxt);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002802 found = true;
2803 }
2804 } else if (IS_BROADWELL(dev_priv)) {
2805 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2806 if (tmp_mask) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002807 ilk_hpd_irq_handler(dev_priv,
2808 tmp_mask, hpd_bdw);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002809 found = true;
2810 }
Shashank Sharmad04a4922014-08-22 17:40:41 +05302811 }
2812
Ander Conselvan de Oliveiracc3f90f2016-12-02 10:23:49 +02002813 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002814 gmbus_irq_handler(dev_priv);
Shashank Sharma9e637432014-08-22 17:40:43 +05302815 found = true;
2816 }
2817
Shashank Sharmad04a4922014-08-22 17:40:41 +05302818 if (!found)
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002819 DRM_ERROR("Unexpected DE Port interrupt\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002820 }
Oscar Mateo38cc46d2014-06-16 16:10:59 +01002821 else
2822 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
Daniel Vetter6d766f02013-11-07 14:49:55 +01002823 }
2824
Damien Lespiau055e3932014-08-18 13:49:10 +01002825 for_each_pipe(dev_priv, pipe) {
Daniel Vetterfd3a4022017-07-20 19:57:51 +02002826 u32 fault_errors;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002827
Daniel Vetterc42664c2013-11-07 11:05:40 +01002828 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2829 continue;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002830
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002831 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2832 if (!iir) {
Ben Widawskyabd58f02013-11-02 21:07:09 -07002833 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002834 continue;
2835 }
2836
2837 ret = IRQ_HANDLED;
2838 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2839
Daniel Vetterfd3a4022017-07-20 19:57:51 +02002840 if (iir & GEN8_PIPE_VBLANK)
2841 drm_handle_vblank(&dev_priv->drm, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002842
2843 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002844 hsw_pipe_crc_irq_handler(dev_priv, pipe);
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002845
2846 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2847 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2848
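		/*
		 * Of the remaining IIR bits, only the fault-error bits
		 * valid for this generation are reported; the set differs
		 * between gen8 and gen9+.
		 */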
2849 fault_errors = iir;
Pandiyan, Dhinakaranbca2bf22017-07-18 11:28:00 -07002850 if (INTEL_GEN(dev_priv) >= 9)
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002851 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2852 else
2853 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2854
2855 if (fault_errors)
Tvrtko Ursulin1353ec32016-10-27 13:48:32 +01002856 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002857 pipe_name(pipe),
2858 fault_errors);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002859 }
2860
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002861 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
Shashank Sharma266ea3d2014-08-22 17:40:42 +05302862 master_ctl & GEN8_DE_PCH_IRQ) {
Daniel Vetter92d03a82013-11-07 11:05:43 +01002863 /*
2864 * FIXME(BDW): Assume for now that the new interrupt handling
2865 * scheme also closed the SDE interrupt handling race we've seen
2866 * on older pch-split platforms. But this needs testing.
2867 */
Tvrtko Ursuline32192e2016-01-12 16:04:06 +00002868 iir = I915_READ(SDEIIR);
2869 if (iir) {
2870 I915_WRITE(SDEIIR, iir);
Daniel Vetter92d03a82013-11-07 11:05:43 +01002871 ret = IRQ_HANDLED;
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002872
Anusha Srivatsa31604222018-06-26 13:52:23 -07002873 if (HAS_PCH_ICP(dev_priv))
2874 icp_irq_handler(dev_priv, iir);
2875 else if (HAS_PCH_SPT(dev_priv) ||
2876 HAS_PCH_KBP(dev_priv) ||
2877 HAS_PCH_CNP(dev_priv))
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002878 spt_irq_handler(dev_priv, iir);
Ville Syrjälä6dbf30c2015-08-27 23:56:02 +03002879 else
Tvrtko Ursulin91d14252016-05-06 14:48:28 +01002880 cpt_irq_handler(dev_priv, iir);
Jani Nikula2dfb0b82016-01-07 10:29:10 +02002881 } else {
2882 /*
 2883 * Like on previous PCHs, there seems to be something
2884 * fishy going on with forwarding PCH interrupts.
2885 */
2886 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2887 }
Daniel Vetter92d03a82013-11-07 11:05:43 +01002888 }
2889
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002890 return ret;
2891}
2892
2893static irqreturn_t gen8_irq_handler(int irq, void *arg)
2894{
Chris Wilsonf0fd96f2018-02-15 07:37:12 +00002895 struct drm_i915_private *dev_priv = to_i915(arg);
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002896 u32 master_ctl;
Chris Wilsonf0fd96f2018-02-15 07:37:12 +00002897 u32 gt_iir[4];
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002898
2899 if (!intel_irqs_enabled(dev_priv))
2900 return IRQ_NONE;
2901
2902 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
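	/*
	 * GEN8_MASTER_IRQ_CONTROL is the enable bit, not an interrupt
	 * source; strip it so that the !master_ctl check below sees only
	 * pending sources.
	 */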
2903 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2904 if (!master_ctl)
2905 return IRQ_NONE;
2906
2907 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2908
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002909 /* Find, clear, then process each source of interrupt */
Chris Wilson55ef72f2018-02-02 15:34:48 +00002910 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
Chris Wilsonf0fd96f2018-02-15 07:37:12 +00002911
2912 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2913 if (master_ctl & ~GEN8_GT_IRQS) {
2914 disable_rpm_wakeref_asserts(dev_priv);
2915 gen8_de_irq_handler(dev_priv, master_ctl);
2916 enable_rpm_wakeref_asserts(dev_priv);
2917 }
Tvrtko Ursulinf11a0f42016-01-12 16:04:07 +00002918
Chris Wilsoncb0d2052015-04-07 16:21:04 +01002919 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
Ben Widawskyabd58f02013-11-02 21:07:09 -07002920
Chris Wilsonf0fd96f2018-02-15 07:37:12 +00002921 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
Imre Deak1f814da2015-12-16 02:52:19 +02002922
Chris Wilson55ef72f2018-02-02 15:34:48 +00002923 return IRQ_HANDLED;
Ben Widawskyabd58f02013-11-02 21:07:09 -07002924}
2925
Chris Wilson36703e72017-06-22 11:56:25 +01002926struct wedge_me {
2927 struct delayed_work work;
2928 struct drm_i915_private *i915;
2929 const char *name;
2930};
2931
2932static void wedge_me(struct work_struct *work)
2933{
2934 struct wedge_me *w = container_of(work, typeof(*w), work.work);
2935
2936 dev_err(w->i915->drm.dev,
2937 "%s timed out, cancelling all in-flight rendering.\n",
2938 w->name);
2939 i915_gem_set_wedged(w->i915);
2940}
2941
2942static void __init_wedge(struct wedge_me *w,
2943 struct drm_i915_private *i915,
2944 long timeout,
2945 const char *name)
2946{
2947 w->i915 = i915;
2948 w->name = name;
2949
2950 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
2951 schedule_delayed_work(&w->work, timeout);
2952}
2953
2954static void __fini_wedge(struct wedge_me *w)
2955{
2956 cancel_delayed_work_sync(&w->work);
2957 destroy_delayed_work_on_stack(&w->work);
2958 w->i915 = NULL;
2959}
2960
2961#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
2962 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
2963 (W)->i915; \
2964 __fini_wedge((W)))
2965
Mika Kuoppala51951ae2018-02-28 12:11:53 +02002966static u32
Mika Kuoppalaf744dbc2018-04-06 12:31:45 +03002967gen11_gt_engine_identity(struct drm_i915_private * const i915,
2968 const unsigned int bank, const unsigned int bit)
Mika Kuoppala51951ae2018-02-28 12:11:53 +02002969{
2970 void __iomem * const regs = i915->regs;
2971 u32 timeout_ts;
2972 u32 ident;
2973
Oscar Mateo96606f32018-04-06 12:32:37 +03002974 lockdep_assert_held(&i915->irq_lock);
2975
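	/*
	 * Select the bit whose identity we want; the hardware publishes
	 * the engine class/instance and interrupt payload in
	 * INTR_IDENTITY_REG once the valid bit is set.
	 */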
Mika Kuoppala51951ae2018-02-28 12:11:53 +02002976 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
2977
2978 /*
2979 * NB: Specs do not specify how long to spin wait,
2980 * so we do ~100us as an educated guess.
2981 */
2982 timeout_ts = (local_clock() >> 10) + 100;
2983 do {
2984 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
2985 } while (!(ident & GEN11_INTR_DATA_VALID) &&
2986 !time_after32(local_clock() >> 10, timeout_ts));
2987
2988 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
2989 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
2990 bank, bit, ident);
2991 return 0;
2992 }
2993
2994 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
2995 GEN11_INTR_DATA_VALID);
2996
Mika Kuoppalaf744dbc2018-04-06 12:31:45 +03002997 return ident;
2998}
2999
3000static void
3001gen11_other_irq_handler(struct drm_i915_private * const i915,
3002 const u8 instance, const u16 iir)
3003{
Oscar Mateod02b98b2018-04-05 17:00:50 +03003004 if (instance == OTHER_GTPM_INSTANCE)
3005 return gen6_rps_irq_handler(i915, iir);
3006
Mika Kuoppalaf744dbc2018-04-06 12:31:45 +03003007 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
3008 instance, iir);
3009}
3010
3011static void
3012gen11_engine_irq_handler(struct drm_i915_private * const i915,
3013 const u8 class, const u8 instance, const u16 iir)
3014{
3015 struct intel_engine_cs *engine;
3016
3017 if (instance <= MAX_ENGINE_INSTANCE)
3018 engine = i915->engine_class[class][instance];
3019 else
3020 engine = NULL;
3021
3022 if (likely(engine))
3023 return gen8_cs_irq_handler(engine, iir);
3024
3025 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
3026 class, instance);
3027}
3028
3029static void
3030gen11_gt_identity_handler(struct drm_i915_private * const i915,
3031 const u32 identity)
3032{
3033 const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
3034 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
3035 const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
3036
3037 if (unlikely(!intr))
3038 return;
3039
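	/*
	 * Classes up to COPY_ENGINE_CLASS are real engines; OTHER_CLASS
	 * carries non-engine sources such as GTPM.
	 */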
3040 if (class <= COPY_ENGINE_CLASS)
3041 return gen11_engine_irq_handler(i915, class, instance, intr);
3042
3043 if (class == OTHER_CLASS)
3044 return gen11_other_irq_handler(i915, instance, intr);
3045
3046 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
3047 class, instance, intr);
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003048}
3049
3050static void
Oscar Mateo96606f32018-04-06 12:32:37 +03003051gen11_gt_bank_handler(struct drm_i915_private * const i915,
3052 const unsigned int bank)
3053{
3054 void __iomem * const regs = i915->regs;
3055 unsigned long intr_dw;
3056 unsigned int bit;
3057
3058 lockdep_assert_held(&i915->irq_lock);
3059
3060 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
3061
3062 if (unlikely(!intr_dw)) {
3063 DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
3064 return;
3065 }
3066
3067 for_each_set_bit(bit, &intr_dw, 32) {
3068 const u32 ident = gen11_gt_engine_identity(i915,
3069 bank, bit);
3070
3071 gen11_gt_identity_handler(i915, ident);
3072 }
3073
 3074 /* Clear only after the shared identity registers have been serviced for each engine */
3075 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
3076}
3077
3078static void
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003079gen11_gt_irq_handler(struct drm_i915_private * const i915,
3080 const u32 master_ctl)
3081{
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003082 unsigned int bank;
3083
Oscar Mateo96606f32018-04-06 12:32:37 +03003084 spin_lock(&i915->irq_lock);
3085
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003086 for (bank = 0; bank < 2; bank++) {
Oscar Mateo96606f32018-04-06 12:32:37 +03003087 if (master_ctl & GEN11_GT_DW_IRQ(bank))
3088 gen11_gt_bank_handler(i915, bank);
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003089 }
Oscar Mateo96606f32018-04-06 12:32:37 +03003090
3091 spin_unlock(&i915->irq_lock);
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003092}
3093
Dhinakaran Pandiyandf0d28c2018-06-15 17:05:28 -07003094static void
3095gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
3096 u32 *iir)
3097{
3098 void __iomem * const regs = dev_priv->regs;
3099
3100 if (!(master_ctl & GEN11_GU_MISC_IRQ))
3101 return;
3102
3103 *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3104 if (likely(*iir))
3105 raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
3106}
3107
3108static void
3109gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
3110 const u32 master_ctl, const u32 iir)
3111{
3112 if (!(master_ctl & GEN11_GU_MISC_IRQ))
3113 return;
3114
3115 if (unlikely(!iir)) {
3116 DRM_ERROR("GU_MISC iir blank!\n");
3117 return;
3118 }
3119
3120 if (iir & GEN11_GU_MISC_GSE)
3121 intel_opregion_asle_intr(dev_priv);
3122 else
3123 DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
3124}
3125
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003126static irqreturn_t gen11_irq_handler(int irq, void *arg)
3127{
3128 struct drm_i915_private * const i915 = to_i915(arg);
3129 void __iomem * const regs = i915->regs;
3130 u32 master_ctl;
Dhinakaran Pandiyandf0d28c2018-06-15 17:05:28 -07003131 u32 gu_misc_iir;
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003132
3133 if (!intel_irqs_enabled(i915))
3134 return IRQ_NONE;
3135
3136 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
3137 master_ctl &= ~GEN11_MASTER_IRQ;
3138 if (!master_ctl)
3139 return IRQ_NONE;
3140
3141 /* Disable interrupts. */
3142 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
3143
3144 /* Find, clear, then process each source of interrupt. */
3145 gen11_gt_irq_handler(i915, master_ctl);
3146
3147 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3148 if (master_ctl & GEN11_DISPLAY_IRQ) {
3149 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
3150
3151 disable_rpm_wakeref_asserts(i915);
3152 /*
3153 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
3154 * for the display related bits.
3155 */
3156 gen8_de_irq_handler(i915, disp_ctl);
3157 enable_rpm_wakeref_asserts(i915);
3158 }
3159
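	/*
	 * GU_MISC is acked while the master interrupt is still disabled,
	 * but handled only after it has been re-enabled below.
	 */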
Dhinakaran Pandiyandf0d28c2018-06-15 17:05:28 -07003160 gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
3161
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003162 /* Acknowledge and enable interrupts. */
3163 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
3164
Dhinakaran Pandiyandf0d28c2018-06-15 17:05:28 -07003165 gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
3166
Mika Kuoppala51951ae2018-02-28 12:11:53 +02003167 return IRQ_HANDLED;
3168}
3169
Chris Wilsonce800752018-03-20 10:04:49 +00003170static void i915_reset_device(struct drm_i915_private *dev_priv,
Chris Wilsond0667e92018-04-06 23:03:54 +01003171 u32 engine_mask,
3172 const char *reason)
Jesse Barnes8a905232009-07-11 16:48:03 -04003173{
Chris Wilsonce800752018-03-20 10:04:49 +00003174 struct i915_gpu_error *error = &dev_priv->gpu_error;
Chris Wilson91c8a322016-07-05 10:40:23 +01003175 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
Ben Widawskycce723e2013-07-19 09:16:42 -07003176 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
3177 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
3178 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
Chris Wilson36703e72017-06-22 11:56:25 +01003179 struct wedge_me w;
Jesse Barnes8a905232009-07-11 16:48:03 -04003180
Chris Wilsonc0336662016-05-06 15:40:21 +01003181 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04003182
Chris Wilson8af29b02016-09-09 14:11:47 +01003183 DRM_DEBUG_DRIVER("resetting chip\n");
3184 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
3185
Chris Wilson36703e72017-06-22 11:56:25 +01003186 /* Use a watchdog to ensure that our reset completes */
3187 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
3188 intel_prepare_reset(dev_priv);
Ville Syrjälä75147472014-11-24 18:28:11 +02003189
Chris Wilsond0667e92018-04-06 23:03:54 +01003190 error->reason = reason;
3191 error->stalled_mask = engine_mask;
Chris Wilsonce800752018-03-20 10:04:49 +00003192
Chris Wilson36703e72017-06-22 11:56:25 +01003193 /* Signal that locked waiters should reset the GPU */
Chris Wilsond0667e92018-04-06 23:03:54 +01003194 smp_mb__before_atomic();
Chris Wilsonce800752018-03-20 10:04:49 +00003195 set_bit(I915_RESET_HANDOFF, &error->flags);
3196 wake_up_all(&error->wait_queue);
Chris Wilson8c185ec2017-03-16 17:13:02 +00003197
Chris Wilson36703e72017-06-22 11:56:25 +01003198 /* Wait for anyone holding the lock to wake up, without
3199 * blocking indefinitely on struct_mutex.
Chris Wilson780f2622016-09-09 14:11:52 +01003200 */
Chris Wilson36703e72017-06-22 11:56:25 +01003201 do {
3202 if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
Chris Wilsond0667e92018-04-06 23:03:54 +01003203 i915_reset(dev_priv, engine_mask, reason);
Chris Wilson36703e72017-06-22 11:56:25 +01003204 mutex_unlock(&dev_priv->drm.struct_mutex);
3205 }
Chris Wilsonce800752018-03-20 10:04:49 +00003206 } while (wait_on_bit_timeout(&error->flags,
Chris Wilson36703e72017-06-22 11:56:25 +01003207 I915_RESET_HANDOFF,
3208 TASK_UNINTERRUPTIBLE,
3209 1));
Chris Wilson780f2622016-09-09 14:11:52 +01003210
Chris Wilsond0667e92018-04-06 23:03:54 +01003211 error->stalled_mask = 0;
Chris Wilsonce800752018-03-20 10:04:49 +00003212 error->reason = NULL;
3213
Chris Wilson36703e72017-06-22 11:56:25 +01003214 intel_finish_reset(dev_priv);
3215 }
Daniel Vetter17e1df02013-09-08 21:57:13 +02003216
Chris Wilsonce800752018-03-20 10:04:49 +00003217 if (!test_bit(I915_WEDGED, &error->flags))
3218 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
Jesse Barnes8a905232009-07-11 16:48:03 -04003219}
3220
Chris Wilsoneaa14c22016-10-19 13:52:03 +01003221static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
Jesse Barnes8a905232009-07-11 16:48:03 -04003222{
Chris Wilsoneaa14c22016-10-19 13:52:03 +01003223 u32 eir;
Jesse Barnes8a905232009-07-11 16:48:03 -04003224
Chris Wilsoneaa14c22016-10-19 13:52:03 +01003225 if (!IS_GEN2(dev_priv))
3226 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
Jesse Barnes8a905232009-07-11 16:48:03 -04003227
Chris Wilsoneaa14c22016-10-19 13:52:03 +01003228 if (INTEL_GEN(dev_priv) < 4)
3229 I915_WRITE(IPEIR, I915_READ(IPEIR));
3230 else
3231 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
Jesse Barnes8a905232009-07-11 16:48:03 -04003232
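	/*
	 * EIR latches the error status; writing the value back clears it.
	 * Anything still set on the re-read below is stuck and gets masked
	 * off via EMR.
	 */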
Chris Wilsoneaa14c22016-10-19 13:52:03 +01003233 I915_WRITE(EIR, I915_READ(EIR));
Jesse Barnes8a905232009-07-11 16:48:03 -04003234 eir = I915_READ(EIR);
3235 if (eir) {
3236 /*
3237 * some errors might have become stuck,
3238 * mask them.
3239 */
Chris Wilsoneaa14c22016-10-19 13:52:03 +01003240 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
Jesse Barnes8a905232009-07-11 16:48:03 -04003241 I915_WRITE(EMR, I915_READ(EMR) | eir);
Ville Syrjälä78c357d2018-06-11 23:02:57 +03003242 I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
Jesse Barnes8a905232009-07-11 16:48:03 -04003243 }
Chris Wilson35aed2e2010-05-27 13:18:12 +01003244}
3245
3246/**
Mika Kuoppalab8d24a02015-01-28 17:03:14 +02003247 * i915_handle_error - handle a gpu error
Tvrtko Ursulin14bb2c12016-06-03 14:02:17 +01003248 * @dev_priv: i915 device private
arun.siluvery@linux.intel.com14b730f2016-03-18 20:07:55 +00003249 * @engine_mask: mask representing engines that are hung
Chris Wilsonce800752018-03-20 10:04:49 +00003250 * @flags: control flags
Michel Thierry87c390b2017-01-11 20:18:08 -08003251 * @fmt: Error message format string
3252 *
Javier Martinez Canillasaafd8582015-10-08 09:57:49 +02003253 * Do some basic checking of register state at error time and
Chris Wilson35aed2e2010-05-27 13:18:12 +01003254 * dump it to the syslog. Also call i915_capture_error_state() to make
3255 * sure we get a record and make it available in debugfs. Fire a uevent
3256 * so userspace knows something bad happened (should trigger collection
3257 * of a ring dump etc.).
3258 */
Chris Wilsonc0336662016-05-06 15:40:21 +01003259void i915_handle_error(struct drm_i915_private *dev_priv,
3260 u32 engine_mask,
Chris Wilsonce800752018-03-20 10:04:49 +00003261 unsigned long flags,
Mika Kuoppala58174462014-02-25 17:11:26 +02003262 const char *fmt, ...)
Chris Wilson35aed2e2010-05-27 13:18:12 +01003263{
Michel Thierry142bc7d2017-06-20 10:57:46 +01003264 struct intel_engine_cs *engine;
3265 unsigned int tmp;
Mika Kuoppala58174462014-02-25 17:11:26 +02003266 char error_msg[80];
Chris Wilsonce800752018-03-20 10:04:49 +00003267 char *msg = NULL;
Chris Wilson35aed2e2010-05-27 13:18:12 +01003268
Chris Wilsonce800752018-03-20 10:04:49 +00003269 if (fmt) {
3270 va_list args;
3271
3272 va_start(args, fmt);
3273 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
3274 va_end(args);
3275
3276 msg = error_msg;
3277 }
Mika Kuoppala58174462014-02-25 17:11:26 +02003278
Chris Wilson1604a862017-03-14 17:18:40 +00003279 /*
3280 * In most cases it's guaranteed that we get here with an RPM
3281 * reference held, for example because there is a pending GPU
3282 * request that won't finish until the reset is done. This
3283 * isn't the case at least when we get here by doing a
3284 * simulated reset via debugfs, so get an RPM reference.
3285 */
3286 intel_runtime_pm_get(dev_priv);
3287
Chris Wilson873d66f2018-03-16 21:49:59 +00003288 engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
Chris Wilsonce800752018-03-20 10:04:49 +00003289
3290 if (flags & I915_ERROR_CAPTURE) {
3291 i915_capture_error_state(dev_priv, engine_mask, msg);
3292 i915_clear_error_registers(dev_priv);
3293 }
Jesse Barnes8a905232009-07-11 16:48:03 -04003294
Michel Thierry142bc7d2017-06-20 10:57:46 +01003295 /*
3296 * Try engine reset when available. We fall back to full reset if
 3297 * a per-engine reset fails.
3298 */
3299 if (intel_has_reset_engine(dev_priv)) {
3300 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
Daniel Vetter9db529a2017-08-08 10:08:28 +02003301 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
Michel Thierry142bc7d2017-06-20 10:57:46 +01003302 if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3303 &dev_priv->gpu_error.flags))
3304 continue;
3305
Chris Wilsonce800752018-03-20 10:04:49 +00003306 if (i915_reset_engine(engine, msg) == 0)
Michel Thierry142bc7d2017-06-20 10:57:46 +01003307 engine_mask &= ~intel_engine_flag(engine);
3308
3309 clear_bit(I915_RESET_ENGINE + engine->id,
3310 &dev_priv->gpu_error.flags);
3311 wake_up_bit(&dev_priv->gpu_error.flags,
3312 I915_RESET_ENGINE + engine->id);
3313 }
3314 }
3315
Chris Wilson8af29b02016-09-09 14:11:47 +01003316 if (!engine_mask)
Chris Wilson1604a862017-03-14 17:18:40 +00003317 goto out;
Ben Gamariba1234d2009-09-14 17:48:47 -04003318
Michel Thierry142bc7d2017-06-20 10:57:46 +01003319 /* A full reset needs the mutex; stop any other user attempting one. */
Chris Wilsond5367302017-06-20 10:57:43 +01003320 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
3321 wait_event(dev_priv->gpu_error.reset_queue,
3322 !test_bit(I915_RESET_BACKOFF,
3323 &dev_priv->gpu_error.flags));
Chris Wilson1604a862017-03-14 17:18:40 +00003324 goto out;
Chris Wilsond5367302017-06-20 10:57:43 +01003325 }
Chris Wilson8af29b02016-09-09 14:11:47 +01003326
Michel Thierry142bc7d2017-06-20 10:57:46 +01003327 /* Prevent any other reset-engine attempt. */
3328 for_each_engine(engine, dev_priv, tmp) {
3329 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
3330 &dev_priv->gpu_error.flags))
3331 wait_on_bit(&dev_priv->gpu_error.flags,
3332 I915_RESET_ENGINE + engine->id,
3333 TASK_UNINTERRUPTIBLE);
3334 }
3335
Chris Wilsond0667e92018-04-06 23:03:54 +01003336 i915_reset_device(dev_priv, engine_mask, msg);
Chris Wilsond5367302017-06-20 10:57:43 +01003337
Michel Thierry142bc7d2017-06-20 10:57:46 +01003338 for_each_engine(engine, dev_priv, tmp) {
3339 clear_bit(I915_RESET_ENGINE + engine->id,
3340 &dev_priv->gpu_error.flags);
3341 }
3342
Chris Wilsond5367302017-06-20 10:57:43 +01003343 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
3344 wake_up_all(&dev_priv->gpu_error.reset_queue);
Chris Wilson1604a862017-03-14 17:18:40 +00003345
3346out:
3347 intel_runtime_pm_put(dev_priv);
Jesse Barnes8a905232009-07-11 16:48:03 -04003348}
3349
Keith Packard42f52ef2008-10-18 19:39:29 -07003350/* Called from drm generic code, passed 'crtc' which
3351 * we use as a pipe index
3352 */
Chris Wilson86e83e32016-10-07 20:49:52 +01003353static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07003354{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003355 struct drm_i915_private *dev_priv = to_i915(dev);
Keith Packarde9d21d72008-10-16 11:31:38 -07003356 unsigned long irqflags;
Jesse Barnes71e0ffa2009-01-08 10:42:15 -08003357
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003358 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Chris Wilson86e83e32016-10-07 20:49:52 +01003359 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3360 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3361
3362 return 0;
3363}
3364
3365static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
3366{
3367 struct drm_i915_private *dev_priv = to_i915(dev);
3368 unsigned long irqflags;
3369
3370 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3371 i915_enable_pipestat(dev_priv, pipe,
3372 PIPE_START_VBLANK_INTERRUPT_STATUS);
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003373 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Chris Wilson8692d00e2011-02-05 10:08:21 +00003374
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07003375 return 0;
3376}
3377
Thierry Reding88e72712015-09-24 18:35:31 +02003378static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07003379{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003380 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf796cf82011-04-07 13:58:17 -07003381 unsigned long irqflags;
Tvrtko Ursulin55b8f2a2016-10-14 09:17:22 +01003382 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
Chris Wilson86e83e32016-10-07 20:49:52 +01003383 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07003384
Jesse Barnesf796cf82011-04-07 13:58:17 -07003385 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02003386 ilk_enable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07003387 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3388
Dhinakaran Pandiyan2e8bf222018-02-02 21:13:02 -08003389 /* Even though there is no DMC, the frame counter can get stuck when
 3390 * PSR is active, as no frames are generated.
3391 */
3392 if (HAS_PSR(dev_priv))
3393 drm_vblank_restore(dev, pipe);
3394
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07003395 return 0;
3396}
3397
Thierry Reding88e72712015-09-24 18:35:31 +02003398static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
Ben Widawskyabd58f02013-11-02 21:07:09 -07003399{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003400 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003401 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003402
Ben Widawskyabd58f02013-11-02 21:07:09 -07003403 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02003404 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003405 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02003406
Dhinakaran Pandiyan2e8bf222018-02-02 21:13:02 -08003407 /* Even if there is no DMC, the frame counter can get stuck when
 3408 * PSR is active, as no frames are generated, so check only for PSR.
3409 */
3410 if (HAS_PSR(dev_priv))
3411 drm_vblank_restore(dev, pipe);
3412
Ben Widawskyabd58f02013-11-02 21:07:09 -07003413 return 0;
3414}
3415
Keith Packard42f52ef2008-10-18 19:39:29 -07003416/* Called from drm generic code, passed 'crtc' which
3417 * we use as a pipe index
3418 */
Chris Wilson86e83e32016-10-07 20:49:52 +01003419static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
3420{
3421 struct drm_i915_private *dev_priv = to_i915(dev);
3422 unsigned long irqflags;
3423
3424 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3425 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
3426 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3427}
3428
3429static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07003430{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003431 struct drm_i915_private *dev_priv = to_i915(dev);
Keith Packarde9d21d72008-10-16 11:31:38 -07003432 unsigned long irqflags;
Jesse Barnes0a3e67a2008-09-30 12:14:26 -07003433
Chris Wilson1ec14ad2010-12-04 11:30:53 +00003434 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Jesse Barnesf796cf82011-04-07 13:58:17 -07003435 i915_disable_pipestat(dev_priv, pipe,
Imre Deak755e9012014-02-10 18:42:47 +02003436 PIPE_START_VBLANK_INTERRUPT_STATUS);
Jesse Barnesf796cf82011-04-07 13:58:17 -07003437 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3438}
3439
Thierry Reding88e72712015-09-24 18:35:31 +02003440static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
Jesse Barnesf796cf82011-04-07 13:58:17 -07003441{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003442 struct drm_i915_private *dev_priv = to_i915(dev);
Jesse Barnesf796cf82011-04-07 13:58:17 -07003443 unsigned long irqflags;
Tvrtko Ursulin55b8f2a2016-10-14 09:17:22 +01003444 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
Chris Wilson86e83e32016-10-07 20:49:52 +01003445 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
Jesse Barnesf796cf82011-04-07 13:58:17 -07003446
3447 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjäläfbdedaea2015-11-23 18:06:16 +02003448 ilk_disable_display_irq(dev_priv, bit);
Jesse Barnesb1f14ad2011-04-06 12:13:38 -07003449 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3450}
3451
Thierry Reding88e72712015-09-24 18:35:31 +02003452static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
Ben Widawskyabd58f02013-11-02 21:07:09 -07003453{
Chris Wilsonfac5e232016-07-04 11:34:36 +01003454 struct drm_i915_private *dev_priv = to_i915(dev);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003455 unsigned long irqflags;
Ben Widawskyabd58f02013-11-02 21:07:09 -07003456
Ben Widawskyabd58f02013-11-02 21:07:09 -07003457 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
Ville Syrjälä013d3752015-11-23 18:06:17 +02003458 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
Ben Widawskyabd58f02013-11-02 21:07:09 -07003459 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3460}
3461
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_NOP(dev_priv))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

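/* Reset the GT interrupt registers, plus the PM registers on gen6+. */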
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN3_IRQ_RESET(GT);
	if (INTEL_GEN(dev_priv) >= 6)
		GEN3_IRQ_RESET(GEN6_PM);
}

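/*
 * Reset all VLV/CHV display interrupt state (hotplug, pipestat and the
 * VLV_ interrupt registers); callers hold dev_priv->irq_lock.
 */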
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(VLV_);
	dev_priv->irq_mask = ~0u;
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	WARN_ON(dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}

/* drm_dma.h hooks */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_GEN5(dev_priv))
		I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET(DE);
	if (IS_GEN7(dev_priv))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		I915_WRITE(EDP_PSR_IMR, 0xffffffff);
		I915_WRITE(EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(dev_priv);

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(VLV_MASTER_IER, 0);
	POSTING_READ(VLV_MASTER_IER);

	gen5_gt_irq_reset(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
	I915_WRITE(EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN3_IRQ_RESET(GEN8_DE_PORT_);
	GEN3_IRQ_RESET(GEN8_DE_MISC_);
	GEN3_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	/* Disable RCS, BCS, VCS and VECS class engines. */
	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);

	/* Mask all interrupts on the RCS, BCS, VCS and VECS engines. */
	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);

	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}

static void gen11_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;

	I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);

	gen11_gt_irq_reset(dev_priv);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN3_IRQ_RESET(GEN8_DE_PORT_);
	GEN3_IRQ_RESET(GEN8_DE_MISC_);
	GEN3_IRQ_RESET(GEN11_DE_HPD_);
	GEN3_IRQ_RESET(GEN11_GU_MISC_);
	GEN3_IRQ_RESET(GEN8_PCU_);

	if (HAS_PCH_ICP(dev_priv))
		GEN3_IRQ_RESET(SDE);
}

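/*
 * Re-enable the DE pipe interrupts for the pipes in @pipe_mask, e.g. after
 * the corresponding power well has been turned back on. Bails out if driver
 * interrupts are disabled altogether.
 */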
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

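/*
 * Disable the DE pipe interrupts for the pipes in @pipe_mask, e.g. before the
 * corresponding power well is turned off, and wait for any pending display
 * irqs to finish processing.
 */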
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);
}

static void cherryview_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN3_IRQ_RESET(GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

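/*
 * Collect the HPD interrupt bits, from the provided pin->bit table, for all
 * encoders whose hotplug pin currently has detection enabled.
 */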
static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on LPT+.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
		     PORTC_PULSE_DURATION_MASK |
		     PORTD_PULSE_DURATION_MASK);
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	/*
	 * When CPU and PCH are on the same package, port A
	 * HPD must be enabled in both north and south.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		hotplug |= PORTA_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (HAS_PCH_IBX(dev_priv)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

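/* Enable DDI and Type-C port hotplug detection on the ICP PCH. */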
static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
	hotplug |= ICP_DDIA_HPD_ENABLE |
		   ICP_DDIB_HPD_ENABLE;
	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);

	hotplug = I915_READ(SHOTPLUG_CTL_TC);
	hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
		   ICP_TC_HPD_ENABLE(PORT_TC2) |
		   ICP_TC_HPD_ENABLE(PORT_TC3) |
		   ICP_TC_HPD_ENABLE(PORT_TC4);
	I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_hpd_detection_setup(dev_priv);
}

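/* Enable Type-C and Thunderbolt hotplug detection for gen11. */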
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);

	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;

	val = I915_READ(GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	I915_WRITE(GEN11_DE_HPD_IMR, val);
	POSTING_READ(GEN11_DE_HPD_IMR);

	gen11_hpd_detection_setup(dev_priv);

	if (HAS_PCH_ICP(dev_priv))
		icp_hpd_irq_setup(dev_priv);
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = I915_READ(SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		I915_WRITE(SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE |
		   PORTD_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
	hotplug |= PORTE_HOTPLUG_ENABLE;
	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
		   DIGITAL_PORTA_PULSE_DURATION_2ms;
	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_GEN(dev_priv) >= 8) {
		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);

		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	} else {
		hotplug_irqs = DE_DP_A_HOTPLUG;
		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);

		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
	}

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
				      u32 enabled_irqs)
{
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug |= PORTA_HOTPLUG_ENABLE |
		   PORTB_HOTPLUG_ENABLE |
		   PORTC_HOTPLUG_ENABLE;

	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
		      hotplug, enabled_irqs);
	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;

	/*
	 * For BXT the invert bit has to be set depending on the AOB design
	 * of the HPD detection logic, so update it based on the VBT fields.
	 */
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
		hotplug |= BXT_DDIA_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
		hotplug |= BXT_DDIB_HPD_INVERT;
	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
		hotplug |= BXT_DDIC_HPD_INVERT;

	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
	    HAS_PCH_LPT(dev_priv))
		ibx_hpd_detection_setup(dev_priv);
	else
		spt_hpd_detection_setup(dev_priv);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev_priv)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
		gt_irqs |= GT_PARITY_ERROR(dev_priv);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev_priv)) {
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_GEN(dev_priv) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev_priv)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		dev_priv->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 display_mask, extra_mask;

	if (INTEL_GEN(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_pre_postinstall(dev);

	GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ilk_hpd_detection_setup(dev_priv);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev_priv)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

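/*
 * Enable the VLV/CHV display interrupts on demand; must be called with
 * dev_priv->irq_lock held, as the lockdep assert below documents.
 */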
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen5_gt_irq_postinstall(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(VLV_MASTER_IER);

	return 0;
}

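/*
 * gt_interrupts[] below holds the enable bits for the four GEN8 GT interrupt
 * register banks; bank 2 is the PM bank, which is left to the on-demand
 * RPS/GuC interrupt code.
 */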
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (INTEL_GEN(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
		gen11_hpd_detection_setup(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_hpd_detection_setup(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		ilk_hpd_detection_setup(dev_priv);
	}
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
	I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
	I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}

static void icp_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 mask = SDE_GMBUS_ICP;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);

	gen3_assert_iir_is_zero(dev_priv, SDEIIR);
	I915_WRITE(SDEIMR, ~mask);

	icp_hpd_detection_setup(dev_priv);
}

static int gen11_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (HAS_PCH_ICP(dev_priv))
		icp_irq_postinstall(dev);

	gen11_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);

	I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
	POSTING_READ(GEN11_GFX_MSTR_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen8_gt_irq_postinstall(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void i8xx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE16(HWSTAM, 0xffff);

	GEN2_IRQ_RESET();
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 enable_mask;

	I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
			    I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

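/*
 * Ack a gen2 master error interrupt: latch and clear EIR, and report any
 * bits that remain stuck so the caller's values can be masked off in EMR.
 */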
static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u16 *eir, u16 *eir_stuck)
{
	u16 emr;

	*eir = I915_READ16(EIR);

	if (*eir)
		I915_WRITE16(EIR, *eir);

	*eir_stuck = I915_READ16(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ16(EMR);
	I915_WRITE16(EMR, 0xffff);
	I915_WRITE16(EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = I915_READ(EIR);

	I915_WRITE(EIR, *eir);

	*eir_stuck = I915_READ(EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = I915_READ(EMR);
	I915_WRITE(EMR, 0xffffffff);
	I915_WRITE(EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
}

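/* Main interrupt handler for gen2 (i8xx) devices. */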
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = I915_READ16(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE16(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i915_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
			  I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

Daniel Vetterff1f5252012-10-02 15:10:55 +02004551static irqreturn_t i915_irq_handler(int irq, void *arg)
Chris Wilsona266c7d2012-04-24 22:59:44 +01004552{
Daniel Vetter45a83f82014-05-12 19:17:55 +02004553 struct drm_device *dev = arg;
Chris Wilsonfac5e232016-07-04 11:34:36 +01004554 struct drm_i915_private *dev_priv = to_i915(dev);
Ville Syrjäläaf722d22017-08-18 21:37:00 +03004555 irqreturn_t ret = IRQ_NONE;
Chris Wilsona266c7d2012-04-24 22:59:44 +01004556
Imre Deak2dd2a882015-02-24 11:14:30 +02004557 if (!intel_irqs_enabled(dev_priv))
4558 return IRQ_NONE;
4559
Imre Deak1f814da2015-12-16 02:52:19 +02004560 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4561 disable_rpm_wakeref_asserts(dev_priv);
4562
	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

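/*
 * Reset flow for the i965/g4x code below (sketch): disable hotplug
 * detection and clear any latched PORT_HOTPLUG_STAT bits, reset the
 * per-pipe status logic, mask all status-page writes via HWSTAM (set bits
 * mask, as far as the register docs go), then let GEN3_IRQ_RESET() zero
 * IER/IMR and flush stale IIR bits.
 */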
static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);
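	/* A set EMR bit masks its error source; only the sources left
	 * unmasked above can latch I915_MASTER_ERROR_INTERRUPT in IIR. */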

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

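/*
 * Invoked through the dev_priv->display.hpd_irq_setup hook, with
 * dev_priv->irq_lock already held by the caller (hence the lockdep
 * assert below).
 */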
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do it
	 * once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

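	/* Same ack-then-handle flow as i915_irq_handler(), plus the BSD
	 * (VCS) ring interrupt that g4x adds. */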
	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

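	/*
	 * pm_intrmsk_mbz collects the bits that must be zero ("mbz")
	 * whenever GEN6_PMINTRMSK is written, gathered per platform below.
	 */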
	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can (and VLV, CHV may) hard hang on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

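	/*
	 * Wire up the platform irq vtable. On every platform the reset
	 * routine doubles as both the irq_preinstall and irq_uninstall hook.
	 */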
	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		dev->driver->irq_handler = gen11_irq_handler;
		dev->driver->irq_preinstall = gen11_irq_reset;
		dev->driver->irq_postinstall = gen11_irq_postinstall;
		dev->driver->irq_uninstall = gen11_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}

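/*
 * A minimal sketch of the expected driver-load ordering (see the load path
 * in i915_drv.c for the real sequence; the error label is illustrative):
 *
 *	intel_irq_init(dev_priv);		// vtables and work items only
 *	ret = intel_irq_install(dev_priv);	// request irq, run postinstall
 *	if (ret < 0)
 *		goto err_irq;			// hypothetical unwind label
 *	...
 *	intel_irq_uninstall(dev_priv);		// on teardown
 *	intel_irq_fini(dev_priv);
 */
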
/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

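	/* remap_info[] entries are allocated on demand by the L3 parity
	 * code; kfree(NULL) is a no-op, so never-used slices are fine. */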
	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

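	/*
	 * drm_irq_install() runs our irq_preinstall hook, requests the irq,
	 * then runs irq_postinstall, which programs the platform IMR/IER.
	 */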
	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
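
/*
 * The two helpers above are used as a pair (sketch, assuming the runtime-pm
 * and system suspend callbacks of this era). synchronize_irq() in the
 * disable path flushes any in-flight handler before power-down:
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);	// suspend side
 *	...device powered down and back up...
 *	intel_runtime_pm_enable_interrupts(dev_priv);	// resume side
 */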