/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
        [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
        [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), 0); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
        POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
        I915_WRITE(type##IMR, 0xffffffff); \
        POSTING_READ(type##IMR); \
        I915_WRITE(type##IER, 0); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
        I915_WRITE(type##IIR, 0xffffffff); \
        POSTING_READ(type##IIR); \
} while (0)
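
/*
 * Illustrative expansion (comment only, not compiled): GEN5_IRQ_RESET(GT)
 * writes 0xffffffff to GTIMR (mask everything), 0 to GTIER (disable
 * everything), then clears GTIIR twice -- the second write picks up the
 * extra event IIR may have queued, per the note above.
 */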

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
        u32 val = I915_READ(reg); \
        if (val) { \
                WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
                     (reg), val); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
                I915_WRITE((reg), 0xffffffff); \
                POSTING_READ(reg); \
        } \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
        I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
        I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
        POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
        GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
        I915_WRITE(type##IER, (ier_val)); \
        I915_WRITE(type##IMR, (imr_val)); \
        POSTING_READ(type##IMR); \
} while (0)
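
/*
 * Usage sketch (illustrative; gt_irqs here stands for a caller-provided
 * enable mask): GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs) first
 * asserts GTIIR is already zero (a prior GEN5_IRQ_RESET is assumed),
 * then programs IER before IMR so nothing is unmasked while its enable
 * bit is still clear.
 */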

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
                                     uint32_t mask,
                                     uint32_t bits)
{
        uint32_t val;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(bits & ~mask);

        val = I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent concurrent read-modify-write
 * cycles from interfering, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where the
 * lock is held already, this function acquires the lock itself. A
 * non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   uint32_t mask,
                                   uint32_t bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}
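
/*
 * Typical call pattern (illustrative): to enable only the CRT hotplug
 * detect interrupt while leaving the other PORT_HOTPLUG_EN bits alone,
 * a caller would use
 *
 *   i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_INT_EN,
 *                                 CRT_HOTPLUG_INT_EN);
 *
 * and pass bits == 0 with the same mask to disable it again.
 */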

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
                                   uint32_t interrupt_mask,
                                   uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->irq_mask) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}
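
/*
 * Worked example for the computation above (illustrative values): with
 * interrupt_mask = 0x3 and enabled_irq_mask = 0x1, bit 0 of DEIMR is
 * cleared (interrupt unmasked) and bit 1 is set (interrupt masked);
 * all bits outside interrupt_mask keep their cached value. Note the
 * IMR convention: a set bit *masks* the interrupt.
 */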

void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_display_irq(dev_priv, mask, mask);
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
        ilk_update_display_irq(dev_priv, mask, 0);
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
                POSTING_READ(gen6_pm_imr(dev_priv));
        }
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
                                  uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        __gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t reg = gen6_pm_iir(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        I915_WRITE(reg, dev_priv->pm_rps_events);
        I915_WRITE(reg, dev_priv->pm_rps_events);
        POSTING_READ(reg);
        dev_priv->rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->irq_lock);

        WARN_ON(dev_priv->rps.pm_iir);
        WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
                                dev_priv->pm_rps_events);
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
        /*
         * SNB and IVB can, and VLV and CHV may, hard hang on a looping
         * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
         *
         * TODO: verify if this can be reproduced on VLV,CHV.
         */
        if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
                mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

        if (INTEL_INFO(dev_priv)->gen >= 8)
                mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

        return mask;
}
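
/*
 * Callers are expected to write the sanitized mask to GEN6_PMINTRMSK;
 * see gen6_disable_rps_interrupts() below, which sanitizes ~0 so that
 * even a "mask everything" request keeps GEN6_PM_RP_UP_EI_EXPIRED live
 * on the platforms that hang without it.
 */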

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;
        spin_unlock_irq(&dev_priv->irq_lock);

        cancel_work_sync(&dev_priv->rps.work);

        spin_lock_irq(&dev_priv->irq_lock);

        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

        __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
                                ~dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);

        synchronize_irq(dev->irq);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
                                uint32_t interrupt_mask,
                                uint32_t enabled_irq_mask)
{
        uint32_t new_val;
        uint32_t old_val;

        assert_spin_locked(&dev_priv->irq_lock);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        old_val = I915_READ(GEN8_DE_PORT_IMR);

        new_val = old_val;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != old_val) {
                I915_WRITE(GEN8_DE_PORT_IMR, new_val);
                POSTING_READ(GEN8_DE_PORT_IMR);
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                  uint32_t interrupt_mask,
                                  uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        WARN_ON(enabled_irq_mask & ~interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                       u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == enable_mask)
                return;

        dev_priv->pipestat_irq_mask[pipe] |= status_mask;

        /* Enable the interrupt, clear any pending status */
        pipestat |= enable_mask | status_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                        u32 enable_mask, u32 status_mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

        assert_spin_locked(&dev_priv->irq_lock);
        WARN_ON(!intel_irqs_enabled(dev_priv));

        if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
                      status_mask & ~PIPESTAT_INT_STATUS_MASK,
                      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
                      pipe_name(pipe), enable_mask, status_mask))
                return;

        if ((pipestat & enable_mask) == 0)
                return;

        dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

        pipestat &= ~enable_mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
        u32 enable_mask = status_mask << 16;

        /*
         * On pipe A we don't support the PSR interrupt yet,
         * on pipe B and C the same bit is MBZ (must be zero).
         */
        if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
                return 0;
        /*
         * On pipe B and C we don't support the PSR interrupt yet, on pipe
         * A the same bit is for perf counters which we don't use either.
         */
        if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
                return 0;

        enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
                         SPRITE0_FLIP_DONE_INT_EN_VLV |
                         SPRITE1_FLIP_DONE_INT_EN_VLV);
        if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
        if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
                enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

        return enable_mask;
}
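
/*
 * The convention encoded above: in PIPESTAT, an event's enable bit
 * normally sits 16 bits above its status bit (hence status_mask << 16);
 * the VLV sprite flip-done events are the exception with dedicated
 * enable bits, which is what the fixups handle.
 */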

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                     u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
                      u32 status_mask)
{
        u32 enable_mask;

        if (IS_VALLEYVIEW(dev_priv->dev))
                enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
        __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irq(&dev_priv->irq_lock);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);

        spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        /* Gen2 doesn't have a hardware frame counter */
        return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
        const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vbl_start = mode->crtc_vblank_start;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vbl_start = DIV_ROUND_UP(vbl_start, 2);

        /* Convert to pixel count */
        vbl_start *= htotal;

        /* Start of vblank event occurs at start of hsync */
        vbl_start -= htotal - hsync_start;

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
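
/*
 * Worked example for the cooked-up counter (illustrative numbers): with
 * htotal = 100, vblank_start = 480 and hsync_start = 90, vbl_start ends
 * up at 480 * 100 - (100 - 90) = 47990 pixels. A pixel counter reading
 * of 48000 is past that point, so 1 is added to the raw frame count,
 * emulating a counter that increments at the start of vblank instead of
 * at the start of active.
 */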

static u32 gm45_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct drm_display_mode *mode = &crtc->base.hwmode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN2(dev))
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank.  So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
        if (IS_HASWELL(dev) && !position) {
                int i, temp;

                for (i = 0; i < 100; i++) {
                        udelay(1);
                        temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
                                DSL_LINEMASK_GEN3;
                        if (temp != position) {
                                position = temp;
                                break;
                        }
                }
        }

        /*
         * See update_scanline_offset() for the details on the
         * scanline_offset adjustment.
         */
        return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
                                    ktime_t *stime, ktime_t *etime,
                                    const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;

        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        hsync_start = mode->crtc_hsync_start;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                vbl_start = DIV_ROUND_UP(vbl_start, 2);
                vbl_end /= 2;
                vtotal /= 2;
        }

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = __intel_get_crtc_scanline(intel_crtc);
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;

                /*
                 * In interlaced modes, the pixel counter counts all pixels,
                 * so one field will have htotal more pixels. To keep the
                 * reported position from jumping backwards when the pixel
                 * counter is beyond the length of the shorter field, just
                 * clamp the position to the length of the shorter field. This
                 * matches how the scanline counter based position works since
                 * the scanline counter doesn't count the two half lines.
                 */
                if (position >= vtotal)
                        position = vtotal - 1;

                /*
                 * Start of vblank interrupt is triggered at start of hsync,
                 * just prior to the first active line of vblank. However we
                 * consider lines to start at the leading edge of horizontal
                 * active. So, should we get here before we've crossed into
                 * the horizontal active of the first line in vblank, we would
                 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
                 * always add htotal-hsync_start to the current pixel position.
                 */
                position = (position + htotal - hsync_start) % vtotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;
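
        /*
         * Worked example for the normalization above (illustrative mode:
         * vtotal = 525, vbl_start = 480, vbl_end = 525): scanline 500 is
         * inside vblank and becomes 500 - 525 = -25, counting up to 0 at
         * vbl_end; scanline 100 is in the active area and stays at
         * 100 + 525 - 525 = 100.
         */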

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_IN_VBLANK;

        return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        unsigned long irqflags;
        int position;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        position = __intel_get_crtc_scanline(crtc);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }

        if (!crtc->hwmode.crtc_clock) {
                DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}

static void notify_ring(struct intel_engine_cs *ring)
{
        if (!intel_ring_initialized(ring))
                return;

        trace_i915_gem_request_notify(ring);

        wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
                        struct intel_rps_ei *ei)
{
        ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
        ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
        ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
                         const struct intel_rps_ei *old,
                         const struct intel_rps_ei *now,
                         int threshold)
{
        u64 time, c0;
        unsigned int mul = 100;

        if (old->cz_clock == 0)
                return false;

        if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                mul <<= 8;

        time = now->cz_clock - old->cz_clock;
        time *= threshold * dev_priv->czclk_freq;

        /* Workload can be split between render + media, e.g. SwapBuffers
         * being blitted in X after being rendered in mesa. To account for
         * this we need to combine both engines into our activity counter.
         */
        c0 = now->render_c0 - old->render_c0;
        c0 += now->media_c0 - old->media_c0;
        c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

        return c0 >= time;
}
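
/*
 * In effect, vlv_c0_above() asks "was the combined render+media
 * residency at least <threshold> percent of the elapsed EI window?".
 * Rather than dividing, both sides are pre-scaled: mul carries the
 * factor of 100 for the percentage (shifted up by 8 more when the
 * counters run in their high range), so a plain c0 >= time comparison
 * suffices.
 */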

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
        vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
        dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        struct intel_rps_ei now;
        u32 events = 0;

        if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
                return 0;

        vlv_c0_read(dev_priv, &now);
        if (now.cz_clock == 0)
                return 0;

        if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
                if (!vlv_c0_above(dev_priv,
                                  &dev_priv->rps.down_ei, &now,
                                  dev_priv->rps.down_threshold))
                        events |= GEN6_PM_RP_DOWN_THRESHOLD;
                dev_priv->rps.down_ei = now;
        }

        if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
                if (vlv_c0_above(dev_priv,
                                 &dev_priv->rps.up_ei, &now,
                                 dev_priv->rps.up_threshold))
                        events |= GEN6_PM_RP_UP_THRESHOLD;
                dev_priv->rps.up_ei = now;
        }

        return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                if (ring->irq_refcount)
                        return true;

        return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, rps.work);
        bool client_boost;
        int new_delay, adj, min, max;
        u32 pm_iir;

        spin_lock_irq(&dev_priv->irq_lock);
        /* Speed up work cancellation while RPS interrupts are being disabled. */
        if (!dev_priv->rps.interrupts_enabled) {
                spin_unlock_irq(&dev_priv->irq_lock);
                return;
        }
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        client_boost = dev_priv->rps.client_boost;
        dev_priv->rps.client_boost = false;
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

        adj = dev_priv->rps.last_adj;
        new_delay = dev_priv->rps.cur_freq;
        min = dev_priv->rps.min_freq_softlimit;
        max = dev_priv->rps.max_freq_softlimit;

        if (client_boost) {
                new_delay = dev_priv->rps.max_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.efficient_freq - adj) {
                        new_delay = dev_priv->rps.efficient_freq;
                        adj = 0;
                }
        } else if (any_waiters(dev_priv)) {
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
                        new_delay = dev_priv->rps.efficient_freq;
                else
                        new_delay = dev_priv->rps.min_freq_softlimit;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
        } else { /* unknown event */
                adj = 0;
        }
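
        /*
         * Example ramp (illustrative): a run of consecutive up-threshold
         * interrupts walks adj through 1, 2, 4, 8, ... (2, 4, 8, ... on
         * CHV, which needs even encodings), so sustained load raises the
         * frequency exponentially; any other event resets the streak by
         * zeroing adj.
         */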

        dev_priv->rps.last_adj = adj;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);

        intel_set_rps(dev_priv->dev, new_delay);

        mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                u32 reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1 + (slice * 0x200);

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irq(&dev_priv->irq_lock);
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
        spin_unlock_irq(&dev_priv->irq_lock);

        mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!HAS_L3_DPF(dev))
                return;

        spin_lock(&dev_priv->irq_lock);
        gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);

        iir &= GT_PARITY_ERROR(dev);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                dev_priv->l3_parity.which_slice |= 1 << 0;

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(&dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(&dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(&dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

        if (gt_iir & GT_PARITY_ERROR(dev))
                ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
                                       u32 master_ctl)
{
        irqreturn_t ret = IRQ_NONE;

        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
                if (tmp) {
                        I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
                        ret = IRQ_HANDLED;

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[RCS]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[RCS]);

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[BCS]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[BCS]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT0)!\n");
        }

        if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
                u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
                if (tmp) {
                        I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
                        ret = IRQ_HANDLED;

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[VCS]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[VCS]);

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[VCS2]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT1)!\n");
        }

        if (master_ctl & GEN8_GT_VECS_IRQ) {
                u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
                if (tmp) {
                        I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
                        ret = IRQ_HANDLED;

                        if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
                                intel_lrc_irq_handler(&dev_priv->ring[VECS]);
                        if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
                                notify_ring(&dev_priv->ring[VECS]);
                } else
                        DRM_ERROR("The master control interrupt lied (GT3)!\n");
        }

        if (master_ctl & GEN8_GT_PM_IRQ) {
                u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
                if (tmp & dev_priv->pm_rps_events) {
                        I915_WRITE_FW(GEN8_GT_IIR(2),
                                      tmp & dev_priv->pm_rps_events);
                        ret = IRQ_HANDLED;
                        gen6_rps_irq_handler(dev_priv, tmp);
                } else
                        DRM_ERROR("The master control interrupt lied (PM)!\n");
        }

        return ret;
}
1361
1362 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1363 {
1364         switch (port) {
1365         case PORT_A:
1366                 return val & PORTA_HOTPLUG_LONG_DETECT;
1367         case PORT_B:
1368                 return val & PORTB_HOTPLUG_LONG_DETECT;
1369         case PORT_C:
1370                 return val & PORTC_HOTPLUG_LONG_DETECT;
1371         default:
1372                 return false;
1373         }
1374 }
1375
1376 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1377 {
1378         switch (port) {
1379         case PORT_E:
1380                 return val & PORTE_HOTPLUG_LONG_DETECT;
1381         default:
1382                 return false;
1383         }
1384 }
1385
1386 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1387 {
1388         switch (port) {
1389         case PORT_A:
1390                 return val & PORTA_HOTPLUG_LONG_DETECT;
1391         case PORT_B:
1392                 return val & PORTB_HOTPLUG_LONG_DETECT;
1393         case PORT_C:
1394                 return val & PORTC_HOTPLUG_LONG_DETECT;
1395         case PORT_D:
1396                 return val & PORTD_HOTPLUG_LONG_DETECT;
1397         default:
1398                 return false;
1399         }
1400 }
1401
1402 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1403 {
1404         switch (port) {
1405         case PORT_A:
1406                 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1407         default:
1408                 return false;
1409         }
1410 }
1411
1412 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1413 {
1414         switch (port) {
1415         case PORT_B:
1416                 return val & PORTB_HOTPLUG_LONG_DETECT;
1417         case PORT_C:
1418                 return val & PORTC_HOTPLUG_LONG_DETECT;
1419         case PORT_D:
1420                 return val & PORTD_HOTPLUG_LONG_DETECT;
1421         default:
1422                 return false;
1423         }
1424 }
1425
1426 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1427 {
1428         switch (port) {
1429         case PORT_B:
1430                 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1431         case PORT_C:
1432                 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1433         case PORT_D:
1434                 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1435         default:
1436                 return false;
1437         }
1438 }
1439
1440 /*
1441  * Get a bit mask of pins that have triggered, and which ones may be long.
1442  * This can be called multiple times with the same masks to accumulate
1443  * hotplug detection results from several registers.
1444  *
1445  * Note that the caller is expected to zero out the masks initially.
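      *
      * Illustrative usage sketch (modelled on spt_irq_handler() below; the
      * trigger/register names are placeholders for values read from hardware):
      *
      *     u32 pin_mask = 0, long_mask = 0;
      *
      *     intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
      *                        dig_hotplug_reg, hpd_spt,
      *                        spt_port_hotplug_long_detect);
      *     intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
      *                        dig_hotplug_reg2, hpd_spt,
      *                        spt_port_hotplug2_long_detect);
      *     if (pin_mask)
      *             intel_hpd_irq_handler(dev, pin_mask, long_mask);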
1446  */
1447 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1448                              u32 hotplug_trigger, u32 dig_hotplug_reg,
1449                              const u32 hpd[HPD_NUM_PINS],
1450                              bool long_pulse_detect(enum port port, u32 val))
1451 {
1452         enum port port;
1453         int i;
1454
1455         for_each_hpd_pin(i) {
1456                 if ((hpd[i] & hotplug_trigger) == 0)
1457                         continue;
1458
1459                 *pin_mask |= BIT(i);
1460
1461                 if (!intel_hpd_pin_to_port(i, &port))
1462                         continue;
1463
1464                 if (long_pulse_detect(port, dig_hotplug_reg))
1465                         *long_mask |= BIT(i);
1466         }
1467
1468         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1469                          hotplug_trigger, dig_hotplug_reg, *pin_mask);
1471 }
1472
1473 static void gmbus_irq_handler(struct drm_device *dev)
1474 {
1475         struct drm_i915_private *dev_priv = dev->dev_private;
1476
1477         wake_up_all(&dev_priv->gmbus_wait_queue);
1478 }
1479
1480 static void dp_aux_irq_handler(struct drm_device *dev)
1481 {
1482         struct drm_i915_private *dev_priv = dev->dev_private;
1483
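             /*
              * DP AUX completion waiters sleep on the same queue as GMBUS
              * waiters in i915, so a single wake_up_all() covers both.
              */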
1484         wake_up_all(&dev_priv->gmbus_wait_queue);
1485 }
1486
1487 #if defined(CONFIG_DEBUG_FS)
1488 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1489                                          uint32_t crc0, uint32_t crc1,
1490                                          uint32_t crc2, uint32_t crc3,
1491                                          uint32_t crc4)
1492 {
1493         struct drm_i915_private *dev_priv = dev->dev_private;
1494         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1495         struct intel_pipe_crc_entry *entry;
1496         int head, tail;
1497
1498         spin_lock(&pipe_crc->lock);
1499
1500         if (!pipe_crc->entries) {
1501                 spin_unlock(&pipe_crc->lock);
1502                 DRM_DEBUG_KMS("spurious interrupt\n");
1503                 return;
1504         }
1505
1506         head = pipe_crc->head;
1507         tail = pipe_crc->tail;
1508
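             /*
              * Buffer full: the reader has fallen behind, so drop this CRC
              * rather than overwrite entries it has not consumed yet.
              */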
1509         if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1510                 spin_unlock(&pipe_crc->lock);
1511                 DRM_ERROR("CRC buffer overflowing\n");
1512                 return;
1513         }
1514
1515         entry = &pipe_crc->entries[head];
1516
1517         entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1518         entry->crc[0] = crc0;
1519         entry->crc[1] = crc1;
1520         entry->crc[2] = crc2;
1521         entry->crc[3] = crc3;
1522         entry->crc[4] = crc4;
1523
1524         head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1525         pipe_crc->head = head;
1526
1527         spin_unlock(&pipe_crc->lock);
1528
1529         wake_up_interruptible(&pipe_crc->wq);
1530 }
1531 #else
1532 static inline void
1533 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1534                              uint32_t crc0, uint32_t crc1,
1535                              uint32_t crc2, uint32_t crc3,
1536                              uint32_t crc4) {}
1537 #endif
1538
1540 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1541 {
1542         struct drm_i915_private *dev_priv = dev->dev_private;
1543
1544         display_pipe_crc_irq_handler(dev, pipe,
1545                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1546                                      0, 0, 0, 0);
1547 }
1548
1549 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1550 {
1551         struct drm_i915_private *dev_priv = dev->dev_private;
1552
1553         display_pipe_crc_irq_handler(dev, pipe,
1554                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1555                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1556                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1557                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1558                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1559 }
1560
1561 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1562 {
1563         struct drm_i915_private *dev_priv = dev->dev_private;
1564         uint32_t res1, res2;
1565
1566         if (INTEL_INFO(dev)->gen >= 3)
1567                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1568         else
1569                 res1 = 0;
1570
1571         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1572                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1573         else
1574                 res2 = 0;
1575
1576         display_pipe_crc_irq_handler(dev, pipe,
1577                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1578                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1579                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1580                                      res1, res2);
1581 }
1582
1583 /* The RPS events need forcewake, so we add them to a work queue and mask their
1584  * IMR bits until the work is done. Other interrupts can be processed without
1585  * the work queue. */
1586 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1587 {
1588         if (pm_iir & dev_priv->pm_rps_events) {
1589                 spin_lock(&dev_priv->irq_lock);
1590                 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1591                 if (dev_priv->rps.interrupts_enabled) {
1592                         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1593                         queue_work(dev_priv->wq, &dev_priv->rps.work);
1594                 }
1595                 spin_unlock(&dev_priv->irq_lock);
1596         }
1597
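             /*
              * On gen8+ the VEBOX user interrupt arrives via the GT interrupt
              * path (gen8_gt_irq_handler()), so only the RPS bits handled
              * above are relevant here.
              */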
1598         if (INTEL_INFO(dev_priv)->gen >= 8)
1599                 return;
1600
1601         if (HAS_VEBOX(dev_priv->dev)) {
1602                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1603                         notify_ring(&dev_priv->ring[VECS]);
1604
1605                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1606                         DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1607         }
1608 }
1609
1610 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1611 {
1612         return drm_handle_vblank(dev, pipe);
1616 }
1617
1618 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1619 {
1620         struct drm_i915_private *dev_priv = dev->dev_private;
1621         u32 pipe_stats[I915_MAX_PIPES] = { };
1622         int pipe;
1623
1624         spin_lock(&dev_priv->irq_lock);
1625         for_each_pipe(dev_priv, pipe) {
1626                 int reg;
1627                 u32 mask, iir_bit = 0;
1628
1629                 /*
1630                  * PIPESTAT bits get signalled even when the interrupt is
1631                  * disabled with the mask bits, and some of the status bits do
1632                  * not generate interrupts at all (like the underrun bit). Hence
1633                  * we need to be careful that we only handle what we want to
1634                  * handle.
1635                  */
1636
1637                 /* fifo underruns are filtered in the underrun handler. */
1638                 mask = PIPE_FIFO_UNDERRUN_STATUS;
1639
1640                 switch (pipe) {
1641                 case PIPE_A:
1642                         iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1643                         break;
1644                 case PIPE_B:
1645                         iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1646                         break;
1647                 case PIPE_C:
1648                         iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1649                         break;
1650                 }
1651                 if (iir & iir_bit)
1652                         mask |= dev_priv->pipestat_irq_mask[pipe];
1653
1654                 if (!mask)
1655                         continue;
1656
1657                 reg = PIPESTAT(pipe);
1658                 mask |= PIPESTAT_INT_ENABLE_MASK;
1659                 pipe_stats[pipe] = I915_READ(reg) & mask;
1660
1661                 /*
1662                  * Clear the PIPE*STAT regs before the IIR
1663                  */
1664                 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1665                                         PIPESTAT_INT_STATUS_MASK))
1666                         I915_WRITE(reg, pipe_stats[pipe]);
1667         }
1668         spin_unlock(&dev_priv->irq_lock);
1669
1670         for_each_pipe(dev_priv, pipe) {
1671                 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1672                     intel_pipe_handle_vblank(dev, pipe))
1673                         intel_check_page_flip(dev, pipe);
1674
1675                 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1676                         intel_prepare_page_flip(dev, pipe);
1677                         intel_finish_page_flip(dev, pipe);
1678                 }
1679
1680                 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1681                         i9xx_pipe_crc_irq_handler(dev, pipe);
1682
1683                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1684                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1685         }
1686
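             /* On VLV/CHV the GMBUS status bit is reported in pipe A's PIPESTAT. */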
1687         if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1688                 gmbus_irq_handler(dev);
1689 }
1690
1691 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1692 {
1693         struct drm_i915_private *dev_priv = dev->dev_private;
1694         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1695         u32 pin_mask = 0, long_mask = 0;
1696
1697         if (!hotplug_status)
1698                 return;
1699
1700         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1701         /*
1702          * Make sure hotplug status is cleared before we clear IIR, or else we
1703          * may miss hotplug events.
1704          */
1705         POSTING_READ(PORT_HOTPLUG_STAT);
1706
1707         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1708                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1709
1710                 if (hotplug_trigger) {
1711                         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1712                                            hotplug_trigger, hpd_status_g4x,
1713                                            i9xx_port_hotplug_long_detect);
1714
1715                         intel_hpd_irq_handler(dev, pin_mask, long_mask);
1716                 }
1717
1718                 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1719                         dp_aux_irq_handler(dev);
1720         } else {
1721                 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1722
1723                 if (hotplug_trigger) {
1724                         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1725                                            hotplug_trigger, hpd_status_i915,
1726                                            i9xx_port_hotplug_long_detect);
1727                         intel_hpd_irq_handler(dev, pin_mask, long_mask);
1728                 }
1729         }
1730 }
1731
1732 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1733 {
1734         struct drm_device *dev = arg;
1735         struct drm_i915_private *dev_priv = dev->dev_private;
1736         u32 iir, gt_iir, pm_iir;
1737         irqreturn_t ret = IRQ_NONE;
1738
1739         if (!intel_irqs_enabled(dev_priv))
1740                 return IRQ_NONE;
1741
1742         while (true) {
1743                 /* Find, clear, then process each source of interrupt */
1744
1745                 gt_iir = I915_READ(GTIIR);
1746                 if (gt_iir)
1747                         I915_WRITE(GTIIR, gt_iir);
1748
1749                 pm_iir = I915_READ(GEN6_PMIIR);
1750                 if (pm_iir)
1751                         I915_WRITE(GEN6_PMIIR, pm_iir);
1752
1753                 iir = I915_READ(VLV_IIR);
1754                 if (iir) {
1755                         /* Consume port before clearing IIR or we'll miss events */
1756                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1757                                 i9xx_hpd_irq_handler(dev);
1758                         I915_WRITE(VLV_IIR, iir);
1759                 }
1760
1761                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1762                         goto out;
1763
1764                 ret = IRQ_HANDLED;
1765
1766                 if (gt_iir)
1767                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1768                 if (pm_iir)
1769                         gen6_rps_irq_handler(dev_priv, pm_iir);
1770                 /* Call regardless, as some status bits might not be
1771                  * signalled in iir */
1772                 valleyview_pipestat_irq_handler(dev, iir);
1773         }
1774
1775 out:
1776         return ret;
1777 }
1778
1779 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1780 {
1781         struct drm_device *dev = arg;
1782         struct drm_i915_private *dev_priv = dev->dev_private;
1783         u32 master_ctl, iir;
1784         irqreturn_t ret = IRQ_NONE;
1785
1786         if (!intel_irqs_enabled(dev_priv))
1787                 return IRQ_NONE;
1788
1789         for (;;) {
1790                 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1791                 iir = I915_READ(VLV_IIR);
1792
1793                 if (master_ctl == 0 && iir == 0)
1794                         break;
1795
1796                 ret = IRQ_HANDLED;
1797
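                     /*
                      * Disable the master interrupt while the IIRs are
                      * cleared and processed; re-enabled below.
                      */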
1798                 I915_WRITE(GEN8_MASTER_IRQ, 0);
1799
1800                 /* Find, clear, then process each source of interrupt */
1801
1802                 if (iir) {
1803                         /* Consume port before clearing IIR or we'll miss events */
1804                         if (iir & I915_DISPLAY_PORT_INTERRUPT)
1805                                 i9xx_hpd_irq_handler(dev);
1806                         I915_WRITE(VLV_IIR, iir);
1807                 }
1808
1809                 gen8_gt_irq_handler(dev_priv, master_ctl);
1810
1811                 /* Call regardless, as some status bits might not be
1812                  * signalled in iir */
1813                 valleyview_pipestat_irq_handler(dev, iir);
1814
1815                 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1816                 POSTING_READ(GEN8_MASTER_IRQ);
1817         }
1818
1819         return ret;
1820 }
1821
1822 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1823                                 const u32 hpd[HPD_NUM_PINS])
1824 {
1825         struct drm_i915_private *dev_priv = to_i915(dev);
1826         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1827
1828         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1829         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1830
1831         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1832                            dig_hotplug_reg, hpd,
1833                            pch_port_hotplug_long_detect);
1834
1835         intel_hpd_irq_handler(dev, pin_mask, long_mask);
1836 }
1837
1838 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1839 {
1840         struct drm_i915_private *dev_priv = dev->dev_private;
1841         int pipe;
1842         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1843
1844         if (hotplug_trigger)
1845                 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1846
1847         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1848                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1849                                SDE_AUDIO_POWER_SHIFT);
1850                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1851                                  port_name(port));
1852         }
1853
1854         if (pch_iir & SDE_AUX_MASK)
1855                 dp_aux_irq_handler(dev);
1856
1857         if (pch_iir & SDE_GMBUS)
1858                 gmbus_irq_handler(dev);
1859
1860         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1861                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1862
1863         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1864                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1865
1866         if (pch_iir & SDE_POISON)
1867                 DRM_ERROR("PCH poison interrupt\n");
1868
1869         if (pch_iir & SDE_FDI_MASK)
1870                 for_each_pipe(dev_priv, pipe)
1871                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1872                                          pipe_name(pipe),
1873                                          I915_READ(FDI_RX_IIR(pipe)));
1874
1875         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1876                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1877
1878         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1879                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1880
1881         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1882                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1883
1884         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1885                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1886 }
1887
1888 static void ivb_err_int_handler(struct drm_device *dev)
1889 {
1890         struct drm_i915_private *dev_priv = dev->dev_private;
1891         u32 err_int = I915_READ(GEN7_ERR_INT);
1892         enum pipe pipe;
1893
1894         if (err_int & ERR_INT_POISON)
1895                 DRM_ERROR("Poison interrupt\n");
1896
1897         for_each_pipe(dev_priv, pipe) {
1898                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1899                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1900
1901                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1902                         if (IS_IVYBRIDGE(dev))
1903                                 ivb_pipe_crc_irq_handler(dev, pipe);
1904                         else
1905                                 hsw_pipe_crc_irq_handler(dev, pipe);
1906                 }
1907         }
1908
1909         I915_WRITE(GEN7_ERR_INT, err_int);
1910 }
1911
1912 static void cpt_serr_int_handler(struct drm_device *dev)
1913 {
1914         struct drm_i915_private *dev_priv = dev->dev_private;
1915         u32 serr_int = I915_READ(SERR_INT);
1916
1917         if (serr_int & SERR_INT_POISON)
1918                 DRM_ERROR("PCH poison interrupt\n");
1919
1920         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1921                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1922
1923         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1924                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1925
1926         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1927                 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1928
1929         I915_WRITE(SERR_INT, serr_int);
1930 }
1931
1932 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1933 {
1934         struct drm_i915_private *dev_priv = dev->dev_private;
1935         int pipe;
1936         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1937
1938         if (hotplug_trigger)
1939                 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1940
1941         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1942                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1943                                SDE_AUDIO_POWER_SHIFT_CPT);
1944                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1945                                  port_name(port));
1946         }
1947
1948         if (pch_iir & SDE_AUX_MASK_CPT)
1949                 dp_aux_irq_handler(dev);
1950
1951         if (pch_iir & SDE_GMBUS_CPT)
1952                 gmbus_irq_handler(dev);
1953
1954         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1955                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1956
1957         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1958                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1959
1960         if (pch_iir & SDE_FDI_MASK_CPT)
1961                 for_each_pipe(dev_priv, pipe)
1962                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1963                                          pipe_name(pipe),
1964                                          I915_READ(FDI_RX_IIR(pipe)));
1965
1966         if (pch_iir & SDE_ERROR_CPT)
1967                 cpt_serr_int_handler(dev);
1968 }
1969
1970 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1971 {
1972         struct drm_i915_private *dev_priv = dev->dev_private;
1973         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1974                 ~SDE_PORTE_HOTPLUG_SPT;
1975         u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1976         u32 pin_mask = 0, long_mask = 0;
1977
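             /*
              * On SPT, ports A-D report hotplug through PCH_PORT_HOTPLUG,
              * while port E has a separate PCH_PORT_HOTPLUG2 register.
              */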
1978         if (hotplug_trigger) {
1979                 u32 dig_hotplug_reg;
1980
1981                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1982                 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1983
1984                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1985                                    dig_hotplug_reg, hpd_spt,
1986                                    spt_port_hotplug_long_detect);
1987         }
1988
1989         if (hotplug2_trigger) {
1990                 u32 dig_hotplug_reg;
1991
1992                 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1993                 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1994
1995                 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1996                                    dig_hotplug_reg, hpd_spt,
1997                                    spt_port_hotplug2_long_detect);
1998         }
1999
2000         if (pin_mask)
2001                 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2002
2003         if (pch_iir & SDE_GMBUS_CPT)
2004                 gmbus_irq_handler(dev);
2005 }
2006
2007 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2008                                 const u32 hpd[HPD_NUM_PINS])
2009 {
2010         struct drm_i915_private *dev_priv = to_i915(dev);
2011         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2012
2013         dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2014         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2015
2016         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2017                            dig_hotplug_reg, hpd,
2018                            ilk_port_hotplug_long_detect);
2019
2020         intel_hpd_irq_handler(dev, pin_mask, long_mask);
2021 }
2022
2023 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2024 {
2025         struct drm_i915_private *dev_priv = dev->dev_private;
2026         enum pipe pipe;
2027         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2028
2029         if (hotplug_trigger)
2030                 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2031
2032         if (de_iir & DE_AUX_CHANNEL_A)
2033                 dp_aux_irq_handler(dev);
2034
2035         if (de_iir & DE_GSE)
2036                 intel_opregion_asle_intr(dev);
2037
2038         if (de_iir & DE_POISON)
2039                 DRM_ERROR("Poison interrupt\n");
2040
2041         for_each_pipe(dev_priv, pipe) {
2042                 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2043                     intel_pipe_handle_vblank(dev, pipe))
2044                         intel_check_page_flip(dev, pipe);
2045
2046                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2047                         intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2048
2049                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2050                         i9xx_pipe_crc_irq_handler(dev, pipe);
2051
2052                 /* plane/pipes map 1:1 on ilk+ */
2053                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2054                         intel_prepare_page_flip(dev, pipe);
2055                         intel_finish_page_flip_plane(dev, pipe);
2056                 }
2057         }
2058
2059         /* check event from PCH */
2060         if (de_iir & DE_PCH_EVENT) {
2061                 u32 pch_iir = I915_READ(SDEIIR);
2062
2063                 if (HAS_PCH_CPT(dev))
2064                         cpt_irq_handler(dev, pch_iir);
2065                 else
2066                         ibx_irq_handler(dev, pch_iir);
2067
2068                 /* should clear PCH hotplug event before clear CPU irq */
2069                 I915_WRITE(SDEIIR, pch_iir);
2070         }
2071
2072         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2073                 ironlake_rps_change_irq_handler(dev);
2074 }
2075
2076 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2077 {
2078         struct drm_i915_private *dev_priv = dev->dev_private;
2079         enum pipe pipe;
2080         u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2081
2082         if (hotplug_trigger)
2083                 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2084
2085         if (de_iir & DE_ERR_INT_IVB)
2086                 ivb_err_int_handler(dev);
2087
2088         if (de_iir & DE_AUX_CHANNEL_A_IVB)
2089                 dp_aux_irq_handler(dev);
2090
2091         if (de_iir & DE_GSE_IVB)
2092                 intel_opregion_asle_intr(dev);
2093
2094         for_each_pipe(dev_priv, pipe) {
2095                 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2096                     intel_pipe_handle_vblank(dev, pipe))
2097                         intel_check_page_flip(dev, pipe);
2098
2099                 /* plane/pipes map 1:1 on ilk+ */
2100                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2101                         intel_prepare_page_flip(dev, pipe);
2102                         intel_finish_page_flip_plane(dev, pipe);
2103                 }
2104         }
2105
2106         /* check event from PCH */
2107         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2108                 u32 pch_iir = I915_READ(SDEIIR);
2109
2110                 cpt_irq_handler(dev, pch_iir);
2111
2112                 /* clear PCH hotplug event before clear CPU irq */
2113                 I915_WRITE(SDEIIR, pch_iir);
2114         }
2115 }
2116
2117 /*
2118  * To handle irqs with the minimum potential races with fresh interrupts, we:
2119  * 1 - Disable Master Interrupt Control.
2120  * 2 - Find the source(s) of the interrupt.
2121  * 3 - Clear the Interrupt Identity bits (IIR).
2122  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2123  * 5 - Re-enable Master Interrupt Control.
2124  */
2125 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2126 {
2127         struct drm_device *dev = arg;
2128         struct drm_i915_private *dev_priv = dev->dev_private;
2129         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2130         irqreturn_t ret = IRQ_NONE;
2131
2132         if (!intel_irqs_enabled(dev_priv))
2133                 return IRQ_NONE;
2134
2135         /* We get interrupts on unclaimed registers, so check for this before we
2136          * do any I915_{READ,WRITE}. */
2137         intel_uncore_check_errors(dev);
2138
2139         /* disable master interrupt before clearing iir  */
2140         de_ier = I915_READ(DEIER);
2141         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2142         POSTING_READ(DEIER);
2143
2144         /* Disable south interrupts. We'll only write to SDEIIR once, so further
2145          * interrupts will be stored on its back queue, and then we'll be
2146          * able to process them after we restore SDEIER (as soon as we restore
2147          * it, we'll get an interrupt if SDEIIR still has something to process
2148          * due to its back queue). */
2149         if (!HAS_PCH_NOP(dev)) {
2150                 sde_ier = I915_READ(SDEIER);
2151                 I915_WRITE(SDEIER, 0);
2152                 POSTING_READ(SDEIER);
2153         }
2154
2155         /* Find, clear, then process each source of interrupt */
2156
2157         gt_iir = I915_READ(GTIIR);
2158         if (gt_iir) {
2159                 I915_WRITE(GTIIR, gt_iir);
2160                 ret = IRQ_HANDLED;
2161                 if (INTEL_INFO(dev)->gen >= 6)
2162                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
2163                 else
2164                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2165         }
2166
2167         de_iir = I915_READ(DEIIR);
2168         if (de_iir) {
2169                 I915_WRITE(DEIIR, de_iir);
2170                 ret = IRQ_HANDLED;
2171                 if (INTEL_INFO(dev)->gen >= 7)
2172                         ivb_display_irq_handler(dev, de_iir);
2173                 else
2174                         ilk_display_irq_handler(dev, de_iir);
2175         }
2176
2177         if (INTEL_INFO(dev)->gen >= 6) {
2178                 u32 pm_iir = I915_READ(GEN6_PMIIR);
2179                 if (pm_iir) {
2180                         I915_WRITE(GEN6_PMIIR, pm_iir);
2181                         ret = IRQ_HANDLED;
2182                         gen6_rps_irq_handler(dev_priv, pm_iir);
2183                 }
2184         }
2185
2186         I915_WRITE(DEIER, de_ier);
2187         POSTING_READ(DEIER);
2188         if (!HAS_PCH_NOP(dev)) {
2189                 I915_WRITE(SDEIER, sde_ier);
2190                 POSTING_READ(SDEIER);
2191         }
2192
2193         return ret;
2194 }
2195
2196 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2197                                 const u32 hpd[HPD_NUM_PINS])
2198 {
2199         struct drm_i915_private *dev_priv = to_i915(dev);
2200         u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2201
2202         dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2203         I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2204
2205         intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2206                            dig_hotplug_reg, hpd,
2207                            bxt_port_hotplug_long_detect);
2208
2209         intel_hpd_irq_handler(dev, pin_mask, long_mask);
2210 }
2211
2212 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2213 {
2214         struct drm_device *dev = arg;
2215         struct drm_i915_private *dev_priv = dev->dev_private;
2216         u32 master_ctl;
2217         irqreturn_t ret = IRQ_NONE;
2218         uint32_t tmp = 0;
2219         enum pipe pipe;
2220         u32 aux_mask = GEN8_AUX_CHANNEL_A;
2221
2222         if (!intel_irqs_enabled(dev_priv))
2223                 return IRQ_NONE;
2224
2225         if (INTEL_INFO(dev_priv)->gen >= 9)
2226                 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2227                         GEN9_AUX_CHANNEL_D;
2228
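             /*
              * Raw _FW register accessors: this hot path avoids the uncore
              * lock/forcewake bookkeeping, which the master IRQ control
              * register does not need.
              */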
2229         master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2230         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2231         if (!master_ctl)
2232                 return IRQ_NONE;
2233
2234         I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2235
2236         /* Find, clear, then process each source of interrupt */
2237
2238         ret = gen8_gt_irq_handler(dev_priv, master_ctl);
2239
2240         if (master_ctl & GEN8_DE_MISC_IRQ) {
2241                 tmp = I915_READ(GEN8_DE_MISC_IIR);
2242                 if (tmp) {
2243                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2244                         ret = IRQ_HANDLED;
2245                         if (tmp & GEN8_DE_MISC_GSE)
2246                                 intel_opregion_asle_intr(dev);
2247                         else
2248                                 DRM_ERROR("Unexpected DE Misc interrupt\n");
2249                 } else
2251                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2252         }
2253
2254         if (master_ctl & GEN8_DE_PORT_IRQ) {
2255                 tmp = I915_READ(GEN8_DE_PORT_IIR);
2256                 if (tmp) {
2257                         bool found = false;
2258                         u32 hotplug_trigger = 0;
2259
2260                         if (IS_BROXTON(dev_priv))
2261                                 hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2262                         else if (IS_BROADWELL(dev_priv))
2263                                 hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2264
2265                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2266                         ret = IRQ_HANDLED;
2267
2268                         if (tmp & aux_mask) {
2269                                 dp_aux_irq_handler(dev);
2270                                 found = true;
2271                         }
2272
2273                         if (hotplug_trigger) {
2274                                 if (IS_BROXTON(dev))
2275                                         bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2276                                 else
2277                                         ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2278                                 found = true;
2279                         }
2280
2281                         if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2282                                 gmbus_irq_handler(dev);
2283                                 found = true;
2284                         }
2285
2286                         if (!found)
2287                                 DRM_ERROR("Unexpected DE Port interrupt\n");
2288                 } else
2290                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2291         }
2292
2293         for_each_pipe(dev_priv, pipe) {
2294                 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2295
2296                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2297                         continue;
2298
2299                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2300                 if (pipe_iir) {
2301                         ret = IRQ_HANDLED;
2302                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2303
2304                         if (pipe_iir & GEN8_PIPE_VBLANK &&
2305                             intel_pipe_handle_vblank(dev, pipe))
2306                                 intel_check_page_flip(dev, pipe);
2307
2308                         if (INTEL_INFO(dev_priv)->gen >= 9)
2309                                 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2310                         else
2311                                 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2312
2313                         if (flip_done) {
2314                                 intel_prepare_page_flip(dev, pipe);
2315                                 intel_finish_page_flip_plane(dev, pipe);
2316                         }
2317
2318                         if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2319                                 hsw_pipe_crc_irq_handler(dev, pipe);
2320
2321                         if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2322                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2323                                                                     pipe);
2324
2326                         if (INTEL_INFO(dev_priv)->gen >= 9)
2327                                 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2328                         else
2329                                 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2330
2331                         if (fault_errors)
2332                                 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2333                                           pipe_name(pipe),
2334                                           pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2335                 } else
2336                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2337         }
2338
2339         if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2340             master_ctl & GEN8_DE_PCH_IRQ) {
2341                 /*
2342                  * FIXME(BDW): Assume for now that the new interrupt handling
2343                  * scheme also closed the SDE interrupt handling race we've seen
2344                  * on older pch-split platforms. But this needs testing.
2345                  */
2346                 u32 pch_iir = I915_READ(SDEIIR);
2347                 if (pch_iir) {
2348                         I915_WRITE(SDEIIR, pch_iir);
2349                         ret = IRQ_HANDLED;
2350
2351                         if (HAS_PCH_SPT(dev_priv))
2352                                 spt_irq_handler(dev, pch_iir);
2353                         else
2354                                 cpt_irq_handler(dev, pch_iir);
2355                 } else
2356                         DRM_ERROR("The master control interrupt lied (SDE)!\n");
2357
2358         }
2359
2360         I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2361         POSTING_READ_FW(GEN8_MASTER_IRQ);
2362
2363         return ret;
2364 }
2365
2366 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2367                                bool reset_completed)
2368 {
2369         struct intel_engine_cs *ring;
2370         int i;
2371
2372         /*
2373          * Notify all waiters for GPU completion events that reset state has
2374          * been changed, and that they need to restart their wait after
2375          * checking for potential errors (and bail out to drop locks if there is
2376          * a gpu reset pending so that i915_reset_and_wakeup() can acquire them).
2377          */
2378
2379         /* Wake up __i915_wait_request, potentially holding dev->struct_mutex. */
2380         for_each_ring(ring, dev_priv, i)
2381                 wake_up_all(&ring->irq_queue);
2382
2383         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2384         wake_up_all(&dev_priv->pending_flip_queue);
2385
2386         /*
2387          * Signal tasks blocked in i915_gem_wait_for_error that the pending
2388          * reset state is cleared.
2389          */
2390         if (reset_completed)
2391                 wake_up_all(&dev_priv->gpu_error.reset_queue);
2392 }
2393
2394 /**
2395  * i915_reset_and_wakeup - do process context error handling work
2396  * @dev: drm device
2397  *
2398  * Fire an error uevent so userspace can see that a hang or error
2399  * was detected.
2400  */
2401 static void i915_reset_and_wakeup(struct drm_device *dev)
2402 {
2403         struct drm_i915_private *dev_priv = to_i915(dev);
2404         struct i915_gpu_error *error = &dev_priv->gpu_error;
2405         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2406         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2407         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2408         int ret;
2409
2410         kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2411
2412         /*
2413          * Note that there's only one work item which does gpu resets, so we
2414          * need not worry about concurrent gpu resets potentially incrementing
2415          * error->reset_counter twice. We only need to take care of another
2416          * racing irq/hangcheck declaring the gpu dead for a second time. A
2417          * quick check for that is good enough: schedule_work ensures the
2418          * correct ordering between hang detection and this work item, and since
2419          * the reset in-progress bit is only ever set by code outside of this
2420          * work we don't need to worry about any other races.
2421          */
2422         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2423                 DRM_DEBUG_DRIVER("resetting chip\n");
2424                 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2425                                    reset_event);
2426
2427                 /*
2428                  * In most cases it's guaranteed that we get here with an RPM
2429                  * reference held, for example because there is a pending GPU
2430                  * request that won't finish until the reset is done. This
2431                  * isn't the case at least when we get here by doing a
2432          * simulated reset via debugfs, so get an RPM reference.
2433                  */
2434                 intel_runtime_pm_get(dev_priv);
2435
2436                 intel_prepare_reset(dev);
2437
2438                 /*
2439                  * All state reset _must_ be completed before we update the
2440                  * reset counter, for otherwise waiters might miss the reset
2441                  * pending state and not properly drop locks, resulting in
2442                  * deadlocks with the reset work.
2443                  */
2444                 ret = i915_reset(dev);
2445
2446                 intel_finish_reset(dev);
2447
2448                 intel_runtime_pm_put(dev_priv);
2449
2450                 if (ret == 0) {
2451                         /*
2452                          * After all the gem state is reset, increment the reset
2453                          * counter and wake up everyone waiting for the reset to
2454                          * complete.
2455                          *
2456                          * Since unlock operations are a one-sided barrier only,
2457                          * we need to insert a barrier here to order any seqno
2458                          * updates before the counter increment.
2460                          */
2461                         smp_mb__before_atomic();
2462                         atomic_inc(&dev_priv->gpu_error.reset_counter);
2463
2464                         kobject_uevent_env(&dev->primary->kdev->kobj,
2465                                            KOBJ_CHANGE, reset_done_event);
2466                 } else {
2467                         atomic_or(I915_WEDGED, &error->reset_counter);
2468                 }
2469
2470                 /*
2471                  * Note: The wake_up also serves as a memory barrier so that
2472                  * waiters see the updated value of the reset counter atomic_t.
2473                  */
2474                 i915_error_wake_up(dev_priv, true);
2475         }
2476 }
2477
2478 static void i915_report_and_clear_eir(struct drm_device *dev)
2479 {
2480         struct drm_i915_private *dev_priv = dev->dev_private;
2481         uint32_t instdone[I915_NUM_INSTDONE_REG];
2482         u32 eir = I915_READ(EIR);
2483         int pipe, i;
2484
2485         if (!eir)
2486                 return;
2487
2488         pr_err("render error detected, EIR: 0x%08x\n", eir);
2489
2490         i915_get_extra_instdone(dev, instdone);
2491
2492         if (IS_G4X(dev)) {
2493                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2494                         u32 ipeir = I915_READ(IPEIR_I965);
2495
2496                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2497                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2498                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
2499                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2500                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2501                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2502                         I915_WRITE(IPEIR_I965, ipeir);
2503                         POSTING_READ(IPEIR_I965);
2504                 }
2505                 if (eir & GM45_ERROR_PAGE_TABLE) {
2506                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2507                         pr_err("page table error\n");
2508                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2509                         I915_WRITE(PGTBL_ER, pgtbl_err);
2510                         POSTING_READ(PGTBL_ER);
2511                 }
2512         }
2513
2514         if (!IS_GEN2(dev)) {
2515                 if (eir & I915_ERROR_PAGE_TABLE) {
2516                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2517                         pr_err("page table error\n");
2518                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2519                         I915_WRITE(PGTBL_ER, pgtbl_err);
2520                         POSTING_READ(PGTBL_ER);
2521                 }
2522         }
2523
2524         if (eir & I915_ERROR_MEMORY_REFRESH) {
2525                 pr_err("memory refresh error:\n");
2526                 for_each_pipe(dev_priv, pipe)
2527                         pr_err("pipe %c stat: 0x%08x\n",
2528                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2529                 /* pipestat has already been acked */
2530         }
2531         if (eir & I915_ERROR_INSTRUCTION) {
2532                 pr_err("instruction error\n");
2533                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2534                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2535                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2536                 if (INTEL_INFO(dev)->gen < 4) {
2537                         u32 ipeir = I915_READ(IPEIR);
2538
2539                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2540                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2541                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2542                         I915_WRITE(IPEIR, ipeir);
2543                         POSTING_READ(IPEIR);
2544                 } else {
2545                         u32 ipeir = I915_READ(IPEIR_I965);
2546
2547                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2548                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2549                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2550                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2551                         I915_WRITE(IPEIR_I965, ipeir);
2552                         POSTING_READ(IPEIR_I965);
2553                 }
2554         }
2555
2556         I915_WRITE(EIR, eir);
2557         POSTING_READ(EIR);
2558         eir = I915_READ(EIR);
2559         if (eir) {
2560                 /*
2561                  * some errors might have become stuck,
2562                  * mask them.
2563                  */
2564                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2565                 I915_WRITE(EMR, I915_READ(EMR) | eir);
2566                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2567         }
2568 }
2569
2570 /**
2571  * i915_handle_error - handle a gpu error
2572  * @dev: drm device
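      * @wedged: if true the gpu is considered hung and a reset is requested
      * @fmt: printf-style format string for the human-readable error message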
2573  *
2574  * Do some basic checking of register state at error time and
2575  * dump it to the syslog.  Also call i915_capture_error_state() to make
2576  * sure we get a record and make it available in debugfs.  Fire a uevent
2577  * so userspace knows something bad happened (should trigger collection
2578  * of a ring dump etc.).
2579  */
2580 void i915_handle_error(struct drm_device *dev, bool wedged,
2581                        const char *fmt, ...)
2582 {
2583         struct drm_i915_private *dev_priv = dev->dev_private;
2584         va_list args;
2585         char error_msg[80];
2586
2587         va_start(args, fmt);
2588         vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2589         va_end(args);
2590
2591         i915_capture_error_state(dev, wedged, error_msg);
2592         i915_report_and_clear_eir(dev);
2593
2594         if (wedged) {
2595                 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2596                                 &dev_priv->gpu_error.reset_counter);
2597
2598                 /*
2599                  * Wakeup waiting processes so that the reset function
2600                  * i915_reset_and_wakeup doesn't deadlock trying to grab
2601                  * various locks. By bumping the reset counter first, the woken
2602                  * processes will see a reset in progress and back off,
2603                  * releasing their locks and then wait for the reset completion.
2604                  * We must do this for _all_ gpu waiters that might hold locks
2605                  * that the reset work needs to acquire.
2606                  *
2607                  * Note: The wake_up serves as the required memory barrier to
2608                  * ensure that the waiters see the updated value of the reset
2609                  * counter atomic_t.
2610                  */
2611                 i915_error_wake_up(dev_priv, false);
2612         }
2613
2614         i915_reset_and_wakeup(dev);
2615 }
2616
2617 /* Called from drm generic code, passed 'crtc' which
2618  * we use as a pipe index
2619  */
2620 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2621 {
2622         struct drm_i915_private *dev_priv = dev->dev_private;
2623         unsigned long irqflags;
2624
2625         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2626         if (INTEL_INFO(dev)->gen >= 4)
2627                 i915_enable_pipestat(dev_priv, pipe,
2628                                      PIPE_START_VBLANK_INTERRUPT_STATUS);
2629         else
2630                 i915_enable_pipestat(dev_priv, pipe,
2631                                      PIPE_VBLANK_INTERRUPT_STATUS);
2632         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2633
2634         return 0;
2635 }
2636
2637 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2638 {
2639         struct drm_i915_private *dev_priv = dev->dev_private;
2640         unsigned long irqflags;
2641         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2642                                                      DE_PIPE_VBLANK(pipe);
2643
2644         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2645         ironlake_enable_display_irq(dev_priv, bit);
2646         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2647
2648         return 0;
2649 }
2650
2651 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2652 {
2653         struct drm_i915_private *dev_priv = dev->dev_private;
2654         unsigned long irqflags;
2655
2656         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2657         i915_enable_pipestat(dev_priv, pipe,
2658                              PIPE_START_VBLANK_INTERRUPT_STATUS);
2659         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2660
2661         return 0;
2662 }
2663
2664 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2665 {
2666         struct drm_i915_private *dev_priv = dev->dev_private;
2667         unsigned long irqflags;
2668
2669         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2670         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2671         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2672         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2673         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2674         return 0;
2675 }
2676
2677 /* Called from drm generic code, passed 'crtc' which
2678  * we use as a pipe index
2679  */
2680 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2681 {
2682         struct drm_i915_private *dev_priv = dev->dev_private;
2683         unsigned long irqflags;
2684
2685         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2686         i915_disable_pipestat(dev_priv, pipe,
2687                               PIPE_VBLANK_INTERRUPT_STATUS |
2688                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2689         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2690 }
2691
2692 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2693 {
2694         struct drm_i915_private *dev_priv = dev->dev_private;
2695         unsigned long irqflags;
2696         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2697                                                      DE_PIPE_VBLANK(pipe);
2698
2699         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2700         ironlake_disable_display_irq(dev_priv, bit);
2701         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2702 }
2703
2704 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2705 {
2706         struct drm_i915_private *dev_priv = dev->dev_private;
2707         unsigned long irqflags;
2708
2709         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2710         i915_disable_pipestat(dev_priv, pipe,
2711                               PIPE_START_VBLANK_INTERRUPT_STATUS);
2712         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2713 }
2714
2715 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2716 {
2717         struct drm_i915_private *dev_priv = dev->dev_private;
2718         unsigned long irqflags;
2719
2720         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2721         dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2722         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2723         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2724         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2725 }
2726
2727 static bool
2728 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2729 {
2730         return (list_empty(&ring->request_list) ||
2731                 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2732 }
2733
2734 static bool
2735 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2736 {
2737         if (INTEL_INFO(dev)->gen >= 8) {
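                     /* MI_SEMAPHORE_WAIT: MI opcode 0x1c in bits 28:23, type bits 31:29 zero */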
2738                 return (ipehr >> 23) == 0x1c;
2739         } else {
2740                 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2741                 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2742                                  MI_SEMAPHORE_REGISTER);
2743         }
2744 }
2745
2746 static struct intel_engine_cs *
2747 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2748 {
2749         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2750         struct intel_engine_cs *signaller;
2751         int i;
2752
2753         if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2754                 for_each_ring(signaller, dev_priv, i) {
2755                         if (ring == signaller)
2756                                 continue;
2757
2758                         if (offset == signaller->semaphore.signal_ggtt[ring->id])
2759                                 return signaller;
2760                 }
2761         } else {
2762                 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2763
2764                 for_each_ring(signaller, dev_priv, i) {
2765                         if (ring == signaller)
2766                                 continue;
2767
2768                         if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2769                                 return signaller;
2770                 }
2771         }
2772
2773         DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2774                   ring->id, ipehr, offset);
2775
2776         return NULL;
2777 }
2778
2779 static struct intel_engine_cs *
2780 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2781 {
2782         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2783         u32 cmd, ipehr, head;
2784         u64 offset = 0;
2785         int i, backwards;
2786
2787         /*
2788          * This function does not support execlist mode - any attempt to
2789          * proceed further into this function will result in a kernel panic
2790          * when dereferencing ring->buffer, which is not set up in execlist
2791          * mode.
2792          *
2793          * The correct way of doing it would be to derive the currently
2794          * executing ring buffer from the current context, which is derived
2795          * from the currently running request. Unfortunately, to get the
2796          * current request we would have to grab the struct_mutex before doing
2797          * anything else, which would be ill-advised since some other thread
2798          * might have grabbed it already and managed to hang itself, causing
2799          * the hang checker to deadlock.
2800          *
2801          * Therefore, this function does not support execlist mode in its
2802          * current form. Just return NULL and move on.
2803          */
2804         if (ring->buffer == NULL)
2805                 return NULL;
2806
2807         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2808         if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2809                 return NULL;
2810
2811         /*
2812          * HEAD is likely pointing to the dword after the actual command,
2813          * so scan backwards until we find the MBOX. But limit it to just 3
2814          * or 4 dwords depending on the semaphore wait command size.
2815          * Note that we don't care about ACTHD here since that might
2816          * point at a batch, and semaphores are always emitted into the
2817          * ringbuffer itself.
2818          */
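             /*
              * For reference, the dword layout the code below assumes once
              * the command has been found (head points at the header):
              *
              *	head + 0:  MI_SEMAPHORE_MBOX / MI_SEMAPHORE_WAIT header
              *	head + 4:  semaphore compare value (one less than the
              *	           awaited seqno, hence the '+ 1' below)
              *	head + 8:  semaphore address, low dword  (gen8+ only)
              *	head + 12: semaphore address, high dword (gen8+ only)
              */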
2819         head = I915_READ_HEAD(ring) & HEAD_ADDR;
2820         backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2821
2822         for (i = backwards; i; --i) {
2823                 /*
2824                  * Be paranoid and presume the hw has gone off into the wild -
2825                  * our ring is smaller than what the hardware (and hence
2826                  * HEAD_ADDR) allows. Also handles wrap-around.
2827                  */
2828                 head &= ring->buffer->size - 1;
2829
2830                 /* This read is the one that blows up in execlist mode (see above) */
2831                 cmd = ioread32(ring->buffer->virtual_start + head);
2832                 if (cmd == ipehr)
2833                         break;
2834
2835                 head -= 4;
2836         }
2837
2838         if (!i)
2839                 return NULL;
2840
2841         *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2842         if (INTEL_INFO(ring->dev)->gen >= 8) {
2843                 offset = ioread32(ring->buffer->virtual_start + head + 12);
2844                 offset <<= 32;
2845                 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2846         }
2847         return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2848 }
2849
2850 static int semaphore_passed(struct intel_engine_cs *ring)
2851 {
2852         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2853         struct intel_engine_cs *signaller;
2854         u32 seqno;
2855
2856         ring->hangcheck.deadlock++;
2857
2858         signaller = semaphore_waits_for(ring, &seqno);
2859         if (signaller == NULL)
2860                 return -1;
2861
2862         /* Prevent pathological recursion due to driver bugs */
2863         if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2864                 return -1;
2865
2866         if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2867                 return 1;
2868
2869         /* cursory check for an unkickable deadlock */
2870         if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2871             semaphore_passed(signaller) < 0)
2872                 return -1;
2873
2874         return 0;
2875 }
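     /*
      * The deadlock counter bounds the recursion above: if, say, the render
      * ring waits on the video ring while the video ring waits on the
      * render ring, semaphore_passed() keeps recursing between the two
      * until one ring's deadlock count reaches I915_NUM_RINGS, at which
      * point -1 is returned and the hang is treated as unkickable.
      */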
2876
2877 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2878 {
2879         struct intel_engine_cs *ring;
2880         int i;
2881
2882         for_each_ring(ring, dev_priv, i)
2883                 ring->hangcheck.deadlock = 0;
2884 }
2885
2886 static enum intel_ring_hangcheck_action
2887 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2888 {
2889         struct drm_device *dev = ring->dev;
2890         struct drm_i915_private *dev_priv = dev->dev_private;
2891         u32 tmp;
2892
2893         if (acthd != ring->hangcheck.acthd) {
2894                 if (acthd > ring->hangcheck.max_acthd) {
2895                         ring->hangcheck.max_acthd = acthd;
2896                         return HANGCHECK_ACTIVE;
2897                 }
2898
2899                 return HANGCHECK_ACTIVE_LOOP;
2900         }
2901
2902         if (IS_GEN2(dev))
2903                 return HANGCHECK_HUNG;
2904
2905         /* Is the chip hanging on a WAIT_FOR_EVENT?
2906          * If so we can simply poke the RB_WAIT bit
2907          * and break the hang. This should work on
2908          * all but the second generation chipsets.
2909          */
2910         tmp = I915_READ_CTL(ring);
2911         if (tmp & RING_WAIT) {
2912                 i915_handle_error(dev, false,
2913                                   "Kicking stuck wait on %s",
2914                                   ring->name);
2915                 I915_WRITE_CTL(ring, tmp);
2916                 return HANGCHECK_KICK;
2917         }
2918
2919         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2920                 switch (semaphore_passed(ring)) {
2921                 default:
2922                         return HANGCHECK_HUNG;
2923                 case 1:
2924                         i915_handle_error(dev, false,
2925                                           "Kicking stuck semaphore on %s",
2926                                           ring->name);
2927                         I915_WRITE_CTL(ring, tmp);
2928                         return HANGCHECK_KICK;
2929                 case 0:
2930                         return HANGCHECK_WAIT;
2931                 }
2932         }
2933
2934         return HANGCHECK_HUNG;
2935 }
2936
2937 /*
2938  * This is called when the chip hasn't reported back with completed
2939  * batchbuffers in a long time. We keep track of seqno progress per ring,
2940  * and if there is no progress the hangcheck score for that ring is
2941  * increased. Further, acthd is inspected to see if the ring is stuck; if
2942  * it is, we kick the ring. If we see no progress over three subsequent
2943  * calls we assume the chip is wedged and try to fix it by resetting it.
2944  */
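     /*
      * Rough arithmetic on the scoring below: a ring is declared hung once
      * its score reaches HANGCHECK_SCORE_RING_HUNG (31 in i915_drv.h at the
      * time of writing), so two consecutive HANGCHECK_HUNG periods (2 * 20)
      * are enough, while a batch that only ever needs kicking takes seven
      * KICK periods (7 * 5 = 35). The gradual decrement in the
      * "progress made" branch keeps well-behaved workloads from slowly
      * accumulating a terminal score.
      */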
2945 static void i915_hangcheck_elapsed(struct work_struct *work)
2946 {
2947         struct drm_i915_private *dev_priv =
2948                 container_of(work, typeof(*dev_priv),
2949                              gpu_error.hangcheck_work.work);
2950         struct drm_device *dev = dev_priv->dev;
2951         struct intel_engine_cs *ring;
2952         int i;
2953         int busy_count = 0, rings_hung = 0;
2954         bool stuck[I915_NUM_RINGS] = { 0 };
2955 #define BUSY 1
2956 #define KICK 5
2957 #define HUNG 20
2958
2959         if (!i915.enable_hangcheck)
2960                 return;
2961
2962         for_each_ring(ring, dev_priv, i) {
2963                 u64 acthd;
2964                 u32 seqno;
2965                 bool busy = true;
2966
2967                 semaphore_clear_deadlocks(dev_priv);
2968
2969                 seqno = ring->get_seqno(ring, false);
2970                 acthd = intel_ring_get_active_head(ring);
2971
2972                 if (ring->hangcheck.seqno == seqno) {
2973                         if (ring_idle(ring, seqno)) {
2974                                 ring->hangcheck.action = HANGCHECK_IDLE;
2975
2976                                 if (waitqueue_active(&ring->irq_queue)) {
2977                                         /* Issue a wake-up to catch stuck h/w. */
2978                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2979                                                 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2980                                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2981                                                                   ring->name);
2982                                                 else
2983                                                         DRM_INFO("Fake missed irq on %s\n",
2984                                                                  ring->name);
2985                                                 wake_up_all(&ring->irq_queue);
2986                                         }
2987                                         /* Safeguard against driver failure */
2988                                         ring->hangcheck.score += BUSY;
2989                                 } else
2990                                         busy = false;
2991                         } else {
2992                                 /* We always increment the hangcheck score
2993                                  * if the ring is busy and still processing
2994                                  * the same request, so that no single request
2995                                  * can run indefinitely (such as a chain of
2996                                  * batches). The only time we do not increment
2997                                  * the hangcheck score on this ring is when it
2998                                  * is in a legitimate wait for another ring;
2999                                  * the waiting ring is then a victim and we
3000                                  * want to catch the right culprit. Every time
3001                                  * we do kick the ring we also add a small
3002                                  * increment to the score, so that we can
3003                                  * catch a batch that is being repeatedly
3004                                  * kicked and is thus responsible for stalling
3005                                  * the machine.
3006                                  */
3007                                 ring->hangcheck.action = ring_stuck(ring,
3008                                                                     acthd);
3009
3010                                 switch (ring->hangcheck.action) {
3011                                 case HANGCHECK_IDLE:
3012                                 case HANGCHECK_WAIT:
3013                                 case HANGCHECK_ACTIVE:
3014                                         break;
3015                                 case HANGCHECK_ACTIVE_LOOP:
3016                                         ring->hangcheck.score += BUSY;
3017                                         break;
3018                                 case HANGCHECK_KICK:
3019                                         ring->hangcheck.score += KICK;
3020                                         break;
3021                                 case HANGCHECK_HUNG:
3022                                         ring->hangcheck.score += HUNG;
3023                                         stuck[i] = true;
3024                                         break;
3025                                 }
3026                         }
3027                 } else {
3028                         ring->hangcheck.action = HANGCHECK_ACTIVE;
3029
3030                         /* Gradually reduce the count so that we catch DoS
3031                          * attempts across multiple batches.
3032                          */
3033                         if (ring->hangcheck.score > 0)
3034                                 ring->hangcheck.score--;
3035
3036                         ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3037                 }
3038
3039                 ring->hangcheck.seqno = seqno;
3040                 ring->hangcheck.acthd = acthd;
3041                 busy_count += busy;
3042         }
3043
3044         for_each_ring(ring, dev_priv, i) {
3045                 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3046                         DRM_INFO("%s on %s\n",
3047                                  stuck[i] ? "stuck" : "no progress",
3048                                  ring->name);
3049                         rings_hung++;
3050                 }
3051         }
3052
3053         if (rings_hung)
3054                 return i915_handle_error(dev, true, "Ring hung");
3055
3056         if (busy_count)
3057                 /* Reset the timer in case the chip hangs without another
3058                  * request being added */
3059                 i915_queue_hangcheck(dev);
3060 }
3061
3062 void i915_queue_hangcheck(struct drm_device *dev)
3063 {
3064         struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3065
3066         if (!i915.enable_hangcheck)
3067                 return;
3068
3069         /* Don't continually defer the hangcheck so that it is always run at
3070          * least once after work has been scheduled on any ring. Otherwise,
3071          * we will ignore a hung ring if a second ring is kept busy.
3072          */
3073
3074         queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3075                            round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3076 }
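     /*
      * For reference, DRM_I915_HANGCHECK_JIFFIES expands to
      * msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD), i.e. 1500ms, and
      * round_jiffies_up_relative() rounds the expiry up to a whole-second
      * boundary so that hangcheck wakeups can coalesce with other timers
      * instead of waking the CPU on their own.
      */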
3077
3078 static void ibx_irq_reset(struct drm_device *dev)
3079 {
3080         struct drm_i915_private *dev_priv = dev->dev_private;
3081
3082         if (HAS_PCH_NOP(dev))
3083                 return;
3084
3085         GEN5_IRQ_RESET(SDE);
3086
3087         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3088                 I915_WRITE(SERR_INT, 0xffffffff);
3089 }
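     /*
      * GEN5_IRQ_RESET() (defined earlier in this file) masks everything in
      * IMR, zeroes IER so nothing is enabled, and clears IIR twice, since a
      * single clear is not guaranteed to leave IIR empty if a second event
      * was latched while the first write was in flight.
      */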
3090
3091 /*
3092  * SDEIER is also touched by the interrupt handler to work around missed PCH
3093  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3094  * instead we unconditionally enable all PCH interrupt sources here, but then
3095  * only unmask them as needed with SDEIMR.
3096  *
3097  * This function needs to be called before interrupts are enabled.
3098  */
3099 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3100 {
3101         struct drm_i915_private *dev_priv = dev->dev_private;
3102
3103         if (HAS_PCH_NOP(dev))
3104                 return;
3105
3106         WARN_ON(I915_READ(SDEIER) != 0);
3107         I915_WRITE(SDEIER, 0xffffffff);
3108         POSTING_READ(SDEIER);
3109 }
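     /*
      * The usual i915 three-register scheme applies to the SDE (PCH)
      * interrupts as well: roughly, events are first visible in a status
      * register, IMR decides which of them get latched into IIR, and IER
      * decides whether a latched IIR bit raises the actual interrupt.
      * Enabling everything in SDEIER once, up front, and doing all later
      * filtering through SDEIMR is what lets the interrupt handler touch
      * SDEIER without racing against this setup code.
      */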
3110
3111 static void gen5_gt_irq_reset(struct drm_device *dev)
3112 {
3113         struct drm_i915_private *dev_priv = dev->dev_private;
3114
3115         GEN5_IRQ_RESET(GT);
3116         if (INTEL_INFO(dev)->gen >= 6)
3117                 GEN5_IRQ_RESET(GEN6_PM);
3118 }
3119
3120 /* drm_dma.h hooks
3121  */
3122 static void ironlake_irq_reset(struct drm_device *dev)
3123 {
3124         struct drm_i915_private *dev_priv = dev->dev_private;
3125
3126         I915_WRITE(HWSTAM, 0xffffffff);
3127
3128         GEN5_IRQ_RESET(DE);
3129         if (IS_GEN7(dev))
3130                 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3131
3132         gen5_gt_irq_reset(dev);
3133
3134         ibx_irq_reset(dev);
3135 }
3136
3137 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3138 {
3139         enum pipe pipe;
3140
3141         i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3142         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3143
3144         for_each_pipe(dev_priv, pipe)
3145                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3146
3147         GEN5_IRQ_RESET(VLV_);
3148 }
3149
3150 static void valleyview_irq_preinstall(struct drm_device *dev)
3151 {
3152         struct drm_i915_private *dev_priv = dev->dev_private;
3153
3154         /* VLV magic */
3155         I915_WRITE(VLV_IMR, 0);
3156         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3157         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3158         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3159
3160         gen5_gt_irq_reset(dev);
3161
3162         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3163
3164         vlv_display_irq_reset(dev_priv);
3165 }
3166
3167 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3168 {
3169         GEN8_IRQ_RESET_NDX(GT, 0);
3170         GEN8_IRQ_RESET_NDX(GT, 1);
3171         GEN8_IRQ_RESET_NDX(GT, 2);
3172         GEN8_IRQ_RESET_NDX(GT, 3);
3173 }
3174
3175 static void gen8_irq_reset(struct drm_device *dev)
3176 {
3177         struct drm_i915_private *dev_priv = dev->dev_private;
3178         int pipe;
3179
3180         I915_WRITE(GEN8_MASTER_IRQ, 0);
3181         POSTING_READ(GEN8_MASTER_IRQ);
3182
3183         gen8_gt_irq_reset(dev_priv);
3184
3185         for_each_pipe(dev_priv, pipe)
3186                 if (intel_display_power_is_enabled(dev_priv,
3187                                                    POWER_DOMAIN_PIPE(pipe)))
3188                         GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3189
3190         GEN5_IRQ_RESET(GEN8_DE_PORT_);
3191         GEN5_IRQ_RESET(GEN8_DE_MISC_);
3192         GEN5_IRQ_RESET(GEN8_PCU_);
3193
3194         if (HAS_PCH_SPLIT(dev))
3195                 ibx_irq_reset(dev);
3196 }
3197
3198 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3199                                      unsigned int pipe_mask)
3200 {
3201         uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3202
3203         spin_lock_irq(&dev_priv->irq_lock);
3204         if (pipe_mask & 1 << PIPE_A)
3205                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3206                                   dev_priv->de_irq_mask[PIPE_A],
3207                                   ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3208         if (pipe_mask & 1 << PIPE_B)
3209                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3210                                   dev_priv->de_irq_mask[PIPE_B],
3211                                   ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3212         if (pipe_mask & 1 << PIPE_C)
3213                 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3214                                   dev_priv->de_irq_mask[PIPE_C],
3215                                   ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3216         spin_unlock_irq(&dev_priv->irq_lock);
3217 }
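     /*
      * This is called after a display power well has been re-enabled: the
      * per-pipe IMR/IER registers live inside the well and lose their
      * contents while it is powered down, so the saved de_irq_mask[] is
      * replayed here for every pipe in pipe_mask.
      */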
3218
3219 static void cherryview_irq_preinstall(struct drm_device *dev)
3220 {
3221         struct drm_i915_private *dev_priv = dev->dev_private;
3222
3223         I915_WRITE(GEN8_MASTER_IRQ, 0);
3224         POSTING_READ(GEN8_MASTER_IRQ);
3225
3226         gen8_gt_irq_reset(dev_priv);
3227
3228         GEN5_IRQ_RESET(GEN8_PCU_);
3229
3230         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3231
3232         vlv_display_irq_reset(dev_priv);
3233 }
3234
3235 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3236                                   const u32 hpd[HPD_NUM_PINS])
3237 {
3238         struct drm_i915_private *dev_priv = to_i915(dev);
3239         struct intel_encoder *encoder;
3240         u32 enabled_irqs = 0;
3241
3242         for_each_intel_encoder(dev, encoder)
3243                 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3244                         enabled_irqs |= hpd[encoder->hpd_pin];
3245
3246         return enabled_irqs;
3247 }
3248
3249 static void ibx_hpd_irq_setup(struct drm_device *dev)
3250 {
3251         struct drm_i915_private *dev_priv = dev->dev_private;
3252         u32 hotplug_irqs, hotplug, enabled_irqs;
3253
3254         if (HAS_PCH_IBX(dev)) {
3255                 hotplug_irqs = SDE_HOTPLUG_MASK;
3256                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3257         } else {
3258                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3259                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3260         }
3261
3262         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3263
3264         /*
3265          * Enable digital hotplug on the PCH, and configure the DP short pulse
3266          * duration to 2ms (which is the minimum in the Display Port spec).
3267          * The pulse duration bits are reserved on LPT+.
3268          */
3269         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3270         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3271         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3272         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3273         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3274         /*
3275          * When CPU and PCH are on the same package, port A
3276          * HPD must be enabled in both north and south.
3277          */
3278         if (HAS_PCH_LPT_LP(dev))
3279                 hotplug |= PORTA_HOTPLUG_ENABLE;
3280         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3281 }
3282
3283 static void spt_hpd_irq_setup(struct drm_device *dev)
3284 {
3285         struct drm_i915_private *dev_priv = dev->dev_private;
3286         u32 hotplug_irqs, hotplug, enabled_irqs;
3287
3288         hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3289         enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3290
3291         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3292
3293         /* Enable digital hotplug on the PCH */
3294         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3295         hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3296                 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3297         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3298
3299         hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3300         hotplug |= PORTE_HOTPLUG_ENABLE;
3301         I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3302 }
3303
3304 static void ilk_hpd_irq_setup(struct drm_device *dev)
3305 {
3306         struct drm_i915_private *dev_priv = dev->dev_private;
3307         u32 hotplug_irqs, hotplug, enabled_irqs;
3308
3309         if (INTEL_INFO(dev)->gen >= 8) {
3310                 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3311                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3312
3313                 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3314         } else if (INTEL_INFO(dev)->gen >= 7) {
3315                 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3316                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3317
3318                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3319         } else {
3320                 hotplug_irqs = DE_DP_A_HOTPLUG;
3321                 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3322
3323                 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3324         }
3325
3326         /*
3327          * Enable digital hotplug on the CPU, and configure the DP short pulse
3328          * duration to 2ms (which is the minimum in the Display Port spec).
3329          * The pulse duration bits are reserved on HSW+.
3330          */
3331         hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3332         hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3333         hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3334         I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3335
3336         ibx_hpd_irq_setup(dev);
3337 }
3338
3339 static void bxt_hpd_irq_setup(struct drm_device *dev)
3340 {
3341         struct drm_i915_private *dev_priv = dev->dev_private;
3342         u32 hotplug_irqs, hotplug, enabled_irqs;
3343
3344         enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3345         hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3346
3347         bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3348
3349         hotplug = I915_READ(PCH_PORT_HOTPLUG);
3350         hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3351                 PORTA_HOTPLUG_ENABLE;
3352         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3353 }
3354
3355 static void ibx_irq_postinstall(struct drm_device *dev)
3356 {
3357         struct drm_i915_private *dev_priv = dev->dev_private;
3358         u32 mask;
3359
3360         if (HAS_PCH_NOP(dev))
3361                 return;
3362
3363         if (HAS_PCH_IBX(dev))
3364                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3365         else
3366                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3367
3368         GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3369         I915_WRITE(SDEIMR, ~mask);
3370 }
3371
3372 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3373 {
3374         struct drm_i915_private *dev_priv = dev->dev_private;
3375         u32 pm_irqs, gt_irqs;
3376
3377         pm_irqs = gt_irqs = 0;
3378
3379         dev_priv->gt_irq_mask = ~0;
3380         if (HAS_L3_DPF(dev)) {
3381                 /* L3 parity interrupt is always unmasked. */
3382                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3383                 gt_irqs |= GT_PARITY_ERROR(dev);
3384         }
3385
3386         gt_irqs |= GT_RENDER_USER_INTERRUPT;
3387         if (IS_GEN5(dev)) {
3388                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3389                            ILK_BSD_USER_INTERRUPT;
3390         } else {
3391                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3392         }
3393
3394         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3395
3396         if (INTEL_INFO(dev)->gen >= 6) {
3397                 /*
3398                  * RPS interrupts will get enabled/disabled on demand when RPS
3399                  * itself is enabled/disabled.
3400                  */
3401                 if (HAS_VEBOX(dev))
3402                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3403
3404                 dev_priv->pm_irq_mask = 0xffffffff;
3405                 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3406         }
3407 }
3408
3409 static int ironlake_irq_postinstall(struct drm_device *dev)
3410 {
3411         struct drm_i915_private *dev_priv = dev->dev_private;
3412         u32 display_mask, extra_mask;
3413
3414         if (INTEL_INFO(dev)->gen >= 7) {
3415                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3416                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3417                                 DE_PLANEB_FLIP_DONE_IVB |
3418                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3419                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3420                               DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3421                               DE_DP_A_HOTPLUG_IVB);
3422         } else {
3423                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3424                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3425                                 DE_AUX_CHANNEL_A |
3426                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3427                                 DE_POISON);
3428                 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3429                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3430                               DE_DP_A_HOTPLUG);
3431         }
3432
3433         dev_priv->irq_mask = ~display_mask;
3434
3435         I915_WRITE(HWSTAM, 0xeffe);
3436
3437         ibx_irq_pre_postinstall(dev);
3438
3439         GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3440
3441         gen5_gt_irq_postinstall(dev);
3442
3443         ibx_irq_postinstall(dev);
3444
3445         if (IS_IRONLAKE_M(dev)) {
3446                 /* Enable PCU event interrupts
3447                  *
3448                  * spinlocking not required here for correctness since interrupt
3449                  * setup is guaranteed to run in single-threaded context. But we
3450                  * need it to make the assert_spin_locked happy. */
3451                 spin_lock_irq(&dev_priv->irq_lock);
3452                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3453                 spin_unlock_irq(&dev_priv->irq_lock);
3454         }
3455
3456         return 0;
3457 }
3458
3459 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3460 {
3461         u32 pipestat_mask;
3462         u32 iir_mask;
3463         enum pipe pipe;
3464
3465         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3466                         PIPE_FIFO_UNDERRUN_STATUS;
3467
3468         for_each_pipe(dev_priv, pipe)
3469                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3470         POSTING_READ(PIPESTAT(PIPE_A));
3471
3472         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3473                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3474
3475         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3476         for_each_pipe(dev_priv, pipe)
3477                 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3478
3479         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3480                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3481                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3482         if (IS_CHERRYVIEW(dev_priv))
3483                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3484         dev_priv->irq_mask &= ~iir_mask;
3485
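             /*
              * VLV_IIR is deliberately written twice below, presumably for
              * the same reason the GEN5_IRQ_RESET() macro clears IIR twice:
              * a second event may latch while the first clear is in flight.
              */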
3486         I915_WRITE(VLV_IIR, iir_mask);
3487         I915_WRITE(VLV_IIR, iir_mask);
3488         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3489         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3490         POSTING_READ(VLV_IMR);
3491 }
3492
3493 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3494 {
3495         u32 pipestat_mask;
3496         u32 iir_mask;
3497         enum pipe pipe;
3498
3499         iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3500                    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3501                    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3502         if (IS_CHERRYVIEW(dev_priv))
3503                 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3504
3505         dev_priv->irq_mask |= iir_mask;
3506         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3507         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3508         I915_WRITE(VLV_IIR, iir_mask);
3509         I915_WRITE(VLV_IIR, iir_mask);
3510         POSTING_READ(VLV_IIR);
3511
3512         pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3513                         PIPE_CRC_DONE_INTERRUPT_STATUS;
3514
3515         i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3516         for_each_pipe(dev_priv, pipe)
3517                 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3518
3519         pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3520                         PIPE_FIFO_UNDERRUN_STATUS;
3521
3522         for_each_pipe(dev_priv, pipe)
3523                 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3524         POSTING_READ(PIPESTAT(PIPE_A));
3525 }
3526
3527 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3528 {
3529         assert_spin_locked(&dev_priv->irq_lock);
3530
3531         if (dev_priv->display_irqs_enabled)
3532                 return;
3533
3534         dev_priv->display_irqs_enabled = true;
3535
3536         if (intel_irqs_enabled(dev_priv))
3537                 valleyview_display_irqs_install(dev_priv);
3538 }
3539
3540 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3541 {
3542         assert_spin_locked(&dev_priv->irq_lock);
3543
3544         if (!dev_priv->display_irqs_enabled)
3545                 return;
3546
3547         dev_priv->display_irqs_enabled = false;
3548
3549         if (intel_irqs_enabled(dev_priv))
3550                 valleyview_display_irqs_uninstall(dev_priv);
3551 }
3552
3553 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3554 {
3555         dev_priv->irq_mask = ~0;
3556
3557         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3558         POSTING_READ(PORT_HOTPLUG_EN);
3559
3560         I915_WRITE(VLV_IIR, 0xffffffff);
3561         I915_WRITE(VLV_IIR, 0xffffffff);
3562         I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3563         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3564         POSTING_READ(VLV_IMR);
3565
3566         /* Interrupt setup is already guaranteed to be single-threaded; this is
3567          * just to make the assert_spin_locked check happy. */
3568         spin_lock_irq(&dev_priv->irq_lock);
3569         if (dev_priv->display_irqs_enabled)
3570                 valleyview_display_irqs_install(dev_priv);
3571         spin_unlock_irq(&dev_priv->irq_lock);
3572 }
3573
3574 static int valleyview_irq_postinstall(struct drm_device *dev)
3575 {
3576         struct drm_i915_private *dev_priv = dev->dev_private;
3577
3578         vlv_display_irq_postinstall(dev_priv);
3579
3580         gen5_gt_irq_postinstall(dev);
3581
3582         /* ack & enable invalid PTE error interrupts */
3583 #if 0 /* FIXME: add support to irq handler for checking these bits */
3584         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3585         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3586 #endif
3587
3588         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3589
3590         return 0;
3591 }
3592
3593 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3594 {
3595         /* These are interrupts we'll toggle with the ring mask register */
3596         uint32_t gt_interrupts[] = {
3597                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3598                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3599                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3600                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3601                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3602                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3603                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3604                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3605                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3606                 0,
3607                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3608                         GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3609                 };
3610
3611         dev_priv->pm_irq_mask = 0xffffffff;
3612         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3613         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3614         /*
3615          * RPS interrupts will get enabled/disabled on demand when RPS itself
3616          * is enabled/disabled.
3617          */
3618         GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3619         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3620 }
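     /*
      * Each gt_interrupts[] entry above corresponds to one gen8 GT IIR
      * bank: [0] packs the render and blitter engines, [1] packs the two
      * video engines, [2] is the PM/RPS bank left for on-demand enabling,
      * and [3] is the video enhancement engine. Two engines share each
      * 32-bit register, hence the per-engine *_IRQ_SHIFT offsets.
      */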
3621
3622 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3623 {
3624         uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3625         uint32_t de_pipe_enables;
3626         u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3627         u32 de_port_enables;
3628         enum pipe pipe;
3629
3630         if (INTEL_INFO(dev_priv)->gen >= 9) {
3631                 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3632                                   GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3633                 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3634                                   GEN9_AUX_CHANNEL_D;
3635                 if (IS_BROXTON(dev_priv))
3636                         de_port_masked |= BXT_DE_PORT_GMBUS;
3637         } else {
3638                 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3639                                   GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3640         }
3641
3642         de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3643                                            GEN8_PIPE_FIFO_UNDERRUN;
3644
3645         de_port_enables = de_port_masked;
3646         if (IS_BROXTON(dev_priv))
3647                 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3648         else if (IS_BROADWELL(dev_priv))
3649                 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3650
3651         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3652         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3653         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3654
3655         for_each_pipe(dev_priv, pipe)
3656                 if (intel_display_power_is_enabled(dev_priv,
3657                                 POWER_DOMAIN_PIPE(pipe)))
3658                         GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3659                                           dev_priv->de_irq_mask[pipe],
3660                                           de_pipe_enables);
3661
3662         GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3663 }
3664
3665 static int gen8_irq_postinstall(struct drm_device *dev)
3666 {
3667         struct drm_i915_private *dev_priv = dev->dev_private;
3668
3669         if (HAS_PCH_SPLIT(dev))
3670                 ibx_irq_pre_postinstall(dev);
3671
3672         gen8_gt_irq_postinstall(dev_priv);
3673         gen8_de_irq_postinstall(dev_priv);
3674
3675         if (HAS_PCH_SPLIT(dev))
3676                 ibx_irq_postinstall(dev);
3677
3678         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3679         POSTING_READ(GEN8_MASTER_IRQ);
3680
3681         return 0;
3682 }
3683
3684 static int cherryview_irq_postinstall(struct drm_device *dev)
3685 {
3686         struct drm_i915_private *dev_priv = dev->dev_private;
3687
3688         vlv_display_irq_postinstall(dev_priv);
3689
3690         gen8_gt_irq_postinstall(dev_priv);
3691
3692         I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3693         POSTING_READ(GEN8_MASTER_IRQ);
3694
3695         return 0;
3696 }
3697
3698 static void gen8_irq_uninstall(struct drm_device *dev)
3699 {
3700         struct drm_i915_private *dev_priv = dev->dev_private;
3701
3702         if (!dev_priv)
3703                 return;
3704
3705         gen8_irq_reset(dev);
3706 }
3707
3708 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3709 {
3710         /* Interrupt setup is already guaranteed to be single-threaded; this is
3711          * just to make the assert_spin_locked check happy. */
3712         spin_lock_irq(&dev_priv->irq_lock);
3713         if (dev_priv->display_irqs_enabled)
3714                 valleyview_display_irqs_uninstall(dev_priv);
3715         spin_unlock_irq(&dev_priv->irq_lock);
3716
3717         vlv_display_irq_reset(dev_priv);
3718
3719         dev_priv->irq_mask = ~0;
3720 }
3721
3722 static void valleyview_irq_uninstall(struct drm_device *dev)
3723 {
3724         struct drm_i915_private *dev_priv = dev->dev_private;
3725
3726         if (!dev_priv)
3727                 return;
3728
3729         I915_WRITE(VLV_MASTER_IER, 0);
3730
3731         gen5_gt_irq_reset(dev);
3732
3733         I915_WRITE(HWSTAM, 0xffffffff);
3734
3735         vlv_display_irq_uninstall(dev_priv);
3736 }
3737
3738 static void cherryview_irq_uninstall(struct drm_device *dev)
3739 {
3740         struct drm_i915_private *dev_priv = dev->dev_private;
3741
3742         if (!dev_priv)
3743                 return;
3744
3745         I915_WRITE(GEN8_MASTER_IRQ, 0);
3746         POSTING_READ(GEN8_MASTER_IRQ);
3747
3748         gen8_gt_irq_reset(dev_priv);
3749
3750         GEN5_IRQ_RESET(GEN8_PCU_);
3751
3752         vlv_display_irq_uninstall(dev_priv);
3753 }
3754
3755 static void ironlake_irq_uninstall(struct drm_device *dev)
3756 {
3757         struct drm_i915_private *dev_priv = dev->dev_private;
3758
3759         if (!dev_priv)
3760                 return;
3761
3762         ironlake_irq_reset(dev);
3763 }
3764
3765 static void i8xx_irq_preinstall(struct drm_device * dev)
3766 {
3767         struct drm_i915_private *dev_priv = dev->dev_private;
3768         int pipe;
3769
3770         for_each_pipe(dev_priv, pipe)
3771                 I915_WRITE(PIPESTAT(pipe), 0);
3772         I915_WRITE16(IMR, 0xffff);
3773         I915_WRITE16(IER, 0x0);
3774         POSTING_READ16(IER);
3775 }
3776
3777 static int i8xx_irq_postinstall(struct drm_device *dev)
3778 {
3779         struct drm_i915_private *dev_priv = dev->dev_private;
3780
3781         I915_WRITE16(EMR,
3782                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3783
3784         /* Unmask the interrupts that we always want on. */
3785         dev_priv->irq_mask =
3786                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3787                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3788                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3789                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3790         I915_WRITE16(IMR, dev_priv->irq_mask);
3791
3792         I915_WRITE16(IER,
3793                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3794                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3795                      I915_USER_INTERRUPT);
3796         POSTING_READ16(IER);
3797
3798         /* Interrupt setup is already guaranteed to be single-threaded; this is
3799          * just to make the assert_spin_locked check happy. */
3800         spin_lock_irq(&dev_priv->irq_lock);
3801         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3802         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3803         spin_unlock_irq(&dev_priv->irq_lock);
3804
3805         return 0;
3806 }
3807
3808 /*
3809  * Returns true when a page flip has completed.
3810  */
3811 static bool i8xx_handle_vblank(struct drm_device *dev,
3812                                int plane, int pipe, u32 iir)
3813 {
3814         struct drm_i915_private *dev_priv = dev->dev_private;
3815         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3816
3817         if (!intel_pipe_handle_vblank(dev, pipe))
3818                 return false;
3819
3820         if ((iir & flip_pending) == 0)
3821                 goto check_page_flip;
3822
3823         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3824          * to '0' on the following vblank, i.e. IIR has the PendingFlip
3825          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3826          * the flip is completed (no longer pending). Since this doesn't raise
3827          * an interrupt per se, we watch for the change at vblank.
3828          */
3829         if (I915_READ16(ISR) & flip_pending)
3830                 goto check_page_flip;
3831
3832         intel_prepare_page_flip(dev, plane);
3833         intel_finish_page_flip(dev, pipe);
3834         return true;
3835
3836 check_page_flip:
3837         intel_check_page_flip(dev, pipe);
3838         return false;
3839 }
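     /*
      * The ISR read above works because ISR, unlike IIR, is not sticky: it
      * mirrors the live state of the flip-pending signal, so flip_pending
      * still being set in ISR means the flip has genuinely not completed,
      * whereas the IIR bit only records that it was pending at some point.
      */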
3840
3841 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3842 {
3843         struct drm_device *dev = arg;
3844         struct drm_i915_private *dev_priv = dev->dev_private;
3845         u16 iir, new_iir;
3846         u32 pipe_stats[2];
3847         int pipe;
3848         u16 flip_mask =
3849                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3850                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3851
3852         if (!intel_irqs_enabled(dev_priv))
3853                 return IRQ_NONE;
3854
3855         iir = I915_READ16(IIR);
3856         if (iir == 0)
3857                 return IRQ_NONE;
3858
3859         while (iir & ~flip_mask) {
3860                 /* Can't rely on pipestat interrupt bit in iir as it might
3861                  * have been cleared after the pipestat interrupt was received.
3862                  * It doesn't set the bit in iir again, but it still produces
3863                  * interrupts (for non-MSI).
3864                  */
3865                 spin_lock(&dev_priv->irq_lock);
3866                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3867                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3868
3869                 for_each_pipe(dev_priv, pipe) {
3870                         int reg = PIPESTAT(pipe);
3871                         pipe_stats[pipe] = I915_READ(reg);
3872
3873                         /*
3874                          * Clear the PIPE*STAT regs before the IIR
3875                          */
3876                         if (pipe_stats[pipe] & 0x8000ffff)
3877                                 I915_WRITE(reg, pipe_stats[pipe]);
3878                 }
3879                 spin_unlock(&dev_priv->irq_lock);
3880
3881                 I915_WRITE16(IIR, iir & ~flip_mask);
3882                 new_iir = I915_READ16(IIR); /* Flush posted writes */
3883
3884                 if (iir & I915_USER_INTERRUPT)
3885                         notify_ring(&dev_priv->ring[RCS]);
3886
3887                 for_each_pipe(dev_priv, pipe) {
3888                         int plane = pipe;
3889                         if (HAS_FBC(dev))
3890                                 plane = !plane;
3891
3892                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3893                             i8xx_handle_vblank(dev, plane, pipe, iir))
3894                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3895
3896                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3897                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3898
3899                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3900                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3901                                                                     pipe);
3902                 }
3903
3904                 iir = new_iir;
3905         }
3906
3907         return IRQ_HANDLED;
3908 }
3909
3910 static void i8xx_irq_uninstall(struct drm_device * dev)
3911 {
3912         struct drm_i915_private *dev_priv = dev->dev_private;
3913         int pipe;
3914
3915         for_each_pipe(dev_priv, pipe) {
3916                 /* Clear enable bits; then clear status bits */
3917                 I915_WRITE(PIPESTAT(pipe), 0);
3918                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3919         }
3920         I915_WRITE16(IMR, 0xffff);
3921         I915_WRITE16(IER, 0x0);
3922         I915_WRITE16(IIR, I915_READ16(IIR));
3923 }
3924
3925 static void i915_irq_preinstall(struct drm_device * dev)
3926 {
3927         struct drm_i915_private *dev_priv = dev->dev_private;
3928         int pipe;
3929
3930         if (I915_HAS_HOTPLUG(dev)) {
3931                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3932                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3933         }
3934
3935         I915_WRITE16(HWSTAM, 0xeffe);
3936         for_each_pipe(dev_priv, pipe)
3937                 I915_WRITE(PIPESTAT(pipe), 0);
3938         I915_WRITE(IMR, 0xffffffff);
3939         I915_WRITE(IER, 0x0);
3940         POSTING_READ(IER);
3941 }
3942
3943 static int i915_irq_postinstall(struct drm_device *dev)
3944 {
3945         struct drm_i915_private *dev_priv = dev->dev_private;
3946         u32 enable_mask;
3947
3948         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3949
3950         /* Unmask the interrupts that we always want on. */
3951         dev_priv->irq_mask =
3952                 ~(I915_ASLE_INTERRUPT |
3953                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3954                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3955                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3956                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3957
3958         enable_mask =
3959                 I915_ASLE_INTERRUPT |
3960                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3961                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3962                 I915_USER_INTERRUPT;
3963
3964         if (I915_HAS_HOTPLUG(dev)) {
3965                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3966                 POSTING_READ(PORT_HOTPLUG_EN);
3967
3968                 /* Enable in IER... */
3969                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3970                 /* and unmask in IMR */
3971                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3972         }
3973
3974         I915_WRITE(IMR, dev_priv->irq_mask);
3975         I915_WRITE(IER, enable_mask);
3976         POSTING_READ(IER);
3977
3978         i915_enable_asle_pipestat(dev);
3979
3980         /* Interrupt setup is already guaranteed to be single-threaded; this is
3981          * just to make the assert_spin_locked check happy. */
3982         spin_lock_irq(&dev_priv->irq_lock);
3983         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3984         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3985         spin_unlock_irq(&dev_priv->irq_lock);
3986
3987         return 0;
3988 }
3989
3990 /*
3991  * Returns true when a page flip has completed.
3992  */
3993 static bool i915_handle_vblank(struct drm_device *dev,
3994                                int plane, int pipe, u32 iir)
3995 {
3996         struct drm_i915_private *dev_priv = dev->dev_private;
3997         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3998
3999         if (!intel_pipe_handle_vblank(dev, pipe))
4000                 return false;
4001
4002         if ((iir & flip_pending) == 0)
4003                 goto check_page_flip;
4004
4005         /* We detect FlipDone by looking for the change in PendingFlip from '1'
4006          * to '0' on the following vblank, i.e. IIR has the PendingFlip
4007          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4008          * the flip is completed (no longer pending). Since this doesn't raise
4009          * an interrupt per se, we watch for the change at vblank.
4010          */
4011         if (I915_READ(ISR) & flip_pending)
4012                 goto check_page_flip;
4013
4014         intel_prepare_page_flip(dev, plane);
4015         intel_finish_page_flip(dev, pipe);
4016         return true;
4017
4018 check_page_flip:
4019         intel_check_page_flip(dev, pipe);
4020         return false;
4021 }
4022
4023 static irqreturn_t i915_irq_handler(int irq, void *arg)
4024 {
4025         struct drm_device *dev = arg;
4026         struct drm_i915_private *dev_priv = dev->dev_private;
4027         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4028         u32 flip_mask =
4029                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4030                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4031         int pipe, ret = IRQ_NONE;
4032
4033         if (!intel_irqs_enabled(dev_priv))
4034                 return IRQ_NONE;
4035
4036         iir = I915_READ(IIR);
4037         do {
4038                 bool irq_received = (iir & ~flip_mask) != 0;
4039                 bool blc_event = false;
4040
4041                 /* Can't rely on pipestat interrupt bit in iir as it might
4042                  * have been cleared after the pipestat interrupt was received.
4043                  * It doesn't set the bit in iir again, but it still produces
4044                  * interrupts (for non-MSI).
4045                  */
4046                 spin_lock(&dev_priv->irq_lock);
4047                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4048                         DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4049
4050                 for_each_pipe(dev_priv, pipe) {
4051                         int reg = PIPESTAT(pipe);
4052                         pipe_stats[pipe] = I915_READ(reg);
4053
4054                         /* Clear the PIPE*STAT regs before the IIR */
4055                         if (pipe_stats[pipe] & 0x8000ffff) {
4056                                 I915_WRITE(reg, pipe_stats[pipe]);
4057                                 irq_received = true;
4058                         }
4059                 }
4060                 spin_unlock(&dev_priv->irq_lock);
4061
4062                 if (!irq_received)
4063                         break;
4064
4065                 /* Consume port.  Then clear IIR or we'll miss events */
4066                 if (I915_HAS_HOTPLUG(dev) &&
4067                     iir & I915_DISPLAY_PORT_INTERRUPT)
4068                         i9xx_hpd_irq_handler(dev);
4069
4070                 I915_WRITE(IIR, iir & ~flip_mask);
4071                 new_iir = I915_READ(IIR); /* Flush posted writes */
4072
4073                 if (iir & I915_USER_INTERRUPT)
4074                         notify_ring(&dev_priv->ring[RCS]);
4075
4076                 for_each_pipe(dev_priv, pipe) {
4077                         int plane = pipe;
4078                         if (HAS_FBC(dev))
4079                                 plane = !plane;
4080
4081                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4082                             i915_handle_vblank(dev, plane, pipe, iir))
4083                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4084
4085                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4086                                 blc_event = true;
4087
4088                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4089                                 i9xx_pipe_crc_irq_handler(dev, pipe);
4090
4091                         if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4092                                 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4093                                                                     pipe);
4094                 }
4095
4096                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4097                         intel_opregion_asle_intr(dev);
4098
4099                 /* With MSI, interrupts are only generated when iir
4100                  * transitions from zero to nonzero.  If another bit got
4101                  * set while we were handling the existing iir bits, then
4102                  * we would never get another interrupt.
4103                  *
4104                  * This is fine on non-MSI as well, as if we hit this path
4105                  * we avoid exiting the interrupt handler only to generate
4106                  * another one.
4107                  *
4108                  * Note that for MSI this could cause a stray interrupt report
4109                  * if an interrupt landed in the time between writing IIR and
4110                  * the posting read.  This should be rare enough to never
4111                  * trigger the 99% of 100,000 interrupts test for disabling
4112                  * stray interrupts.
4113                  */
4114                 ret = IRQ_HANDLED;
4115                 iir = new_iir;
4116         } while (iir & ~flip_mask);
4117
4118         return ret;
4119 }
4120
4121 static void i915_irq_uninstall(struct drm_device * dev)
4122 {
4123         struct drm_i915_private *dev_priv = dev->dev_private;
4124         int pipe;
4125
4126         if (I915_HAS_HOTPLUG(dev)) {
4127                 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4128                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4129         }
4130
4131         I915_WRITE16(HWSTAM, 0xffff);
4132         for_each_pipe(dev_priv, pipe) {
4133                 /* Clear enable bits; then clear status bits */
4134                 I915_WRITE(PIPESTAT(pipe), 0);
4135                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4136         }
4137         I915_WRITE(IMR, 0xffffffff);
4138         I915_WRITE(IER, 0x0);
4139
4140         I915_WRITE(IIR, I915_READ(IIR));
4141 }
4142
4143 static void i965_irq_preinstall(struct drm_device * dev)
4144 {
4145         struct drm_i915_private *dev_priv = dev->dev_private;
4146         int pipe;
4147
4148         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4149         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4150
4151         I915_WRITE(HWSTAM, 0xeffe);
4152         for_each_pipe(dev_priv, pipe)
4153                 I915_WRITE(PIPESTAT(pipe), 0);
4154         I915_WRITE(IMR, 0xffffffff);
4155         I915_WRITE(IER, 0x0);
4156         POSTING_READ(IER);
4157 }
4158
4159 static int i965_irq_postinstall(struct drm_device *dev)
4160 {
4161         struct drm_i915_private *dev_priv = dev->dev_private;
4162         u32 enable_mask;
4163         u32 error_mask;
4164
4165         /* Unmask the interrupts that we always want on. */
4166         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4167                                I915_DISPLAY_PORT_INTERRUPT |
4168                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4169                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4170                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4171                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4172                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4173
4174         enable_mask = ~dev_priv->irq_mask;
4175         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4176                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4177         enable_mask |= I915_USER_INTERRUPT;
4178
4179         if (IS_G4X(dev))
4180                 enable_mask |= I915_BSD_USER_INTERRUPT;
4181
4182         /* Interrupt setup is already guaranteed to be single-threaded; this is
4183          * just to make the assert_spin_locked check happy. */
4184         spin_lock_irq(&dev_priv->irq_lock);
4185         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4186         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4187         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4188         spin_unlock_irq(&dev_priv->irq_lock);
4189
4190         /*
4191          * Enable some error detection, note the instruction error mask
4192          * bit is reserved, so we leave it masked.
4193          */
4194         if (IS_G4X(dev)) {
4195                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4196                                GM45_ERROR_MEM_PRIV |
4197                                GM45_ERROR_CP_PRIV |
4198                                I915_ERROR_MEMORY_REFRESH);
4199         } else {
4200                 error_mask = ~(I915_ERROR_PAGE_TABLE |
4201                                I915_ERROR_MEMORY_REFRESH);
4202         }
4203         I915_WRITE(EMR, error_mask);
4204
4205         I915_WRITE(IMR, dev_priv->irq_mask);
4206         I915_WRITE(IER, enable_mask);
4207         POSTING_READ(IER);
4208
4209         i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4210         POSTING_READ(PORT_HOTPLUG_EN);
4211
4212         i915_enable_asle_pipestat(dev);
4213
4214         return 0;
4215 }
4216
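/*
 * i915_hpd_irq_setup - gen3/gen4 hotplug detection setup
 *
 * Programs PORT_HOTPLUG_EN with the detect-enable bits for the
 * currently enabled connectors; called with irq_lock held.
 */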
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later.  So just do
	 * it once.
	 */
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK,
					     hotplug_en);
}

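/*
 * i965_irq_handler - gen4 top-level interrupt handler
 *
 * Loops until IIR (minus any still-pending flip bits) reads back
 * empty: the PIPESTAT registers are acked under irq_lock before IIR
 * so no events are lost, then the individual sources are dispatched.
 */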
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

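	/*
	 * Flip-pending bits are deliberately not acked in IIR (they are
	 * masked out of the IIR writes below) until i915_handle_vblank()
	 * has seen the completed flip; only then is the bit dropped from
	 * flip_mask so that the next IIR write clears it.
	 */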
	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

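/*
 * i965_irq_uninstall - gen4 interrupt teardown
 *
 * Mirror of preinstall: masks everything and acks any status still
 * pending in PIPESTAT and IIR.
 */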
static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

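	/*
	 * Ack any status still set: 0x8000ffff covers the FIFO underrun
	 * status bit (bit 31) plus the regular status bits (15:0), and
	 * leaves the enable bits (30:16) alone.
	 */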
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

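	/*
	 * CPU DMA latency QoS request; updated elsewhere in the driver
	 * (e.g. around DP AUX transfers) to keep the CPU out of deep
	 * idle states when low-latency interrupt servicing is needed.
	 */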
	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
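 *
 * A minimal sketch of the intended call sequence (error handling
 * omitted) would be:
 *
 *	intel_irq_init(dev_priv);
 *	ret = intel_irq_install(dev_priv);
 *	...
 *	intel_irq_uninstall(dev_priv);
 *
 * Returns 0 on success or a negative error code from drm_irq_install().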
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
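	/* Wait for any handler still in flight before powering down. */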
	synchronize_irq(dev_priv->dev->irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}