xref: /gfx-drm/usr/src/uts/intel/io/i915/i915_irq.c (revision 19482684)
1 /*
2  * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
3  */
4 
5 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
6  */
7 /*
8  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
9  * Copyright (c) 2009, 2012, Intel Corporation.
10  * All Rights Reserved.
11  *
12  * Permission is hereby granted, free of charge, to any person obtaining a
13  * copy of this software and associated documentation files (the
14  * "Software"), to deal in the Software without restriction, including
15  * without limitation the rights to use, copy, modify, merge, publish,
16  * distribute, sub license, and/or sell copies of the Software, and to
17  * permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice (including the
21  * next paragraph) shall be included in all copies or substantial portions
22  * of the Software.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
25  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
27  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
28  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
29  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
30  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
31  *
32  */
33 
34 #include "drmP.h"
35 #include "drm.h"
36 #include "i915_drm.h"
37 #include "i915_drv.h"
38 #include "intel_drv.h"
39 
40 static const u32 hpd_ibx[] = {
41 	[HPD_CRT] = SDE_CRT_HOTPLUG,
42 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
43 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
44 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
45 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
46 };
47 
48 static const u32 hpd_cpt[] = {
49 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
50 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
51 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
52 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
53 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
54 };
55 
56 static const u32 hpd_mask_i915[] = {
57 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
58 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
59 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
60 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
61 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
62 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
63 };
64 
65 static const u32 hpd_status_gen4[] = {
66 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
67 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
68 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
69 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
70 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
71 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
72 };
73 
74 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
75 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
76 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
77 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
78 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
79 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
80 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
81 };
82 
83 /* For display hotplug interrupt */
84 static void
ironlake_enable_display_irq(drm_i915_private_t * dev_priv,u32 mask)85 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
86 {
87 	assert_spin_locked(&dev_priv->irq_lock);
88 
89 	if ((dev_priv->irq_mask & mask) != 0) {
90 		dev_priv->irq_mask &= ~mask;
91 		I915_WRITE(DEIMR, dev_priv->irq_mask);
92 		POSTING_READ(DEIMR);
93 	}
94 }
95 
96 static void
ironlake_disable_display_irq(drm_i915_private_t * dev_priv,u32 mask)97 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
98 {
99 	assert_spin_locked(&dev_priv->irq_lock);
100 
101 	if ((dev_priv->irq_mask & mask) != mask) {
102 		dev_priv->irq_mask |= mask;
103 		I915_WRITE(DEIMR, dev_priv->irq_mask);
104 		POSTING_READ(DEIMR);
105 	}
106 }
107 
ivb_can_enable_err_int(struct drm_device * dev)108 static bool ivb_can_enable_err_int(struct drm_device *dev)
109 {
110 	struct drm_i915_private *dev_priv = dev->dev_private;
111 	struct intel_crtc *crtc;
112 	enum pipe pipe;
113 
114 	assert_spin_locked(&dev_priv->irq_lock);
115 
116 	for_each_pipe(pipe) {
117 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
118 
119 		if (crtc->cpu_fifo_underrun_disabled)
120 			return false;
121 	}
122 
123 	return true;
124 }
125 
cpt_can_enable_serr_int(struct drm_device * dev)126 static bool cpt_can_enable_serr_int(struct drm_device *dev)
127 {
128 	struct drm_i915_private *dev_priv = dev->dev_private;
129 	enum pipe pipe;
130 	struct intel_crtc *crtc;
131 
132 	for_each_pipe(pipe) {
133 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
134 
135 		if (crtc->pch_fifo_underrun_disabled)
136 			return false;
137 	}
138 
139 	return true;
140 }
141 
ironlake_set_fifo_underrun_reporting(struct drm_device * dev,enum pipe pipe,bool enable)142 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
143 						 enum pipe pipe, bool enable)
144 {
145 	struct drm_i915_private *dev_priv = dev->dev_private;
146 	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
147 					  DE_PIPEB_FIFO_UNDERRUN;
148 
149 	if (enable)
150 		ironlake_enable_display_irq(dev_priv, bit);
151 	else
152 		ironlake_disable_display_irq(dev_priv, bit);
153 }
154 
ivybridge_set_fifo_underrun_reporting(struct drm_device * dev,bool enable)155 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
156 						  bool enable)
157 {
158 	struct drm_i915_private *dev_priv = dev->dev_private;
159 
160 	if (enable) {
161 		if (!ivb_can_enable_err_int(dev))
162 			return;
163 
164 		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
165 					 ERR_INT_FIFO_UNDERRUN_B |
166 					 ERR_INT_FIFO_UNDERRUN_C);
167 
168 		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
169 	} else {
170 		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
171 	}
172 }
173 
ibx_set_fifo_underrun_reporting(struct intel_crtc * crtc,bool enable)174 static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
175 					    bool enable)
176 {
177 	struct drm_device *dev = crtc->base.dev;
178 	struct drm_i915_private *dev_priv = dev->dev_private;
179 	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
180 						SDE_TRANSB_FIFO_UNDER;
181 
182 	if (enable)
183 		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
184 	else
185 		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
186 
187 	POSTING_READ(SDEIMR);
188 }
189 
cpt_set_fifo_underrun_reporting(struct drm_device * dev,enum transcoder pch_transcoder,bool enable)190 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
191 					    enum transcoder pch_transcoder,
192 					    bool enable)
193 {
194 	struct drm_i915_private *dev_priv = dev->dev_private;
195 
196 	if (enable) {
197 		if (!cpt_can_enable_serr_int(dev))
198 			return;
199 
200 		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
201 				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
202 				     SERR_INT_TRANS_C_FIFO_UNDERRUN);
203 
204 		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
205 	} else {
206 		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
207 	}
208 
209 	POSTING_READ(SDEIMR);
210 }
211 
212 /**
213  * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
214  * @dev: drm device
215  * @pipe: pipe
216  * @enable: true if we want to report FIFO underrun errors, false otherwise
217  *
218  * This function makes us disable or enable CPU fifo underruns for a specific
219  * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
220  * reporting for one pipe may also disable all the other CPU error interruts for
221  * the other pipes, due to the fact that there's just one interrupt mask/enable
222  * bit for all the pipes.
223  *
224  * Returns the previous state of underrun reporting.
225  */
intel_set_cpu_fifo_underrun_reporting(struct drm_device * dev,enum pipe pipe,bool enable)226 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
227 					   enum pipe pipe, bool enable)
228 {
229 	struct drm_i915_private *dev_priv = dev->dev_private;
230 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
231 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
232 	unsigned long flags;
233 	bool ret;
234 
235 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
236 
237 	ret = !intel_crtc->cpu_fifo_underrun_disabled;
238 
239 	if (enable == ret)
240 		goto done;
241 
242 	intel_crtc->cpu_fifo_underrun_disabled = !enable;
243 
244 	if (IS_GEN5(dev) || IS_GEN6(dev))
245 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
246 	else if (IS_GEN7(dev))
247 		ivybridge_set_fifo_underrun_reporting(dev, enable);
248 
249 done:
250 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
251 	return ret;
252 }
253 
254 /**
255  * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
256  * @dev: drm device
257  * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
258  * @enable: true if we want to report FIFO underrun errors, false otherwise
259  *
260  * This function makes us disable or enable PCH fifo underruns for a specific
261  * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
262  * underrun reporting for one transcoder may also disable all the other PCH
263  * error interruts for the other transcoders, due to the fact that there's just
264  * one interrupt mask/enable bit for all the transcoders.
265  *
266  * Returns the previous state of underrun reporting.
267  */
intel_set_pch_fifo_underrun_reporting(struct drm_device * dev,enum transcoder pch_transcoder,bool enable)268 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
269 					   enum transcoder pch_transcoder,
270 					   bool enable)
271 {
272 	struct drm_i915_private *dev_priv = dev->dev_private;
273 	enum pipe p;
274 	struct drm_crtc *crtc;
275 	struct intel_crtc *intel_crtc;
276 	unsigned long flags;
277 	bool ret;
278 
279 	if (HAS_PCH_LPT(dev)) {
280 		crtc = NULL;
281 		for_each_pipe(p) {
282 			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
283 			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
284 				crtc = c;
285 				break;
286 			}
287 		}
288 		if (!crtc) {
289 			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
290 			return false;
291 		}
292 	} else {
293 		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
294 	}
295 	intel_crtc = to_intel_crtc(crtc);
296 
297 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
298 
299 	ret = !intel_crtc->pch_fifo_underrun_disabled;
300 
301 	if (enable == ret)
302 		goto done;
303 
304 	intel_crtc->pch_fifo_underrun_disabled = !enable;
305 
306 	if (HAS_PCH_IBX(dev))
307 		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
308 	else
309 		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
310 
311 done:
312 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
313 	return ret;
314 }
315 
316 
317 void
i915_enable_pipestat(drm_i915_private_t * dev_priv,int pipe,u32 mask)318 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
319 {
320 	u32 reg = PIPESTAT(pipe);
321 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
322 
323 	if ((pipestat & mask) == mask)
324 		return;
325 
326 	/* Enable the interrupt, clear any pending status */
327 	pipestat |= mask | (mask >> 16);
328 	I915_WRITE(reg, pipestat);
329 	POSTING_READ(reg);
330 }
331 
332 void
i915_disable_pipestat(drm_i915_private_t * dev_priv,int pipe,u32 mask)333 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
334 {
335 	u32 reg = PIPESTAT(pipe);
336 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
337 
338 	if ((pipestat & mask) == 0)
339 		return;
340 
341 	pipestat &= ~mask;
342 	I915_WRITE(reg, pipestat);
343 		POSTING_READ(reg);
344 	}
345 
346 /**
347  * i915_pipe_enabled - check if a pipe is enabled
348  * @dev: DRM device
349  * @pipe: pipe to check
350  *
351  * Reading certain registers when the pipe is disabled can hang the chip.
352  * Use this routine to make sure the PLL is running and the pipe is active
353  * before reading such registers if unsure.
354  */
355 static int
i915_pipe_enabled(struct drm_device * dev,int pipe)356 i915_pipe_enabled(struct drm_device *dev, int pipe)
357 {
358 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
359 
360 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
361 		/* Locking is horribly broken here, but whatever. */
362 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
363 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
364 
365 		return intel_crtc->active;
366 	} else {
367 		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
368 	}
369 }
370 
371 /* Called from drm generic code, passed a 'crtc', which
372  * we use as a pipe index
373  */
i915_get_vblank_counter(struct drm_device * dev,int pipe)374 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
375 {
376 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
377 	unsigned long high_frame;
378 	unsigned long low_frame;
379 	u32 high1, high2, low;
380 
381 	if (!i915_pipe_enabled(dev, pipe)) {
382 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
383 				"pipe %c\n", pipe_name(pipe));
384 		return 0;
385 	}
386 
387 	high_frame = PIPEFRAME(pipe);
388 	low_frame = PIPEFRAMEPIXEL(pipe);
389 
390 	/*
391 	 * High & low register fields aren't synchronized, so make sure
392 	 * we get a low value that's stable across two reads of the high
393 	 * register.
394 	 */
395 	do {
396 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
397 		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
398 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
399 	} while (high1 != high2);
400 
401 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
402 	low >>= PIPE_FRAME_LOW_SHIFT;
403 	return (high1 << 8) | low;
404 }
405 
gm45_get_vblank_counter(struct drm_device * dev,int pipe)406 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
407 {
408 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
409 	int reg = PIPE_FRMCOUNT_GM45(pipe);
410 
411 	if (!i915_pipe_enabled(dev, pipe)) {
412 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
413 				 "pipe %c\n", pipe_name(pipe));
414 		return 0;
415 	}
416 
417 	return I915_READ(reg);
418 }
419 
i915_get_crtc_scanoutpos(struct drm_device * dev,int pipe,int * vpos,int * hpos)420 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
421 			     int *vpos, int *hpos)
422 {
423 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
424 	u32 vbl = 0, position = 0;
425 	int vbl_start, vbl_end, htotal, vtotal;
426 	bool in_vbl = true;
427 	int ret = 0;
428 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
429 								      pipe);
430 
431 	if (!i915_pipe_enabled(dev, pipe)) {
432 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
433 				 "pipe %c\n", pipe_name(pipe));
434 		return 0;
435 	}
436 
437 	/* Get vtotal. */
438 	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
439 
440 	if (INTEL_INFO(dev)->gen >= 4) {
441 		/* No obvious pixelcount register. Only query vertical
442 		 * scanout position from Display scan line register.
443 		 */
444 		position = I915_READ(PIPEDSL(pipe));
445 
446 		/* Decode into vertical scanout position. Don't have
447 		 * horizontal scanout position.
448 		 */
449 		*vpos = position & 0x1fff;
450 		*hpos = 0;
451 	} else {
452 		/* Have access to pixelcount since start of frame.
453 		 * We can split this into vertical and horizontal
454 		 * scanout position.
455 		 */
456 		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
457 
458 		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
459 		*vpos = position / htotal;
460 		*hpos = position - (*vpos * htotal);
461 	}
462 
463 	/* Query vblank area. */
464 	vbl = I915_READ(VBLANK(cpu_transcoder));
465 
466 	/* Test position against vblank region. */
467 	vbl_start = vbl & 0x1fff;
468 	vbl_end = (vbl >> 16) & 0x1fff;
469 
470 	if ((*vpos < vbl_start) || (*vpos > vbl_end))
471 		in_vbl = false;
472 
473 	/* Inside "upper part" of vblank area? Apply corrective offset: */
474 	if (in_vbl && (*vpos >= vbl_start))
475 		*vpos = *vpos - vtotal;
476 
477 	/* Readouts valid? */
478 	if (vbl > 0)
479 		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
480 
481 	/* In vblank? */
482 	if (in_vbl)
483 		ret |= DRM_SCANOUTPOS_INVBL;
484 
485 	return ret;
486 }
487 
i915_get_vblank_timestamp(struct drm_device * dev,int pipe,int * max_error,struct timeval * vblank_time,unsigned flags)488 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
489 			      int *max_error,
490 			      struct timeval *vblank_time,
491 			      unsigned flags)
492 {
493 	struct drm_crtc *crtc;
494 
495 	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
496 		DRM_ERROR("Invalid crtc %d\n", pipe);
497 		return -EINVAL;
498 	}
499 
500 	/* Get drm_crtc to timestamp: */
501 	crtc = intel_get_crtc_for_pipe(dev, pipe);
502 	if (crtc == NULL) {
503 		DRM_ERROR("Invalid crtc %d\n", pipe);
504 		return -EINVAL;
505 	}
506 
507 	if (!crtc->enabled) {
508 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
509 		return -EBUSY;
510 	}
511 
512 	/* Helper routine in DRM core does all the work: */
513 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
514 						     vblank_time, flags,
515 						     crtc);
516 }
517 
intel_hpd_irq_event(struct drm_device * dev,struct drm_connector * connector)518 static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
519 {
520 	enum drm_connector_status old_status;
521 
522 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
523 	old_status = connector->status;
524 
525 	connector->status = connector->funcs->detect(connector, false);
526 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
527 		      connector->base.id,
528 		      drm_get_connector_name(connector),
529 		      old_status, connector->status);
530 	return (old_status != connector->status);
531 }
532 
533 /*
534  * Handle hotplug events outside the interrupt handler proper.
535  */
536 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
537 
i915_hotplug_work_func(struct work_struct * work)538 static void i915_hotplug_work_func(struct work_struct *work)
539 {
540 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
541 						    hotplug_work);
542 	struct drm_device *dev = dev_priv->dev;
543 	struct drm_mode_config *mode_config = &dev->mode_config;
544 	struct intel_connector *intel_connector;
545 	struct intel_encoder *intel_encoder;
546 	struct drm_connector *connector;
547 	unsigned long irqflags;
548 	bool hpd_disabled = false;
549 	bool changed = false;
550 	u32 hpd_event_bits;
551 
552 	/* HPD irq before everything is fully set up. */
553 	if (!dev_priv->enable_hotplug_processing)
554 		return;
555 
556 	mutex_lock(&mode_config->mutex);
557 	DRM_DEBUG_KMS("running encoder hotplug functions\n");
558 
559 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
560 
561 	hpd_event_bits = dev_priv->hpd_event_bits;
562 	dev_priv->hpd_event_bits = 0;
563 	list_for_each_entry(connector, struct drm_connector, &mode_config->connector_list, head) {
564 		intel_connector = to_intel_connector(connector);
565 		intel_encoder = intel_connector->encoder;
566 		if (intel_encoder->hpd_pin > HPD_NONE &&
567 		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
568 		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
569 			DRM_INFO("HPD interrupt storm detected on connector %s: "
570 				 "switching from hotplug detection to polling\n",
571 				drm_get_connector_name(connector));
572 			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
573 			connector->polled = DRM_CONNECTOR_POLL_CONNECT
574 				| DRM_CONNECTOR_POLL_DISCONNECT;
575 			hpd_disabled = true;
576 		}
577 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
578 			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
579 				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
580 		}
581 	}
582 	 /* if there were no outputs to poll, poll was disabled,
583 	  * therefore make sure it's enabled when disabling HPD on
584 	  * some connectors */
585 	if (hpd_disabled) {
586 		drm_kms_helper_poll_enable(dev);
587 		mod_timer(&dev_priv->hotplug_reenable_timer,
588 			  msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
589 	}
590 
591 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
592 
593 	list_for_each_entry(connector, struct drm_connector, &mode_config->connector_list, head) {
594 		intel_connector = to_intel_connector(connector);
595 		intel_encoder = intel_connector->encoder;
596 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
597 			if (intel_encoder->hot_plug)
598 				intel_encoder->hot_plug(intel_encoder);
599 			if (intel_hpd_irq_event(dev, connector))
600 				changed = true;
601 		}
602 	}
603 	mutex_unlock(&mode_config->mutex);
604 
605 	if (changed)
606 		drm_kms_helper_hotplug_event(dev);
607 }
608 
ironlake_handle_rps_change(struct drm_device * dev)609 static void ironlake_handle_rps_change(struct drm_device *dev)
610 {
611 	drm_i915_private_t *dev_priv = dev->dev_private;
612 	u32 busy_up, busy_down, max_avg, min_avg;
613 	u8 new_delay;
614 	unsigned long flags;
615 
616 	spin_lock_irqsave(&mchdev_lock, flags);
617 
618 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
619 
620 	new_delay = dev_priv->ips.cur_delay;
621 
622 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
623 	busy_up = I915_READ(RCPREVBSYTUPAVG);
624 	busy_down = I915_READ(RCPREVBSYTDNAVG);
625 	max_avg = I915_READ(RCBMAXAVG);
626 	min_avg = I915_READ(RCBMINAVG);
627 
628 	/* Handle RCS change request from hw */
629 	if (busy_up > max_avg) {
630 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
631 			new_delay = dev_priv->ips.cur_delay - 1;
632 		if (new_delay < dev_priv->ips.max_delay)
633 			new_delay = dev_priv->ips.max_delay;
634 	} else if (busy_down < min_avg) {
635 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
636 			new_delay = dev_priv->ips.cur_delay + 1;
637 		if (new_delay > dev_priv->ips.min_delay)
638 			new_delay = dev_priv->ips.min_delay;
639 	}
640 
641 	if (ironlake_set_drps(dev, new_delay))
642 		dev_priv->ips.cur_delay = new_delay;
643 
644 	spin_unlock_irqrestore(&mchdev_lock, flags);
645 
646 	return;
647 }
648 
notify_ring(struct drm_device * dev,struct intel_ring_buffer * ring)649 static void notify_ring(struct drm_device *dev,
650 			struct intel_ring_buffer *ring)
651 {
652 	struct drm_i915_private *dev_priv = dev->dev_private;
653 
654 	if (ring->obj == NULL)
655 		return;
656 
657 	DRM_WAKEUP(&ring->irq_queue);
658 	if (i915_enable_hangcheck && !dev_priv->gpu_hang) {
659 		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
660 			msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
661 	}
662 }
663 
gen6_pm_rps_work(struct work_struct * work)664 static void gen6_pm_rps_work(struct work_struct *work)
665 {
666 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
667 						    rps.work);
668 	u32 pm_iir, pm_imr;
669 	u8 new_delay;
670 
671 	spin_lock_irq(&dev_priv->rps.lock);
672 	pm_iir = dev_priv->rps.pm_iir;
673 	dev_priv->rps.pm_iir = 0;
674 	pm_imr = I915_READ(GEN6_PMIMR);
675 	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
676 	spin_unlock_irq(&dev_priv->rps.lock);
677 
678 	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
679 		return;
680 
681 	mutex_lock(&dev_priv->rps.hw_lock);
682 
683 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
684 		new_delay = dev_priv->rps.cur_delay + 1;
685 
686 		/*
687 		 * For better performance, jump directly
688 		 * to RPe if we're below it.
689 		 */
690 		if (IS_VALLEYVIEW(dev_priv->dev) &&
691 		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
692 			new_delay = dev_priv->rps.rpe_delay;
693 	} else
694 		new_delay = dev_priv->rps.cur_delay - 1;
695 
696 	/* sysfs frequency interfaces may have snuck in while servicing the
697 	 * interrupt
698 	 */
699 	if (new_delay >= dev_priv->rps.min_delay &&
700 	    new_delay <= dev_priv->rps.max_delay) {
701 		if (IS_VALLEYVIEW(dev_priv->dev))
702 			valleyview_set_rps(dev_priv->dev, new_delay);
703 		else
704 			gen6_set_rps(dev_priv->dev, new_delay);
705 	}
706 
707 	mutex_unlock(&dev_priv->rps.hw_lock);
708 	}
709 
710 
711 /**
712  * ivybridge_parity_work - Workqueue called when a parity error interrupt
713  * occurred.
714  * @work: workqueue struct
715  *
716  * Doesn't actually do anything except notify userspace. As a consequence of
717  * this event, userspace should try to remap the bad rows since statistically
718  * it is likely the same row is more likely to go bad again.
719  */
ivybridge_parity_work(struct work_struct * work)720 static void ivybridge_parity_work(struct work_struct *work)
721 {
722 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
723 						    l3_parity.error_work);
724 	u32 error_status, row, bank, subbank;
725 	uint32_t misccpctl;
726 	unsigned long flags;
727 
728 	/* We must turn off DOP level clock gating to access the L3 registers.
729 	 * In order to prevent a get/put style interface, acquire struct mutex
730 	 * any time we access those registers.
731 	 */
732 	mutex_lock(&dev_priv->dev->struct_mutex);
733 
734 	misccpctl = I915_READ(GEN7_MISCCPCTL);
735 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
736 	POSTING_READ(GEN7_MISCCPCTL);
737 
738 	error_status = I915_READ(GEN7_L3CDERRST1);
739 	row = GEN7_PARITY_ERROR_ROW(error_status);
740 	bank = GEN7_PARITY_ERROR_BANK(error_status);
741 	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
742 
743 	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
744 				    GEN7_L3CDERRST1_ENABLE);
745 	POSTING_READ(GEN7_L3CDERRST1);
746 
747 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
748 
749 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
750 	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
751 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
752 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
753 
754 	mutex_unlock(&dev_priv->dev->struct_mutex);
755 
756 	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
757 		  row, bank, subbank);
758 }
759 
ivybridge_handle_parity_error(struct drm_device * dev)760 static void ivybridge_handle_parity_error(struct drm_device *dev)
761 {
762 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
763 	unsigned long flags;
764 
765 	if (!HAS_L3_GPU_CACHE(dev))
766 		return;
767 
768 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
769 	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
770 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
771 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
772 
773 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
774 }
775 
snb_gt_irq_handler(struct drm_device * dev,struct drm_i915_private * dev_priv,u32 gt_iir)776 static void snb_gt_irq_handler(struct drm_device *dev,
777 			       struct drm_i915_private *dev_priv,
778 			       u32 gt_iir)
779 {
780 
781 	if (gt_iir &
782 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
783 		notify_ring(dev, &dev_priv->ring[RCS]);
784 	if (gt_iir & GT_BSD_USER_INTERRUPT)
785 		notify_ring(dev, &dev_priv->ring[VCS]);
786 	if (gt_iir & GT_BLT_USER_INTERRUPT)
787 		notify_ring(dev, &dev_priv->ring[BCS]);
788 
789 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
790 		      GT_BSD_CS_ERROR_INTERRUPT |
791 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
792 		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
793 		i915_handle_error(dev, false);
794 	}
795 
796 	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
797 		ivybridge_handle_parity_error(dev);
798 }
799 
gen6_queue_rps_work(struct drm_i915_private * dev_priv,u32 pm_iir)800 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
801 				u32 pm_iir)
802 {
803 	unsigned long flags;
804 
805 	/*
806 	 * IIR bits should never already be set because IMR should
807 	 * prevent an interrupt from being shown in IIR. The warning
808 	 * displays a case where we've unsafely cleared
809 	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
810 	 * type is not a problem, it displays a problem in the logic.
811 	 *
812 	 * The mask bit in IMR is cleared by dev_priv->rps.work.
813 	 */
814 
815 	spin_lock_irqsave(&dev_priv->rps.lock, flags);
816 	dev_priv->rps.pm_iir |= pm_iir;
817 	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
818 	POSTING_READ(GEN6_PMIMR);
819 	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
820 
821 	queue_work(dev_priv->wq, &dev_priv->rps.work);
822 }
823 
824 #define HPD_STORM_DETECT_PERIOD 1000
825 #define HPD_STORM_THRESHOLD 5
826 
intel_hpd_irq_handler(struct drm_device * dev,u32 hotplug_trigger,const u32 * hpd)827 static inline void intel_hpd_irq_handler(struct drm_device *dev,
828 					 u32 hotplug_trigger,
829 					 const u32 *hpd)
830 {
831 	drm_i915_private_t *dev_priv = dev->dev_private;
832 	int i;
833 	bool storm_detected = false;
834 
835 	if (!hotplug_trigger)
836 		return;
837 
838 	spin_lock(&dev_priv->irq_lock);
839 	for (i = 1; i < HPD_NUM_PINS; i++) {
840 
841 		if (!(hpd[i] & hotplug_trigger) ||
842 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
843 			continue;
844 
845 		dev_priv->hpd_event_bits |= (1 << i);
846 		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
847 				   dev_priv->hpd_stats[i].hpd_last_jiffies
848 				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
849 			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
850 			dev_priv->hpd_stats[i].hpd_cnt = 0;
851 		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
852 			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
853 			dev_priv->hpd_event_bits &= ~(1 << i);
854 			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
855 			storm_detected = true;
856 		} else {
857 			dev_priv->hpd_stats[i].hpd_cnt++;
858 		}
859 	}
860 
861 	if (storm_detected)
862 		dev_priv->display.hpd_irq_setup(dev);
863 	spin_unlock(&dev_priv->irq_lock);
864 
865 	queue_work(dev_priv->wq,
866 		   &dev_priv->hotplug_work);
867 }
868 
gmbus_irq_handler(struct drm_device * dev)869 static void gmbus_irq_handler(struct drm_device *dev)
870 {
871 	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
872 
873 	wake_up_all(&dev_priv->gmbus_wait_queue);
874 }
875 
dp_aux_irq_handler(struct drm_device * dev)876 static void dp_aux_irq_handler(struct drm_device *dev)
877 {
878 	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
879 
880 	wake_up_all(&dev_priv->gmbus_wait_queue);
881 }
882 
883 /* Unlike gen6_queue_rps_work() from which this function is originally derived,
884  * we must be able to deal with other PM interrupts. This is complicated because
885  * of the way in which we use the masks to defer the RPS work (which for
886  * posterity is necessary because of forcewake).
887  */
hsw_pm_irq_handler(struct drm_i915_private * dev_priv,u32 pm_iir)888 static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
889 			       u32 pm_iir)
890 {
891 	unsigned long flags;
892 
893 	spin_lock_irqsave(&dev_priv->rps.lock, flags);
894 	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
895 	if (dev_priv->rps.pm_iir) {
896 		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
897 		/* never want to mask useful interrupts. (also posting read) */
898 		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
899 		/* TODO: if queue_work is slow, move it out of the spinlock */
900 		queue_work(dev_priv->wq, &dev_priv->rps.work);
901 	}
902 	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
903 
904 	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
905 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
906 			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
907 
908 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
909 			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
910 			i915_handle_error(dev_priv->dev, false);
911 		}
912 	}
913 }
914 
valleyview_irq_handler(DRM_IRQ_ARGS)915 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
916 {
917 	/* LINTED */
918 	struct drm_device *dev = (struct drm_device *) arg;
919 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
920 	u32 iir, gt_iir, pm_iir;
921 	irqreturn_t ret = IRQ_NONE;
922 	unsigned long irqflags;
923 	int pipe;
924 	u32 pipe_stats[I915_MAX_PIPES] = { 0 };
925 
926 	atomic_inc(&dev_priv->irq_received);
927 
928 	while (true) {
929 		iir = I915_READ(VLV_IIR);
930 		gt_iir = I915_READ(GTIIR);
931 		pm_iir = I915_READ(GEN6_PMIIR);
932 
933 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
934 			goto out;
935 
936 		ret = IRQ_HANDLED;
937 
938 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
939 
940 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
941 		for_each_pipe(pipe) {
942 			int reg = PIPESTAT(pipe);
943 			pipe_stats[pipe] = I915_READ(reg);
944 
945 			/*
946 			 * Clear the PIPE*STAT regs before the IIR
947 			 */
948 			if (pipe_stats[pipe] & 0x8000ffff) {
949 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
950 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
951 							 pipe_name(pipe));
952 				I915_WRITE(reg, pipe_stats[pipe]);
953 			}
954 		}
955 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
956 
957 		for_each_pipe(pipe) {
958 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
959 				drm_handle_vblank(dev, pipe);
960 
961 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
962 				intel_prepare_page_flip(dev, pipe);
963 				intel_finish_page_flip(dev, pipe);
964 			}
965 		}
966 
967 		/* Consume port.  Then clear IIR or we'll miss events */
968 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
969 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
970 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
971 
972 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
973 					 hotplug_status);
974 
975 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
976 
977 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
978 			I915_READ(PORT_HOTPLUG_STAT);
979 		}
980 
981 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
982 			gmbus_irq_handler(dev);
983 
984 		if (pm_iir & GEN6_PM_RPS_EVENTS)
985 			gen6_queue_rps_work(dev_priv, pm_iir);
986 
987 		I915_WRITE(GTIIR, gt_iir);
988 		I915_WRITE(GEN6_PMIIR, pm_iir);
989 		I915_WRITE(VLV_IIR, iir);
990 	}
991 
992 out:
993 	return ret;
994 }
995 
ibx_irq_handler(struct drm_device * dev,u32 pch_iir)996 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
997 {
998 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
999 	int pipe;
1000 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1001 
1002 	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1003 
1004 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1005 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1006 			       SDE_AUDIO_POWER_SHIFT);
1007 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1008 				 port_name(port));
1009 	}
1010 
1011 	if (pch_iir & SDE_AUX_MASK)
1012 		dp_aux_irq_handler(dev);
1013 
1014 	if (pch_iir & SDE_GMBUS)
1015 		gmbus_irq_handler(dev);
1016 
1017 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1018 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1019 
1020 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1021 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1022 
1023 	if (pch_iir & SDE_POISON)
1024 		DRM_ERROR("PCH poison interrupt\n");
1025 
1026 	if (pch_iir & SDE_FDI_MASK)
1027 		for_each_pipe(pipe)
1028 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1029 					 pipe_name(pipe),
1030 					 I915_READ(FDI_RX_IIR(pipe)));
1031 
1032 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1033 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1034 
1035 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1036 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1037 
1038 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1039 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1040 							  false))
1041 			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1042 
1043 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1044 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1045 							  false))
1046 			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1047 }
1048 
ivb_err_int_handler(struct drm_device * dev)1049 static void ivb_err_int_handler(struct drm_device *dev)
1050 {
1051 	struct drm_i915_private *dev_priv = dev->dev_private;
1052 	u32 err_int = I915_READ(GEN7_ERR_INT);
1053 
1054 	if (err_int & ERR_INT_POISON)
1055 		DRM_ERROR("Poison interrupt\n");
1056 
1057 	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
1058 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1059 			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1060 
1061 	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
1062 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1063 			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1064 
1065 	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
1066 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
1067 			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
1068 
1069 	I915_WRITE(GEN7_ERR_INT, err_int);
1070 }
1071 
cpt_serr_int_handler(struct drm_device * dev)1072 static void cpt_serr_int_handler(struct drm_device *dev)
1073 {
1074 	struct drm_i915_private *dev_priv = dev->dev_private;
1075 	u32 serr_int = I915_READ(SERR_INT);
1076 
1077 	if (serr_int & SERR_INT_POISON)
1078 		DRM_ERROR("PCH poison interrupt\n");
1079 
1080 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1081 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1082 							  false))
1083 			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1084 
1085 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1086 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1087 							  false))
1088 			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1089 
1090 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1091 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1092 							  false))
1093 			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1094 
1095 	I915_WRITE(SERR_INT, serr_int);
1096 }
1097 
cpt_irq_handler(struct drm_device * dev,u32 pch_iir)1098 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1099 {
1100 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1101 	int pipe;
1102 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1103 
1104 	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1105 
1106 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1107 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1108 			       SDE_AUDIO_POWER_SHIFT_CPT);
1109 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1110 				 port_name(port));
1111 	}
1112 
1113 	if (pch_iir & SDE_AUX_MASK_CPT)
1114 		dp_aux_irq_handler(dev);
1115 
1116 	if (pch_iir & SDE_GMBUS_CPT)
1117 		gmbus_irq_handler(dev);
1118 
1119 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1120 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1121 
1122 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1123 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1124 
1125 	if (pch_iir & SDE_FDI_MASK_CPT)
1126 		for_each_pipe(pipe)
1127 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1128 					 pipe_name(pipe),
1129 					 I915_READ(FDI_RX_IIR(pipe)));
1130 
1131 	if (pch_iir & SDE_ERROR_CPT)
1132 		cpt_serr_int_handler(dev);
1133 }
1134 
ivybridge_irq_handler(DRM_IRQ_ARGS)1135 static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
1136 {
1137 	/* LINTED */
1138 	struct drm_device *dev = (struct drm_device *) arg;
1139 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1140 	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
1141 	irqreturn_t ret = IRQ_NONE;
1142 	int i;
1143 
1144 	atomic_inc(&dev_priv->irq_received);
1145 
1146 	/* We get interrupts on unclaimed registers, so check for this before we
1147 	 * do any I915_{READ,WRITE}. */
1148 	if (IS_HASWELL(dev) &&
1149 	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1150 		DRM_ERROR("Unclaimed register before interrupt\n");
1151 		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1152 	}
1153 
1154 	/* disable master interrupt before clearing iir  */
1155 	de_ier = I915_READ(DEIER);
1156 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1157 
1158 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
1159 	 * interrupts will will be stored on its back queue, and then we'll be
1160 	 * able to process them after we restore SDEIER (as soon as we restore
1161 	 * it, we'll get an interrupt if SDEIIR still has something to process
1162 	 * due to its back queue). */
1163 	if (!HAS_PCH_NOP(dev)) {
1164 		sde_ier = I915_READ(SDEIER);
1165 		I915_WRITE(SDEIER, 0);
1166 		POSTING_READ(SDEIER);
1167 	} else {
1168 		sde_ier = 0; /* Fix GCC "used unitialized" warning */
1169 	}
1170 
1171 	/* On Haswell, also mask ERR_INT because we don't want to risk
1172 	 * generating "unclaimed register" interrupts from inside the interrupt
1173 	 * handler. */
1174 	if (IS_HASWELL(dev)) {
1175 		spin_lock(&dev_priv->irq_lock);
1176 		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1177 		spin_unlock(&dev_priv->irq_lock);
1178 	}
1179 
1180 	gt_iir = I915_READ(GTIIR);
1181 	if (gt_iir) {
1182 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
1183 		I915_WRITE(GTIIR, gt_iir);
1184 		ret = IRQ_HANDLED;
1185 	}
1186 
1187 	de_iir = I915_READ(DEIIR);
1188 	if (de_iir) {
1189 		if (de_iir & DE_ERR_INT_IVB)
1190 			ivb_err_int_handler(dev);
1191 
1192 		if (de_iir & DE_AUX_CHANNEL_A_IVB)
1193 			dp_aux_irq_handler(dev);
1194 
1195 
1196 		for (i = 0; i < 3; i++) {
1197 			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1198 				drm_handle_vblank(dev, i);
1199 			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1200 				intel_prepare_page_flip(dev, i);
1201 				intel_finish_page_flip_plane(dev, i);
1202 			}
1203 		}
1204 
1205 		/* check event from PCH */
1206 		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1207 			u32 pch_iir = I915_READ(SDEIIR);
1208 
1209 			cpt_irq_handler(dev, pch_iir);
1210 
1211 			/* clear PCH hotplug event before clear CPU irq */
1212 			I915_WRITE(SDEIIR, pch_iir);
1213 		}
1214 
1215 		I915_WRITE(DEIIR, de_iir);
1216 		ret = IRQ_HANDLED;
1217 	}
1218 
1219 	pm_iir = I915_READ(GEN6_PMIIR);
1220 	if (pm_iir) {
1221 		if (IS_HASWELL(dev))
1222 			hsw_pm_irq_handler(dev_priv, pm_iir);
1223 		else if (pm_iir & GEN6_PM_RPS_EVENTS)
1224 			gen6_queue_rps_work(dev_priv, pm_iir);
1225 		I915_WRITE(GEN6_PMIIR, pm_iir);
1226 		ret = IRQ_HANDLED;
1227 	}
1228 
1229 	if (IS_HASWELL(dev)) {
1230 		spin_lock(&dev_priv->irq_lock);
1231 		if (ivb_can_enable_err_int(dev))
1232 			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1233 		spin_unlock(&dev_priv->irq_lock);
1234 	}
1235 
1236 	I915_WRITE(DEIER, de_ier);
1237 	POSTING_READ(DEIER);
1238 	if (!HAS_PCH_NOP(dev)) {
1239 		I915_WRITE(SDEIER, sde_ier);
1240 		POSTING_READ(SDEIER);
1241 	}
1242 
1243 	return ret;
1244 }
1245 
ilk_gt_irq_handler(struct drm_device * dev,struct drm_i915_private * dev_priv,u32 gt_iir)1246 static void ilk_gt_irq_handler(struct drm_device *dev,
1247 			       struct drm_i915_private *dev_priv,
1248 			       u32 gt_iir)
1249 {
1250 	if (gt_iir &
1251 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1252 		notify_ring(dev, &dev_priv->ring[RCS]);
1253 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1254 		notify_ring(dev, &dev_priv->ring[VCS]);
1255 }
1256 
ironlake_irq_handler(DRM_IRQ_ARGS)1257 static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
1258 {
1259 	struct drm_device *dev = (struct drm_device *)(uintptr_t) arg;
1260 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1261 	int ret = IRQ_NONE;
1262 	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
1263 
1264 	atomic_inc(&dev_priv->irq_received);
1265 
1266 	/* disable master interrupt before clearing iir  */
1267 	de_ier = I915_READ(DEIER);
1268 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1269 	POSTING_READ(DEIER);
1270 
1271 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
1272 	 * interrupts will will be stored on its back queue, and then we'll be
1273 	 * able to process them after we restore SDEIER (as soon as we restore
1274 	 * it, we'll get an interrupt if SDEIIR still has something to process
1275 	 * due to its back queue). */
1276 	sde_ier = I915_READ(SDEIER);
1277 	I915_WRITE(SDEIER, 0);
1278 	POSTING_READ(SDEIER);
1279 
1280 	de_iir = I915_READ(DEIIR);
1281 	gt_iir = I915_READ(GTIIR);
1282 	pm_iir = I915_READ(GEN6_PMIIR);
1283 
1284 	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
1285 		goto done;
1286 
1287 	ret = IRQ_HANDLED;
1288 
1289 	if (IS_GEN5(dev))
1290 		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1291 	else
1292 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
1293 
1294 	if (de_iir & DE_AUX_CHANNEL_A)
1295 		dp_aux_irq_handler(dev);
1296 
1297 
1298 	if (de_iir & DE_PIPEA_VBLANK)
1299 		drm_handle_vblank(dev, 0);
1300 
1301 	if (de_iir & DE_PIPEB_VBLANK)
1302 		drm_handle_vblank(dev, 1);
1303 
1304 	if (de_iir & DE_POISON)
1305 		DRM_ERROR("Poison interrupt\n");
1306 
1307 	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1308 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1309 			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1310 
1311 	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1312 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1313 			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1314 
1315 	if (de_iir & DE_PLANEA_FLIP_DONE) {
1316 		intel_prepare_page_flip(dev, 0);
1317 		intel_finish_page_flip_plane(dev, 0);
1318 	}
1319 
1320 	if (de_iir & DE_PLANEB_FLIP_DONE) {
1321 		intel_prepare_page_flip(dev, 1);
1322 		intel_finish_page_flip_plane(dev, 1);
1323 	}
1324 
1325 	/* check event from PCH */
1326 	if (de_iir & DE_PCH_EVENT) {
1327 		u32 pch_iir = I915_READ(SDEIIR);
1328 
1329 		if (HAS_PCH_CPT(dev))
1330 			cpt_irq_handler(dev, pch_iir);
1331 		else
1332 			ibx_irq_handler(dev, pch_iir);
1333 
1334 		/* should clear PCH hotplug event before clear CPU irq */
1335 		I915_WRITE(SDEIIR, pch_iir);
1336 	}
1337 
1338 	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
1339 		ironlake_handle_rps_change(dev);
1340 
1341 	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
1342 		gen6_queue_rps_work(dev_priv, pm_iir);
1343 
1344 	I915_WRITE(GTIIR, gt_iir);
1345 	I915_WRITE(DEIIR, de_iir);
1346 	I915_WRITE(GEN6_PMIIR, pm_iir);
1347 
1348 done:
1349 	I915_WRITE(DEIER, de_ier);
1350 	POSTING_READ(DEIER);
1351 	I915_WRITE(SDEIER, sde_ier);
1352 	POSTING_READ(SDEIER);
1353 
1354 	return ret;
1355 }
1356 
1357 /**
1358  * i915_error_work_func - do process context error handling work
1359  * @work: work struct
1360  *
1361  * Fire an error uevent so userspace can see that a hang or error
1362  * was detected.
1363  */
i915_error_work_func(struct work_struct * work)1364 static void i915_error_work_func(struct work_struct *work)
1365 {
1366 	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1367 						    work);
1368 	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1369 						    gpu_error);
1370 	struct drm_device *dev = dev_priv->dev;
1371 	struct intel_ring_buffer *ring;
1372 	/* LINTED */
1373 	char *error_event[] = { "ERROR=1", NULL };
1374 	/* LINTED */
1375 	char *reset_event[] = { "RESET=1", NULL };
1376 	/* LINTED */
1377 	char *reset_done_event[] = { "ERROR=0", NULL };
1378 	int i, ret;
1379 
1380 	DRM_DEBUG_DRIVER("generating error event\n");
1381 	/* OSOL_i915: kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
1382 
1383 	/*
1384 	 * Note that there's only one work item which does gpu resets, so we
1385 	 * need not worry about concurrent gpu resets potentially incrementing
1386 	 * error->reset_counter twice. We only need to take care of another
1387 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
1388 	 * quick check for that is good enough: schedule_work ensures the
1389 	 * correct ordering between hang detection and this work item, and since
1390 	 * the reset in-progress bit is only ever set by code outside of this
1391 	 * work we don't need to worry about any other races.
1392 	 */
1393 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1394 		DRM_DEBUG_DRIVER("resetting chip\n");
1395 		/* OSOL_i915: kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
1396 		ret = i915_reset(dev);
1397 
1398 		if (ret == 0) {
1399 			/*
1400 			 * After all the gem state is reset, increment the reset
1401 			 * counter and wake up everyone waiting for the reset to
1402 			 * complete.
1403 			 *
1404 			 * Since unlock operations are a one-sided barrier only,
1405 			 * we need to insert a barrier here to order any seqno
1406 			 * updates before
1407 			 * the counter increment.
1408 			 */
1409 			atomic_inc(&dev_priv->gpu_error.reset_counter);
1410 			if (gpu_dump > 0) {
1411 				for_each_ring(ring, dev_priv, i)
1412 					ring_dump(dev, ring);
1413 				register_dump(dev);
1414 				gtt_dump(dev);
1415 			}
1416 		} else {
1417 			atomic_set(&error->reset_counter, I915_WEDGED);
1418 		}
1419 
1420 		for_each_ring(ring, dev_priv, i)
1421 			wake_up_all(&ring->irq_queue);
1422 
1423 		wake_up_all(&dev_priv->gpu_error.reset_queue);
1424 		DRM_INFO("resetting done");
1425 	}
1426 }
1427 
1428 /* NB: please notice the memset */
i915_get_extra_instdone(struct drm_device * dev,uint32_t * instdone)1429 static void i915_get_extra_instdone(struct drm_device *dev,
1430 				    uint32_t *instdone)
1431 {
1432 	struct drm_i915_private *dev_priv = dev->dev_private;
1433 	(void) memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1434 
1435 	switch(INTEL_INFO(dev)->gen) {
1436 	case 2:
1437 	case 3:
1438 		instdone[0] = I915_READ(INSTDONE);
1439 		break;
1440 	case 4:
1441 	case 5:
1442 	case 6:
1443 		instdone[0] = I915_READ(INSTDONE_I965);
1444 		instdone[1] = I915_READ(INSTDONE1);
1445 		break;
1446 	default:
1447 		DRM_INFO("Unsupported platform\n");
1448 		/* FALLTHROUGH */
1449 	case 7:
1450 		instdone[0] = I915_READ(GEN7_INSTDONE_1);
1451 		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1452 		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1453 		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1454 		break;
1455 	}
1456 }
1457 
1458 #ifdef CONFIG_DEBUG_FS
1459 static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private * dev_priv,struct drm_i915_gem_object * src,const int num_pages)1460 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1461 			       struct drm_i915_gem_object *src,
1462 			       const int num_pages)
1463 {
1464 	struct drm_i915_error_object *dst;
1465 	int i;
1466 	u32 reloc_offset;
1467 
1468 	if (src == NULL || src->pages == NULL)
1469 		return NULL;
1470 
1471 	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1472 	if (dst == NULL)
1473 		return NULL;
1474 
1475 	reloc_offset = src->gtt_offset;
1476 	for (i = 0; i < num_pages; i++) {
1477 		unsigned long flags;
1478 		void *d;
1479 
1480 		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1481 		if (d == NULL)
1482 			goto unwind;
1483 
1484 		local_irq_save(flags);
1485 		if (reloc_offset < dev_priv->gtt.mappable_end &&
1486 		    src->has_global_gtt_mapping) {
1487 			void __iomem *s;
1488 
1489 			/* Simply ignore tiling or any overlapping fence.
1490 			 * It's part of the error state, and this hopefully
1491 			 * captures what the GPU read.
1492 			 */
1493 
1494 			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1495 						     reloc_offset);
1496 			memcpy_fromio(d, s, PAGE_SIZE);
1497 			io_mapping_unmap_atomic(s);
1498 		} else if (src->stolen) {
1499 			unsigned long offset;
1500 
1501 			offset = dev_priv->mm.stolen_base;
1502 			offset += src->stolen->start;
1503 			offset += i << PAGE_SHIFT;
1504 
1505 			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
1506 		} else {
1507 			struct page *page;
1508 			void *s;
1509 
1510 			page = i915_gem_object_get_page(src, i);
1511 
1512 			drm_clflush_pages(&page, 1);
1513 
1514 			s = kmap_atomic(page);
1515 			memcpy(d, s, PAGE_SIZE);
1516 			kunmap_atomic(s);
1517 
1518 			drm_clflush_pages(&page, 1);
1519 		}
1520 		local_irq_restore(flags);
1521 
1522 		dst->pages[i] = d;
1523 
1524 		reloc_offset += PAGE_SIZE;
1525 	}
1526 	dst->page_count = num_pages;
1527 	dst->gtt_offset = src->gtt_offset;
1528 
1529 	return dst;
1530 
1531 unwind:
1532 	while (i--)
1533 		kfree(dst->pages[i]);
1534 	kfree(dst);
1535 	return NULL;
1536 }
1537 #define i915_error_object_create(dev_priv, src) \
1538 	i915_error_object_create_sized((dev_priv), (src), \
1539 				       (src)->base.size>>PAGE_SHIFT)
1540 
1541 static void
i915_error_object_free(struct drm_i915_error_object * obj)1542 i915_error_object_free(struct drm_i915_error_object *obj)
1543 {
1544 	int page;
1545 
1546 	if (obj == NULL)
1547 		return;
1548 
1549 	for (page = 0; page < obj->page_count; page++)
1550 		kfree(obj->pages[page]);
1551 
1552 	kfree(obj);
1553 }
1554 
1555 void
i915_error_state_free(struct kref * error_ref)1556 i915_error_state_free(struct kref *error_ref)
1557 {
1558 	struct drm_i915_error_state *error = container_of(error_ref,
1559 							  typeof(*error), ref);
1560 	int i;
1561 
1562 	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1563 		i915_error_object_free(error->ring[i].batchbuffer);
1564 		i915_error_object_free(error->ring[i].ringbuffer);
1565 		i915_error_object_free(error->ring[i].ctx);
1566 		kfree(error->ring[i].requests);
1567 	}
1568 
1569 	kfree(error->active_bo);
1570 	kfree(error->overlay);
1571 	kfree(error->display);
1572 	kfree(error);
1573 }
capture_bo(struct drm_i915_error_buffer * err,struct drm_i915_gem_object * obj)1574 static void capture_bo(struct drm_i915_error_buffer *err,
1575 		       struct drm_i915_gem_object *obj)
1576 {
1577 	err->size = obj->base.size;
1578 	err->name = obj->base.name;
1579 	err->rseqno = obj->last_read_seqno;
1580 	err->wseqno = obj->last_write_seqno;
1581 	err->gtt_offset = obj->gtt_offset;
1582 	err->read_domains = obj->base.read_domains;
1583 	err->write_domain = obj->base.write_domain;
1584 	err->fence_reg = obj->fence_reg;
1585 	err->pinned = 0;
1586 	if (obj->pin_count > 0)
1587 		err->pinned = 1;
1588 	if (obj->user_pin_count > 0)
1589 		err->pinned = -1;
1590 	err->tiling = obj->tiling_mode;
1591 	err->dirty = obj->dirty;
1592 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
1593 	err->ring = obj->ring ? obj->ring->id : -1;
1594 	err->cache_level = obj->cache_level;
1595 }
1596 
capture_active_bo(struct drm_i915_error_buffer * err,int count,struct list_head * head)1597 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1598 			     int count, struct list_head *head)
1599 {
1600 	struct drm_i915_gem_object *obj;
1601 	int i = 0;
1602 
1603 	list_for_each_entry(obj, head, mm_list) {
1604 		capture_bo(err++, obj);
1605 		if (++i == count)
1606 			break;
1607 	}
1608 
1609 	return i;
1610 }
1611 
capture_pinned_bo(struct drm_i915_error_buffer * err,int count,struct list_head * head)1612 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1613 			     int count, struct list_head *head)
1614 {
1615 	struct drm_i915_gem_object *obj;
1616 	int i = 0;
1617 
1618 	list_for_each_entry(obj, head, global_list) {
1619 		if (obj->pin_count == 0)
1620 			continue;
1621 
1622 		capture_bo(err++, obj);
1623 		if (++i == count)
1624 			break;
1625 	}
1626 
1627 	return i;
1628 }
1629 
i915_gem_record_fences(struct drm_device * dev,struct drm_i915_error_state * error)1630 static void i915_gem_record_fences(struct drm_device *dev,
1631 				   struct drm_i915_error_state *error)
1632 {
1633 	struct drm_i915_private *dev_priv = dev->dev_private;
1634 	int i;
1635 
1636 	/* Fences */
1637 	switch (INTEL_INFO(dev)->gen) {
1638 	case 7:
1639 	case 6:
1640 		for (i = 0; i < dev_priv->num_fence_regs; i++)
1641 			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1642 		break;
1643 	case 5:
1644 	case 4:
1645 		for (i = 0; i < 16; i++)
1646 			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1647 		break;
1648 	case 3:
1649 		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1650 			for (i = 0; i < 8; i++)
1651 				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1652 	case 2:
1653 		for (i = 0; i < 8; i++)
1654 			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1655 		break;
1656 
1657 	default:
1658 		BUG();
1659 	}
1660 }
1661 
1662 static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private * dev_priv,struct intel_ring_buffer * ring)1663 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1664 			     struct intel_ring_buffer *ring)
1665 {
1666 	struct drm_i915_gem_object *obj;
1667 	u32 seqno;
1668 
1669 	if (!ring->get_seqno)
1670 		return NULL;
1671 
1672 	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1673 		u32 acthd = I915_READ(ACTHD);
1674 
1675 		if (WARN_ON(ring->id != RCS))
1676 			return NULL;
1677 
1678 		obj = ring->private;
1679 		if (acthd >= obj->gtt_offset &&
1680 		    acthd < obj->gtt_offset + obj->base.size)
1681 			return i915_error_object_create(dev_priv, obj);
1682 	}
1683 
1684 	seqno = ring->get_seqno(ring, false);
1685 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1686 		if (obj->ring != ring)
1687 			continue;
1688 
1689 		if (i915_seqno_passed(seqno, obj->last_read_seqno))
1690 			continue;
1691 
1692 		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1693 			continue;
1694 
1695 		/* We need to copy these to an anonymous buffer as the simplest
1696 		 * method to avoid being overwritten by userspace.
1697 		 */
1698 		return i915_error_object_create(dev_priv, obj);
1699 	}
1700 
1701 	return NULL;
1702 }
1703 
i915_record_ring_state(struct drm_device * dev,struct drm_i915_error_state * error,struct intel_ring_buffer * ring)1704 static void i915_record_ring_state(struct drm_device *dev,
1705 				   struct drm_i915_error_state *error,
1706 				   struct intel_ring_buffer *ring)
1707 {
1708 	struct drm_i915_private *dev_priv = dev->dev_private;
1709 
1710 	if (INTEL_INFO(dev)->gen >= 6) {
1711 		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1712 		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1713 		error->semaphore_mboxes[ring->id][0]
1714 			= I915_READ(RING_SYNC_0(ring->mmio_base));
1715 		error->semaphore_mboxes[ring->id][1]
1716 			= I915_READ(RING_SYNC_1(ring->mmio_base));
1717 		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1718 		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1719 	}
1720 
1721 	if (INTEL_INFO(dev)->gen >= 4) {
1722 		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1723 		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1724 		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1725 		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1726 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1727 		if (ring->id == RCS)
1728 			error->bbaddr = I915_READ64(BB_ADDR);
1729 	} else {
1730 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1731 		error->ipeir[ring->id] = I915_READ(IPEIR);
1732 		error->ipehr[ring->id] = I915_READ(IPEHR);
1733 		error->instdone[ring->id] = I915_READ(INSTDONE);
1734 	}
1735 
1736 	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1737 	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1738 	error->seqno[ring->id] = ring->get_seqno(ring, false);
1739 	error->acthd[ring->id] = intel_ring_get_active_head(ring);
1740 	error->head[ring->id] = I915_READ_HEAD(ring);
1741 	error->tail[ring->id] = I915_READ_TAIL(ring);
1742 	error->ctl[ring->id] = I915_READ_CTL(ring);
1743 
1744 	error->cpu_ring_head[ring->id] = ring->head;
1745 	error->cpu_ring_tail[ring->id] = ring->tail;
1746 }
1747 
1748 
i915_gem_record_active_context(struct intel_ring_buffer * ring,struct drm_i915_error_state * error,struct drm_i915_error_ring * ering)1749 static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1750 					   struct drm_i915_error_state *error,
1751 					   struct drm_i915_error_ring *ering)
1752 {
1753 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1754 	struct drm_i915_gem_object *obj;
1755 
1756 	/* Currently render ring is the only HW context user */
1757 	if (ring->id != RCS || !error->ccid)
1758 		return;
1759 
1760 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1761 		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1762 			ering->ctx = i915_error_object_create_sized(dev_priv,
1763 								    obj, 1);
1764 		}
1765 	}
1766 }
1767 
i915_gem_record_rings(struct drm_device * dev,struct drm_i915_error_state * error)1768 static void i915_gem_record_rings(struct drm_device *dev,
1769 				  struct drm_i915_error_state *error)
1770 {
1771 	struct drm_i915_private *dev_priv = dev->dev_private;
1772 	struct intel_ring_buffer *ring;
1773 	struct drm_i915_gem_request *request;
1774 	int i, count;
1775 
1776 	for_each_ring(ring, dev_priv, i) {
1777 		i915_record_ring_state(dev, error, ring);
1778 
1779 		error->ring[i].batchbuffer =
1780 			i915_error_first_batchbuffer(dev_priv, ring);
1781 
1782 		error->ring[i].ringbuffer =
1783 			i915_error_object_create(dev_priv, ring->obj);
1784 
1785 
1786 		i915_gem_record_active_context(ring, error, &error->ring[i]);
1787 
1788 		count = 0;
1789 		list_for_each_entry(request, &ring->request_list, list)
1790 			count++;
1791 
1792 		error->ring[i].num_requests = count;
1793 		error->ring[i].requests =
1794 			kmalloc(count*sizeof(struct drm_i915_error_request),
1795 				GFP_ATOMIC);
1796 		if (error->ring[i].requests == NULL) {
1797 			error->ring[i].num_requests = 0;
1798 			continue;
1799 		}
1800 
1801 		count = 0;
1802 		list_for_each_entry(request, &ring->request_list, list) {
1803 			struct drm_i915_error_request *erq;
1804 
1805 			erq = &error->ring[i].requests[count++];
1806 			erq->seqno = request->seqno;
1807 			erq->jiffies = request->emitted_jiffies;
1808 			erq->tail = request->tail;
1809 		}
1810 	}
1811 }
1812 
1813 /**
1814  * i915_capture_error_state - capture an error record for later analysis
1815  * @dev: drm device
1816  *
1817  * Should be called when an error is detected (either a hang or an error
1818  * interrupt) to capture error state from the time of the error.  Fills
1819  * out a structure which becomes available in debugfs for user level tools
1820  * to pick up.
1821  */
i915_capture_error_state(struct drm_device * dev)1822 static void i915_capture_error_state(struct drm_device *dev)
1823 {
1824 	struct drm_i915_private *dev_priv = dev->dev_private;
1825 	struct drm_i915_gem_object *obj;
1826 	struct drm_i915_error_state *error;
1827 	unsigned long flags;
1828 	int i, pipe;
1829 
1830 	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1831 	error = dev_priv->gpu_error.first_error;
1832 	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1833 	if (error)
1834 		return;
1835 
1836 	/* Account for pipe specific data like PIPE*STAT */
1837 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
1838 	if (!error) {
1839 		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1840 		return;
1841 	}
1842 
1843 	DRM_INFO("capturing error event; look for more information in "
1844 		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
1845 		dev->primary->index);
1846 
1847 	kref_init(&error->ref);
1848 	error->eir = I915_READ(EIR);
1849 	error->pgtbl_er = I915_READ(PGTBL_ER);
1850 	if (HAS_HW_CONTEXTS(dev))
1851 		error->ccid = I915_READ(CCID);
1852 
1853 	if (HAS_PCH_SPLIT(dev))
1854 		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1855 	else if (IS_VALLEYVIEW(dev))
1856 		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1857 	else if (IS_GEN2(dev))
1858 		error->ier = I915_READ16(IER);
1859 	else
1860 		error->ier = I915_READ(IER);
1861 
1862 	if (INTEL_INFO(dev)->gen >= 6)
1863 		error->derrmr = I915_READ(DERRMR);
1864 
1865 	if (IS_VALLEYVIEW(dev))
1866 		error->forcewake = I915_READ(FORCEWAKE_VLV);
1867 	else if (INTEL_INFO(dev)->gen >= 7)
1868 		error->forcewake = I915_READ(FORCEWAKE_MT);
1869 	else if (INTEL_INFO(dev)->gen == 6)
1870 		error->forcewake = I915_READ(FORCEWAKE);
1871 
1872 	if (!HAS_PCH_SPLIT(dev))
1873 		for_each_pipe(pipe)
1874 			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1875 
1876 	if (INTEL_INFO(dev)->gen >= 6) {
1877 		error->error = I915_READ(ERROR_GEN6);
1878 		error->done_reg = I915_READ(DONE_REG);
1879 	}
1880 
1881 	if (INTEL_INFO(dev)->gen == 7)
1882 		error->err_int = I915_READ(GEN7_ERR_INT);
1883 
1884 	i915_get_extra_instdone(dev, error->extra_instdone);
1885 
1886 	i915_gem_record_fences(dev, error);
1887 	i915_gem_record_rings(dev, error);
1888 
1889 	/* Record buffers on the active and pinned lists. */
1890 	error->active_bo = NULL;
1891 	error->pinned_bo = NULL;
1892 
1893 	i = 0;
1894 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1895 		i++;
1896 	error->active_bo_count = i;
1897 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1898 		if (obj->pin_count)
1899 		i++;
1900 	error->pinned_bo_count = i - error->active_bo_count;
1901 
1902 	error->active_bo = NULL;
1903 	error->pinned_bo = NULL;
1904 	if (i) {
1905 		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1906 					   GFP_ATOMIC);
1907 		if (error->active_bo)
1908 			error->pinned_bo =
1909 				error->active_bo + error->active_bo_count;
1910 	}
1911 
1912 	if (error->active_bo)
1913 		error->active_bo_count =
1914 			capture_active_bo(error->active_bo,
1915 					  error->active_bo_count,
1916 					  &dev_priv->mm.active_list);
1917 
1918 	if (error->pinned_bo)
1919 		error->pinned_bo_count =
1920 			capture_pinned_bo(error->pinned_bo,
1921 					  error->pinned_bo_count,
1922 					  &dev_priv->mm.bound_list);
1923 
1924 	do_gettimeofday(&error->time);
1925 
1926 	error->overlay = intel_overlay_capture_error_state(dev);
1927 	error->display = intel_display_capture_error_state(dev);
1928 
1929 	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1930 	if (dev_priv->gpu_error.first_error == NULL) {
1931 		dev_priv->gpu_error.first_error = error;
1932 		error = NULL;
1933 	}
1934 	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1935 
1936 	if (error)
1937 		i915_error_state_free(&error->ref);
1938 }
1939 
i915_destroy_error_state(struct drm_device * dev)1940 void i915_destroy_error_state(struct drm_device *dev)
1941 {
1942 	struct drm_i915_private *dev_priv = dev->dev_private;
1943 	struct drm_i915_error_state *error;
1944 	unsigned long flags;
1945 
1946 	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1947 	error = dev_priv->gpu_error.first_error;
1948 	dev_priv->gpu_error.first_error = NULL;
1949 	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1950 
1951 	if (error)
1952 		kref_put(&error->ref, i915_error_state_free);
1953 }
1954 #else
1955 #define i915_capture_error_state(x)
1956 #endif
1957 
i915_report_and_clear_eir(struct drm_device * dev)1958 static void i915_report_and_clear_eir(struct drm_device *dev)
1959 {
1960 	struct drm_i915_private *dev_priv = dev->dev_private;
1961 	uint32_t instdone[I915_NUM_INSTDONE_REG];
1962 	u32 eir = I915_READ(EIR);
1963 	int pipe;
1964 
1965 	if (!eir)
1966 		return;
1967 
1968 	DRM_ERROR("render error detected, EIR: 0x%08x\n", eir);
1969 
1970 	i915_get_extra_instdone(dev, instdone);
1971 
1972 	if (IS_G4X(dev)) {
1973 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1974 			u32 ipeir = I915_READ(IPEIR_I965);
1975 
1976 			DRM_DEBUG("  IPEIR: 0x%08x\n",
1977 			       I915_READ(IPEIR_I965));
1978 			DRM_DEBUG("  IPEHR: 0x%08x\n",
1979 			       I915_READ(IPEHR_I965));
1980 			DRM_DEBUG("  INSTDONE: 0x%08x\n",
1981 			       I915_READ(INSTDONE_I965));
1982 			DRM_DEBUG("  INSTPS: 0x%08x\n",
1983 			       I915_READ(INSTPS));
1984 			DRM_DEBUG("  INSTDONE1: 0x%08x\n",
1985 			       I915_READ(INSTDONE1));
1986 			DRM_DEBUG("  ACTHD: 0x%08x\n",
1987 			       I915_READ(ACTHD_I965));
1988 			I915_WRITE(IPEIR_I965, ipeir);
1989 			POSTING_READ(IPEIR_I965);
1990 		}
1991 		if (eir & GM45_ERROR_PAGE_TABLE) {
1992 			u32 pgtbl_err = I915_READ(PGTBL_ER);
1993 			DRM_DEBUG("page table error\n");
1994 			DRM_DEBUG("  PGTBL_ER: 0x%08x\n",
1995 			       pgtbl_err);
1996 			I915_WRITE(PGTBL_ER, pgtbl_err);
1997 			POSTING_READ(PGTBL_ER);
1998 		}
1999 	}
2000 
2001 	if (!IS_GEN2(dev)) {
2002 		if (eir & I915_ERROR_PAGE_TABLE) {
2003 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2004 			DRM_DEBUG("page table error\n");
2005 			DRM_DEBUG("  PGTBL_ER: 0x%08x\n",
2006 			       pgtbl_err);
2007 			I915_WRITE(PGTBL_ER, pgtbl_err);
2008 			POSTING_READ(PGTBL_ER);
2009 		}
2010 	}
2011 
2012 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2013 		DRM_DEBUG("memory refresh error:\n");
2014 		for_each_pipe(pipe)
2015 			DRM_DEBUG("pipe %c stat: 0x%08x\n",
2016 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2017 		/* pipestat has already been acked */
2018 	}
2019 	if (eir & I915_ERROR_INSTRUCTION) {
2020 		DRM_DEBUG("instruction error\n");
2021 		DRM_DEBUG("  INSTPM: 0x%08x\n",
2022 		       I915_READ(INSTPM));
2023 		if (INTEL_INFO(dev)->gen < 4) {
2024 			u32 ipeir = I915_READ(IPEIR);
2025 
2026 			DRM_DEBUG("  IPEIR: 0x%08x\n",
2027 			       I915_READ(IPEIR));
2028 			DRM_DEBUG("  IPEHR: 0x%08x\n",
2029 			       I915_READ(IPEHR));
2030 			DRM_DEBUG("  INSTDONE: 0x%08x\n",
2031 			       I915_READ(INSTDONE));
2032 			DRM_DEBUG("  ACTHD: 0x%08x\n",
2033 			       I915_READ(ACTHD));
2034 			I915_WRITE(IPEIR, ipeir);
2035 			POSTING_READ(IPEIR);
2036 		} else {
2037 			u32 ipeir = I915_READ(IPEIR_I965);
2038 
2039 			DRM_DEBUG("  IPEIR: 0x%08x\n",
2040 			       I915_READ(IPEIR_I965));
2041 			DRM_DEBUG("  IPEHR: 0x%08x\n",
2042 			       I915_READ(IPEHR_I965));
2043 			DRM_DEBUG("  INSTDONE: 0x%08x\n",
2044 			       I915_READ(INSTDONE_I965));
2045 			DRM_DEBUG("  INSTPS: 0x%08x\n",
2046 			       I915_READ(INSTPS));
2047 			DRM_DEBUG("  INSTDONE1: 0x%08x\n",
2048 			       I915_READ(INSTDONE1));
2049 			DRM_DEBUG("  ACTHD: 0x%08x\n",
2050 			       I915_READ(ACTHD_I965));
2051 			I915_WRITE(IPEIR_I965, ipeir);
2052 			POSTING_READ(IPEIR_I965);
2053 		}
2054 	}
2055 
2056 	I915_WRITE(EIR, eir);
2057 	POSTING_READ(EIR);
2058 	eir = I915_READ(EIR);
2059 	if (eir) {
2060 		/*
2061 		 * some errors might have become stuck,
2062 		 * mask them.
2063 		 */
2064 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2065 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2066 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2067 	}
2068 }
2069 
2070 /**
2071  * i915_handle_error - handle an error interrupt
2072  * @dev: drm device
2073  *
2074  * Do some basic checking of regsiter state at error interrupt time and
2075  * dump it to the syslog.  Also call i915_capture_error_state() to make
2076  * sure we get a record and make it available in debugfs.  Fire a uevent
2077  * so userspace knows something bad happened (should trigger collection
2078  * of a ring dump etc.).
2079  */
i915_handle_error(struct drm_device * dev,bool wedged)2080 void i915_handle_error(struct drm_device *dev, bool wedged)
2081 {
2082 	struct drm_i915_private *dev_priv = dev->dev_private;
2083 	struct intel_ring_buffer *ring;
2084 	int i;
2085 
2086 	i915_capture_error_state(dev);
2087 	i915_report_and_clear_eir(dev);
2088 
2089 	if (wedged) {
2090 		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2091 				&dev_priv->gpu_error.reset_counter);
2092 
2093 		/*
2094 		 * Wakeup waiting processes so they don't hang
2095 		 */
2096 		for_each_ring(ring, dev_priv, i)
2097 			wake_up_all(&ring->irq_queue);
2098 	}
2099 
2100 	(void) queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
2101 }
2102 
i915_pageflip_stall_check(struct drm_device * dev,int pipe)2103 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2104 {
2105 	drm_i915_private_t *dev_priv = dev->dev_private;
2106 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2107 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2108 	struct drm_i915_gem_object *obj;
2109 	struct intel_unpin_work *work;
2110 	unsigned long flags;
2111 	bool stall_detected;
2112 
2113 	/* Ignore early vblank irqs */
2114 	if (intel_crtc == NULL)
2115 		return;
2116 
2117 	spin_lock_irqsave(&dev->event_lock, flags);
2118 	work = intel_crtc->unpin_work;
2119 
2120 	if (work == NULL ||
2121 	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2122 	    !work->enable_stall_check) {
2123 		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
2124 		spin_unlock_irqrestore(&dev->event_lock, flags);
2125 		return;
2126 	}
2127 
2128 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2129 	obj = work->pending_flip_obj;
2130 	if (INTEL_INFO(dev)->gen >= 4) {
2131 		int dspsurf = DSPSURF(intel_crtc->plane);
2132 		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2133 					obj->gtt_offset;
2134 	} else {
2135 		int dspaddr = DSPADDR(intel_crtc->plane);
2136 		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
2137 							crtc->y * crtc->fb->pitches[0] +
2138 							crtc->x * crtc->fb->bits_per_pixel/8);
2139 	}
2140 
2141 	spin_unlock_irqrestore(&dev->event_lock, flags);
2142 
2143 	if (stall_detected) {
2144 		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2145 		intel_prepare_page_flip(dev, intel_crtc->plane);
2146 	}
2147 }
2148 
2149 /* Called from drm generic code, passed 'crtc' which
2150  * we use as a pipe index
2151  */
i915_enable_vblank(struct drm_device * dev,int pipe)2152 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2153 {
2154 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2155 	unsigned long irqflags;
2156 
2157 	if (!i915_pipe_enabled(dev, pipe))
2158 		return -EINVAL;
2159 
2160 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2161 	if (INTEL_INFO(dev)->gen >= 4)
2162 		i915_enable_pipestat(dev_priv, pipe,
2163 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
2164 	else
2165 		i915_enable_pipestat(dev_priv, pipe,
2166 				     PIPE_VBLANK_INTERRUPT_ENABLE);
2167 
2168 	/* maintain vblank delivery even in deep C-states */
2169 	if (dev_priv->info->gen == 3)
2170 		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
2171 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2172 
2173 	return 0;
2174 }
2175 
ironlake_enable_vblank(struct drm_device * dev,int pipe)2176 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2177 {
2178 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2179 	unsigned long irqflags;
2180 
2181 	if (!i915_pipe_enabled(dev, pipe))
2182 		return -EINVAL;
2183 
2184 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2185 	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
2186 				    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
2187 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2188 
2189 	return 0;
2190 }
2191 
ivybridge_enable_vblank(struct drm_device * dev,int pipe)2192 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
2193 {
2194 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2195 	unsigned long irqflags;
2196 
2197 	if (!i915_pipe_enabled(dev, pipe))
2198 		return -EINVAL;
2199 
2200 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2201 	ironlake_enable_display_irq(dev_priv,
2202 				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
2203 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2204 
2205 	return 0;
2206 }
2207 
valleyview_enable_vblank(struct drm_device * dev,int pipe)2208 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2209 {
2210 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2211 	unsigned long irqflags;
2212 	u32 imr;
2213 
2214 	if (!i915_pipe_enabled(dev, pipe))
2215 		return -EINVAL;
2216 
2217 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2218 	imr = I915_READ(VLV_IMR);
2219 	if (pipe == 0)
2220 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2221 	else
2222 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2223 	I915_WRITE(VLV_IMR, imr);
2224 	i915_enable_pipestat(dev_priv, pipe,
2225 			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
2226 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2227 
2228 	return 0;
2229 }
2230 
2231 /* Called from drm generic code, passed 'crtc' which
2232  * we use as a pipe index
2233  */
i915_disable_vblank(struct drm_device * dev,int pipe)2234 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2235 {
2236 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2237 	unsigned long irqflags;
2238 
2239 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2240 	if (dev_priv->info->gen == 3)
2241 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2242 
2243 	i915_disable_pipestat(dev_priv, pipe,
2244 			      PIPE_VBLANK_INTERRUPT_ENABLE |
2245 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
2246 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2247 }
2248 
ironlake_disable_vblank(struct drm_device * dev,int pipe)2249 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2250 {
2251 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2252 	unsigned long irqflags;
2253 
2254 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2255 	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
2256 				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2257 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2258 }
2259 
ivybridge_disable_vblank(struct drm_device * dev,int pipe)2260 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
2261 {
2262 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2263 	unsigned long irqflags;
2264 
2265 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2266 	ironlake_disable_display_irq(dev_priv,
2267 				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
2268 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2269 }
2270 
valleyview_disable_vblank(struct drm_device * dev,int pipe)2271 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2272 {
2273 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2274 	unsigned long irqflags;
2275 	u32 imr;
2276 
2277 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2278 	i915_disable_pipestat(dev_priv, pipe,
2279 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
2280 	imr = I915_READ(VLV_IMR);
2281 	if (pipe == 0)
2282 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2283 	else
2284 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2285 	I915_WRITE(VLV_IMR, imr);
2286 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2287 }
2288 
2289 static u32
ring_last_seqno(struct intel_ring_buffer * ring)2290 ring_last_seqno(struct intel_ring_buffer *ring)
2291 {
2292 	struct drm_i915_gem_request *last_req;
2293 	last_req = list_entry(ring->request_list.prev,
2294 			  struct drm_i915_gem_request, list);
2295 	return (last_req->seqno);
2296 }
2297 
2298 static bool
ring_idle(struct intel_ring_buffer * ring,u32 seqno)2299 ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2300 {
2301 	return (list_empty(&ring->request_list) ||
2302 		i915_seqno_passed(seqno, ring_last_seqno(ring)));
2303 }
2304 
2305 static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer * ring,u32 * seqno)2306 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2307 {
2308 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2309 	u32 cmd, ipehr, acthd, acthd_min;
2310 	u32 *tmp;
2311 
2312 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2313 	if ((ipehr & ~(0x3 << 16)) !=
2314 	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2315 		return NULL;
2316 
2317 	/* ACTHD is likely pointing to the dword after the actual command,
2318 	 * so scan backwards until we find the MBOX.
2319 	 */
2320 	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
2321 	acthd_min = max((int)acthd - 3 * 4, 0);
2322 	do {
2323 		tmp = (u32 *)((intptr_t)ring->virtual_start + acthd);
2324 		cmd = *tmp;
2325 		if (cmd == ipehr)
2326 			break;
2327 
2328 		acthd -= 4;
2329 		if (acthd < acthd_min)
2330 			return NULL;
2331 	} while (1);
2332 
2333 	tmp = (u32 *)((intptr_t)ring->virtual_start + acthd + 4 );
2334 	*seqno = *tmp+1;
2335 	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2336 }
2337 
semaphore_passed(struct intel_ring_buffer * ring)2338 static int semaphore_passed(struct intel_ring_buffer *ring)
2339 {
2340 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2341 	struct intel_ring_buffer *signaller;
2342 	u32 seqno, ctl;
2343 
2344 	ring->hangcheck.deadlock = true;
2345 
2346 	signaller = semaphore_waits_for(ring, &seqno);
2347 	if (signaller == NULL || signaller->hangcheck.deadlock)
2348 		return -1;
2349 
2350 	/* cursory check for an unkickable deadlock */
2351 	ctl = I915_READ_CTL(signaller);
2352 	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2353 		return -1;
2354 
2355 	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2356 }
2357 
semaphore_clear_deadlocks(struct drm_i915_private * dev_priv)2358 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2359 {
2360 	struct intel_ring_buffer *ring;
2361 	int i;
2362 
2363 	for_each_ring(ring, dev_priv, i)
2364 		ring->hangcheck.deadlock = false;
2365 }
2366 
2367 static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer * ring,u32 acthd)2368 ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2369 {
2370 	struct drm_device *dev = ring->dev;
2371 	struct drm_i915_private *dev_priv = dev->dev_private;
2372 	u32 tmp;
2373 
2374 	if (ring->hangcheck.acthd != acthd)
2375 		return active;
2376 
2377 	if (IS_GEN2(dev))
2378 		return hung;
2379 
2380 	/* Is the chip hanging on a WAIT_FOR_EVENT?
2381 	 * If so we can simply poke the RB_WAIT bit
2382 	 * and break the hang. This should work on
2383 	 * all but the second generation chipsets.
2384 	 */
2385 	tmp = I915_READ_CTL(ring);
2386 	if (tmp & RING_WAIT) {
2387 		DRM_ERROR("Kicking stuck wait on %s\n",
2388 			  ring->name);
2389 		I915_WRITE_CTL(ring, tmp);
2390 		return kick;
2391 	}
2392 
2393 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2394 		switch (semaphore_passed(ring)) {
2395 		default:
2396 			return hung;
2397 		case 1:
2398 			DRM_ERROR("Kicking stuck semaphore on %s\n",
2399 				  ring->name);
2400 			I915_WRITE_CTL(ring, tmp);
2401 			return kick;
2402 		case 0:
2403 			return wait;
2404 		}
2405 	}
2406 
2407 	return hung;
2408 }
2409 
2410 /**
2411  * This is called when the chip hasn't reported back with completed
2412  * batchbuffers in a long time. The first time this is called we simply record
2413  * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
2414  * Further, acthd is inspected to see if the ring is stuck. On stuck case
2415  * we kick the ring. If we see no progress on three subsequent calls
2416  * again, we assume the chip is wedged and try to fix it.
2417  */
i915_hangcheck_elapsed(void * data)2418 void i915_hangcheck_elapsed(void* data)
2419 {
2420 	struct drm_device *dev = (struct drm_device *)data;
2421 	drm_i915_private_t *dev_priv = dev->dev_private;
2422 	struct intel_ring_buffer *ring;
2423 	int i;
2424 	int busy_count = 0, rings_hung = 0;
2425 	bool stuck[I915_NUM_RINGS] = { 0 };
2426 #define BUSY 1
2427 #define KICK 5
2428 #define HUNG 20
2429 #define FIRE 30
2430 
2431 	if (!i915_enable_hangcheck)
2432 		return;
2433 
2434 	for_each_ring(ring, dev_priv, i) {
2435 		u32 seqno, acthd;
2436 		bool busy = true;
2437 
2438 		semaphore_clear_deadlocks(dev_priv);
2439 
2440 		seqno = ring->get_seqno(ring, false);
2441 		acthd = intel_ring_get_active_head(ring);
2442 
2443 		if (ring->hangcheck.seqno == seqno) {
2444 			if (ring_idle(ring, seqno)) {
2445 				if (mutex_is_locked(&ring->irq_queue.lock)) {
2446 					/* Issue a wake-up to catch stuck h/w. */
2447 					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2448 						  ring->name);
2449 					wake_up_all(&ring->irq_queue);
2450 					ring->hangcheck.score += HUNG;
2451 				} else
2452 					busy = false;
2453 			} else {
2454 				int score = 0;
2455 
2456 				/* We always increment the hangcheck score
2457 				 * if the ring is busy and still processing
2458 				 * the same request, so that no single request
2459 				 * can run indefinitely (such as a chain of
2460 				 * batches). The only time we do not increment
2461 				 * the hangcheck score on this ring, if this
2462 				 * ring is in a legitimate wait for another
2463 				 * ring. In that case the waiting ring is a
2464 				 * victim and we want to be sure we catch the
2465 				 * right culprit. Then every time we do kick
2466 				 * the ring, add a small increment to the
2467 				 * score so that we can catch a batch that is
2468 				 * being repeatedly kicked and so responsible
2469 				 * for stalling the machine.
2470 				 */
2471 				ring->hangcheck.action = ring_stuck(ring,
2472 								    acthd);
2473 
2474 				switch (ring->hangcheck.action) {
2475 				case wait:
2476 					score = 0;
2477 					break;
2478 				case active:
2479 					score = BUSY;
2480 					break;
2481 				case kick:
2482 					score = KICK;
2483 					break;
2484 				case hung:
2485 					score = HUNG;
2486 					stuck[i] = true;
2487 					break;
2488 				}
2489 				ring->hangcheck.score += score;
2490 			}
2491 		} else {
2492 			/* Gradually reduce the count so that we catch DoS
2493 			 * attempts across multiple batches.
2494 			 */
2495 			if (ring->hangcheck.score > 0)
2496 				ring->hangcheck.score--;
2497 		}
2498 
2499 		ring->hangcheck.seqno = seqno;
2500 		ring->hangcheck.acthd = acthd;
2501 		busy_count += busy;
2502 	}
2503 
2504 	for_each_ring(ring, dev_priv, i) {
2505 		if (ring->hangcheck.score > FIRE) {
2506 			DRM_ERROR("%s on %s\n",
2507 				  stuck[i] ? "stuck" : "no progress",
2508 				  ring->name);
2509 			rings_hung++;
2510 		}
2511 	}
2512 
2513 	if (rings_hung) {
2514 		i915_handle_error(dev, true);
2515 		return;
2516 	}
2517 
2518 	if (busy_count)
2519 		/* Reset timer case chip hangs without another request
2520 		 * being added */
2521 		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2522 			msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
2523 }
2524 
ibx_irq_preinstall(struct drm_device * dev)2525 static void ibx_irq_preinstall(struct drm_device *dev)
2526 {
2527 	struct drm_i915_private *dev_priv = dev->dev_private;
2528 
2529 	if (HAS_PCH_NOP(dev))
2530 		return;
2531 
2532 	/* south display irq */
2533 	I915_WRITE(SDEIMR, 0xffffffff);
2534 	/*
2535 	 * SDEIER is also touched by the interrupt handler to work around missed
2536 	 * PCH interrupts. Hence we can't update it after the interrupt handler
2537 	 * is enabled - instead we unconditionally enable all PCH interrupt
2538 	 * sources here, but then only unmask them as needed with SDEIMR.
2539 	 */
2540 	I915_WRITE(SDEIER, 0xffffffff);
2541 	POSTING_READ(SDEIER);
2542 }
2543 
2544 /* drm_dma.h hooks
2545 */
ironlake_irq_preinstall(struct drm_device * dev)2546 static void ironlake_irq_preinstall(struct drm_device *dev)
2547 {
2548 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2549 
2550 	atomic_set(&dev_priv->irq_received, 0);
2551 
2552 	I915_WRITE(HWSTAM, 0xeffe);
2553 
2554 	/* XXX hotplug from PCH */
2555 
2556 	I915_WRITE(DEIMR, 0xffffffff);
2557 	I915_WRITE(DEIER, 0x0);
2558 	POSTING_READ(DEIER);
2559 
2560 	/* and GT */
2561 	I915_WRITE(GTIMR, 0xffffffff);
2562 	I915_WRITE(GTIER, 0x0);
2563 	POSTING_READ(GTIER);
2564 
2565 	ibx_irq_preinstall(dev);
2566 }
2567 
ivybridge_irq_preinstall(struct drm_device * dev)2568 static void ivybridge_irq_preinstall(struct drm_device *dev)
2569 {
2570 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2571 
2572 	atomic_set(&dev_priv->irq_received, 0);
2573 
2574 	I915_WRITE(HWSTAM, 0xeffe);
2575 
2576 	/* XXX hotplug from PCH */
2577 
2578 	I915_WRITE(DEIMR, 0xffffffff);
2579 	I915_WRITE(DEIER, 0x0);
2580 	POSTING_READ(DEIER);
2581 
2582 	/* and GT */
2583 	I915_WRITE(GTIMR, 0xffffffff);
2584 	I915_WRITE(GTIER, 0x0);
2585 	POSTING_READ(GTIER);
2586 
2587 	/* Power management */
2588 	I915_WRITE(GEN6_PMIMR, 0xffffffff);
2589 	I915_WRITE(GEN6_PMIER, 0x0);
2590 	POSTING_READ(GEN6_PMIER);
2591 
2592 	ibx_irq_preinstall(dev);
2593 }
2594 
valleyview_irq_preinstall(struct drm_device * dev)2595 static void valleyview_irq_preinstall(struct drm_device *dev)
2596 {
2597 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2598 	int pipe;
2599 
2600 	atomic_set(&dev_priv->irq_received, 0);
2601 
2602 	/* VLV magic */
2603 	I915_WRITE(VLV_IMR, 0);
2604 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2605 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2606 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2607 
2608 	/* and GT */
2609 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2610 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2611 	I915_WRITE(GTIMR, 0xffffffff);
2612 	I915_WRITE(GTIER, 0x0);
2613 	POSTING_READ(GTIER);
2614 
2615 	I915_WRITE(DPINVGTT, 0xff);
2616 
2617 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2618 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2619 	for_each_pipe(pipe)
2620 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2621 	I915_WRITE(VLV_IIR, 0xffffffff);
2622 	I915_WRITE(VLV_IMR, 0xffffffff);
2623 	I915_WRITE(VLV_IER, 0x0);
2624 	POSTING_READ(VLV_IER);
2625 }
2626 
ibx_hpd_irq_setup(struct drm_device * dev)2627 static void ibx_hpd_irq_setup(struct drm_device *dev)
2628 {
2629 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2630 	struct drm_mode_config *mode_config = &dev->mode_config;
2631 	struct intel_encoder *intel_encoder;
2632 	u32 mask = ~I915_READ(SDEIMR);
2633 	u32 hotplug;
2634 
2635 	if (HAS_PCH_IBX(dev)) {
2636 		mask &= ~SDE_HOTPLUG_MASK;
2637 		list_for_each_entry(intel_encoder, struct intel_encoder, &mode_config->encoder_list, base.head)
2638 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2639 				mask |= hpd_ibx[intel_encoder->hpd_pin];
2640 	} else {
2641 		mask &= ~SDE_HOTPLUG_MASK_CPT;
2642 		list_for_each_entry(intel_encoder, struct intel_encoder, &mode_config->encoder_list, base.head)
2643 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2644 				mask |= hpd_cpt[intel_encoder->hpd_pin];
2645 	}
2646 
2647 	I915_WRITE(SDEIMR, ~mask);
2648 
2649 	/*
2650 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
2651 	 * duration to 2ms (which is the minimum in the Display Port spec)
2652 	 *
2653 	 * This register is the same on all known PCH chips.
2654 	 */
2655 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
2656 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2657 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2658 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2659 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2660 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2661 }
2662 
ibx_irq_postinstall(struct drm_device * dev)2663 static void ibx_irq_postinstall(struct drm_device *dev)
2664 {
2665 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2666 	u32 mask;
2667 
2668 	if (HAS_PCH_NOP(dev))
2669 		return;
2670 
2671 	if (HAS_PCH_IBX(dev)) {
2672 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2673 		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2674 	} else {
2675 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2676 
2677 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2678 	}
2679 
2680 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2681 	I915_WRITE(SDEIMR, ~mask);
2682 }
2683 
ironlake_irq_postinstall(struct drm_device * dev)2684 static int ironlake_irq_postinstall(struct drm_device *dev)
2685 {
2686 	unsigned long irqflags;
2687 
2688 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2689 	/* enable kind of interrupts always enabled */
2690 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2691 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2692 			   DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2693 			   DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
2694 	u32 gt_irqs;
2695 
2696 	dev_priv->irq_mask = ~display_mask;
2697 
2698 	/* should always can generate irq */
2699 	I915_WRITE(DEIIR, I915_READ(DEIIR));
2700 	I915_WRITE(DEIMR, dev_priv->irq_mask);
2701 	I915_WRITE(DEIER, display_mask |
2702 			  DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
2703 
2704 	/* LINTED */
2705 	dev_priv->gt_irq_mask = ~0;
2706 
2707 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2708 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2709 
2710 	gt_irqs = GT_RENDER_USER_INTERRUPT;
2711 
2712 	if (IS_GEN6(dev))
2713 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2714 	else
2715 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2716 			   ILK_BSD_USER_INTERRUPT;
2717 
2718 	I915_WRITE(GTIER, gt_irqs);
2719 
2720 	ibx_irq_postinstall(dev);
2721 	POSTING_READ(GTIER);
2722 	POSTING_READ(DEIER);
2723 
2724 	if (IS_IRONLAKE_M(dev)) {
2725 		/* Enable PCU event interrupts
2726 		 *
2727 		 * spinlocking not required here for correctness since interrupt
2728 		 * setup is guaranteed to run in single-threaded context. But we
2729 		 * need it to make the assert_spin_locked happy. */
2730 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2731 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2732 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2733 	}
2734 
2735 	return 0;
2736 }
2737 
ivybridge_irq_postinstall(struct drm_device * dev)2738 static int ivybridge_irq_postinstall(struct drm_device *dev)
2739 {
2740 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2741 	/* enable kind of interrupts always enabled */
2742 	u32 display_mask =
2743 		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2744 		DE_PLANEC_FLIP_DONE_IVB |
2745 		DE_PLANEB_FLIP_DONE_IVB |
2746 		DE_PLANEA_FLIP_DONE_IVB |
2747 		DE_AUX_CHANNEL_A_IVB |
2748 		DE_ERR_INT_IVB;
2749 	u32 pm_irqs = GEN6_PM_RPS_EVENTS;
2750 	u32 gt_irqs;
2751 
2752 	dev_priv->irq_mask = ~display_mask;
2753 
2754 	/* should always can generate irq */
2755 	I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2756 	I915_WRITE(DEIIR, I915_READ(DEIIR));
2757 	I915_WRITE(DEIMR, dev_priv->irq_mask);
2758 	I915_WRITE(DEIER,
2759 		   display_mask |
2760 		   DE_PIPEC_VBLANK_IVB |
2761 		   DE_PIPEB_VBLANK_IVB |
2762 		   DE_PIPEA_VBLANK_IVB);
2763 	POSTING_READ(DEIER);
2764 
2765 	/* LINTED */
2766 	dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2767 
2768 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2769 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2770 
2771 	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2772 		  GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2773 	I915_WRITE(GTIER, gt_irqs);
2774 	POSTING_READ(GTIER);
2775 
2776 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2777 	if (HAS_VEBOX(dev))
2778 		pm_irqs |= PM_VEBOX_USER_INTERRUPT |
2779 			PM_VEBOX_CS_ERROR_INTERRUPT;
2780 
2781 	/* Our enable/disable rps functions may touch these registers so
2782 	 * make sure to set a known state for only the non-RPS bits.
2783 	 * The RMW is extra paranoia since this should be called after being set
2784 	 * to a known state in preinstall.
2785 	 * */
2786 	I915_WRITE(GEN6_PMIMR,
2787 		   (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2788 	I915_WRITE(GEN6_PMIER,
2789 		   (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2790 	POSTING_READ(GEN6_PMIER);
2791 
2792 	ibx_irq_postinstall(dev);
2793 
2794 	return 0;
2795 }
2796 
valleyview_irq_postinstall(struct drm_device * dev)2797 static int valleyview_irq_postinstall(struct drm_device *dev)
2798 {
2799 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2800 	u32 gt_irqs;
2801 	u32 enable_mask;
2802 	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2803 
2804 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2805 	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2806 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2807 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2808 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2809 
2810 	/*
2811 	 *Leave vblank interrupts masked initially.  enable/disable will
2812 	 * toggle them based on usage.
2813 	 */
2814 	dev_priv->irq_mask = (~enable_mask) |
2815 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2816 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2817 
2818 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2819 	POSTING_READ(PORT_HOTPLUG_EN);
2820 
2821 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2822 	I915_WRITE(VLV_IER, enable_mask);
2823 	I915_WRITE(VLV_IIR, 0xffffffff);
2824 	I915_WRITE(PIPESTAT(0), 0xffff);
2825 	I915_WRITE(PIPESTAT(1), 0xffff);
2826 	POSTING_READ(VLV_IER);
2827 
2828 	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2829 	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2830 	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2831 
2832 	I915_WRITE(VLV_IIR, 0xffffffff);
2833 	I915_WRITE(VLV_IIR, 0xffffffff);
2834 
2835 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2836 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2837 
2838 	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2839 		GT_BLT_USER_INTERRUPT;
2840 	I915_WRITE(GTIER, gt_irqs);
2841 	POSTING_READ(GTIER);
2842 
2843 	/* ack & enable invalid PTE error interrupts */
2844 #if 0 /* FIXME: add support to irq handler for checking these bits */
2845 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2846 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2847 #endif
2848 
2849 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2850 
2851 	return 0;
2852 }
2853 
valleyview_irq_uninstall(struct drm_device * dev)2854 static void valleyview_irq_uninstall(struct drm_device *dev)
2855 {
2856 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2857 	int pipe;
2858 
2859 	if (!dev_priv)
2860 		return;
2861 
2862 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
2863 
2864 	for_each_pipe(pipe)
2865 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2866 
2867 	I915_WRITE(HWSTAM, 0xffffffff);
2868 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2869 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2870 	for_each_pipe(pipe)
2871 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2872 	I915_WRITE(VLV_IIR, 0xffffffff);
2873 	I915_WRITE(VLV_IMR, 0xffffffff);
2874 	I915_WRITE(VLV_IER, 0x0);
2875 	POSTING_READ(VLV_IER);
2876 }
2877 
ironlake_irq_uninstall(struct drm_device * dev)2878 static void ironlake_irq_uninstall(struct drm_device *dev)
2879 {
2880 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2881 
2882 	if (!dev_priv)
2883 		return;
2884 
2885 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
2886 
2887 	I915_WRITE(HWSTAM, 0xffffffff);
2888 
2889 	I915_WRITE(DEIMR, 0xffffffff);
2890 	I915_WRITE(DEIER, 0x0);
2891 	I915_WRITE(DEIIR, I915_READ(DEIIR));
2892 	if (IS_GEN7(dev))
2893 		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2894 
2895 	I915_WRITE(GTIMR, 0xffffffff);
2896 	I915_WRITE(GTIER, 0x0);
2897 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2898 
2899 	if (HAS_PCH_NOP(dev))
2900 		return;
2901 
2902 	I915_WRITE(SDEIMR, 0xffffffff);
2903 	I915_WRITE(SDEIER, 0x0);
2904 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2905 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2906 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2907 }
2908 
i8xx_irq_preinstall(struct drm_device * dev)2909 static void i8xx_irq_preinstall(struct drm_device * dev)
2910 {
2911 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2912 	int pipe;
2913 
2914 	atomic_set(&dev_priv->irq_received, 0);
2915 
2916 	for_each_pipe(pipe)
2917 		I915_WRITE(PIPESTAT(pipe), 0);
2918 	I915_WRITE16(IMR, 0xffff);
2919 	I915_WRITE16(IER, 0x0);
2920 	POSTING_READ16(IER);
2921 }
2922 
i8xx_irq_postinstall(struct drm_device * dev)2923 static int i8xx_irq_postinstall(struct drm_device *dev)
2924 {
2925 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2926 
2927 	I915_WRITE16(EMR,
2928 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2929 
2930 	/* Unmask the interrupts that we always want on. */
2931 	/* LINTED */
2932 	dev_priv->irq_mask =
2933 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2934 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2935 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2936 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2937 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2938 	I915_WRITE16(IMR, dev_priv->irq_mask);
2939 
2940 	I915_WRITE16(IER,
2941 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2942 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2943 		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2944 		     I915_USER_INTERRUPT);
2945 	POSTING_READ16(IER);
2946 
2947 	return 0;
2948 }
2949 
2950 /*
2951  * Returns true when a page flip has completed.
2952  */
i8xx_handle_vblank(struct drm_device * dev,int pipe,u16 iir)2953 static bool i8xx_handle_vblank(struct drm_device *dev,
2954 			       int pipe, u16 iir)
2955 {
2956 	drm_i915_private_t *dev_priv = dev->dev_private;
2957 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2958 
2959 	if (!drm_handle_vblank(dev, pipe))
2960 		return false;
2961 
2962 	if ((iir & flip_pending) == 0)
2963 		return false;
2964 
2965 	intel_prepare_page_flip(dev, pipe);
2966 
2967 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
2968 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2969 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2970 	 * the flip is completed (no longer pending). Since this doesn't raise
2971 	 * an interrupt per se, we watch for the change at vblank.
2972 	 */
2973 	if (I915_READ16(ISR) & flip_pending)
2974 		return false;
2975 
2976 	intel_finish_page_flip(dev, pipe);
2977 
2978 	return true;
2979 }
2980 
i8xx_irq_handler(DRM_IRQ_ARGS)2981 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
2982 {
2983 	/* LINTED */
2984 	struct drm_device *dev = (struct drm_device *) arg;
2985 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2986 	u16 iir, new_iir;
2987 	u32 pipe_stats[2] = { 0 };
2988 	unsigned long irqflags;
2989 	int pipe;
2990 	u16 flip_mask =
2991 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2992 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2993 
2994 	atomic_inc(&dev_priv->irq_received);
2995 
2996 	iir = I915_READ16(IIR);
2997 	if (iir == 0)
2998 		return IRQ_NONE;
2999 
3000 	while (iir & ~flip_mask) {
3001 		/* Can't rely on pipestat interrupt bit in iir as it might
3002 		 * have been cleared after the pipestat interrupt was received.
3003 		 * It doesn't set the bit in iir again, but it still produces
3004 		 * interrupts (for non-MSI).
3005 		 */
3006 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3007 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3008 			i915_handle_error(dev, false);
3009 
3010 		for_each_pipe(pipe) {
3011 			int reg = PIPESTAT(pipe);
3012 			pipe_stats[pipe] = I915_READ(reg);
3013 
3014 			/*
3015 			 * Clear the PIPE*STAT regs before the IIR
3016 			 */
3017 			if (pipe_stats[pipe] & 0x8000ffff) {
3018 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3019 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
3020 							 pipe_name(pipe));
3021 				I915_WRITE(reg, pipe_stats[pipe]);
3022 			}
3023 		}
3024 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3025 
3026 		I915_WRITE16(IIR, iir & ~flip_mask);
3027 		new_iir = I915_READ16(IIR); /* Flush posted writes */
3028 
3029 		i915_update_dri1_breadcrumb(dev);
3030 
3031 		if (iir & I915_USER_INTERRUPT)
3032 			notify_ring(dev, &dev_priv->ring[RCS]);
3033 
3034 		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
3035 		    i8xx_handle_vblank(dev, 0, iir))
3036 			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
3037 
3038 		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
3039 		    i8xx_handle_vblank(dev, 1, iir))
3040 			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
3041 
3042 		iir = new_iir;
3043 	}
3044 
3045 	return IRQ_HANDLED;
3046 }
3047 
i8xx_irq_uninstall(struct drm_device * dev)3048 static void i8xx_irq_uninstall(struct drm_device * dev)
3049 {
3050 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3051 	int pipe;
3052 
3053 	for_each_pipe(pipe) {
3054 		/* Clear enable bits; then clear status bits */
3055 		I915_WRITE(PIPESTAT(pipe), 0);
3056 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3057 	}
3058 	I915_WRITE16(IMR, 0xffff);
3059 	I915_WRITE16(IER, 0x0);
3060 	I915_WRITE16(IIR, I915_READ16(IIR));
3061 }
3062 
i915_irq_preinstall(struct drm_device * dev)3063 static void i915_irq_preinstall(struct drm_device * dev)
3064 {
3065 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3066 	int pipe;
3067 
3068 	atomic_set(&dev_priv->irq_received, 0);
3069 
3070 	if (I915_HAS_HOTPLUG(dev)) {
3071 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3072 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3073 	}
3074 
3075 	I915_WRITE16(HWSTAM, 0xeffe);
3076 	for_each_pipe(pipe)
3077 		I915_WRITE(PIPESTAT(pipe), 0);
3078 	I915_WRITE(IMR, 0xffffffff);
3079 	I915_WRITE(IER, 0x0);
3080 	POSTING_READ(IER);
3081 }
3082 
i915_irq_postinstall(struct drm_device * dev)3083 static int i915_irq_postinstall(struct drm_device *dev)
3084 {
3085 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3086 	u32 enable_mask;
3087 
3088 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3089 
3090 	/* Unmask the interrupts that we always want on. */
3091 	/* LINTED */
3092 	dev_priv->irq_mask =
3093 		~(I915_ASLE_INTERRUPT |
3094 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3095 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3096 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3097 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3098 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3099 
3100 	enable_mask =
3101 		I915_ASLE_INTERRUPT |
3102 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3103 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3104 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3105 		I915_USER_INTERRUPT;
3106 
3107 	if (I915_HAS_HOTPLUG(dev)) {
3108 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3109 		POSTING_READ(PORT_HOTPLUG_EN);
3110 
3111 		/* Enable in IER... */
3112 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3113 		/* and unmask in IMR */
3114 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3115 	}
3116 
3117 	I915_WRITE(IMR, dev_priv->irq_mask);
3118 	I915_WRITE(IER, enable_mask);
3119 	POSTING_READ(IER);
3120 
3121 
3122 	return 0;
3123 }
3124 
3125 /*
3126  * Returns true when a page flip has completed.
3127  */
i915_handle_vblank(struct drm_device * dev,int plane,int pipe,u32 iir)3128 static bool i915_handle_vblank(struct drm_device *dev,
3129 			       int plane, int pipe, u32 iir)
3130 {
3131 	drm_i915_private_t *dev_priv = dev->dev_private;
3132 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3133 
3134 	if (!drm_handle_vblank(dev, pipe))
3135 		return false;
3136 
3137 	if ((iir & flip_pending) == 0)
3138 		return false;
3139 
3140 	intel_prepare_page_flip(dev, plane);
3141 
3142 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3143 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3144 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3145 	 * the flip is completed (no longer pending). Since this doesn't raise
3146 	 * an interrupt per se, we watch for the change at vblank.
3147 	 */
3148 	if (I915_READ(ISR) & flip_pending)
3149 		return false;
3150 
3151 	intel_finish_page_flip(dev, pipe);
3152 
3153 	return true;
3154 }
3155 
i915_irq_handler(DRM_IRQ_ARGS)3156 static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
3157 {
3158 	/* LINTED */
3159 	struct drm_device *dev = (struct drm_device *) arg;
3160 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3161 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES] = { 0 };
3162 	unsigned long irqflags;
3163 	u32 flip_mask =
3164 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3165 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3166 	int pipe, ret = IRQ_NONE;
3167 
3168 	atomic_inc(&dev_priv->irq_received);
3169 
3170 	iir = I915_READ(IIR);
3171 	do {
3172 		bool irq_received = (iir & ~flip_mask) != 0;
3173 
3174 		/* Can't rely on pipestat interrupt bit in iir as it might
3175 		 * have been cleared after the pipestat interrupt was received.
3176 		 * It doesn't set the bit in iir again, but it still produces
3177 		 * interrupts (for non-MSI).
3178 		 */
3179 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3180 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3181 			i915_handle_error(dev, false);
3182 
3183 		for_each_pipe(pipe) {
3184 			int reg = PIPESTAT(pipe);
3185 			pipe_stats[pipe] = I915_READ(reg);
3186 
3187 			/* Clear the PIPE*STAT regs before the IIR */
3188 			if (pipe_stats[pipe] & 0x8000ffff) {
3189 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3190 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
3191 							 pipe_name(pipe));
3192 				I915_WRITE(reg, pipe_stats[pipe]);
3193 				irq_received = true;
3194 			}
3195 		}
3196 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3197 
3198 		if (!irq_received)
3199 			break;
3200 
3201 		/* Consume port.  Then clear IIR or we'll miss events */
3202 		if ((I915_HAS_HOTPLUG(dev)) &&
3203 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3204 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3205 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3206 
3207 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3208 				  hotplug_status);
3209 
3210 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3211 
3212 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3213 			POSTING_READ(PORT_HOTPLUG_STAT);
3214 		}
3215 
3216 		I915_WRITE(IIR, iir & ~flip_mask);
3217 		new_iir = I915_READ(IIR); /* Flush posted writes */
3218 
3219 		if (iir & I915_USER_INTERRUPT)
3220 			notify_ring(dev, &dev_priv->ring[RCS]);
3221 
3222 		for_each_pipe(pipe) {
3223 			int plane = pipe;
3224 			if (IS_MOBILE(dev))
3225 				plane = !plane;
3226 
3227 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3228 			    i915_handle_vblank(dev, plane, pipe, iir))
3229 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3230 
3231 		}
3232 
3233 
3234 		/* With MSI, interrupts are only generated when iir
3235 		 * transitions from zero to nonzero.  If another bit got
3236 		 * set while we were handling the existing iir bits, then
3237 		 * we would never get another interrupt.
3238 		 *
3239 		 * This is fine on non-MSI as well, as if we hit this path
3240 		 * we avoid exiting the interrupt handler only to generate
3241 		 * another one.
3242 		 *
3243 		 * Note that for MSI this could cause a stray interrupt report
3244 		 * if an interrupt landed in the time between writing IIR and
3245 		 * the posting read.  This should be rare enough to never
3246 		 * trigger the 99% of 100,000 interrupts test for disabling
3247 		 * stray interrupts.
3248 		 */
3249 		ret = IRQ_HANDLED;
3250 		iir = new_iir;
3251 	} while (iir & ~flip_mask);
3252 
3253 	i915_update_dri1_breadcrumb(dev);
3254 
3255 	return ret;
3256 }
3257 
i915_irq_uninstall(struct drm_device * dev)3258 static void i915_irq_uninstall(struct drm_device * dev)
3259 {
3260 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3261 	int pipe;
3262 
3263 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
3264 
3265 	if (I915_HAS_HOTPLUG(dev)) {
3266 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3267 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3268 	}
3269 
3270 	I915_WRITE16(HWSTAM, 0xffff);
3271 	for_each_pipe(pipe) {
3272 		/* Clear enable bits; then clear status bits */
3273 		I915_WRITE(PIPESTAT(pipe), 0);
3274 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3275 	}
3276 	I915_WRITE(IMR, 0xffffffff);
3277 	I915_WRITE(IER, 0x0);
3278 
3279 	I915_WRITE(IIR, I915_READ(IIR));
3280 }
3281 
i965_irq_preinstall(struct drm_device * dev)3282 static void i965_irq_preinstall(struct drm_device * dev)
3283 {
3284 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3285 	int pipe;
3286 
3287 	atomic_set(&dev_priv->irq_received, 0);
3288 
3289 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3290 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3291 
3292 	I915_WRITE(HWSTAM, 0xeffe);
3293 	for_each_pipe(pipe)
3294 		I915_WRITE(PIPESTAT(pipe), 0);
3295 	I915_WRITE(IMR, 0xffffffff);
3296 	I915_WRITE(IER, 0x0);
3297 	POSTING_READ(IER);
3298 }
3299 
i965_irq_postinstall(struct drm_device * dev)3300 static int i965_irq_postinstall(struct drm_device *dev)
3301 {
3302 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3303 	u32 enable_mask;
3304 	u32 error_mask;
3305 
3306 	/* Unmask the interrupts that we always want on. */
3307 	/* LINTED */
3308 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3309 			       I915_DISPLAY_PORT_INTERRUPT |
3310 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3311 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3312 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3313 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3314 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3315 
3316 	enable_mask = ~dev_priv->irq_mask;
3317 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3318 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3319 	enable_mask |= I915_USER_INTERRUPT;
3320 
3321 	if (IS_G4X(dev))
3322 		enable_mask |= I915_BSD_USER_INTERRUPT;
3323 
3324 	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
3325 
3326 	/*
3327 	 * Enable some error detection, note the instruction error mask
3328 	 * bit is reserved, so we leave it masked.
3329 	 */
3330 	if (IS_G4X(dev)) {
3331 		/* LINTED */
3332 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3333 			       GM45_ERROR_MEM_PRIV |
3334 			       GM45_ERROR_CP_PRIV |
3335 			       I915_ERROR_MEMORY_REFRESH);
3336 	} else {
3337 		/* LINTED */
3338 		error_mask = ~(I915_ERROR_PAGE_TABLE |
3339 			       I915_ERROR_MEMORY_REFRESH);
3340 	}
3341 	I915_WRITE(EMR, error_mask);
3342 
3343 	I915_WRITE(IMR, dev_priv->irq_mask);
3344 	I915_WRITE(IER, enable_mask);
3345 	POSTING_READ(IER);
3346 
3347 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3348 	POSTING_READ(PORT_HOTPLUG_EN);
3349 
3350 
3351 	return 0;
3352 }
3353 
i915_hpd_irq_setup(struct drm_device * dev)3354 static void i915_hpd_irq_setup(struct drm_device *dev)
3355 {
3356 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3357 	struct drm_mode_config *mode_config = &dev->mode_config;
3358 	struct intel_encoder *intel_encoder;
3359 	u32 hotplug_en;
3360 
3361 	assert_spin_locked(&dev_priv->irq_lock);
3362 
3363 	if (I915_HAS_HOTPLUG(dev)) {
3364 		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3365 		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3366 		/* Note HDMI and DP share hotplug bits */
3367 		/* enable bits are the same for all generations */
3368 		list_for_each_entry(intel_encoder, struct intel_encoder, &mode_config->encoder_list, base.head)
3369 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3370 				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3371 		/* Programming the CRT detection parameters tends
3372 		   to generate a spurious hotplug event about three
3373 		   seconds later.  So just do it once.
3374 		*/
3375 		if (IS_G4X(dev))
3376 			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3377 		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3378 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3379 
3380 		/* Ignore TV since it's buggy */
3381 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3382 	}
3383 }
3384 
i965_irq_handler(DRM_IRQ_ARGS)3385 static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
3386 {
3387 	/* LINTED */
3388 	struct drm_device *dev = (struct drm_device *) arg;
3389 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3390 	u32 iir, new_iir;
3391 	u32 pipe_stats[I915_MAX_PIPES] = { 0 };
3392 	unsigned long irqflags;
3393 	int irq_received;
3394 	int ret = IRQ_NONE, pipe;
3395 	u32 flip_mask =
3396 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3397 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3398 
3399 	atomic_inc(&dev_priv->irq_received);
3400 
3401 	iir = I915_READ(IIR);
3402 
3403 	for (;;) {
3404 
3405 		irq_received = (iir & ~flip_mask) != 0;
3406 
3407 		/* Can't rely on pipestat interrupt bit in iir as it might
3408 		 * have been cleared after the pipestat interrupt was received.
3409 		 * It doesn't set the bit in iir again, but it still produces
3410 		 * interrupts (for non-MSI).
3411 		 */
3412 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3413 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3414 			i915_handle_error(dev, false);
3415 
3416 		for_each_pipe(pipe) {
3417 			int reg = PIPESTAT(pipe);
3418 			pipe_stats[pipe] = I915_READ(reg);
3419 
3420 			/*
3421 			 * Clear the PIPE*STAT regs before the IIR
3422 			 */
3423 			if (pipe_stats[pipe] & 0x8000ffff) {
3424 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3425 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
3426 							 pipe_name(pipe));
3427 				I915_WRITE(reg, pipe_stats[pipe]);
3428 				irq_received = 1;
3429 			}
3430 		}
3431 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3432 
3433 		if (!irq_received)
3434 			break;
3435 
3436 		ret = IRQ_HANDLED;
3437 
3438 		/* Consume port.  Then clear IIR or we'll miss events */
3439 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
3440 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3441 			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3442 								  HOTPLUG_INT_STATUS_G4X :
3443 								  HOTPLUG_INT_STATUS_I915);
3444 
3445 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3446 				  hotplug_status);
3447 
3448 			intel_hpd_irq_handler(dev, hotplug_trigger,
3449 					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
3450 
3451 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3452 			I915_READ(PORT_HOTPLUG_STAT);
3453 		}
3454 
3455 		I915_WRITE(IIR, iir & ~flip_mask);
3456 		new_iir = I915_READ(IIR); /* Flush posted writes */
3457 
3458 		if (iir & I915_USER_INTERRUPT)
3459 			notify_ring(dev, &dev_priv->ring[RCS]);
3460 		if (iir & I915_BSD_USER_INTERRUPT)
3461 			notify_ring(dev, &dev_priv->ring[VCS]);
3462 
3463 		for_each_pipe(pipe) {
3464 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3465 			    i915_handle_vblank(dev, pipe, pipe, iir))
3466 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3467 
3468 		}
3469 
3470 
3471 
3472 		/* With MSI, interrupts are only generated when iir
3473 		 * transitions from zero to nonzero.  If another bit got
3474 		 * set while we were handling the existing iir bits, then
3475 		 * we would never get another interrupt.
3476 		 *
3477 		 * This is fine on non-MSI as well, as if we hit this path
3478 		 * we avoid exiting the interrupt handler only to generate
3479 		 * another one.
3480 		 *
3481 		 * Note that for MSI this could cause a stray interrupt report
3482 		 * if an interrupt landed in the time between writing IIR and
3483 		 * the posting read.  This should be rare enough to never
3484 		 * trigger the 99% of 100,000 interrupts test for disabling
3485 		 * stray interrupts.
3486 		 */
3487 		iir = new_iir;
3488 	}
3489 
3490 	i915_update_dri1_breadcrumb(dev);
3491 
3492 	return ret;
3493 }
3494 
i965_irq_uninstall(struct drm_device * dev)3495 static void i965_irq_uninstall(struct drm_device * dev)
3496 {
3497 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3498 	int pipe;
3499 
3500 	if (!dev_priv)
3501 		return;
3502 
3503 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
3504 
3505 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3506 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3507 
3508 	I915_WRITE(HWSTAM, 0xffffffff);
3509 	for_each_pipe(pipe)
3510 		I915_WRITE(PIPESTAT(pipe), 0);
3511 	I915_WRITE(IMR, 0xffffffff);
3512 	I915_WRITE(IER, 0x0);
3513 
3514 	for_each_pipe(pipe)
3515 		I915_WRITE(PIPESTAT(pipe),
3516 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3517 	I915_WRITE(IIR, I915_READ(IIR));
3518 }
3519 
i915_reenable_hotplug_timer_func(void * data)3520 static void i915_reenable_hotplug_timer_func(void* data)
3521 {
3522 	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3523 	struct drm_device *dev = dev_priv->dev;
3524 	struct drm_mode_config *mode_config = &dev->mode_config;
3525 	unsigned long irqflags;
3526 	int i;
3527 
3528 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3529 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3530 		struct drm_connector *connector;
3531 
3532 		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3533 			continue;
3534 
3535 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3536 
3537 		list_for_each_entry(connector, struct drm_connector, &mode_config->connector_list, head) {
3538 			struct intel_connector *intel_connector = to_intel_connector(connector);
3539 
3540 			if (intel_connector->encoder->hpd_pin == i) {
3541 				if (connector->polled != intel_connector->polled)
3542 					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3543 							 drm_get_connector_name(connector));
3544 				connector->polled = intel_connector->polled;
3545 				if (!connector->polled)
3546 					connector->polled = DRM_CONNECTOR_POLL_HPD;
3547 			}
3548 		}
3549 	}
3550 	if (dev_priv->display.hpd_irq_setup)
3551 		dev_priv->display.hpd_irq_setup(dev);
3552 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3553 }
3554 
intel_irq_init(struct drm_device * dev)3555 void intel_irq_init(struct drm_device *dev)
3556 {
3557 	struct drm_i915_private *dev_priv = dev->dev_private;
3558 
3559 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3560 	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3561 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3562 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3563 
3564 	init_timer(&dev_priv->gpu_error.hangcheck_timer);
3565 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3566 		    i915_hangcheck_elapsed,
3567 		    (void *) dev);
3568 	init_timer(&dev_priv->hotplug_reenable_timer);
3569 	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3570 		    (void *) dev_priv);
3571 
3572 
3573 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
3574 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3575 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3576 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3577 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3578 	}
3579 
3580 	if (drm_core_check_feature(dev, DRIVER_MODESET))
3581 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3582 	else
3583 		dev->driver->get_vblank_timestamp = NULL;
3584 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3585 
3586 	if (IS_VALLEYVIEW(dev)) {
3587 		dev->driver->irq_handler = valleyview_irq_handler;
3588 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
3589 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
3590 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
3591 		dev->driver->enable_vblank = valleyview_enable_vblank;
3592 		dev->driver->disable_vblank = valleyview_disable_vblank;
3593 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3594 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3595 		/* Share pre & uninstall handlers with ILK/SNB */
3596 		dev->driver->irq_handler = ivybridge_irq_handler;
3597 		dev->driver->irq_preinstall = ivybridge_irq_preinstall;
3598 		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3599 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
3600 		dev->driver->enable_vblank = ivybridge_enable_vblank;
3601 		dev->driver->disable_vblank = ivybridge_disable_vblank;
3602 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3603 	} else if (HAS_PCH_SPLIT(dev)) {
3604 		dev->driver->irq_handler = ironlake_irq_handler;
3605 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
3606 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
3607 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
3608 		dev->driver->enable_vblank = ironlake_enable_vblank;
3609 		dev->driver->disable_vblank = ironlake_disable_vblank;
3610 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3611 	} else {
3612 		if (INTEL_INFO(dev)->gen == 2) {
3613 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
3614 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
3615 			dev->driver->irq_handler = i8xx_irq_handler;
3616 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
3617 		} else if (INTEL_INFO(dev)->gen == 3) {
3618 			dev->driver->irq_preinstall = i915_irq_preinstall;
3619 			dev->driver->irq_postinstall = i915_irq_postinstall;
3620 			dev->driver->irq_uninstall = i915_irq_uninstall;
3621 			dev->driver->irq_handler = i915_irq_handler;
3622 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3623 		} else {
3624 			dev->driver->irq_preinstall = i965_irq_preinstall;
3625 			dev->driver->irq_postinstall = i965_irq_postinstall;
3626 			dev->driver->irq_uninstall = i965_irq_uninstall;
3627 			dev->driver->irq_handler = i965_irq_handler;
3628 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3629 		}
3630 		dev->driver->enable_vblank = i915_enable_vblank;
3631 		dev->driver->disable_vblank = i915_disable_vblank;
3632 	}
3633 }
3634 
intel_hpd_init(struct drm_device * dev)3635 void intel_hpd_init(struct drm_device *dev)
3636 {
3637 	struct drm_i915_private *dev_priv = dev->dev_private;
3638 	struct drm_mode_config *mode_config = &dev->mode_config;
3639 	struct drm_connector *connector;
3640 	unsigned long irqflags;
3641 	int i;
3642 
3643 	for (i = 1; i < HPD_NUM_PINS; i++) {
3644 		dev_priv->hpd_stats[i].hpd_cnt = 0;
3645 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3646 	}
3647 	list_for_each_entry(connector, struct drm_connector, &mode_config->connector_list, head) {
3648 		struct intel_connector *intel_connector = to_intel_connector(connector);
3649 		connector->polled = intel_connector->polled;
3650 		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3651 			connector->polled = DRM_CONNECTOR_POLL_HPD;
3652 	}
3653 
3654 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3655 	 * just to make the assert_spin_locked checks happy. */
3656 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3657 	if (dev_priv->display.hpd_irq_setup)
3658 		dev_priv->display.hpd_irq_setup(dev);
3659 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3660 }
3661