xref: /gfx-drm/usr/src/uts/intel/io/i915/i915_drv.c (revision e49fc716)
/*
 * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * i915_drv.c -- Intel i915 driver -*- linux-c -*-
 * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
 */

/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright (c) 2009, 2013, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/*
 * I915 DRM Driver for Solaris
 *
 * This driver provides hardware 3D acceleration support for Intel
 * integrated video devices (e.g. the i8xx/i915/i945 series chipsets) under
 * the DRI (Direct Rendering Infrastructure). DRM (Direct Rendering Manager)
 * here means the kernel device driver in DRI.
 *
 * The i915 driver is a device-dependent driver only; it depends on a misc
 * module named drm for the generic DRM operations.
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_crtc_helper.h"
#include "intel_drv.h"

static int i915_modeset = -1;
unsigned int i915_fbpercrtc = 0;
int i915_panel_ignore_lid = 1;
unsigned int i915_powersave = 1;

int i915_semaphores = -1;

int i915_enable_rc6 = 0;
int i915_enable_fbc = -1;

unsigned int i915_lvds_downclock = 0;
int i915_lvds_channel_mode;

int i915_panel_use_ssc = -1;
int i915_vbt_sdvo_panel_type = -1;

bool i915_try_reset = false;
bool i915_enable_hangcheck = true;
int i915_enable_ppgtt = -1;

int i915_disable_power_well = 1;
int i915_enable_ips = 1;

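/*
 * The tunables above mirror the Linux i915 module parameters of the same
 * names. By the usual i915 convention (an assumption carried over from the
 * Linux driver; the semantics are not restated in this file), -1 means
 * "let the driver pick a per-chipset default", 0 disables the feature and
 * a positive value force-enables it; see i915_semaphore_is_enabled() below
 * for how i915_semaphores is interpreted this way.
 */
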
82 
83 static int i915_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
84 static int i915_attach(dev_info_t *, ddi_attach_cmd_t);
85 static int i915_detach(dev_info_t *, ddi_detach_cmd_t);
86 static int i915_quiesce(dev_info_t *);
87 
88 extern struct cb_ops drm_cb_ops;
89 extern int intel_agp_enabled;
90 
91 static struct dev_ops i915_dev_ops = {
92 	DEVO_REV,		/* devo_rev */
93 	0,			/* devo_refcnt */
94 	i915_info,		/* devo_getinfo */
95 	nulldev,		/* devo_identify */
96 	nulldev,		/* devo_probe */
97 	i915_attach,		/* devo_attach */
98 	i915_detach,		/* devo_detach */
99 	nodev,			/* devo_reset */
100 	&drm_cb_ops,		/* devo_cb_ops */
101 	NULL,			/* devo_bus_ops */
102 	NULL,			/* power */
103 	i915_quiesce,		/* devo_quiesce */
104 };
105 
106 static struct modldrv modldrv = {
107 	&mod_driverops,		/* drv_modops */
108 	"I915 DRM driver",	/* drv_linkinfo */
109 	&i915_dev_ops,		/* drv_dev_ops */
110 };
111 
112 static struct modlinkage modlinkage = {
113 	MODREV_1, { (void *) &modldrv, NULL }
114 };
115 
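/*
 * The three structures above are the standard Solaris DDI/DKI module
 * scaffolding: i915_dev_ops supplies the driver entry points (attach,
 * detach, getinfo, quiesce), modldrv wraps them as a loadable driver, and
 * modlinkage is what _init()/_fini()/_info() near the bottom of this file
 * hand to mod_install()/mod_remove()/mod_info().
 */
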
#define INTEL_VGA_DEVICE(id, info) {		\
	.vendor = 0x8086,			\
	.device = id,				\
	.driver_data = (unsigned long) info }

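/*
 * For illustration, a pciidlist entry such as
 *	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info)
 * expands to
 *	{ .vendor = 0x8086, .device = 0x0166,
 *	  .driver_data = (unsigned long) &intel_ivybridge_m_info }
 * i.e. each table row binds one Intel PCI device ID to the
 * intel_device_info structure describing that chipset's features.
 */
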
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
};

static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.has_bsd_ring = 1,
	.has_blt_ring = 1,
	.has_llc = 1,
	.has_force_wake = 1,
};

#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_bsd_ring = 1, \
	.has_blt_ring = 1, \
	.has_llc = 1, \
	.has_force_wake = 1

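/*
 * GEN7_FEATURES relies on a C99 designated-initializer rule: when the same
 * member is initialized more than once, the last initializer wins (hence
 * the "legal, last one wins" remarks below). Each GEN7 variant can thus
 * start from the common feature set and override only what differs, e.g.
 * (illustrative only):
 *
 *	static const struct intel_device_info demo = {
 *		GEN7_FEATURES,		-- sets .num_pipes = 3, .has_llc = 1, ...
 *		.num_pipes = 2,		-- later initializer overrides to 2
 *	};
 */
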
static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	.has_fbc = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_llc = 0, /* legal, last one wins */
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_vebox_ring = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	.has_vebox_ring = 1,
};

static struct drm_pci_id_list pciidlist[] = {
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT3 mobile */
	INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
	INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
	INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
	INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
	INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
	INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
	INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
	INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
	INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
	INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
	INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
	INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
	INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
	INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
	INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
	INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
	INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
	INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
	INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
	INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
	INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
	INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
	INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
	INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
	{0, 0, 0}
};
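
/*
 * The all-zero sentinel entry terminates the table; the generic drm
 * module's PCI-ID match code scans pciidlist until it reaches the zeroed
 * vendor/device pair.
 */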

#define PCI_VENDOR_ID_INTEL		0x8086

void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_info_t	*isa_dip;
	int	vendor_id, device_id;
	/* LINTED */
	int	error;
	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easily for VMMs, which then
	 * only need to expose the ISA bridge to let the driver know the
	 * real hardware underneath. This is a requirement from the
	 * virtualization team.
	 */
	isa_dip = ddi_find_devinfo("isa", -1, 0);

	if (isa_dip) {
		vendor_id = ddi_prop_get_int(DDI_DEV_T_ANY, isa_dip,
		    DDI_PROP_DONTPASS, "vendor-id", -1);
		DRM_DEBUG("vendor_id 0x%x", vendor_id);

		if (vendor_id == PCI_VENDOR_ID_INTEL) {
			device_id = ddi_prop_get_int(DDI_DEV_T_ANY, isa_dip,
			    DDI_PROP_DONTPASS, "device-id", -1);
			DRM_DEBUG("device_id 0x%x", device_id);
			device_id &= INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = (unsigned short) device_id;
			if (device_id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (device_id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (device_id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (device_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (device_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			}
		}
	}
}

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915_semaphores >= 0)
		return i915_semaphores;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_set_power_well(dev, true);

	drm_kms_helper_poll_disable(dev);

	/* XXX FIXME: pci_save_state(dev->pdev); */

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    dev_priv->gtt.total != 0) {

		if (i915_gem_idle(dev, 0))
			DRM_ERROR("GEM idle failed, resume may fail\n");

		del_timer_sync(&dev_priv->rps.delayed_resume_timer);

		(void) drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		list_for_each_entry(crtc, struct drm_crtc,
		    &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);

		intel_modeset_suspend_hw(dev);
	}

	if (dev_priv->gtt.total != 0)
		(void) i915_save_state(dev);

	return 0;
}

int
i915_suspend(struct drm_device *dev)
{
	/* don't dereference dev before the NULL check below */
	struct drm_i915_private *dev_priv = dev ? dev->dev_private : NULL;
	int error;

	/*
	 * First, try to restore the "console".
	 */
	(void) drm_fb_helper_force_kernel_mode();

	if (!dev || !dev_priv) {
		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	return 0;
}

static int
__i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (dev_priv->gtt.total != 0)
		(void) i915_restore_state(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    dev_priv->gtt.total != 0) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		(void) drm_irq_install(dev);

		intel_modeset_init_hw(dev);
		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	return error;
}

static int
i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error;

	if (dev_priv->gtt.total != 0)
		intel_gt_sanitize(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    dev_priv->gtt.total != 0) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}
	error = __i915_drm_thaw(dev);

	return error;
}

int
i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gtt.total != 0)
		intel_gt_sanitize(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    dev_priv->gtt.total != 0) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	ret = __i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	(void) pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      gdrst | GRDOM_RENDER |
			      GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      gdrst | GRDOM_MEDIA |
			      GRDOM_RESET_ENABLE);

	return wait_for(i965_reset_complete(dev), 500);
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;
	unsigned long irqflags;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->gt.force_wake_get(dev_priv);
	else
		dev_priv->gt.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	case 2: return i8xx_do_reset(dev);
	default: return -ENODEV;
	}
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct timeval cur_time;
	bool simulated;
	int ret;

	if (!i915_try_reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	do_gettimeofday(&cur_time);
	if (!simulated && cur_time.tv_sec - dev_priv->gpu_error.last_reset < 5) {
		ret = -ENODEV;
		DRM_ERROR("GPU hanging too fast, wait 5 seconds for another reset");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	} else {
		ret = intel_gpu_reset(dev);

		/* Also reset the gpu hangman. */
		if (simulated) {
			DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
			dev_priv->gpu_error.stop_rings = 0;
			if (ret == -ENODEV) {
				DRM_ERROR("Reset not implemented, but ignoring "
					  "error for simulated gpu hangs\n");
				ret = 0;
			}
		} else {
			do_gettimeofday(&cur_time);
			dev_priv->gpu_error.last_reset = cur_time.tv_sec;
		}
	}
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

		i915_gem_context_init(dev);
		if (dev_priv->mm.aliasing_ppgtt) {
			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
			if (ret)
				i915_gem_cleanup_aliasing_ppgtt(dev);
		}

		/*
		 * It would make sense to re-init all the other hw state, at
		 * least the rps/rc6/emon init done within modeset_init_hw. For
		 * some unknown reason, this blows up my ilk, so don't.
		 */

		mutex_unlock(&dev->struct_mutex);

		(void) drm_irq_uninstall(dev);
		if (drm_irq_install(dev)) {
			DRM_ERROR("Could not install irq for driver.\n");
			return -EIO;
		}
		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}

static struct drm_driver driver = {
	/* don't use mtrr's here, the Xserver or user space app should
	 * deal with them for intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.firstopen = i915_driver_firstopen,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
	/* OSOL begin */
	.entervt = i915_driver_entervt,
	.leavevt = i915_driver_leavevt,
	.agp_support_detect = i915_driver_agp_support_detect,
	/* OSOL end */
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
	/*.gem_vm_ops = &i915_gem_vm_ops,*/
	.gem_fault = i915_gem_fault,
	.ioctls = i915_ioctls,

	.id_table = pciidlist,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
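
/*
 * Note that DRIVER_MODESET is not listed in driver_features above:
 * i915_init() (called from _init() below) ORs it in unconditionally
 * before mod_install(), and i915_driver_agp_support_detect() at the
 * bottom of this file may later clear DRIVER_USE_AGP for gen6+ parts.
 */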

static int
i915_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct drm_device *dev;
	int ret, item;

	item = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		if (ddi_soft_state_zalloc(i915_statep, item) != DDI_SUCCESS) {
			DRM_ERROR("failed to alloc softstate, item = %d", item);
			return (DDI_FAILURE);
		}

		dev = ddi_get_soft_state(i915_statep, item);
		if (!dev) {
			DRM_ERROR("cannot get soft state");
			return (DDI_FAILURE);
		}

		dev->devinfo = dip;

		if (!(driver.driver_features & DRIVER_MODESET))
			driver.get_vblank_timestamp = NULL;

		ret = drm_init(dev, &driver);
		if (ret != DDI_SUCCESS)
			(void) ddi_soft_state_free(i915_statep, item);

		return (ret);

	case DDI_RESUME:
		dev = ddi_get_soft_state(i915_statep, item);
		if (!dev) {
			DRM_ERROR("cannot get soft state");
			return (DDI_FAILURE);
		}

		return (i915_resume(dev));

	default:
		break;
	}

	DRM_ERROR("only supports attach or resume");
	return (DDI_FAILURE);
}

static int
i915_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct drm_device *dev;
	int item;

	item = ddi_get_instance(dip);
	dev = ddi_get_soft_state(i915_statep, item);
	if (!dev) {
		DRM_ERROR("cannot get soft state");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		drm_exit(dev);
		(void) ddi_soft_state_free(i915_statep, item);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (i915_suspend(dev));

	default:
		break;
	}

	DRM_ERROR("only supports detach or suspend");
	return (DDI_FAILURE);
}

static int
/* LINTED */
i915_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	struct drm_minor *minor;

	minor = idr_find(&drm_minors_idr, DRM_DEV2MINOR((dev_t)arg));
	if (!minor)
		return (DDI_FAILURE);
	if (!minor->dev || !minor->dev->devinfo)
		return (DDI_FAILURE);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)minor->dev->devinfo;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)ddi_get_instance(minor->dev->devinfo);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}

static int
i915_quiesce(dev_info_t *dip)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	struct drm_crtc *crtc;
	int ret = 0;

	dev = ddi_get_soft_state(i915_statep, ddi_get_instance(dip));

	if (!dev)
		return (DDI_FAILURE);

	dev_priv = dev->dev_private;

	if (dev_priv && dev_priv->gtt.total != 0) {

		(void) drm_fb_helper_force_kernel_mode();

		mutex_lock(&dev->struct_mutex);
		ret = i915_gpu_idle(dev);
		if (ret)
			DRM_ERROR("failed to idle hardware: %d\n", ret);
		i915_gem_retire_requests(dev);
		mutex_unlock(&dev->struct_mutex);

		if (dev_priv->fbcon_obj != NULL)
			intel_fbdev_fini(dev);

		drm_kms_helper_poll_fini(dev);
		mutex_lock(&dev->struct_mutex);

		list_for_each_entry(crtc, struct drm_crtc,
		    &dev->mode_config.crtc_list, head) {
			/* Skip inactive CRTCs */
			if (!crtc->fb)
				continue;

			intel_increase_pllclock(crtc);
		}

		intel_disable_fbc(dev);

		intel_disable_gt_powersave(dev);

		ironlake_teardown_rc6(dev);

		mutex_unlock(&dev->struct_mutex);
		drm_mode_config_cleanup(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);

		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);
		drm_mm_takedown(&dev_priv->mm.stolen);
		intel_cleanup_overlay(dev);

		i915_gem_lastclose(dev);

		if (dev_priv->gtt.scratch_page)
			i915_teardown_scratch_page(dev);

		if (MDB_TRACK_ENABLE) {
			struct batch_info_list *r_list, *list_temp;
			list_for_each_entry_safe(r_list, list_temp,
			    struct batch_info_list, &dev_priv->batch_list, head) {
				list_del(&r_list->head);
				drm_free(r_list->obj_list,
				    r_list->num * sizeof (caddr_t), DRM_MEM_MAPS);
				drm_free(r_list,
				    sizeof (struct batch_info_list), DRM_MEM_MAPS);
			}
			list_del(&dev_priv->batch_list);
		}

		if (dev->old_gtt) {
			intel_rw_gtt(dev, dev->old_gtt_size,
				0, (void *) dev->old_gtt, 1);
			kmem_free(dev->old_gtt, dev->old_gtt_size);
		}
	}

	return (DDI_SUCCESS);
}

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	driver.driver_features |= DRIVER_MODESET;

	return 0;
}

static void __exit i915_exit(void)
{
}

int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&i915_statep,
	    sizeof (struct drm_device), DRM_MAX_INSTANCES);
	if (ret)
		return (ret);

	ret = i915_init();
	if (ret) {
		ddi_soft_state_fini(&i915_statep);
		return (ret);
	}

	ret = mod_install(&modlinkage);
	if (ret) {
		i915_exit();
		ddi_soft_state_fini(&i915_statep);
		return (ret);
	}

	return (ret);
}

int
_fini(void)
{
	int ret;

	ret = mod_remove(&modlinkage);
	if (ret)
		return (ret);

	i915_exit();

	ddi_soft_state_fini(&i915_statep);

	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

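/*
 * Once the module is installed, the driver binds to the Intel GPU through
 * the usual Solaris mechanisms. Purely as an illustration (the exact
 * alias depends on the PCI device ID in pciidlist above), something like
 *	add_drv -i '"pci8086,166"' i915
 * would register the driver for an Ivy Bridge GT2 mobile GPU.
 */
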
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	((reg) < 0x40000) && \
	((reg) != FORCEWAKE))

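/*
 * Registers below 0x40000 (roughly, the GT register range) sit behind the
 * GT power well and can only be accessed reliably while forcewake is held;
 * FORCEWAKE itself is excluded since it is the register used to grab
 * forcewake in the first place. The i915_read/i915_write accessors below
 * use this predicate to decide when to bracket an access with
 * force_wake_get()/force_wake_put().
 */
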
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
	 * chip from rc6 before touching it for real. MI_MODE is masked, hence
	 * harmless to write 0 into. */
	I915_WRITE_NOTRACE(MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_INFO("Unknown unclaimed register before writing to %x\n",
			  reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_INFO("Unclaimed write to %x\n", reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

u8 i915_read8(struct drm_i915_private *dev_priv, u32 reg)
{
	u8 val;

	if (IS_GEN5(dev_priv->dev))
		ilk_dummy_write(dev_priv);

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		unsigned long irqflags;
		spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_get(dev_priv);
		val = DRM_READ8(dev_priv->regs, reg);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_put(dev_priv);
		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
	} else
		val = DRM_READ8(dev_priv->regs, (reg));
	return val;
}

u16 i915_read16(struct drm_i915_private *dev_priv, u32 reg)
{
	u16 val;

	if (IS_GEN5(dev_priv->dev))
		ilk_dummy_write(dev_priv);

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		unsigned long irqflags;
		spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_get(dev_priv);
		val = DRM_READ16(dev_priv->regs, reg);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_put(dev_priv);
		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
	} else
		val = DRM_READ16(dev_priv->regs, (reg));
	return val;
}

u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	if (IS_GEN5(dev_priv->dev))
		ilk_dummy_write(dev_priv);

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		unsigned long irqflags;
		spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_get(dev_priv);
		val = DRM_READ32(dev_priv->regs, reg);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_put(dev_priv);
		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
	} else
		val = DRM_READ32(dev_priv->regs, (reg));
	return val;
}

u64 i915_read64(struct drm_i915_private *dev_priv, u32 reg)
{
	u64 val;

	if (IS_GEN5(dev_priv->dev))
		ilk_dummy_write(dev_priv);

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		unsigned long irqflags;
		spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_get(dev_priv);
		val = DRM_READ64(dev_priv->regs, reg);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_put(dev_priv);
		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
	} else
		val = DRM_READ64(dev_priv->regs, (reg));
	return val;
}

void i915_write8(struct drm_i915_private *dev_priv, u32 reg,
			u8 val)
{
	unsigned long irqflags;
	u32 __fifo_ret = 0;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (NEEDS_FORCE_WAKE(dev_priv, reg))
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);

	if (IS_GEN5(dev_priv->dev))
		ilk_dummy_write(dev_priv);

	hsw_unclaimed_reg_clear(dev_priv, reg);

	DRM_WRITE8(dev_priv->regs, (reg), (val));
	if (__fifo_ret)
		gen6_gt_check_fifodbg(dev_priv);
	hsw_unclaimed_reg_check(dev_priv, reg);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}

void i915_write16(struct drm_i915_private *dev_priv, u32 reg,
			u16 val)
{
	unsigned long irqflags;
	u32 __fifo_ret = 0;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (NEEDS_FORCE_WAKE(dev_priv, reg))
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);

	if (IS_GEN5(dev_priv->dev))
		ilk_dummy_write(dev_priv);

	hsw_unclaimed_reg_clear(dev_priv, reg);

	DRM_WRITE16(dev_priv->regs, (reg), (val));
	if (__fifo_ret)
		gen6_gt_check_fifodbg(dev_priv);
	hsw_unclaimed_reg_check(dev_priv, reg);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}

void i915_write32(struct drm_i915_private *dev_priv, u32 reg,
			u32 val)
{
	unsigned long irqflags;
	u32 __fifo_ret = 0;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (NEEDS_FORCE_WAKE(dev_priv, reg))
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);

	if (IS_GEN5(dev_priv->dev))
		ilk_dummy_write(dev_priv);

	hsw_unclaimed_reg_clear(dev_priv, reg);

	DRM_WRITE32(dev_priv->regs, (reg), (val));
	if (__fifo_ret)
		gen6_gt_check_fifodbg(dev_priv);
	hsw_unclaimed_reg_check(dev_priv, reg);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}

void i915_write64(struct drm_i915_private *dev_priv, u32 reg,
			u64 val)
{
	unsigned long irqflags;
	u32 __fifo_ret = 0;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (NEEDS_FORCE_WAKE(dev_priv, reg))
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);

	if (IS_GEN5(dev_priv->dev))
		ilk_dummy_write(dev_priv);

	hsw_unclaimed_reg_clear(dev_priv, reg);

	DRM_WRITE64(dev_priv->regs, (reg), (val));
	if (__fifo_ret)
		gen6_gt_check_fifodbg(dev_priv);
	hsw_unclaimed_reg_check(dev_priv, reg);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}

#define __i915_read(x) \
u ## x i915_read ## x(struct drm_i915_private *dev_priv, u32 reg);

__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read

#define __i915_write(x)	\
void i915_write ## x(struct drm_i915_private *dev_priv, u32 reg,	\
				u ## x val);

__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* supported gens: 0x10 for gen 4, 0x30 for gens 4-5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};

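/*
 * The single entry above allows an 8-byte read of the render ring
 * timestamp on gens 4-7: 0xF0 has bits 4-7 set, and the ioctl below tests
 * (1 << gen) against the mask, so e.g. a gen 6 device matches because
 * 1 << 6 == 0x40 falls within 0xF0.
 */
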
int i915_reg_read_ioctl(DRM_IOCTL_ARGS)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		drm_agp_chipset_flush(dev);
}

void i915_driver_agp_support_detect(struct drm_device *dev, unsigned long flags)
{
	struct intel_device_info *info;
	info = (struct intel_device_info *) flags;

	/* Remove AGP support for GEN6+ platforms */
	if (info->gen >= 6)
		driver.driver_features &= ~DRIVER_USE_AGP;
}