xref: /gfx-drm/usr/src/uts/intel/io/i915/i915_dma.c (revision e49fc716)
1 /*
2  * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
3  */
4 
5 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
6  */
7 /*
8  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
9  * Copyright (c) 2009, 2013, Intel Corporation.
10  * All Rights Reserved.
11  *
12  * Permission is hereby granted, free of charge, to any person obtaining a
13  * copy of this software and associated documentation files (the
14  * "Software"), to deal in the Software without restriction, including
15  * without limitation the rights to use, copy, modify, merge, publish,
16  * distribute, sub license, and/or sell copies of the Software, and to
17  * permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice (including the
21  * next paragraph) shall be included in all copies or substantial portions
22  * of the Software.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
25  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
27  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
28  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
29  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
30  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
31  *
32  */
33 
34 #include "drmP.h"
35 #include "drm.h"
36 #include "drm_crtc_helper.h"
37 #include "drm_fb_helper.h"
38 #include "drm_linux.h"
39 #include "drm_mm.h"
40 #include "intel_drv.h"
41 #include "i915_drm.h"
42 #include "i915_drv.h"
43 #include "i915_io32.h"
44 #include <sys/agp/agptarget_io.h>
45 
/* Legacy flag; PCI DMA API path is compiled out in this port. */
#define USE_PCI_DMA_API 0

/* Shorthand for the render ring (low-priority ring in DRI1 parlance). */
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

/*
 * DRI1-era ring emission helpers.  NOTE: these expand assuming a local
 * `dev_priv` is in scope at the call site.
 */
#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (__lintzero)
69 
70 static inline u32
intel_read_legacy_status_page(struct drm_i915_private * dev_priv,int reg)71 intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
72 {
73 	if (I915_NEED_GFX_HWS(dev_priv->dev)) {
74 		u32 *regs = (u32 *)dev_priv->dri1.gfx_hws_cpu_addr.handle;
75 		return regs[reg];
76 	} else
77 		return intel_read_status_page(LP_RING(dev_priv), reg);
78 }
79 
/* Read a dword from the status page, honoring the legacy-HWS mapping. */
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
/* Last breadcrumb (sequence number) the GPU stored to the status page. */
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
/* Dword index of the breadcrumb slot in the HWS page (defined after use
 * above — fine, macros expand at the call site). */
#define I915_BREADCRUMB_INDEX		0x21
83 
i915_update_dri1_breadcrumb(struct drm_device * dev)84 void i915_update_dri1_breadcrumb(struct drm_device *dev)
85 {
86 	drm_i915_private_t *dev_priv = dev->dev_private;
87 	struct drm_i915_master_private *master_priv;
88 
89 	if (dev->primary->master) {
90 		master_priv = dev->primary->master->driver_priv;
91 		if (master_priv->sarea_priv)
92 			master_priv->sarea_priv->last_dispatch =
93 				READ_BREADCRUMB(dev_priv);
94 	}
95 }
96 
i915_write_hws_pga(struct drm_device * dev)97 static void i915_write_hws_pga(struct drm_device *dev)
98 {
99 	drm_i915_private_t *dev_priv = dev->dev_private;
100 	u32 addr;
101 
102 	addr = dev_priv->status_page_dmah->paddr;
103 	if (INTEL_INFO(dev)->gen >= 4)
104 		addr |= (dev_priv->status_page_dmah->paddr >> 28) & 0xf0;
105 	I915_WRITE(HWS_PGA, addr);
106 }
107 
108 /**
109  * Frees the hardware status page, whether it's a physical address or a virtual
110  * address set up by the X Server.
111  */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/* Physical (DMA-allocated) status page, if any. */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* GTT-based status page set up via i915_set_status_page(): clear the
	 * gfx address first, then tear down the CPU mapping. */
	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->dri1.gfx_hws_cpu_addr, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
130 
/*
 * Resynchronize the software ring state (head/tail/space) with the
 * hardware after userspace may have touched the ring behind our back
 * (legacy DRI1 only).  Also flags the SAREA perf box when the ring
 * has gone idle.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	/* Re-read hardware pointers; masks strip the non-address bits. */
	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	/* head == tail means the ring drained completely. */
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
157 
/*
 * Tear down legacy DMA state: disable interrupts, release every ring
 * buffer, and free the hardware status page.  Always returns 0.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		(void) drm_irq_uninstall(dev);

	/* Ring teardown must happen under struct_mutex. */
	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
181 
/*
 * Legacy (DRI1) initialization: locate the SAREA, optionally set up the
 * render ring from userspace-provided parameters, and record the DRI1
 * framebuffer layout (cpp, front/back offsets).
 *
 * Returns 0 on success, -EINVAL if a ring already exists (GEM mode), or
 * the error from intel_render_ring_init_dri().
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		/* sarea_priv lives at a caller-supplied offset inside the SAREA. */
		master_priv->sarea_priv = (drm_i915_sarea_t *)(uintptr_t)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* A ring object already bound means GEM owns the ring. */
		if (LP_RING(dev_priv)->obj != NULL) {
			(void) i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			(void) i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}
226 
/*
 * Legacy resume path: verify the ring mapping and status page still
 * exist, then reprogram the hardware status page register.
 *
 * Returns 0 on success, -ENOMEM if the ring is unmapped, -EINVAL if
 * no status page is present.
 */
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
				ring->status_page.page_addr);
	/* GTT-based HWS goes through the ring helper; physical pages
	 * are written directly to HWS_PGA. */
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);
	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}
255 /* LINTED */
i915_dma_init(DRM_IOCTL_ARGS)256 static int i915_dma_init(DRM_IOCTL_ARGS)
257 {
258 	drm_i915_init_t *init = data;
259 	int retcode = 0;
260 
261 	if (drm_core_check_feature(dev, DRIVER_MODESET))
262 		return -ENODEV;
263 
264 	switch (init->func) {
265 	case I915_INIT_DMA:
266 		retcode = i915_initialize(dev, init);
267 		break;
268 	case I915_CLEANUP_DMA:
269 		retcode = i915_dma_cleanup(dev);
270 		break;
271 	case I915_RESUME_DMA:
272 		retcode = i915_dma_resume(dev);
273 		break;
274 	default:
275 		retcode = -EINVAL;
276 		break;
277 	}
278 
279 	return retcode;
280 }
281 
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	int type = (cmd >> 29) & 0x7;
	int opcode;

	if (type == 0x0) {
		/* MI commands: only NOOP and FLUSH are allowed. */
		opcode = (cmd >> 23) & 0x3f;
		if (opcode == 0x0)
			return 1;	/* MI_NOOP */
		if (opcode == 0x4)
			return 1;	/* MI_FLUSH */
		return 0;	/* disallow everything else */
	}

	if (type == 0x1)
		return 0;	/* reserved */

	if (type == 0x2)
		return (cmd & 0xff) + 2;	/* 2d commands */

	if (type == 0x3) {
		/* 3D commands: length encoding varies per opcode. */
		opcode = (cmd >> 24) & 0x1f;
		if (opcode <= 0x18)
			return 1;

		switch (opcode) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)
				return (cmd & 0x1ffff) + 2;	/* inline vertices */
			if (cmd & (1 << 17)) {	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;	/* indirect sequential */
		default:
			return 0;
		}
	}

	return 0;
}
352 
/*
 * Validate and copy a userspace command stream into the render ring.
 * Each dword sequence must pass validate_cmd(); the total must fit in
 * the ring.  The stream is padded with a 0 dword to keep the ring tail
 * qword-aligned.  Returns 0 or a negative errno.
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	/* Walk the buffer instruction by instruction; a size of 0 from
	 * validate_cmd() means an illegal instruction — abort. */
	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	/* Reserve an even number of dwords (pad slot included). */
	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
381 
/*
 * Emit a DRAWRECT (clip rectangle) command for one clip box.
 * Rejects degenerate or negative boxes with -EINVAL.  Gen4+ uses the
 * 4-dword I965 drawrect layout; older chips use the 6-dword layout
 * that also carries DR1.
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    (unsigned) box->y2 <= 0 || (unsigned) box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		/* x in low 16 bits, y in high 16 bits; max is inclusive. */
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
422 
423 /* XXX: Emitting the counter should really be moved to part of the IRQ
424  * emit. For now, do it in both places:
425  */
426 
/*
 * Bump the DRI1 sequence counter, mirror it into the SAREA, and ask the
 * GPU to store it to the breadcrumb slot of the status page via
 * MI_STORE_DWORD_INDEX.  A failed ring reservation is silently skipped
 * (best-effort; the counter was still advanced).
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	/* Wrap before going negative as a signed 32-bit value. */
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
446 
/*
 * Dispatch a validated command buffer once per clip rectangle (or once
 * with no clipping when there are no cliprects), emitting the drawrect
 * before each pass, then emit a breadcrumb.  cmd->sz is in bytes and
 * must be dword-aligned.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* At least one pass even with no cliprects. */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
480 
/*
 * Dispatch a userspace batchbuffer once per clip rectangle, using the
 * chip-appropriate MI_BATCH_BUFFER_START (or the old MI_BATCH_BUFFER
 * start/end form on i830/845G).  All batches run non-secure.  Start
 * address and length must be qword-aligned.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				/* Gen4+: non-secure bit lives in the command dword. */
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				/* Pre-gen4: non-secure bit lives in the address dword. */
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			/* i830/845G need explicit start and end addresses. */
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}


	/* G4x/Ironlake flush workaround after batch dispatch (best-effort). */
	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
542 
/*
 * Emit a legacy (DRI1) page flip: flush, issue an async display-buffer
 * flip to the other of the two front/back pages, wait for the plane-A
 * flip event, then store a breadcrumb.  Toggles dri1.current_page and
 * mirrors it into the SAREA.  Requires a SAREA private area.
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			  __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	/* Flip to whichever page is not currently displayed. */
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	/* Best-effort breadcrumb store (skipped if the ring is full). */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}
596 
i915_quiescent(struct drm_device * dev)597 static int i915_quiescent(struct drm_device * dev)
598 {
599 	i915_kernel_lost_context(dev);
600 	return intel_ring_idle(LP_RING(dev->dev_private));
601 }
602 
603 /* LINTED */
i915_flush_ioctl(DRM_IOCTL_ARGS)604 static int i915_flush_ioctl(DRM_IOCTL_ARGS)
605 {
606 	int ret;
607 
608 	if (drm_core_check_feature(dev, DRIVER_MODESET))
609 		return -ENODEV;
610 
611 	RING_LOCK_TEST_WITH_RETURN(dev, file);
612 
613 	mutex_lock(&dev->struct_mutex);
614 	ret = i915_quiescent(dev);
615 	mutex_unlock(&dev->struct_mutex);
616 
617 	return ret;
618 }
619 
/*
 * I915_BATCHBUFFER ioctl: copy cliprects from userspace, dispatch the
 * batch under struct_mutex, and mirror the breadcrumb into the SAREA.
 * Note: on success the code falls through into fail_free — kfree(NULL)
 * with a zero count is the expected no-cliprects case.
 */
/* LINTED */
static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(struct drm_clip_rect),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects, batch->num_cliprects * sizeof(struct drm_clip_rect));

	return ret;
}
675 
/*
 * I915_CMDBUFFER ioctl: copy the command stream and optional cliprects
 * from userspace, validate/dispatch under struct_mutex, and mirror the
 * breadcrumb into the SAREA.  Success falls through the fail_* labels,
 * which double as the normal cleanup path.
 */
/* LINTED */
static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			(void *)cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(struct drm_clip_rect), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects, cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
fail_batch_free:
	kfree(batch_data, cmdbuf->sz);

	return ret;
}
744 
/*
 * Advance the DRI1 counter and emit a breadcrumb store followed by
 * MI_USER_INTERRUPT, so completion raises an IRQ.  Returns the counter
 * value the caller should wait on.
 *
 * NOTE(review): this path wraps the counter to 1 while
 * i915_emit_breadcrumb() wraps to 0 — looks intentional (0 would be an
 * un-waitable sequence here) but worth confirming.
 */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	/* Best-effort emission; skipped if the ring cannot take 4 dwords. */
	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}
770 
/*
 * Wait (up to 3 seconds) for the GPU breadcrumb to reach irq_nr.
 * Uses the ring's IRQ if available, otherwise polls.  Returns 0 on
 * success or -EBUSY on timeout.
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Already passed the requested sequence — nothing to wait for. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	/* Prefer an interrupt-driven wait; fall back to polling when the
	 * ring IRQ cannot be acquired. */
	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, &ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}
804 
/* Needs the lock as it touches the ring.
 */
/*
 * I915_IRQ_EMIT ioctl: emit a user interrupt and copy the sequence
 * number to wait on back to userspace.
 */
/* LINTED */
int i915_irq_emit(DRM_IOCTL_ARGS)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
835 
836 /* Doesn't need the hardware lock.
837  */
838 /* LINTED */
i915_irq_wait(DRM_IOCTL_ARGS)839 int i915_irq_wait(DRM_IOCTL_ARGS)
840 {
841 	drm_i915_private_t *dev_priv = dev->dev_private;
842 	drm_i915_irq_wait_t *irqwait = data;
843 
844 	if (drm_core_check_feature(dev, DRIVER_MODESET))
845 		return -ENODEV;
846 
847 	if (!dev_priv) {
848 		DRM_ERROR("called with no initialization\n");
849 		return -EINVAL;
850 	}
851 
852 	return i915_wait_irq(dev, irqwait->irq_seq);
853 }
854 
855 /* LINTED */
i915_vblank_pipe_get(DRM_IOCTL_ARGS)856 int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
857 {
858 	drm_i915_private_t *dev_priv = dev->dev_private;
859 	drm_i915_vblank_pipe_t *pipe = data;
860 
861 	if (drm_core_check_feature(dev, DRIVER_MODESET))
862 		return -ENODEV;
863 
864 	if (!dev_priv) {
865 		DRM_ERROR("called with no initialization\n");
866 		return -EINVAL;
867 	}
868 
869 	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
870 
871 	return 0;
872 }
873 
874 /**
875  * Schedule buffer swap at given vertical blank.
876  */
877 /* LINTED */
i915_vblank_swap(DRM_IOCTL_ARGS)878 int i915_vblank_swap(DRM_IOCTL_ARGS)
879 {
880 	/* The delayed swap mechanism was fundamentally racy, and has been
881 	 * removed.  The model was that the client requested a delayed flip/swap
882 	 * from the kernel, then waited for vblank before continuing to perform
883 	 * rendering.  The problem was that the kernel might wake the client
884 	 * up before it dispatched the vblank swap (since the lock has to be
885 	 * held while touching the ringbuffer), in which case the client would
886 	 * clear and start the next frame before the swap occurred, and
887 	 * flicker would occur in addition to likely missing the vblank.
888 	 *
889 	 * In the absence of this ioctl, userland falls back to a correct path
890 	 * of waiting for a vblank, then dispatching the swap on its own.
891 	 * Context switching to userland and back is plenty fast enough for
892 	 * meeting the requirements of vblank swapping.
893 	 */
894 	return -EINVAL;
895 }
896 
897 /* LINTED */
i915_flip_bufs(DRM_IOCTL_ARGS)898 static int i915_flip_bufs(DRM_IOCTL_ARGS)
899 {
900 	int ret;
901 
902 	if (drm_core_check_feature(dev, DRIVER_MODESET))
903 		return -ENODEV;
904 
905 	DRM_DEBUG_DRIVER("%s\n", __func__);
906 
907 	RING_LOCK_TEST_WITH_RETURN(dev, file);
908 
909 	mutex_lock(&dev->struct_mutex);
910 	ret = i915_dispatch_flip(dev);
911 	mutex_unlock(&dev->struct_mutex);
912 
913 	return ret;
914 }
915 
/*
 * I915_GETPARAM ioctl: report driver/hardware capabilities to
 * userspace.  The selected value is copied out through param->value.
 * Unknown parameters return -EINVAL.
 */
/* LINTED */
static int i915_getparam(DRM_IOCTL_ARGS)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		/* Fences below fence_reg_start are reserved for userspace. */
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		/* not support yet */
		value = 0;
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}
1022 
/*
 * I915_SETPARAM ioctl: accept the few tunables legacy userspace may
 * set.  The MI_BATCHBUFFER_START and TEX_LRU parameters are obsolete
 * and silently accepted for compatibility.
 */
/* LINTED */
static int i915_setparam(DRM_IOCTL_ARGS)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
					param->param);
		return -EINVAL;
	}

	return 0;
}
1057 
1058 /* LINTED */
i915_set_status_page(DRM_IOCTL_ARGS)1059 static int i915_set_status_page(DRM_IOCTL_ARGS)
1060 {
1061 	drm_i915_private_t *dev_priv = dev->dev_private;
1062 	drm_i915_hws_addr_t *hws = data;
1063 	struct intel_ring_buffer *ring;
1064 
1065 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1066 		return -ENODEV;
1067 
1068 	if (!I915_NEED_GFX_HWS(dev))
1069 		return -EINVAL;
1070 
1071 	if (!dev_priv) {
1072 		DRM_ERROR("called with no initialization\n");
1073 		return -EINVAL;
1074 	}
1075 
1076 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1077 		DRM_ERROR("tried to set status page when mode setting active\n");
1078 		return 0;
1079 	}
1080 
1081 	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1082 
1083 	ring = LP_RING(dev_priv);
1084 	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1085 
1086 	dev_priv->dri1.gfx_hws_cpu_addr.offset = (u_offset_t)dev->agp_aperbase + hws->addr;
1087 	dev_priv->dri1.gfx_hws_cpu_addr.size = 4*1024;
1088 	dev_priv->dri1.gfx_hws_cpu_addr.type = 0;
1089 	dev_priv->dri1.gfx_hws_cpu_addr.flags = 0;
1090 	dev_priv->dri1.gfx_hws_cpu_addr.mtrr = 0;
1091 
1092 	drm_core_ioremap(&dev_priv->dri1.gfx_hws_cpu_addr, dev);
1093 	if (dev_priv->dri1.gfx_hws_cpu_addr.handle == NULL) {
1094 		(void) i915_dma_cleanup(dev);
1095 		ring->status_page.gfx_addr = 0;
1096 		DRM_ERROR("can not ioremap virtual address for"
1097 				" G33 hw status page\n");
1098 		return -ENOMEM;
1099 	}
1100 
1101 	(void) memset(dev_priv->dri1.gfx_hws_cpu_addr.handle, 0, PAGE_SIZE);
1102 	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
1103 
1104 	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
1105 			 ring->status_page.gfx_addr);
1106 	DRM_DEBUG_DRIVER("load hws at %p\n",
1107 			 ring->status_page.page_addr);
1108 	return 0;
1109 }
1110 
i915_get_bridge_dev(struct drm_device * dev)1111 static int i915_get_bridge_dev(struct drm_device *dev)
1112 {
1113 	struct drm_i915_private *dev_priv = dev->dev_private;
1114 
1115 	/* OSOL_i915 Begin */
1116 	struct drm_i915_bridge_dev *bridge_dev = &dev_priv->bridge_dev;
1117 	char name[32];
1118 	int i, err;
1119 
1120 	if (INTEL_INFO(dev)->gen >= 6)
1121 		return 0;
1122 
1123 	if (bridge_dev->ldi_id) {
1124 		DRM_DEBUG("end");
1125 		return 0;
1126 	}
1127 
1128 	if (ldi_ident_from_dip(dev->devinfo, &bridge_dev->ldi_id)) {
1129 		bridge_dev->ldi_id = NULL;
1130 		DRM_DEBUG("failed");
1131 		return -1;
1132 	};
1133 
1134 	/* Workaround here:
1135 	 * agptarget0 is not always linked to the right device
1136 	 * try agptarget1 if failed at agptarget0
1137 	 */
1138 	for (i = 0; i < 16; i++) {
1139 		(void) sprintf(name, "/dev/agp/agptarget%d", i);
1140 		err = ldi_open_by_name(name, 0, kcred,
1141 		    &bridge_dev->bridge_dev_hdl, bridge_dev->ldi_id);
1142 		if (err == 0) {
1143 			break;
1144 		}
1145 		DRM_INFO("can't open agptarget%d", i);
1146 	}
1147 
1148 	if (err) {
1149 		ldi_ident_release(bridge_dev->ldi_id);
1150 		bridge_dev->ldi_id = NULL;
1151 		bridge_dev->bridge_dev_hdl = NULL;
1152 		return -1;
1153 	}
1154 	/* OSOL_i915 End */
1155 
1156 	return 0;
1157 }
1158 
/*
 * Read a 16-bit value from the bridge's PCI config space via the
 * agptarget driver.  The ioctl takes the offset in and returns the
 * data through the same buffer.  Returns 0 on success, -1 on failure.
 */
int i915_bridge_dev_read_config_word(struct drm_i915_bridge_dev *bridge_dev, int where, u16 *val)
{
	u16 reg = (u16)where;
	int rc;

	rc = ldi_ioctl(bridge_dev->bridge_dev_hdl,
	    AGP_TARGET_PCICONFIG_GET16, (intptr_t)&reg, FKIOCTL, kcred, 0);
	if (rc != 0)
		return -1;

	*val = reg;
	return 0;
}
1170 
/*
 * Write a 16-bit value to the bridge's PCI config space via the
 * agptarget driver.  Returns 0 on success, -1 on failure.
 */
int i915_bridge_dev_write_config_word(struct drm_i915_bridge_dev *bridge_dev, int where, u16 val)
{
	/* The ioctl packs the offset in the high 16 bits, value in the low. */
	u32 packed = ((u32)(u16)where << 16) | val;

	return (ldi_ioctl(bridge_dev->bridge_dev_hdl,
	    AGP_TARGET_PCICONFIG_SET16, (intptr_t)&packed,
	    FKIOCTL, kcred, 0) != 0) ? -1 : 0;
}
1181 
1182 /* true = enable decode, false = disable decoder */
1183 /* LINTED */
i915_vga_set_decode(void * cookie,bool state)1184 static unsigned int i915_vga_set_decode(void *cookie, bool state)
1185 {
1186 	struct drm_device *dev = cookie;
1187 
1188 	(void) intel_modeset_vga_set_state(dev, state);
1189 	if (state)
1190 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1191 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1192 	else
1193 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1194 }
1195 
1196 
/*
 * One-time KMS bring-up: parse the VBIOS, reserve stolen memory, install
 * the IRQ handler, initialize modesetting and GEM, and (optionally) set
 * up the fbdev console.  Failures unwind through the goto chain at the
 * bottom in reverse order of setup.  Returns 0 or a negative errno.
 */
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* VBIOS tables are optional; continue with defaults if absent. */
	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto out;

	/* clear interrupt related bits */
	if (dev->driver->irq_uninstall)
		dev->driver->irq_uninstall(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;


	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;
	/* No display pipes: nothing more to set up, report success. */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->mm.suspended = 0;
		return 0;
	}

	if (dev_priv->fbcon_obj != NULL) {
		ret = intel_fbdev_init(dev);
		if (ret)
		goto cleanup_gem;

		drm_register_fbops(dev);
	}

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	if (dev_priv->fbcon_obj != NULL)
		intel_fbdev_initial_config(dev);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return 0;

	/* Error unwind: reverse order of the setup above. */
cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_aliasing_ppgtt(dev);
	drm_mm_takedown(&dev_priv->mm.gtt_space);
cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
out:
	return ret;
}
1288 
1289 /* LINTED */
i915_master_create(struct drm_device * dev,struct drm_master * master)1290 int i915_master_create(struct drm_device *dev, struct drm_master *master)
1291 {
1292 	struct drm_i915_master_private *master_priv;
1293 
1294 	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1295 	if (!master_priv)
1296 		return -ENOMEM;
1297 
1298 	master->driver_priv = master_priv;
1299 	return 0;
1300 }
1301 
1302 /* LINTED */
i915_master_destroy(struct drm_device * dev,struct drm_master * master)1303 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1304 {
1305 	struct drm_i915_master_private *master_priv = master->driver_priv;
1306 
1307 	if (!master_priv)
1308 		return;
1309 
1310 	kfree(master_priv, sizeof(struct drm_i915_master_private));
1311 
1312 	master->driver_priv = NULL;
1313 }
1314 
1315 
1316 /* OSOL_i915 Begin */
1317 #define pci_dev_put(d) do_pci_dev_put(&(d))
1318 
do_pci_dev_put(struct drm_i915_bridge_dev * bridge_dev)1319 void do_pci_dev_put(struct drm_i915_bridge_dev *bridge_dev)
1320 {
1321 	if (bridge_dev->bridge_dev_hdl) {
1322 		(void) ldi_close(bridge_dev->bridge_dev_hdl, 0, kcred);
1323 		bridge_dev->bridge_dev_hdl = NULL;
1324 	}
1325 
1326 	if (bridge_dev->ldi_id) {
1327 		ldi_ident_release(bridge_dev->ldi_id);
1328 		bridge_dev->ldi_id = NULL;
1329 	}
1330 }
1331 /* OSOL_i915 End */
1332 
1333 /**
1334  * intel_early_sanitize_regs - clean up BIOS state
1335  * @dev: DRM device
1336  *
1337  * This function must be called before we do any I915_READ or I915_WRITE. Its
1338  * purpose is to clean up any state left by the BIOS that may affect us when
1339  * reading and/or writing registers.
1340  */
intel_early_sanitize_regs(struct drm_device * dev)1341 static void intel_early_sanitize_regs(struct drm_device *dev)
1342 {
1343 	struct drm_i915_private *dev_priv = dev->dev_private;
1344 
1345 	if (HAS_FPGA_DBG_UNCLAIMED(dev))
1346 		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1347 }
1348 
1349 /**
1350  * i915_driver_load - setup chip and create an initial config
1351  * @dev: DRM device
1352  * @flags: startup flags
1353  *
1354  * The driver load routine has to do several things:
1355  *   - drive output discovery via intel_modeset_init()
1356  *   - initialize the memory manager
1357  *   - allocate initial config memory
1358  *   - setup the DRM framebuffer with the allocated memory
1359  */
i915_driver_load(struct drm_device * dev,unsigned long flags)1360 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1361 {
1362 	struct drm_i915_private *dev_priv;
1363 	struct intel_device_info *info;
1364 	resource_size_t base, size;
1365 	int ret = 0, mmio_bar;
1366 
1367 	info = (struct intel_device_info *) flags;
1368 
1369 	/* Refuse to load on gen6+ without kms enabled. */
1370 	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1371 		return -ENODEV;
1372 
1373 	/* i915 has 4 more counters */
1374 	dev->counters += 4;
1375 	dev->types[6] = _DRM_STAT_IRQ;
1376 	dev->types[7] = _DRM_STAT_PRIMARY;
1377 	dev->types[8] = _DRM_STAT_SECONDARY;
1378 	dev->types[9] = _DRM_STAT_DMA;
1379 
1380 	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
1381 	if (dev_priv == NULL)
1382 		return -ENOMEM;
1383 
1384 	dev->dev_private = (void *)dev_priv;
1385 	dev_priv->dev = dev;
1386 	dev_priv->info = info;
1387 
1388 	dev_priv->info = (struct intel_device_info *) flags;
1389 
1390 	/* Add register map (needed for suspend/resume) */
1391 	mmio_bar = IS_GEN2(dev) ? 1 : 0;
1392 	base = drm_get_resource_start(dev, mmio_bar);
1393 	size = drm_get_resource_len(dev, mmio_bar);
1394 
1395 	dev_priv->regs = drm_alloc(sizeof (drm_local_map_t), DRM_MEM_MAPS);
1396 	dev_priv->regs->offset = base;
1397 	dev_priv->regs->size = size;
1398 	dev_priv->regs->type = _DRM_REGISTERS;
1399 	dev_priv->regs->flags = _DRM_REMOVABLE;
1400 	if (drm_ioremap(dev, dev_priv->regs)) {
1401 		ret = -EIO;
1402 		goto put_bridge;
1403 	}
1404 
1405 	DRM_DEBUG("mmio paddr=%lx, kvaddr=%p", dev_priv->regs->offset, dev_priv->regs->handle);
1406 
1407 	intel_early_sanitize_regs(dev);
1408 	/* The i915 workqueue is primarily used for batched retirement of
1409 	 * requests (and thus managing bo) once the task has been completed
1410 	 * by the GPU. i915_gem_retire_requests() is called directly when we
1411 	 * need high-priority retirement, such as waiting for an explicit
1412 	 * bo.
1413 	 *
1414 	 * It is also used for periodic low-priority events, such as
1415 	 * idle-timers and hangcheck.
1416 	 *
1417 	 * All tasks on the workqueue are expected to acquire the dev mutex
1418 	 * so there is no point in running more than one instance of the
1419 	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
1420 	 */
1421 	dev_priv->wq = create_workqueue(dev->devinfo, "i915");
1422 	if (dev_priv->wq == NULL) {
1423 		DRM_ERROR("Failed to create i915 workqueue.\n");
1424 		ret = -ENOMEM;
1425 		goto out_rmmap;
1426 	}
1427 
1428 	/* The i915 workqueue is primarily used for page_flip and fbc */
1429 	dev_priv->other_wq = create_workqueue(dev->devinfo, "i915_other");
1430 	if (dev_priv->other_wq == NULL) {
1431 		DRM_ERROR("Failed to create i915_other workqueue.\n");
1432 		ret = -ENOMEM;
1433 		goto out_mtrrfree;
1434 	}
1435 
1436 	/* This must be called before any calls to HAS_PCH_* */
1437 	intel_detect_pch(dev);
1438 
1439 	intel_irq_init(dev);
1440 	intel_pm_init(dev);
1441 	intel_gt_sanitize(dev);
1442 	intel_gt_init(dev);
1443 
1444 	if (intel_setup_gmbus(dev) != 0)
1445 		goto out_mtrrfree;
1446 
1447 	/* Make sure the bios did its job and set up vital registers */
1448 	intel_setup_bios(dev);
1449 
1450 	i915_gem_load(dev);
1451 
1452 	/* On the 945G/GM, the chipset reports the MSI capability on the
1453 	 * integrated graphics even though the support isn't actually there
1454 	 * according to the published specs.  It doesn't appear to function
1455 	 * correctly in testing on 945G.
1456 	 * This may be a side effect of MSI having been made available for PEG
1457 	 * and the registers being closely associated.
1458 	 *
1459 	 * According to chipset errata, on the 965GM, MSI interrupts may
1460 	 * be lost or delayed, but we use them anyways to avoid
1461 	 * stuck interrupts on some machines.
1462 	 */
1463 	/* Fix me: Failed to get interrupts after resume, when enable msi */
1464 	/*
1465 	if (!IS_I945G(dev) && !IS_I945GM(dev))
1466 		pci_enable_msi(dev->pdev);
1467 	*/
1468 	spin_lock_init(&dev_priv->irq_lock);
1469 	spin_lock_init(&dev_priv->gpu_error.lock);
1470 	spin_lock_init(&dev_priv->rps.lock);
1471 	spin_lock_init(&dev_priv->dpio_lock);
1472 
1473 	spin_lock_init(&dev_priv->rps.hw_lock);
1474 	spin_lock_init(&dev_priv->modeset_restore_lock);
1475 
1476 	dev_priv->num_plane = 1;
1477 	if (IS_VALLEYVIEW(dev))
1478 		dev_priv->num_plane = 2;
1479 
1480 	if (INTEL_INFO(dev)->num_pipes) {
1481 		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
1482 		if (ret)
1483 			goto out_gem_unload;
1484 	}
1485 
1486 	/* Start out suspended */
1487 	dev_priv->mm.suspended = 1;
1488 
1489 	if (HAS_POWER_WELL(dev))
1490 		i915_init_power_well(dev);
1491 
1492 	if (IS_GEN6(dev) || IS_GEN7(dev))
1493 		i915_try_reset = true;
1494 
1495 	dev_priv->gpu_hang = 0;
1496 
1497 	init_timer(&dev_priv->gpu_top_timer);
1498 	setup_timer(&dev_priv->gpu_top_timer, gpu_top_handler,
1499 			(void *) dev);
1500 
1501 	if (MDB_TRACK_ENABLE)
1502 		INIT_LIST_HEAD(&dev_priv->batch_list);
1503 
1504 	return 0;
1505 
1506 out_gem_unload:
1507 	destroy_workqueue(dev_priv->other_wq);
1508 out_mtrrfree:
1509 	destroy_workqueue(dev_priv->wq);
1510 out_rmmap:
1511 	drm_ioremapfree(dev_priv->regs);
1512 put_bridge:
1513 	kfree(dev_priv, sizeof(struct drm_i915_private));
1514 	return ret;
1515 }
1516 
/*
 * Driver teardown: idle the GPU, stop the workqueues and timers, then
 * (if the device was ever opened) release the KMS/GEM resources, and
 * finally free the register map and the private structure itself.
 * Always returns 0.
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (HAS_POWER_WELL(dev))
		i915_remove_power_well(dev);

	/* Quiesce the GPU and retire outstanding requests before teardown. */
	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
	i915_gem_retire_requests(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Stop deferred work before freeing anything it might touch. */
	destroy_workqueue(dev_priv->other_wq);
	destroy_workqueue(dev_priv->wq);

	del_timer_sync(&dev_priv->gpu_top_timer);
	destroy_timer(&dev_priv->gpu_top_timer);
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	destroy_timer(&dev_priv->gpu_error.hangcheck_timer);

/* XXXX rebracket after this is tested */
	/*
	 * Uninitialized GTT indicates that i915 never opens.
	 * So we should not try to release the resources
	 * which are only allocated in i915_driver_firstopen.
	 */
	/* NOTE: the body of this if is deliberately left un-reindented;
	 * see the XXXX note above. */
	if (dev_priv->gtt.total !=0) {

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		(void) drm_irq_uninstall(dev);
		/* XXX FIXME vga_client_register(dev->pdev, NULL, NULL, NULL); */
	}

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	i915_free_hws(dev);//XXX should still be here ??

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (dev_priv->fbcon_obj != NULL)
			intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);

		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);
		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
		i915_gem_lastclose(dev);
		if (dev_priv->gtt.scratch_page)
			i915_teardown_scratch_page(dev);
		if (dev_priv->fbcon_obj != NULL) {
			i915_gem_free_object(&dev_priv->fbcon_obj->base);
			dev_priv->fbcon_obj = NULL;
		}
	}
	}
	drm_mm_takedown(&dev_priv->mm.gtt_space);
	dev_priv->gtt.gtt_remove(dev);

	if (dev_priv->regs != NULL)
		(void) drm_rmmap(dev, dev_priv->regs);

	mutex_destroy(&dev_priv->irq_lock);

	/* Close the agptarget LDI handle (see do_pci_dev_put()). */
	pci_dev_put(dev_priv->bridge_dev);

	/* Drain the MDB debug tracking list, freeing each record. */
	if (MDB_TRACK_ENABLE) {
		struct batch_info_list *r_list, *list_temp;
		list_for_each_entry_safe(r_list, list_temp, struct batch_info_list, &dev_priv->batch_list, head) {
			list_del(&r_list->head);
			drm_free(r_list->obj_list, r_list->num * sizeof(caddr_t), DRM_MEM_MAPS);
			drm_free(r_list, sizeof (struct batch_info_list), DRM_MEM_MAPS);
		}
		list_del(&dev_priv->batch_list);
	}
	kfree(dev->dev_private, sizeof(drm_i915_private_t));
	dev->dev_private = NULL;

	return 0;
}
1606 
/*
 * Called on the first open() of the device node.  Performs one-time
 * initialization (bridge handle, aperture base, GTT, and — under KMS —
 * the full modeset bring-up), then marks the device as in use by X.
 */
int
i915_driver_firstopen(struct drm_device *dev)
{
	/* One-time guard; on failure below we return before clearing it,
	 * so the next open() retries the setup. */
	static bool first_call = true;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev->pdev;
	u32 aperbase;
	int ret = 0;

	if (first_call) {
		/* OSOL_i915: moved from i915_driver_load */

		if (i915_get_bridge_dev(dev)) {
			DRM_ERROR("i915_get_bridge_dev() failed.");
			return -EIO;
		}

		/*
		 * AGP has been removed for GEN6+,
		 * So we read the agp base and size here.
		 */
		if (INTEL_INFO(dev)->gen >= 6) {
			pci_read_config_dword(pdev, GEN6_CONF_GMADR, &aperbase);
			dev->agp_aperbase = aperbase & GEN6_GTT_BASE_MASK;
		} else {
			dev->agp_aperbase = dev->agp->agp_info.agpi_aperbase;
		}

		ret = i915_gem_gtt_init(dev);
		if (ret) {
			DRM_ERROR("Failed to initialize GTT\n");
			pci_dev_put(dev_priv->bridge_dev);
			ret = -ENODEV;
			return ret;
		}

		if (drm_core_check_feature(dev, DRIVER_MODESET)) {
			ret = i915_load_modeset_init(dev);
			if (ret < 0) {
				DRM_ERROR("failed to init modeset\n");
				pci_dev_put(dev_priv->bridge_dev);
				return ret;
			}
		}
	}

	dev_priv->isX = 1;
	first_call = false;
	return ret;
}
1657 
1658 /* LINTED */
i915_driver_open(struct drm_device * dev,struct drm_file * file_priv)1659 int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1660 {
1661 	struct drm_i915_file_private *i915_file_priv;
1662 
1663 	DRM_DEBUG_DRIVER("\n");
1664 	i915_file_priv = (struct drm_i915_file_private *)
1665 	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
1666 
1667 	if (!i915_file_priv)
1668 		return -ENOMEM;
1669 
1670 	spin_lock_init(&i915_file_priv->mm.lock);
1671 	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1672 
1673 	idr_init(&i915_file_priv->context_idr);
1674 
1675 	i915_file_priv->status = 1;
1676 	file_priv->driver_priv = i915_file_priv;
1677 
1678 	return 0;
1679 }
1680 
1681 /**
1682  * i915_driver_lastclose - clean up after all DRM clients have exited
1683  * @dev: DRM device
1684  *
1685  * Take care of cleaning up after all DRM clients have exited.  In the
1686  * mode setting case, we want to restore the kernel's initial mode (just
1687  * in case the last client left us in a bad state).
1688  *
1689  * Additionally, in the non-mode setting case, we'll tear down the AGP
1690  * and DMA structures, since the kernel won't be using them, and clea
1691  * up any GEM state.
1692  */
i915_driver_lastclose(struct drm_device * dev)1693 void i915_driver_lastclose(struct drm_device * dev)
1694 {
1695 	drm_i915_private_t *dev_priv = dev->dev_private;
1696 
1697 	/* On gen6+ we refuse to init without kms enabled, but then the drm core
1698 	 * goes right around and calls lastclose. Check for this and don't clean
1699 	 * up anything. */
1700 	if (!dev_priv)
1701 		return;
1702 	dev_priv->isX = 0;
1703 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1704 		intel_fb_restore_mode(dev);
1705 		return;
1706 	}
1707 
1708 	i915_gem_lastclose(dev);
1709 
1710 	(void) i915_dma_cleanup(dev);
1711 }
1712 
/*
 * Per-client teardown run before the file is closed: drop the client's
 * hardware contexts, then release its outstanding GEM requests.
 */
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}
1718 
i915_driver_entervt(struct drm_device * dev)1719 void i915_driver_entervt(struct drm_device *dev)
1720 {
1721 	struct drm_i915_private *dev_priv = dev->dev_private;
1722 
1723 	/* Do nothing when coming back from high-res mode (VESA)*/
1724 	if (dev_priv->fbcon_obj)
1725 		return;
1726 
1727 	/* Need to do full modeset from VGA TEXT mode */
1728 	if (dev_priv->vt_holding > 0) {
1729 		(void) i915_restore_state(dev);
1730 		if (IS_HASWELL(dev))
1731 			intel_modeset_setup_hw_state(dev, false);
1732 		else
1733 			intel_modeset_setup_hw_state(dev, true);
1734 	}
1735 	dev_priv->vt_holding = 0;
1736 }
1737 
i915_driver_leavevt(struct drm_device * dev)1738 void i915_driver_leavevt(struct drm_device *dev)
1739 {
1740 	drm_i915_private_t *dev_priv = dev->dev_private;
1741 
1742 	if (dev_priv->fbcon_obj)
1743 		return;
1744 
1745 	(void) i915_save_state(dev);
1746 
1747 	if (IS_HASWELL(dev))
1748 		intel_modeset_disable(dev);
1749 
1750 	dev_priv->vt_holding = 1;
1751 }
1752 
/*
 * Final per-client teardown: free the private state allocated in
 * i915_driver_open().
 */
/* LINTED */
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	kfree(i915_file_priv, sizeof(*i915_file_priv));
	file_priv->driver_priv = NULL;
}
1760 
#ifdef _MULTI_DATAMODEL
/*
 * Build one entry of the driver ioctl table.  On a multi-data-model
 * kernel (64-bit kernel with 32-bit userland) each entry also carries
 * optional copyin/copyout hooks that convert 32-bit argument layouts.
 */
#define I915_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = _copyin32, .copyout32 = _copyout32}
#else
#define I915_IOCTL_DEF(ioctl, _func, _flags, _copyin32, _copyout32) \
	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {.cmd = ioctl, .flags = _flags, .func = _func, .copyin32 = NULL, .copyout32 = NULL}
#endif

/*
 * Driver-private ioctl dispatch table, indexed by ioctl number relative
 * to DRM_COMMAND_BASE.  Flags: DRM_AUTH requires an authenticated
 * client, DRM_MASTER/DRM_ROOT_ONLY restrict who may call, and
 * DRM_UNLOCKED entries run without the global DRM lock.
 */
struct drm_ioctl_desc i915_ioctls[] = {
	I915_IOCTL_DEF(DRM_IOCTL_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_FLUSH, i915_flush_ioctl, DRM_AUTH, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_FLIP, i915_flip_bufs, DRM_AUTH, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH, copyin32_i915_batchbuffer, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH, copyin32_i915_irq_emit, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GETPARAM, i915_getparam, DRM_AUTH, copyin32_i915_getparam, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_ALLOC, drm_noop, DRM_AUTH, copyin32_i915_mem_alloc, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_FREE, drm_noop, DRM_AUTH, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH, copyin32_i915_cmdbuffer, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED, NULL, NULL),
	I915_IOCTL_DEF(DRM_IOCTL_I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED, NULL, NULL),
};

/* Number of entries in the table above; exported to the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1820 
1821 /**
1822  * Determine if the device really is AGP or not.
1823  *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e based.
 */
/* LINTED */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	/* Unconditionally reports AGP; see the block comment above. */
	return 1;
}
1831