xref: /gfx-drm/usr/src/uts/common/io/drm/drm_sunmod.c (revision e49fc716)
/*
 * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 */

/*
 * Common misc module interfaces of DRM under Solaris
 */

/*
 * This module calls into the gfx and agpmaster misc modules for
 * generic graphics operations and AGP master device support,
 * respectively.
 */

#include "drm_sunmod.h"
#include "drm_sun_idr.h"
#include <sys/modctl.h>
#include <sys/kmem.h>
#include <vm/seg_kmem.h>

int drm_debug_flag = 0;
int mdb_track_enable = B_FALSE;

/* Identifier of this driver */
static struct vis_identifier text_ident = { "SUNWdrm" };

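/*
 * Device access attributes: dev_attr (strict ordering, no merging) is
 * used for the register, framebuffer, AGP, and scatter/gather mappings
 * below; gem_dev_attr permits write merging for GEM object memory.
 */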
static ddi_device_acc_attr_t dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

static ddi_device_acc_attr_t gem_dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_MERGING_OK_ACC,
	DDI_FLAGERR_ACC
};

extern int __init drm_core_init(void);
extern void __exit drm_core_exit(void);
extern int drm_get_pci_index_reg(dev_info_t *, uint_t, uint_t, off_t *);

struct find_gem_object {
	offset_t offset;
	struct drm_gem_object *obj;
};

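/*
 * devmap_map(9E)-style callback, invoked when a mapping using
 * drm_devmap_callbacks is first created.  Establishes the initial
 * reference count on the umem cookie backing the devmap handle.
 */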
static int
drm_devmap_map(devmap_cookie_t dhc, dev_t dev_id, uint_t flags,
    offset_t offset, size_t len, void **new_priv)
{
	_NOTE(ARGUNUSED(offset, len))

	devmap_handle_t *dhp;
	struct ddi_umem_cookie *cp;
	struct drm_minor *minor;
	struct drm_device *dev;
	int minor_id;

	minor_id = DRM_DEV2MINOR(dev_id);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return (ENODEV);
	dev = minor->dev;

	/*
	 * This driver only supports MAP_SHARED;
	 * MAP_PRIVATE is not supported.
	 */
	if (flags & MAP_PRIVATE) {
		DRM_ERROR("MAP_PRIVATE is not supported");
		return (EINVAL);
	}

	mutex_enter(&dev->struct_mutex);
	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	ASSERT(cp->cook_refcnt == 0);
	cp->cook_refcnt++;
	mutex_exit(&dev->struct_mutex);

	/*
	 * This is the first reference via this devmap handle.
	 * If needed, we could take a ref on some device-internal
	 * object here, and release it in drm_devmap_unmap().
	 */
	DRM_DEBUG("first ref dev=%p cp=%p", dev, cp);

	*new_priv = dev;
	return (0);
}

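/*
 * Called when a devmap handle is duplicated, e.g. across fork(2).
 * Takes an additional hold on the shared umem cookie.
 */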
static int
drm_devmap_dup(devmap_cookie_t dhc, void *pvtp, devmap_cookie_t new_dhc,
    void **new_pvtp)
{
	_NOTE(ARGUNUSED(new_dhc))

	struct drm_device *dev = (struct drm_device *)pvtp;
	devmap_handle_t *dhp;
	struct ddi_umem_cookie *cp;

	mutex_enter(&dev->struct_mutex);
	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	cp->cook_refcnt++;
	mutex_exit(&dev->struct_mutex);

	*new_pvtp = dev;
	return (0);
}

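/*
 * Called when (part of) a mapping is torn down.  new_dhp1/new_dhp2
 * identify the handles covering whatever remains on either side of the
 * unmapped range; each surviving handle takes its own hold on the
 * cookie before the outgoing handle's hold is dropped.
 */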
static void
drm_devmap_unmap(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
    devmap_cookie_t new_dhp1, void **new_pvtp1, devmap_cookie_t new_dhp2,
    void **new_pvtp2)
{
	_NOTE(ARGUNUSED(off, len))

	struct drm_device *dev;
	devmap_handle_t *dhp;
	devmap_handle_t *ndhp;
	struct ddi_umem_cookie *cp;
	struct ddi_umem_cookie *ncp;
	boolean_t last_ref = B_FALSE;

	dhp = (devmap_handle_t *)dhc;
	dev = (struct drm_device *)pvtp;

	mutex_enter(&dev->struct_mutex);

	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	if (new_dhp1 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp1;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp1 = dev;
		ASSERT(ncp == cp);
	}
	if (new_dhp2 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp2;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp2 = dev;
		ASSERT(ncp == cp);
	}

	ASSERT(cp->cook_refcnt > 0);
	cp->cook_refcnt--;
	if (cp->cook_refcnt == 0) {
		last_ref = B_TRUE;
	}

	mutex_exit(&dev->struct_mutex);

	if (last_ref) {
		/*
		 * This is the last reference to this device
		 * via this devmap handle, so drop whatever
		 * reference was taken in drm_devmap_map().
		 *
		 * Previous versions of this code released
		 * dhp->dh_cookie in here, but that's handled
		 * outside of devmap_map/_unmap.  Given that,
		 * we probably don't need this callback at all,
		 * though it's somewhat useful for debugging.
		 */
		DRM_DEBUG("last ref dev=%p cp=%p", dev, cp);
	}
}

static struct devmap_callback_ctl drm_devmap_callbacks = {
	DEVMAP_OPS_REV,			/* devmap_rev */
	drm_devmap_map,			/* devmap_map */
	NULL,				/* devmap_access */
	drm_devmap_dup,			/* devmap_dup */
	drm_devmap_unmap		/* devmap_unmap */
};

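/*
 * Look up a drm_local_map by its (page-aligned) fake mmap offset.
 * Maps are registered in dev->map_idr keyed by offset >> PAGE_SHIFT.
 */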
static struct drm_local_map *
__find_local_map(struct drm_device *dev, offset_t offset)
{
	struct drm_map_list *entry;

	entry = idr_find(&dev->map_idr, offset >> PAGE_SHIFT);
	if (entry)
		return (entry->map);

	return (NULL);
}

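/*
 * devmap "map" callback for GEM mappings.  Resolves the fake offset to
 * a local map, starts the cookie reference count, and takes a hold on
 * the underlying GEM object for the lifetime of the handle.
 */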
static int
drm_gem_map(devmap_cookie_t dhc, dev_t dev_id, uint_t flags,
    offset_t off, size_t len, void **pvtp)
{
	_NOTE(ARGUNUSED(flags, len))

	devmap_handle_t *dhp;
	struct ddi_umem_cookie *cp;
	struct drm_gem_object *obj;
	struct drm_device *dev;
	struct drm_minor *minor;
	int minor_id = DRM_DEV2MINOR(dev_id);
	drm_local_map_t *map = NULL;

	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return (ENODEV);
	if (!minor->dev)
		return (ENODEV);

	dev = minor->dev;

	mutex_enter(&dev->struct_mutex);
	map = __find_local_map(dev, off);
	if (!map) {
		mutex_exit(&dev->struct_mutex);
		*pvtp = NULL;
		return (DDI_EINVAL);
	}

	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	ASSERT(cp->cook_refcnt == 0);
	cp->cook_refcnt++;

	mutex_exit(&dev->struct_mutex);

	/*
	 * First reference via this devmap handle.  Take a ref on the
	 * GEM object, and release it in drm_gem_unmap() when the last
	 * ref from this devmap handle goes away.  This corresponds to
	 * code in drm_gem_vm_open() in the Linux driver.
	 */
	obj = map->handle;
	drm_gem_object_reference(obj);

	DRM_DEBUG("first ref obj=%p cp=%p", obj, cp);

	*pvtp = obj;

	return (DDI_SUCCESS);
}

/*
 * This is called by segdev_fault() to fault in pages for the given
 * offset+len.  If the (GTT) device range has been configured with a
 * fake offset for mmap, call the device's gem_fault handler to set up
 * the GTT resources for this mapping.
 *
 * We should always call devmap_load(), as we're just interposing
 * on these fault calls to (sometimes) set up GTT resources.
 */
static int
drm_gem_map_access(devmap_cookie_t dhp, void *pvt, offset_t offset, size_t len,
		uint_t type, uint_t rw)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;
	struct gem_map_list *seg;

	obj = (struct drm_gem_object *)pvt;

	/*
	 * Only call the driver fault handler after gtt_map_kaddr
	 * has been set, i.e. by i915_gem_mmap_gtt(), or we'll
	 * panic trying to map pages at address zero.
	 */
	if (obj != NULL && obj->gtt_map_kaddr != NULL) {
		dev = obj->dev;
		/* Could also check map->callback */
		if (dev->driver->gem_fault != NULL)
			dev->driver->gem_fault(obj);
	}

	/* Do what devmap_default_access(9F) would do internally. */
	if (devmap_load(dhp, offset, len, type, rw)) {
		return (DDI_FAILURE);
	}

	/*
	 * Save the list of loaded translations for later use in
	 * e.g. i915_gem_release_mmap().
	 */
	if (obj != NULL) {
		dev = obj->dev;
		seg = drm_alloc(sizeof (struct gem_map_list), DRM_MEM_MAPS);
		if (seg != NULL) {
			mutex_lock(&dev->page_fault_lock);
			seg->dhp = dhp;
			seg->mapoffset = offset;
			seg->maplen = len;
			list_add_tail(&seg->head, &obj->seg_list, (caddr_t)seg);
			mutex_unlock(&dev->page_fault_lock);
		}
	}
	return (DDI_SUCCESS);
}

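/*
 * Duplicate callback for GEM mappings; like drm_devmap_dup(), but the
 * private data handed to the new mapping is the GEM object itself.
 */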
static int
drm_gem_dup(devmap_cookie_t dhc, void *pvt, devmap_cookie_t new_dhc,
    void **new_pvtp)
{
	_NOTE(ARGUNUSED(new_dhc))

	struct drm_gem_object *obj = pvt;
	struct drm_device *dev = obj->dev;
	devmap_handle_t *dhp;
	struct ddi_umem_cookie *cp;

	mutex_enter(&dev->struct_mutex);
	dhp = (devmap_handle_t *)dhc;
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	cp->cook_refcnt++;
	mutex_exit(&dev->struct_mutex);

	*new_pvtp = obj;
	return (0);
}

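/*
 * Teardown callback for GEM mappings: unload any translations recorded
 * by drm_gem_map_access(), rebalance cookie holds for any surviving
 * sub-ranges, and on the last hold drop the GEM object reference taken
 * in drm_gem_map().
 */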
static void
drm_gem_unmap(devmap_cookie_t dhc, void *pvt, offset_t off, size_t len,
		devmap_cookie_t new_dhp1, void **new_pvtp1,
		devmap_cookie_t new_dhp2, void **new_pvtp2)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	devmap_handle_t *ndhp;
	struct ddi_umem_cookie *cp;
	struct ddi_umem_cookie *ncp;
	struct drm_device *dev;
	struct drm_gem_object *obj;
	struct gem_map_list *entry, *temp;
	boolean_t last_ref = B_FALSE;

	_NOTE(ARGUNUSED(off, len))

	obj = (struct drm_gem_object *)pvt;
	if (obj == NULL)
		return;

	dev = obj->dev;

	/*
	 * Unload what drm_gem_map_access loaded.
	 *
	 * XXX: The devmap_unload() here is probably unnecessary,
	 * as segdev_unmap() has done a hat_unload() for the
	 * entire segment by the time we get here.
	 */
	mutex_lock(&dev->page_fault_lock);
	if (!list_empty(&obj->seg_list)) {
		list_for_each_entry_safe(entry, temp, struct gem_map_list,
		    &obj->seg_list, head) {
			(void) devmap_unload(entry->dhp, entry->mapoffset,
			    entry->maplen);
			list_del(&entry->head);
			drm_free(entry, sizeof (struct gem_map_list),
			    DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->page_fault_lock);

	/*
	 * Manage dh_cookie ref counts.
	 */
	mutex_enter(&dev->struct_mutex);
	cp = (struct ddi_umem_cookie *)dhp->dh_cookie;
	if (new_dhp1 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp1;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp1 = obj;
		ASSERT(ncp == cp);
	}
	if (new_dhp2 != NULL) {
		ndhp = (devmap_handle_t *)new_dhp2;
		ncp = (struct ddi_umem_cookie *)ndhp->dh_cookie;
		ncp->cook_refcnt++;
		*new_pvtp2 = obj;
		ASSERT(ncp == cp);
	}
	ASSERT(cp->cook_refcnt > 0);
	cp->cook_refcnt--;
	if (cp->cook_refcnt == 0) {
		last_ref = B_TRUE;
	}
	mutex_exit(&dev->struct_mutex);

	if (last_ref) {
		/*
		 * This is the last reference to this GEM object
		 * via this devmap handle, so drop the reference
		 * taken in drm_gem_map().  This corresponds to
		 * code in drm_gem_vm_close() in the Linux driver.
		 */
		DRM_DEBUG("last ref obj=%p cp=%p", obj, cp);
		drm_gem_object_unreference(obj);
	}
}

static struct devmap_callback_ctl drm_gem_map_ops = {
	DEVMAP_OPS_REV,		/* devmap_ops version number */
	drm_gem_map,		/* devmap_ops map routine */
	drm_gem_map_access,	/* devmap_ops access routine */
	drm_gem_dup,		/* devmap_ops dup routine */
	drm_gem_unmap,		/* devmap_ops unmap routine */
};

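/*
 * Map device memory (registers or framebuffer): translate the map
 * offset into a PCI BAR regnum/offset pair, then hand the range to
 * devmap_devmem_setup(9F) with strict-order access attributes.
 */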
static int
__devmap_general(struct drm_device *dev, devmap_cookie_t dhp,
    struct drm_local_map *map, size_t len, size_t *maplen)
{
	off_t regoff;
	int regno, ret;

	regno = drm_get_pci_index_reg(dev->devinfo,
	    map->offset, (uint_t)len, &regoff);
	if (regno < 0) {
		DRM_ERROR("drm_get_pci_index_reg() failed");
		return (-EINVAL);
	}

	ret = devmap_devmem_setup(dhp, dev->devinfo, NULL,
	    regno, (offset_t)regoff, len, PROT_ALL,
	    0, &dev_attr);
	if (ret != DDI_SUCCESS) {
		DRM_ERROR("devmap_devmem_setup failed, ret=%d", ret);
		return (-EFAULT);
	}

	*maplen = len;
	return (0);
}

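/*
 * Map a _DRM_SHM area (typically the shared SAREA): kernel memory with
 * a umem cookie, exported page-aligned and cached.
 */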
static int
__devmap_shm(struct drm_device *dev, devmap_cookie_t dhp,
    struct drm_local_map *map, size_t len, size_t *maplen)
{
	int ret;

	if (!map->umem_cookie)
		return (-EINVAL);

	len = ptob(btopr(map->size));

	ret = devmap_umem_setup(dhp, dev->devinfo,
	    NULL, map->umem_cookie, 0, len, PROT_ALL,
	    IOMEM_DATA_CACHED, NULL);
	if (ret != DDI_SUCCESS) {
		DRM_ERROR("devmap_umem_setup failed, ret=%d", ret);
		return (-EFAULT);
	}

	*maplen = len;
	return (0);
}

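/*
 * Map AGP aperture memory, uncached, with drm_devmap_callbacks
 * attached so cookie reference counts stay balanced across dup/unmap
 * of the mapping.
 */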
static int
__devmap_agp(struct drm_device *dev, devmap_cookie_t dhp,
    struct drm_local_map *map, size_t len, size_t *maplen)
{
	int ret;

	if (dev->agp == NULL) {
		DRM_ERROR("attempted to mmap AGP "
		    "memory before AGP support is enabled");
		return (-ENODEV);
	}

	len = ptob(btopr(len));

	ret = devmap_umem_setup(dhp, dev->devinfo,
	    &drm_devmap_callbacks, map->umem_cookie, 0, len, PROT_ALL,
	    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
	if (ret != DDI_SUCCESS) {
		DRM_ERROR("devmap_umem_setup() failed, ret=%d", ret);
		return (-EFAULT);
	}

	*maplen = len;
	return (0);
}

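/*
 * Map a scatter/gather area; like the AGP case, but bounded by the
 * size of the backing map.
 */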
static int
__devmap_sg(struct drm_device *dev, devmap_cookie_t dhp,
    struct drm_local_map *map, size_t len, size_t *maplen)
{
	int ret;

	len = ptob(btopr(len));
	if (len > map->size) {
		DRM_ERROR("offset=0x%lx, virtual=0x%p, "
		    "mapsize=0x%lx, len=0x%lx",
		    map->offset, dev->sg->virtual, map->size, len);
		return (-EINVAL);
	}

	ret = devmap_umem_setup(dhp, dev->devinfo,
	    &drm_devmap_callbacks, map->umem_cookie, 0, len, PROT_ALL,
	    IOMEM_DATA_UNCACHED | DEVMAP_ALLOW_REMAP, &dev_attr);
	if (ret != DDI_SUCCESS) {
		DRM_ERROR("devmap_umem_setup() failed, ret=%d", ret);
		return (-EFAULT);
	}

	*maplen = len;
	return (0);
}

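/*
 * Map a GEM object through gfxp_devmap_umem_setup() (the gfx_private
 * variant of devmap_umem_setup(9F)), attaching drm_gem_map_ops so
 * faults can populate GTT resources on demand.  Write combining is
 * permitted for these mappings.
 */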
static int
__devmap_gem(struct drm_device *dev, devmap_cookie_t dhp,
    struct drm_local_map *map, size_t *maplen)
{
	int ret;

	if (!map->umem_cookie)
		return (-EINVAL);

	ret = gfxp_devmap_umem_setup(dhp, dev->devinfo, &drm_gem_map_ops,
	    map->umem_cookie, 0, map->size, PROT_ALL,
	    IOMEM_DATA_UC_WR_COMBINE | DEVMAP_ALLOW_REMAP, &gem_dev_attr);
	if (ret != DDI_SUCCESS) {
		DRM_ERROR("gfxp_devmap_umem_setup failed, ret=%d", ret);
		return (-EFAULT);
	}

	*maplen = map->size;
	return (0);
}

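/*
 * open(9E) entry point.  For DRM minors this implements "clone"
 * semantics: each open gets a private clone ID (encoded in the
 * returned dev_t) so that every open has a matching close.
 */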
static int
drm_sun_open(dev_t *dev_id, int flag, int otyp, cred_t *credp)
{
	_NOTE(ARGUNUSED(otyp))

	int minor_id = DRM_DEV2MINOR(*dev_id);
	struct drm_minor *minor;
	int clone_id;
	int ret;

	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return (ENODEV);
	if (!minor->dev)
		return (ENODEV);

	/*
	 * No operations for VGA & AGP master devices, always return OK.
	 */
	if (DRM_MINOR_IS_VGATEXT(minor_id))
		return (0);

	if (DRM_MINOR_IS_AGPMASTER(minor_id))
		return (0);

	/*
	 * The DRM driver implements a software lock to serialize access
	 * to graphics hardware at per-process granularity.  Before
	 * operating on graphics hardware, all clients, including the
	 * kernel and applications, must acquire this lock via the
	 * DRM_IOCTL_LOCK ioctl, and release it via DRM_IOCTL_UNLOCK
	 * after finishing.  The DRM driver grants r/w permission to the
	 * process which acquires this lock (the kernel is assumed to
	 * have process ID 0).
	 *
	 * A process might be terminated without releasing the drm lock;
	 * in this case, the DRM driver is responsible for clearing the
	 * hold.  To be informed of process exit, the DRM driver uses
	 * clone opens to guarantee that each call to open(9E) has one
	 * corresponding call to close(9E).  In most cases, a process
	 * closes drm during process termination, so the DRM driver gets
	 * a chance to release the drm lock.
	 *
	 * In fact, a driver cannot know exactly when a process exits.
	 * Clone opens don't address this issue completely: because of
	 * inheritance, child processes inherit file descriptors from
	 * their parent.  As a result, if the parent exits before its
	 * children, the drm close(9E) entry point won't be called until
	 * all of its children terminate.
	 *
	 * Another issue brought up by inheritance is that the PID of
	 * the process calling the drm close() entry point may not be
	 * the same as the one which called open().  The per-process
	 * struct is allocated when a process first opens drm, and
	 * released on the process's last close of drm.  Since open()
	 * and close() may come from different processes, the PID cannot
	 * be used as the key to look up the per-process struct.  So we
	 * associate a clone ID with the per-process struct at open()
	 * time, and find the corresponding struct via that ID when
	 * close() is called.
	 */
	ret = idr_get_new_above(&minor->clone_idr, NULL, 0, &clone_id);
	if (ret)
		return (EMFILE);

	if (clone_id > DRM_CLONEID_MAX) {
		(void) idr_remove(&minor->clone_idr, clone_id);
		return (EMFILE);
	}

	ret = drm_open(minor, clone_id, flag, credp);
	if (ret) {
		(void) idr_remove(&minor->clone_idr, clone_id);
		return (-ret);
	}

	*dev_id = DRM_MAKEDEV(getmajor(*dev_id), minor_id, clone_id);

	return (0);
}

static int
drm_sun_close(dev_t dev_id, int flag, int otyp, cred_t *credp)
{
	_NOTE(ARGUNUSED(flag, otyp, credp))

	struct drm_minor *minor;
	struct drm_file *file_priv;
	int minor_id = DRM_DEV2MINOR(dev_id);
	int clone_id = DRM_DEV2CLONEID(dev_id);
	int ret = 0;

	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return (ENODEV);
	if (!minor->dev)
		return (ENODEV);

	/*
	 * No operations for VGA & AGP master devices, always return OK.
	 */
	if (DRM_MINOR_IS_VGATEXT(minor_id))
		return (0);

	if (DRM_MINOR_IS_AGPMASTER(minor_id))
		return (0);

	file_priv = idr_find(&minor->clone_idr, clone_id);
	if (!file_priv)
		return (EBADF);

	ret = drm_release(file_priv);
	if (ret)
		return (-ret);

	(void) idr_remove(&minor->clone_idr, clone_id);

	return (0);
}

static int
drm_sun_ioctl(dev_t dev_id, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	struct drm_minor *minor;
	struct drm_file *file_priv;
	int minor_id = DRM_DEV2MINOR(dev_id);
	int clone_id = DRM_DEV2CLONEID(dev_id);

	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return (ENODEV);
	if (!minor->dev)
		return (ENODEV);

	if (cmd == VIS_GETIDENTIFIER) {
		if (ddi_copyout(&text_ident, (void *)arg,
		    sizeof (struct vis_identifier), mode))
			return (EFAULT);
	}

	if (DRM_MINOR_IS_VGATEXT(minor_id))
		return (gfxp_vgatext_ioctl(dev_id, cmd, arg, mode, credp,
		    rvalp, minor->private));

	if (DRM_MINOR_IS_AGPMASTER(minor_id))
		return (agpmaster_ioctl(dev_id, cmd, arg, mode, credp,
		    rvalp, minor->private));

	file_priv = idr_find(&minor->clone_idr, clone_id);
	if (!file_priv)
		return (EBADF);

	return (-(drm_ioctl(dev_id, file_priv, cmd, arg, mode, credp)));
}

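/*
 * devmap(9E) entry point: route the request to the mapping routine
 * matching the local map's type.
 */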
static int
drm_sun_devmap(dev_t dev_id, devmap_cookie_t dhp, offset_t offset,
    size_t len, size_t *maplen, uint_t model)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	struct drm_file *file_priv;
	int minor_id = DRM_DEV2MINOR(dev_id);
	int clone_id = DRM_DEV2CLONEID(dev_id);
	drm_local_map_t *map = NULL;

	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return (ENODEV);
	if (!minor->dev)
		return (ENODEV);

	dev = minor->dev;

	if (DRM_MINOR_IS_VGATEXT(minor_id))
		return (gfxp_vgatext_devmap(dev_id, dhp, offset, len,
		    maplen, model, minor->private));

	if (DRM_MINOR_IS_AGPMASTER(minor_id))
		return (ENOTSUP);

	file_priv = idr_find(&minor->clone_idr, clone_id);
	if (!file_priv)
		return (EBADF);

	mutex_enter(&dev->struct_mutex);
	map = __find_local_map(dev, offset);
	if (!map) {
		mutex_exit(&dev->struct_mutex);
		return (EFAULT);
	}
	if (map->flags & _DRM_RESTRICTED) {
		mutex_exit(&dev->struct_mutex);
		return (ENOTSUP);
	}
	mutex_exit(&dev->struct_mutex);

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		return (__devmap_general(dev, dhp, map, len, maplen));

	case _DRM_SHM:
		return (__devmap_shm(dev, dhp, map, len, maplen));

	case _DRM_AGP:
		return (__devmap_agp(dev, dhp, map, len, maplen));

	case _DRM_SCATTER_GATHER:
		return (__devmap_sg(dev, dhp, map, len, maplen));

	case _DRM_GEM:
		return (__devmap_gem(dev, dhp, map, maplen));

	default:
		break;
	}

	return (ENOTSUP);
}

static int
drm_sun_read(dev_t dev_id, struct uio *uiop, cred_t *credp)
{
	_NOTE(ARGUNUSED(credp))

	struct drm_minor *minor;
	struct drm_file *file_priv;
	int minor_id = DRM_DEV2MINOR(dev_id);
	int clone_id = DRM_DEV2CLONEID(dev_id);

	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return (ENODEV);
	if (!minor->dev)
		return (ENODEV);

	/*
	 * No operations for VGA & AGP master devices, always return OK.
	 */
	if (DRM_MINOR_IS_VGATEXT(minor_id))
		return (0);

	if (DRM_MINOR_IS_AGPMASTER(minor_id))
		return (0);

	file_priv = idr_find(&minor->clone_idr, clone_id);
	if (!file_priv)
		return (EBADF);

	(void) drm_read(file_priv, uiop);
	return (0);
}

static int
drm_sun_chpoll(dev_t dev_id, short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	struct drm_minor *minor;
	struct drm_file *file_priv;
	int minor_id = DRM_DEV2MINOR(dev_id);
	int clone_id = DRM_DEV2CLONEID(dev_id);

	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return (ENODEV);
	if (!minor->dev)
		return (ENODEV);

	/*
	 * No operations for VGA & AGP master devices, always return OK.
	 */
	if (DRM_MINOR_IS_VGATEXT(minor_id))
		return (0);

	if (DRM_MINOR_IS_AGPMASTER(minor_id))
		return (0);

	file_priv = idr_find(&minor->clone_idr, clone_id);
	if (!file_priv)
		return (EBADF);

	if (!anyyet) {
		*phpp = &file_priv->drm_pollhead;
	}

	*reventsp = drm_poll(file_priv, events);
	return (0);
}

/*
 * Common device operations structure for all DRM drivers
 */
struct cb_ops drm_cb_ops = {
	drm_sun_open,			/* cb_open */
	drm_sun_close,			/* cb_close */
	nodev,				/* cb_strategy */
	nodev,				/* cb_print */
	nodev,				/* cb_dump */
	drm_sun_read,			/* cb_read */
	nodev,				/* cb_write */
	drm_sun_ioctl,			/* cb_ioctl */
	drm_sun_devmap,			/* cb_devmap */
	nodev,				/* cb_mmap */
	NULL,				/* cb_segmap */
	drm_sun_chpoll,			/* cb_chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* cb_stream */
	D_NEW | D_MTSAFE | D_DEVMAP	/* cb_flag */
};
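
/*
 * A leaf DRM driver plugs drm_cb_ops into its own dev_ops.  A minimal
 * sketch, with hypothetical names that are not defined in this module:
 *
 *	static struct dev_ops hypothetical_dev_ops = {
 *		.devo_rev = DEVO_REV,
 *		.devo_cb_ops = &drm_cb_ops,
 *		...
 *	};
 */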

static struct modlmisc modlmisc = {
	&mod_miscops, "DRM common interfaces"
};

static struct modlinkage modlinkage = {
	MODREV_1, { (void *)&modlmisc, NULL }
};

int
_init(void)
{
	int ret;

	ret = mod_install(&modlinkage);
	if (ret)
		return (ret);

	return (drm_core_init());
}

int
_fini(void)
{
	int ret;

	ret = mod_remove(&modlinkage);
	if (ret)
		return (ret);

	drm_core_exit();

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

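/*
 * Find a local map by its user token (the fake mmap offset handed to
 * userland), by walking the device's map list.
 */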
struct drm_local_map *
drm_core_findmap(struct drm_device *dev, unsigned int token)
{
	struct drm_map_list *_entry;

	list_for_each_entry(_entry, struct drm_map_list, &dev->maplist, head) {
		if (_entry->user_token == token)
			return (_entry->map);
	}

	return (NULL);
}
917