/*
 * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2009, 2013, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include <vm/seg_kmem.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

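/*
 * Illustrative sketch only (not part of this file): a driver "create"
 * ioctl built on these routines might allocate an object and hand the
 * handle back to userland roughly as follows, assuming a hypothetical
 * args structure with "size" and "handle" members:
 *
 *	struct drm_gem_object *obj;
 *	u32 handle;
 *	int ret;
 *
 *	obj = drm_gem_object_alloc(dev, args->size);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_handle_create(file, obj, &handle);
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 *	return 0;
 *
 * The handle created above holds its own reference on the object, so the
 * local reference taken by drm_gem_object_alloc() is dropped immediately.
 */
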
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)

int drm_use_mem_pool = 0;
/* memory pool is used for all platforms now */
#define	HAS_MEM_POOL(gen)	((gen > 30) && (drm_use_mem_pool))

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{

	spin_lock_init(&dev->object_name_lock);
	idr_list_init(&dev->object_name_idr);

	gfxp_mempool_init();

	return 0;
}

void
/* LINTED */
drm_gem_destroy(struct drm_device *dev)
{
}

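/*
 * Free the backing store of a GEM object: the pfn array plus either the
 * gfxp memory pool allocation or the DDI DMA handle/memory binding,
 * depending on how the object was allocated.
 */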
static void
drm_gem_object_free_internal(struct drm_gem_object *obj, int gen)
{
	if (obj->pfnarray != NULL)
		kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));
	if (HAS_MEM_POOL(gen)) {
		gfxp_free_mempool(&obj->mempool_cookie, obj->kaddr, obj->real_size);
	} else {
		(void) ddi_dma_unbind_handle(obj->dma_hdl);
		ddi_dma_mem_free(&obj->acc_hdl);
		ddi_dma_free_handle(&obj->dma_hdl);
	}
	obj->kaddr = NULL;
}

static ddi_dma_attr_t old_dma_attr = {
	DMA_ATTR_V0,
	0,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	4096,				/* dma_attr_align */
	0x1fffU,			/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	DDI_DMA_FLAGERR,		/* dma_attr_flags */
};

static ddi_device_acc_attr_t old_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_MERGING_OK_ACC,
	DDI_FLAGERR_ACC
};

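/*
 * Allocate the object's backing store through the DDI DMA interfaces:
 * allocate and bind a DMA handle, then record the physical page frame
 * number of every backing page in obj->pfnarray.
 */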
static int
drm_gem_object_alloc_internal_normal(struct drm_device *dev, struct drm_gem_object *obj,
				size_t size, int flag)
{
	ddi_dma_cookie_t cookie;
	uint_t cookie_cnt;
	pgcnt_t real_pgcnt, pgcnt = btopr(size);
	uint64_t paddr, cookie_end;
	int i, n;
	int (*cb)(caddr_t);
	ddi_device_acc_attr_t *acc_attr;
	ddi_dma_attr_t *dma_attr;
	uint_t mode_flag;

	acc_attr = &old_acc_attr;
	dma_attr = &old_dma_attr;
	mode_flag = IOMEM_DATA_UC_WR_COMBINE;

	cb = (flag == 0) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
	dma_attr->dma_attr_sgllen = (int)pgcnt;

	if (ddi_dma_alloc_handle(dev->devinfo, dma_attr,
	    cb, NULL, &obj->dma_hdl)) {
		DRM_ERROR("ddi_dma_alloc_handle failed");
		goto err1;
	}
	if (ddi_dma_mem_alloc(obj->dma_hdl, ptob(pgcnt), acc_attr,
	    mode_flag, cb, NULL,
	    &obj->kaddr, &obj->real_size, &obj->acc_hdl)) {
		DRM_ERROR("ddi_dma_mem_alloc failed");
		goto err2;
	}
	if (ddi_dma_addr_bind_handle(obj->dma_hdl, NULL,
	    obj->kaddr, obj->real_size, DDI_DMA_RDWR,
	    cb, NULL, &cookie, &cookie_cnt)
	    != DDI_DMA_MAPPED) {
		DRM_ERROR("ddi_dma_addr_bind_handle failed");
		goto err3;
	}

	real_pgcnt = btopr(obj->real_size);

	obj->pfnarray = kmem_zalloc(real_pgcnt * sizeof (pfn_t), KM_NOSLEEP);
	if (obj->pfnarray == NULL) {
		DRM_DEBUG("pfnarray == NULL");
		goto err4;
	}

	for (n = 0, i = 1; ; i++) {
		for (paddr = cookie.dmac_laddress,
		    cookie_end = cookie.dmac_laddress + cookie.dmac_size;
		    paddr < cookie_end;
		    paddr += PAGESIZE) {
			obj->pfnarray[n++] = btop(paddr);
			if (n >= real_pgcnt)
				return (0);
		}
		if (i >= cookie_cnt)
			break;
		ddi_dma_nextcookie(obj->dma_hdl, &cookie);
	}

err4:
	(void) ddi_dma_unbind_handle(obj->dma_hdl);
err3:
	ddi_dma_mem_free(&obj->acc_hdl);
err2:
	ddi_dma_free_handle(&obj->dma_hdl);
err1:
	return (-1);
}

/* Alloc GEM object by memory pool */
static int
drm_gem_object_alloc_internal_mempool(struct drm_gem_object *obj,
				size_t size, int flag)
{
	int ret;
	pgcnt_t pgcnt = btopr(size);

	obj->pfnarray = kmem_zalloc(pgcnt * sizeof (pfn_t), KM_NOSLEEP);
	if (obj->pfnarray == NULL) {
		DRM_ERROR("Failed to allocate pfnarray ");
		return (-1);
	}

	ret = gfxp_alloc_from_mempool(&obj->mempool_cookie, &obj->kaddr,
					obj->pfnarray, pgcnt, flag);
	if (ret) {
		DRM_ERROR("Failed to alloc pages from memory pool");
		kmem_free(obj->pfnarray, pgcnt * sizeof (pfn_t));
		return (-1);
	}

	obj->real_size = size;
	return (0);
}

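/*
 * Allocate backing store for a GEM object, either from the gfxp memory
 * pool or through the DDI DMA path, then verify that the kernel mapping
 * really starts at the first recorded pfn.  On a mismatch the allocation
 * is released and retried a few times before giving up.
 */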
static int
drm_gem_object_internal(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size, int gen)
{
	pfn_t tmp_pfn;
	int ret, num = 0;

alloc_again:
	if (HAS_MEM_POOL(gen)) {
		uint32_t mode;
		if (gen >= 60)
			mode = GFXP_MEMORY_CACHED;
		else
			mode = GFXP_MEMORY_WRITECOMBINED;
		ret = drm_gem_object_alloc_internal_mempool(obj, size, mode);
		if (ret)
			return (-1);
	} else {
		ret = drm_gem_object_alloc_internal_normal(dev, obj, size, 0);
		if (ret)
			return (-1);
	}
	tmp_pfn = hat_getpfnum(kas.a_hat, obj->kaddr);
	if (tmp_pfn != obj->pfnarray[0]) {
		DRM_ERROR("obj %p map incorrect 0x%lx != 0x%lx",
		    (void *)obj, tmp_pfn, obj->pfnarray[0]);
		drm_gem_object_free_internal(obj, gen);
		udelay(150);

		if (num++ < 5)
			goto alloc_again;
		else
			return (-1);
	}

	return (0);
}
/*
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int
drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
				size_t size, int gen)
{
	drm_local_map_t *map;
	int ret;

	if (size == 0) {
		DRM_DEBUG("size == 0");
		return (-1);
	}

	obj->dev = dev;
	obj->size = size;

	ret = drm_gem_object_internal(dev, obj, size, gen);
	if (ret)
		return (-1);

	map = drm_alloc(sizeof (struct drm_local_map), DRM_MEM_MAPS);
	if (map == NULL) {
		DRM_DEBUG("map == NULL");
		goto err5;
	}

	map->handle = obj;
	map->offset = (uintptr_t)map->handle;
	map->offset &= 0xffffffffUL;
	map->size = obj->real_size;
	map->type = _DRM_GEM;
	map->callback = 0;
	map->flags = _DRM_WRITE_COMBINING | _DRM_REMOVABLE;
	map->umem_cookie =
	    gfxp_umem_cookie_init(obj->kaddr, obj->real_size);
	if (map->umem_cookie == NULL) {
		DRM_DEBUG("umem_cookie == NULL");
		goto err6;
	}

	obj->maplist.map = map;
	if (drm_map_handle(dev, &obj->maplist)) {
		DRM_DEBUG("drm_map_handle failed");
		goto err7;
	}

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);

	if (MDB_TRACK_ENABLE) {
		INIT_LIST_HEAD(&obj->track_list);
		spin_lock(&dev->track_lock);
		list_add_tail(&obj->track_list, &dev->gem_objects_list, (caddr_t)obj);
		spin_unlock(&dev->track_lock);

		INIT_LIST_HEAD(&obj->his_list);
		drm_gem_object_track(obj, "obj init", 0, 0, NULL);
	}

	INIT_LIST_HEAD(&obj->seg_list);

	return (0);

err7:
	gfxp_umem_cookie_destroy(map->umem_cookie);
err6:
	drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
err5:
	drm_gem_object_free_internal(obj, gen);
	return (-1);
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size, 0) != 0) {
		kmem_free(obj, sizeof (struct drm_gem_object));
		return NULL;
	}

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	drm_gem_object_release(obj);
	kfree(obj, sizeof(*obj));
free:
	return NULL;
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_list_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	(void) idr_list_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int	ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_list_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_list_get_new_above(&file_priv->object_idr, (void *)obj, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			(void) drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
/* LINTED */
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_list_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}

/**
 * Releases the handle to an mm object.
 */
int
/* LINTED */
drm_gem_close_ioctl(DRM_IOCTL_ARGS)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
/* LINTED */
drm_gem_flink_ioctl(DRM_IOCTL_ARGS)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_list_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_list_get_new_above(&dev->object_name_idr, (void *) obj,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
/* LINTED */
drm_gem_open_ioctl(DRM_IOCTL_ARGS)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_list_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
/* LINTED */
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_list_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
/* LINTED */
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
/* LINTED E_FUNC_ARG_UNUSED */
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	struct idr_list  *entry;
	struct drm_gem_object *obj;

	idr_list_for_each(entry, &file_private->object_idr) {
		obj = (struct drm_gem_object *)entry->obj;
		(void) drm_gem_object_release_handle(obj->name, obj, (void *)file_private);
	}
	idr_list_free(&file_private->object_idr);
}

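/**
 * Release the resources backing a GEM object: the MDB tracking entries
 * (if enabled), the map-handle idr entry, the umem cookie, the
 * drm_local_map, the pfn array, and finally the memory pool or DDI DMA
 * allocation that backs the pages.
 */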
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_local_map *map = obj->maplist.map;

	if (MDB_TRACK_ENABLE) {
		spin_lock(&dev->track_lock);
		list_del(&obj->track_list);
		spin_unlock(&dev->track_lock);

		struct drm_history_list *r_list, *list_temp;
		list_for_each_entry_safe(r_list, list_temp, struct drm_history_list, &obj->his_list, head) {
			list_del(&r_list->head);
			drm_free(r_list, sizeof (struct drm_history_list), DRM_MEM_MAPS);
		}
		list_del(&obj->his_list);
	}

	(void) idr_remove(&dev->map_idr, obj->maplist.user_token >> PAGE_SHIFT);
	gfxp_umem_cookie_destroy(map->umem_cookie);
	drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);

	kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));

	if (obj->dma_hdl == NULL) {
		gfxp_free_mempool(&obj->mempool_cookie, obj->kaddr, obj->real_size);
	} else {
		(void) ddi_dma_unbind_handle(obj->dma_hdl);
		ddi_dma_mem_free(&obj->acc_hdl);
		ddi_dma_free_handle(&obj->dma_hdl);
	}
	obj->kaddr = NULL;
}

/**
 * Called after the last reference to the object has been lost.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	/* LINTED */
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

//	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}

/* LINTED E_FUNC_ARG_UNUSED */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG_ON(1);
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void
drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		(void) idr_list_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}

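/**
 * Reserve a kernel virtual address range large enough to map the object;
 * the range is populated later by drm_gem_mmap().
 */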
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	obj->gtt_map_kaddr = gfxp_alloc_kernel_space(obj->real_size);
	if (obj->gtt_map_kaddr == NULL) {
		return -ENOMEM;
	}
	return 0;
}

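/**
 * Map the object's pages, starting at the given pfn, into the kernel
 * address range reserved by drm_gem_create_mmap_offset().
 */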
void
drm_gem_mmap(struct drm_gem_object *obj, pfn_t pfn)
{
	ASSERT(obj->gtt_map_kaddr != NULL);
	/* Does hat_devload() */
	gfxp_load_kernel_space(pfn, obj->real_size, GFXP_MEMORY_WRITECOMBINED, obj->gtt_map_kaddr);
}

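/**
 * Unload the mapping established by drm_gem_mmap(); the reserved kernel
 * address range itself remains allocated.
 */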
void
drm_gem_release_mmap(struct drm_gem_object *obj)
{
	ASSERT(obj->gtt_map_kaddr != NULL);
	/* Does hat_unload() */
	gfxp_unload_kernel_space(obj->gtt_map_kaddr, obj->real_size);
}

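/**
 * Tear down the object's GTT mmap state and release the reserved kernel
 * address range.
 */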
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct ddi_umem_cookie *umem_cookie = obj->maplist.map->umem_cookie;
	umem_cookie->cvaddr = obj->kaddr;

	if (obj->maplist.map->gtt_mmap == 0) {
		gfxp_free_kernel_space(obj->gtt_map_kaddr, obj->real_size);
		DRM_DEBUG("already freed, don't free more than once!");
	}

	if (obj->maplist.map->gtt_mmap == 1) {
		gfxp_unmap_kernel_space(obj->gtt_map_kaddr, obj->real_size);
		obj->maplist.map->gtt_mmap = 0;
	}

	obj->gtt_map_kaddr = NULL;
}

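/*
 * Record a history entry (name plus sequence numbers and ring pointer) on
 * the object's MDB tracking list; used for debugging with MDB_TRACK_ENABLE.
 */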
void
drm_gem_object_track(struct drm_gem_object *obj, const char *name,
			uint32_t cur_seq, uint32_t last_seq, void *ptr)
{
	struct drm_history_list *list;
	list = drm_alloc(sizeof (struct drm_history_list), DRM_MEM_MAPS);
	if (list != NULL) {
		(void) memcpy(list->info, name, (strlen(name) * sizeof(char)));
		list->cur_seq = cur_seq;
		list->last_seq = last_seq;
		list->ring_ptr = ptr;
		list_add_tail(&list->head, &obj->his_list, (caddr_t)list);
	}
}
808