/*
 * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
 */

/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009, 2012, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm_io32.h"

#ifdef _LP64
extern caddr_t smmap64(caddr_t, size_t, int, int, int, off_t);
#define	drm_smmap	smmap64
#elif defined(_SYSCALL32_IMPL) || defined(_ILP32)
extern caddr_t smmap32(caddr32_t, size32_t, int, int, int, off32_t);
#define	drm_smmap	smmap32
#else
#error "No define for _LP64, _SYSCALL32_IMPL or _ILP32"
#endif

#define	PAGE_MASK	(~(PAGE_SIZE - 1))
#define	round_page(x)	(((x) + (PAGE_SIZE - 1)) & PAGE_MASK)
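/*
 * Example (assuming 4 KB pages, i.e. PAGE_SIZE == 0x1000):
 * round_page(0x1234) == 0x2000 and round_page(0x1000) == 0x1000.
 */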

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
    struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, struct drm_map_list, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we ignore the map
		 * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that each driver will have only one resource of
		 * each type.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

int drm_map_handle(struct drm_device *dev, struct drm_map_list *list)
{
	int newid, ret;

	ret = idr_get_new_above(&dev->map_idr, list, 1, &newid);
	if (ret < 0)
		return ret;

	list->user_token = newid << PAGE_SHIFT;
	return 0;
}
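
/*
 * Illustration (assuming 4 KB pages, PAGE_SHIFT == 12): the first map
 * registered gets idr id 1 and therefore user_token 0x1000.  The token
 * is the page-aligned, 32-bit-safe handle that userspace hands back
 * (e.g. as an mmap(2) offset) to refer to this map.
 */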

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, unsigned long offset,
    unsigned long size, enum drm_map_type type,
    enum drm_map_flags flags,
    struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	/* LINTED */
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map, sizeof(*map));
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
	    (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map, sizeof(*map));
		return -EINVAL;
	}

	if (map->offset + map->size < map->offset) {
		kfree(map, sizeof(*map));
		return -EINVAL;
	}

	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it. Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
				    "mismatched sizes, (%ld vs %ld)\n",
				    map->type, map->size,
				    list->map->size);
				list->map->size = map->size;
			}

			kfree(map, sizeof(struct drm_local_map));
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map, sizeof(struct drm_local_map));
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
				    "mismatched sizes, (%ld vs %ld)\n",
				    map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map, sizeof(struct drm_local_map));
			*maplist = list;
			return 0;
		}
		map->handle = ddi_umem_alloc(map->size, DDI_UMEM_NOSLEEP, &map->umem_cookie);
		DRM_DEBUG("%lu %p\n",
		    map->size, map->handle);
		if (!map->handle) {
			kfree(map, sizeof(struct drm_local_map));
			return -ENOMEM;
		}
		map->offset = (uintptr_t)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				ddi_umem_free(map->umem_cookie);
				kfree(map, sizeof(struct drm_local_map));
				return -EBUSY;
			}
			dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		caddr_t kvaddr;

		if (!drm_core_has_AGP(dev)) {
			kfree(map, sizeof(struct drm_local_map));
			return -EINVAL;
		}

		map->offset += dev->agp->base;
		kvaddr = gfxp_alloc_kernel_space(map->size);
		if (!kvaddr) {
			DRM_ERROR("failed to alloc AGP aperture");
			kfree(map, sizeof(struct drm_local_map));
			return -EPERM;
		}
		gfxp_load_kernel_space(map->offset, map->size,
		    GFXP_MEMORY_WRITECOMBINED, kvaddr);
		map->handle = (void *)(uintptr_t)kvaddr;
		map->umem_cookie = gfxp_umem_cookie_init(map->handle, map->size);
		if (!map->umem_cookie) {
			DRM_ERROR("gfxp_umem_cookie_init() failed");
			gfxp_unmap_kernel_space(map->handle, map->size);
			kfree(map, sizeof(struct drm_local_map));
			return (-ENOMEM);
		}
		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map, sizeof(struct drm_local_map));
			return -EINVAL;
		}
		map->offset += (uintptr_t)dev->sg->virtual;
		map->handle = (void *)map->offset;
		map->umem_cookie = gfxp_umem_cookie_init(map->handle, map->size);
		if (!map->umem_cookie) {
			DRM_ERROR("gfxp_umem_cookie_init() failed");
			kfree(map, sizeof(struct drm_local_map));
			return (-ENOMEM);
		}
		break;
	case _DRM_CONSISTENT:
		DRM_ERROR("%d DRM_AGP_CONSISTENT", __LINE__);
		kfree(map, sizeof(struct drm_local_map));
		return -ENOTSUP;
	default:
		kfree(map, sizeof(struct drm_local_map));
		return -EINVAL;
	}

	list = kmalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map, sizeof(struct drm_local_map));
		return -EINVAL;
	}
	(void) memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist, (caddr_t)list);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
	    map->offset;
	ret = drm_map_handle(dev, list);
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map, sizeof(struct drm_local_map));
		kfree(list, sizeof(struct drm_map_list));
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_addmap(struct drm_device *dev, unsigned long offset,
    unsigned long size, enum drm_map_type type,
    enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
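
/*
 * Sketch of a driver-side call (hypothetical 'regs_base'/'regs_size'
 * values, e.g. read from a PCI BAR): map the device registers and keep
 * the resulting map around for programming the hardware later.
 *
 *	struct drm_local_map *regs;
 *	int err = drm_addmap(dev, regs_base, regs_size,
 *	    _DRM_REGISTERS, 0, &regs);
 *	if (err != 0)
 *		return (err);
 */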

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
/* LINTED */
int drm_addmap_ioctl(DRM_IOCTL_ARGS)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(DRM_SUSER(credp) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
	    map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this cast isn't very nice, but the API
	 * was set long ago, so it's too late to change it now */
	map->handle = maplist->user_token;
	return 0;
}
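
/*
 * Userspace view (illustrative only; SAREA_MAX and DRM_IOCTL_ADD_MAP
 * come from the libdrm headers): create a shared-memory map and then
 * mmap(2) it via the 32-bit token returned in map.handle.
 *
 *	struct drm_map map = { 0 };
 *	void *sarea;
 *
 *	map.size = SAREA_MAX;
 *	map.type = _DRM_SHM;
 *	map.flags = _DRM_CONTAINS_LOCK;
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *		sarea = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, (off_t)(uintptr_t)map.handle);
 */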

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	/* LINTED */
	drm_dma_handle_t dmah;
	int found = 0;
	/* LINTED */
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, struct drm_map_list, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			(void) idr_remove(&dev->map_idr,
			    r_list->user_token >> PAGE_SHIFT);
			kfree(r_list, sizeof(struct drm_map_list));
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		break;
	case _DRM_SHM:
		ddi_umem_free(map->umem_cookie);
		break;
	case _DRM_AGP:
		gfxp_umem_cookie_destroy(map->umem_cookie);
		gfxp_unmap_kernel_space(map->handle, map->size);
		break;
	case _DRM_SCATTER_GATHER:
		gfxp_umem_cookie_destroy(map->umem_cookie);
		break;
	case _DRM_CONSISTENT:
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	kfree(map, sizeof(struct drm_local_map));

	return 0;
}

int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
/* LINTED */
int drm_rmmap_ioctl(DRM_IOCTL_ARGS)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, struct drm_map_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* Either the list wrapped around to the head pointer, or it's
	 * empty; in both cases we found nothing.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
/* LINTED */
static void drm_cleanup_buf_error(struct drm_device *dev,
    struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				DRM_ERROR(
				    "drm_cleanup_buf_error: not implemented");
			}
		}
		kfree(entry->seglist, entry->seg_count * sizeof (*entry->seglist));

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				kfree(entry->buflist[i].dev_private,
				    entry->buflist[i].dev_priv_size);
			}
		}
		kfree(entry->buflist, entry->buf_count * sizeof (*entry->buflist));

		entry->buf_count = 0;
	}
}

/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
/* LINTED */
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request, cred_t *credp)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
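
	/*
	 * Worked example (assuming 4 KB pages, PAGE_SHIFT == 12): a
	 * request->size of 0x3000 gives order 14, so size == 0x4000,
	 * page_order == 2, and total == PAGE_SIZE << 2 == 0x4000.
	 */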

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	(void) memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		(void) memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = kmalloc(
	    (dma->buf_count + entry->buf_count) *
	    sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	/* Preserve the existing buffer pointers: copy the old list into
	 * the new allocation (bcopy takes src, then dst) before freeing
	 * the old list.
	 */
	bcopy(dma->buflist, temp_buflist,
	    dma->buf_count * sizeof (*dma->buflist));
	kmem_free(dma->buflist, dma->buf_count * sizeof (*dma->buflist));
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request, cred_t *credp)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!DRM_SUSER(credp))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kmalloc(count * sizeof(*entry->buflist),
	    GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	(void) memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		(void) memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof (*dma->buflist),
	    (dma->buf_count + entry->buf_count)
	    * sizeof (*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
/* LINTED */
int drm_addbufs(DRM_IOCTL_ARGS)
{
	struct drm_buf_desc *request = data;
	int ret = -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request, credp);
	else if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request, credp);

	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
/* LINTED */
int drm_infobufs(DRM_IOCTL_ARGS)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
				    &from->buf_count,
				    sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
				    &from->buf_size,
				    sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
				    &list->low_mark,
				    sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
				    &list->high_mark,
				    sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
				    i,
				    dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
/* LINTED */
int drm_markbufs(DRM_IOCTL_ARGS)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
/* LINTED */
int drm_freebufs(DRM_IOCTL_ARGS)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof (idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
/* LINTED */
int drm_mapbufs(DRM_IOCTL_ARGS)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;
	uint_t size, foff;

#ifdef _MULTI_DATAMODEL
	struct drm_buf_pub_32 *list32;
	uint_t address32;
#endif

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
		    && (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			size = round_page(map->size);
			foff = (uintptr_t)map->handle;
		} else {
			size = round_page(dma->byte_count);
			foff = 0;
		}
		request->virtual = drm_smmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, request->fd, foff);
		if (request->virtual == NULL) {
			DRM_ERROR("request->virtual is NULL");
			retcode = -EINVAL;
			goto done;
		}

		virtual = (unsigned long) request->virtual;

#ifdef _MULTI_DATAMODEL
		if (ddi_model_convert_from(ioctl_mode & FMODELS) == DDI_MODEL_ILP32) {
			list32 = (drm_buf_pub_32_t *)(uintptr_t)request->list;
			for (i = 0; i < dma->buf_count; i++) {
				if (DRM_COPY_TO_USER(&list32[i].idx,
				    &dma->buflist[i]->idx,
				    sizeof (list32[0].idx))) {
					retcode = -EFAULT;
					goto done;
				}
				if (DRM_COPY_TO_USER(&list32[i].total,
				    &dma->buflist[i]->total,
				    sizeof (list32[0].total))) {
					retcode = -EFAULT;
					goto done;
				}
				if (DRM_COPY_TO_USER(&list32[i].used,
				    &zero, sizeof (zero))) {
					retcode = -EFAULT;
					goto done;
				}
				address32 = virtual + dma->buflist[i]->offset; /* *** */
				if (DRM_COPY_TO_USER(&list32[i].address,
				    &address32, sizeof (list32[0].address))) {
					retcode = -EFAULT;
					goto done;
				}
			}
		} else {
#endif
			for (i = 0; i < dma->buf_count; i++) {
				if (DRM_COPY_TO_USER(&request->list[i].idx,
				    &dma->buflist[i]->idx,
				    sizeof (request->list[0].idx))) {
					retcode = -EFAULT;
					goto done;
				}
				if (DRM_COPY_TO_USER(&request->list[i].total,
				    &dma->buflist[i]->total,
				    sizeof (request->list[0].total))) {
					retcode = -EFAULT;
					goto done;
				}
				if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
				    sizeof (zero))) {
					retcode = -EFAULT;
					goto done;
				}
				address = virtual + dma->buflist[i]->offset; /* *** */

				if (DRM_COPY_TO_USER(&request->list[i].address,
				    &address, sizeof (address))) {
					retcode = -EFAULT;
					goto done;
				}
			}
#ifdef _MULTI_DATAMODEL
		}
#endif
	}
done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/**
 * Compute size order. Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	if (size & (size - 1))
		++order;

	return order;
}
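
/*
 * Examples: drm_order(4096) == 12, drm_order(4097) == 13, and
 * drm_order(1) == 0, since 2^12 == 4096, 2^13 == 8192 >= 4097, and
 * 2^0 == 1 respectively.
 */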