1 /*
2 * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
3 */
4
5 /*
6 * Copyright (c) 2006, 2013 Intel Corporation. All rights reserved.
7 */
8
9 /**
10 * \file drm_context.c
11 * IOCTLs for generic contexts
12 *
13 * \author Rickard E. (Rik) Faith <faith@valinux.com>
14 * \author Gareth Hughes <gareth@valinux.com>
15 */
16
17 /*
18 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
19 *
20 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
21 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
22 * All Rights Reserved.
23 *
24 * Permission is hereby granted, free of charge, to any person obtaining a
25 * copy of this software and associated documentation files (the "Software"),
26 * to deal in the Software without restriction, including without limitation
27 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
28 * and/or sell copies of the Software, and to permit persons to whom the
29 * Software is furnished to do so, subject to the following conditions:
30 *
31 * The above copyright notice and this permission notice (including the next
32 * paragraph) shall be included in all copies or substantial portions of the
33 * Software.
34 *
35 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
37 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
38 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
39 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
40 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
41 * OTHER DEALINGS IN THE SOFTWARE.
42 */
43
44 /*
45 * ChangeLog:
46 * 2001-11-16 Torsten Duwe <duwe@caldera.de>
47 * added context constructor/destructor hooks,
48 * needed by SiS driver's memory management.
49 */
50
51 #include "drmP.h"
52 #include "drm_io32.h"
53
54 /******************************************************************/
55 /** \name Context bitmap support */
56 /*@{*/
57
58 /**
59 * Free a handle from the context bitmap.
60 *
61 * \param dev DRM device.
62 * \param ctx_handle context handle.
63 *
64 * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
65 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
66 * lock.
67 */
void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
	mutex_lock(&dev->struct_mutex);
	/*
	 * Removing an id that is absent is harmless, so the return
	 * value of idr_remove() is deliberately discarded.
	 */
	(void) idr_remove(&dev->ctx_idr, ctx_handle);
	mutex_unlock(&dev->struct_mutex);
}
74
75 /**
76 * Context bitmap allocation.
77 *
78 * \param dev DRM device.
79 * \return (non-negative) context handle on success or a negative number on failure.
80 *
81 * Allocate a new idr from drm_device::ctx_idr while holding the
82 * drm_device::struct_mutex lock.
83 */
drm_ctxbitmap_next(struct drm_device * dev)84 static int drm_ctxbitmap_next(struct drm_device * dev)
85 {
86 int new_id;
87 int ret;
88
89 again:
90 if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
91 DRM_ERROR("Out of memory expanding drawable idr\n");
92 return -ENOMEM;
93 }
94 mutex_lock(&dev->struct_mutex);
95 ret = idr_get_new_above(&dev->ctx_idr, NULL,
96 DRM_RESERVED_CONTEXTS, &new_id);
97 mutex_unlock(&dev->struct_mutex);
98 if (ret == -EAGAIN)
99 goto again;
100 else if (ret)
101 return ret;
102
103 return new_id;
104 }
105
106 /**
107 * Context bitmap initialization.
108 *
109 * \param dev DRM device.
110 *
111 * Initialise the drm_device::ctx_idr
112 */
int drm_ctxbitmap_init(struct drm_device * dev)
{
	/*
	 * No locking here; presumably runs during device setup before
	 * the idr is reachable from other paths -- NOTE(review):
	 * confirm against the caller.
	 */
	idr_init(&dev->ctx_idr);
	return 0;
}
118
119 /**
120 * Context bitmap cleanup.
121 *
122 * \param dev DRM device.
123 *
124 * Free all idr members using drm_ctx_sarea_free helper function
125 * while holding the drm_device::struct_mutex lock.
126 */
void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
	mutex_lock(&dev->struct_mutex);
	/* Drop every remaining context id in a single sweep. */
	idr_remove_all(&dev->ctx_idr);
	mutex_unlock(&dev->struct_mutex);
}
133
134 /*@}*/
135
136 /******************************************************************/
137 /** \name Per Context SAREA Support */
138 /*@{*/
139
140 /**
141 * Get per-context SAREA.
142 *
143 * \param inode device inode.
144 * \param file_priv DRM file private.
145 * \param cmd command.
146 * \param arg user argument pointing to a drm_ctx_priv_map structure.
147 * \return zero on success or a negative number on failure.
148 *
149 * Gets the map from drm_device::ctx_idr with the handle specified and
150 * returns its handle.
151 */
152 /* LINTED */
drm_getsareactx(DRM_IOCTL_ARGS)153 int drm_getsareactx(DRM_IOCTL_ARGS)
154 {
155 struct drm_ctx_priv_map *request = data;
156 struct drm_local_map *map;
157 struct drm_map_list *_entry;
158
159 mutex_lock(&dev->struct_mutex);
160
161 map = idr_find(&dev->ctx_idr, request->ctx_id);
162 if (!map) {
163 mutex_unlock(&dev->struct_mutex);
164 return -EINVAL;
165 }
166
167 request->handle = NULL;
168 list_for_each_entry(_entry, struct drm_map_list, &dev->maplist, head) {
169 if (_entry->map == map) {
170 request->handle =
171 (void *)(unsigned long)_entry->user_token;
172 break;
173 }
174 }
175
176 mutex_unlock(&dev->struct_mutex);
177
178 if (request->handle == NULL)
179 return -EINVAL;
180
181 return 0;
182 }
183
184 /**
185 * Set per-context SAREA.
186 *
187 * \param inode device inode.
188 * \param file_priv DRM file private.
189 * \param cmd command.
190 * \param arg user argument pointing to a drm_ctx_priv_map structure.
191 * \return zero on success or a negative number on failure.
192 *
193 * Searches the mapping specified in \p arg and update the entry in
194 * drm_device::ctx_idr with it.
195 */
196 /* LINTED */
drm_setsareactx(DRM_IOCTL_ARGS)197 int drm_setsareactx(DRM_IOCTL_ARGS)
198 {
199 struct drm_ctx_priv_map *request = data;
200 struct drm_local_map *map = NULL;
201 struct drm_map_list *r_list = NULL;
202
203 mutex_lock(&dev->struct_mutex);
204 list_for_each_entry(r_list, struct drm_map_list, &dev->maplist, head) {
205 if (r_list->map
206 && r_list->user_token == (unsigned long) request->handle)
207 goto found;
208 }
209 bad:
210 mutex_unlock(&dev->struct_mutex);
211 return -EINVAL;
212
213 found:
214 map = r_list->map;
215 if (!map)
216 goto bad;
217
218 if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
219 goto bad;
220
221 mutex_unlock(&dev->struct_mutex);
222
223 return 0;
224 }
225
226 /*@}*/
227
228 /******************************************************************/
229 /** \name The actual DRM context handling routines */
230 /*@{*/
231
232 /**
233 * Switch context.
234 *
235 * \param dev DRM device.
236 * \param old old context handle.
237 * \param new new context handle.
238 * \return zero on success or a negative number on failure.
239 *
240 * Attempt to set drm_device::context_flag.
241 */
drm_context_switch(struct drm_device * dev,int old,int new)242 static int drm_context_switch(struct drm_device * dev, int old, int new)
243 {
244 if (test_and_set_bit(0, &dev->context_flag)) {
245 DRM_ERROR("Reentering -- FIXME\n");
246 return -EBUSY;
247 }
248
249 DRM_DEBUG("Context switch from %d to %d\n", old, new);
250
251 if (new == dev->last_context) {
252 clear_bit(0, &dev->context_flag);
253 return 0;
254 }
255
256 return 0;
257 }
258
259 /**
260 * Complete context switch.
261 *
262 * \param dev DRM device.
263 * \param new new context handle.
264 * \return zero on success or a negative number on failure.
265 *
266 * Updates drm_device::last_context and drm_device::last_switch. Verifies the
267 * hardware lock is held, clears the drm_device::context_flag and wakes up
268 * drm_device::context_wait.
269 */
drm_context_switch_complete(struct drm_device * dev,struct drm_file * file_priv,int new)270 static int drm_context_switch_complete(struct drm_device *dev,
271 struct drm_file *file_priv, int new)
272 {
273 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
274 dev->last_switch = jiffies;
275
276 if (file_priv->master->lock.hw_lock != NULL &&
277 !_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
278 DRM_ERROR("Lock isn't held after context switch\n");
279 }
280
281 /* If a context switch is ever initiated
282 when the kernel holds the lock, release
283 that lock here. */
284 clear_bit(0, &dev->context_flag);
285
286 return 0;
287 }
288
289 /**
290 * Reserve contexts.
291 *
292 * \param inode device inode.
293 * \param file_priv DRM file private.
294 * \param cmd command.
295 * \param arg user argument pointing to a drm_ctx_res structure.
296 * \return zero on success or a negative number on failure.
297 */
298 /* LINTED */
drm_resctx(DRM_IOCTL_ARGS)299 int drm_resctx(DRM_IOCTL_ARGS)
300 {
301 struct drm_ctx_res *res = data;
302 struct drm_ctx ctx;
303 int i;
304
305 if (res->count >= DRM_RESERVED_CONTEXTS) {
306 (void) memset(&ctx, 0, sizeof(ctx));
307 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
308 ctx.handle = i;
309 DRM_COPYTO_WITH_RETURN(&res->contexts[i],
310 &ctx, sizeof (ctx));
311 }
312 }
313 res->count = DRM_RESERVED_CONTEXTS;
314
315 return 0;
316 }
317
318 /**
319 * Add context.
320 *
321 * \param inode device inode.
322 * \param file_priv DRM file private.
323 * \param cmd command.
324 * \param arg user argument pointing to a drm_ctx structure.
325 * \return zero on success or a negative number on failure.
326 *
327 * Get a new handle for the context and copy to userspace.
328 */
329 /* LINTED */
drm_addctx(DRM_IOCTL_ARGS)330 int drm_addctx(DRM_IOCTL_ARGS)
331 {
332 struct drm_ctx_list *ctx_entry;
333 struct drm_ctx *ctx = data;
334
335 ctx->handle = drm_ctxbitmap_next(dev);
336 if (ctx->handle == DRM_KERNEL_CONTEXT) {
337 /* Skip kernel's context and get a new one. */
338 ctx->handle = drm_ctxbitmap_next(dev);
339 }
340 DRM_DEBUG("%d\n", ctx->handle);
341 /* LINTED */
342 if (ctx->handle == -1) {
343 DRM_DEBUG("Not enough free contexts.\n");
344 /* Should this return -EBUSY instead? */
345 return -ENOMEM;
346 }
347
348 ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
349 if (!ctx_entry) {
350 DRM_DEBUG("out of memory\n");
351 return -ENOMEM;
352 }
353
354 INIT_LIST_HEAD(&ctx_entry->head);
355 ctx_entry->handle = ctx->handle;
356 ctx_entry->tag = file;
357
358 mutex_lock(&dev->ctxlist_mutex);
359 list_add(&ctx_entry->head, &dev->ctxlist, (caddr_t)ctx_entry);
360 ++dev->ctx_count;
361 mutex_unlock(&dev->ctxlist_mutex);
362
363 return 0;
364 }
365
366 /* LINTED */
/* Modify context: intentionally a no-op, kept for ioctl completeness. */
int drm_modctx(DRM_IOCTL_ARGS)
{
	/* This does nothing */
	return 0;
}
372
373 /**
374 * Get context.
375 *
376 * \param inode device inode.
377 * \param file_priv DRM file private.
378 * \param cmd command.
379 * \param arg user argument pointing to a drm_ctx structure.
380 * \return zero on success or a negative number on failure.
381 */
382 /* LINTED */
int drm_getctx(DRM_IOCTL_ARGS)
{
	struct drm_ctx *ctx = data;

	/* This is 0, because we don't handle any context flags */
	ctx->flags = 0;

	return 0;
}
392
393 /**
394 * Switch context.
395 *
396 * \param inode device inode.
397 * \param file_priv DRM file private.
398 * \param cmd command.
399 * \param arg user argument pointing to a drm_ctx structure.
400 * \return zero on success or a negative number on failure.
401 *
402 * Calls context_switch().
403 */
404 /* LINTED */
int drm_switchctx(DRM_IOCTL_ARGS)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	/* Switch from the most recent context to the requested one. */
	return drm_context_switch(dev, dev->last_context, ctx->handle);
}
412
413 /**
414 * New context.
415 *
416 * \param inode device inode.
417 * \param file_priv DRM file private.
418 * \param cmd command.
419 * \param arg user argument pointing to a drm_ctx structure.
420 * \return zero on success or a negative number on failure.
421 *
422 * Calls context_switch_complete().
423 */
424 /* LINTED */
int drm_newctx(DRM_IOCTL_ARGS)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	/* Return value ignored: the helper always returns 0 here. */
	(void) drm_context_switch_complete(dev, file, ctx->handle);

	return 0;
}
434
435 /**
436 * Remove context.
437 *
438 * \param inode device inode.
439 * \param file_priv DRM file private.
440 * \param cmd command.
441 * \param arg user argument pointing to a drm_ctx structure.
442 * \return zero on success or a negative number on failure.
443 *
444 * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
445 */
446 /* LINTED */
drm_rmctx(DRM_IOCTL_ARGS)447 int drm_rmctx(DRM_IOCTL_ARGS)
448 {
449 struct drm_ctx *ctx = data;
450
451 DRM_DEBUG("%d\n", ctx->handle);
452 if (ctx->handle != DRM_KERNEL_CONTEXT) {
453 if (dev->driver->context_dtor)
454 dev->driver->context_dtor(dev, ctx->handle);
455 drm_ctxbitmap_free(dev, ctx->handle);
456 }
457
458 mutex_lock(&dev->ctxlist_mutex);
459 if (!list_empty(&dev->ctxlist)) {
460 struct drm_ctx_list *pos, *n;
461
462 list_for_each_entry_safe(pos, n, struct drm_ctx_list, &dev->ctxlist, head) {
463 if (pos->handle == ctx->handle) {
464 list_del(&pos->head);
465 kfree(pos, sizeof (*pos));
466 --dev->ctx_count;
467 }
468 }
469 }
470 mutex_unlock(&dev->ctxlist_mutex);
471
472 return 0;
473 }
474
475 /*@}*/
476