/*
 * Copyright © 2011-2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * Copyright (c) 2013, Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount   pincount   active
 * S0: initial state                           0          0        0
 * S1: context created                         1          0        0
 * S2: context is currently running            2          1        X
 * S3: GPU referenced, but not current         2          0        1
 * S4: context is current, but destroyed       1          1        0
 * S5: like S3, but destroyed                  1          0        1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 * The "current context" means the context which is currently running on the
 * GPU. The GPU has loaded its state already and has stored away the gtt
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and is
 * on the active list waiting for the next context switch to occur. Until this
 * happens, the object must remain at the same gtt offset. It is therefore
 * possible to destroy a context while it is still active.
 *
 */
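
/*
 * For reference, a minimal userspace sketch of the create/use/destroy flow
 * that drives the transitions above (assuming libdrm's drmIoctl() and the
 * uapi structures from i915_drm.h; error handling omitted):
 *
 *	struct drm_i915_gem_context_create create = { 0, };
 *	struct drm_i915_gem_context_destroy destroy;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	... execbuffers submitted with create.ctx_id select this context ...
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */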

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
 * part. It should be safe to decrease this, but it's more future proof as is.
 */
#define CONTEXT_ALIGN (64<<10)

static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
static int do_switch(struct i915_hw_context *to);

static int get_context_size(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	u32 reg;

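	/* The CXT_SIZE registers report the context image size in units of
	 * 64-byte cache lines; the multiplications below convert to bytes.
	 */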
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		reg = I915_READ(CXT_SIZE);
		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
		break;
	case 7:
		reg = I915_READ(GEN7_CXT_SIZE);
		if (IS_HASWELL(dev))
			ret = HSW_CXT_TOTAL_SIZE;
		else
			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
		break;
	default:
		BUG();
	}

	return ret;
}

void i915_gem_context_free(struct kref *ctx_ref)
{
	struct i915_hw_context *ctx = container_of(ctx_ref,
						   struct i915_hw_context, ref);

	drm_gem_object_unreference(&ctx->obj->base);
	kfree(ctx, sizeof(*ctx));
}

static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
		  struct drm_i915_file_private *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *ctx;
	int ret, id;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return NULL;

	kref_init(&ctx->ref);
	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
	if (ctx->obj == NULL) {
		kfree(ctx, sizeof(*ctx));
		DRM_DEBUG_DRIVER("Context object allocation failed\n");
		return NULL;
	}

	if (INTEL_INFO(dev)->gen >= 7) {
		ret = i915_gem_object_set_cache_level(ctx->obj,
						      I915_CACHE_LLC_MLC);
		if (ret)
			goto err_out;
	}

	/* The ring associated with the context object is handled by the normal
	 * object tracking code. We give an initial ring value simply to pass an
	 * assertion in the context switch code.
	 */
	ctx->ring = &dev_priv->ring[RCS];

	/* Default context will never have a file_priv */
	if (file_priv == NULL)
		return ctx;

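	/* Legacy two-step idr allocation: idr_pre_get() preallocates memory
	 * and idr_get_new_above() hands out an id above DEFAULT_CONTEXT_ID.
	 * -EAGAIN from idr_get_new_above() means the preallocation has been
	 * exhausted, so loop back and preload again.
	 */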
again:
	if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		DRM_DEBUG_DRIVER("idr allocation failed\n");
		goto err_out;
	}

	ret = idr_get_new_above(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_ID + 1, &id);
	ctx->file_priv = file_priv;
	if (ret == 0)
		ctx->id = id;

	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		goto err_out;

	return ctx;

err_out:
	i915_gem_context_unreference(ctx);
	return NULL;
}

static inline bool is_default_context(struct i915_hw_context *ctx)
{
	return (ctx == ctx->ring->default_context);
}

/**
 * The default context needs to exist per ring that uses contexts. It stores the
 * context state of the GPU for applications that don't utilize HW contexts, as
 * well as an idle case.
 */
static int create_default_context(struct drm_i915_private *dev_priv)
{
	struct i915_hw_context *ctx;
	int ret;

	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	ctx = create_hw_context(dev_priv->dev, NULL);
	if (ctx == NULL)
		return (-ENOMEM);

	/* We may need to do things with the shrinker which require us to
	 * immediately switch back to the default context. This can cause a
	 * problem as pinning the default context also requires GTT space which
	 * may not be available. To avoid this we always pin the
	 * default context.
	 */
	dev_priv->ring[RCS].default_context = ctx;
	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
	if (ret) {
		DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
		goto err_destroy;
	}

	ret = do_switch(ctx);
	if (ret) {
		DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("Default HW context loaded\n");
	return 0;

err_unpin:
	i915_gem_object_unpin(ctx->obj);
err_destroy:
	i915_gem_context_unreference(ctx);
	return ret;
}

void i915_gem_context_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_HW_CONTEXTS(dev)) {
		dev_priv->hw_contexts_disabled = true;
		DRM_DEBUG_DRIVER("Disabling HW Contexts; old hardware\n");
		return;
	}

	/* If called from reset, or thaw... we've been here already */
	if (dev_priv->hw_contexts_disabled ||
	    dev_priv->ring[RCS].default_context)
		return;

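	/* Round the hardware-reported context size up to whole 4 KiB pages;
	 * ptob() converts that page count back to bytes.
	 */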
	dev_priv->hw_context_size = ptob(DIV_ROUND_UP(get_context_size(dev), 4096));

	if (dev_priv->hw_context_size > (1<<20)) {
		dev_priv->hw_contexts_disabled = true;
		DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n");
		return;
	}

	if (create_default_context(dev_priv)) {
		dev_priv->hw_contexts_disabled = true;
		DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed\n");
		return;
	}

	DRM_DEBUG_DRIVER("HW context support initialized\n");
}

void i915_gem_context_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;

	if (dev_priv->hw_contexts_disabled)
		return;

	/* The only known way to stop the gpu from accessing the hw context is
	 * to reset it. Do this as the very last operation to avoid confusing
	 * other code, leading to spurious errors. */
	intel_gpu_reset(dev);

	/* Got a panic here with dctx=0 during reboot when Xorg was up.
	 * So... apparently the context_unreference might already have
	 * happened when we get here? Not sure how, but check...
	 */
	if (dctx == NULL) {
		DRM_DEBUG_DRIVER("i915_gem_context_fini, dctx=0\n");
		return;
	}
	i915_gem_object_unpin(dctx->obj);

	/* When the default context is created and switched to, the base object
	 * refcount will be 2 (+1 from object creation and +1 from do_switch()).
	 * i915_gem_context_fini() will be called after gpu_idle() has switched
	 * to the default context. So we need to unreference the base object once
	 * to offset the do_switch part, so that i915_gem_context_unreference()
	 * can then free the base object correctly. */
	drm_gem_object_unreference(&dctx->obj->base);
	i915_gem_context_unreference(dctx);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_hw_context *ctx = p;

	BUG_ON(id == DEFAULT_CONTEXT_ID);

	i915_gem_context_unreference(ctx);
	return 0;
}

struct i915_ctx_hang_stats *
i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
				struct drm_file *file,
				u32 id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_file_private *file_priv;
	struct i915_hw_context *to;

	if (dev_priv->hw_contexts_disabled)
		return NULL;

	if (ring->id != RCS)
		return NULL;

	/* Check for NULL before dereferencing file->driver_priv */
	if (file == NULL)
		return NULL;

	file_priv = file->driver_priv;

	if (id == DEFAULT_CONTEXT_ID)
		return &file_priv->hang_stats;

	to = i915_gem_context_get(file->driver_priv, id);
	if (to == NULL)
		return NULL;

	return &to->hang_stats;
}

void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
	mutex_unlock(&dev->struct_mutex);
}

static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
}

static inline int
mi_set_context(struct intel_ring_buffer *ring,
	       struct i915_hw_context *new_context,
	       u32 hw_flags)
{
	int ret;

	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
	 * explicitly, so we rely on the value at ring init, stored in
	 * itlb_before_ctx_switch.
	 */
	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
		if (ret)
			return ret;
	}

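	/* Six dwords are emitted below: MI_ARB_ON_OFF (or MI_NOOP), MI_NOOP,
	 * MI_SET_CONTEXT, the context descriptor, the workaround MI_NOOP, and
	 * the closing MI_ARB_ON_OFF (or MI_NOOP).
	 */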
	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw */
	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, new_context->obj->gtt_offset |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
	intel_ring_emit(ring, MI_NOOP);

	if (IS_GEN7(ring->dev))
		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
	else
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return ret;
}

static int do_switch(struct i915_hw_context *to)
{
	struct intel_ring_buffer *ring = to->ring;
	struct i915_hw_context *from = ring->last_context;
	u32 hw_flags = 0;
	int ret;

	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);

	if (from == to)
		return 0;

	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
	if (ret)
		return ret;

	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 * XXX: We need a real interface to do this instead of trickery. */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	if (!to->obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);

	if (!to->is_initialized || is_default_context(to))
		hw_flags |= MI_RESTORE_INHIBIT;
	else if (from == to) {
		/* not yet expected; unreachable after the early return above */
		hw_flags |= MI_FORCE_RESTORE;
		WARN_ON(from == to);
	}


	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_gem_object_move_to_active(from->obj, ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->obj->dirty = 1;
		BUG_ON(from->obj->ring != ring);

		ret = i915_add_request(ring, NULL);
		if (ret) {
			/* Too late, we've already scheduled a context switch.
			 * Try to undo the change so that the hw state is
			 * consistent with our tracking. In case of emergency,
			 * scream.
			 */
			WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
			return ret;
		}

		i915_gem_object_unpin(from->obj);
		i915_gem_context_unreference(from);
	}

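	/* ring->last_context owns a reference on the context it points at:
	 * take one on the incoming context here; the outgoing context's
	 * reference was dropped above once its request was queued.
	 */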
	i915_gem_context_reference(to);
	ring->last_context = to;
	to->is_initialized = true;

	return 0;
}

/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: file associated with the context, may be NULL
 * @to_id: id of the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file,
			int to_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_hw_context *to;

	if (dev_priv->hw_contexts_disabled)
		return 0;

	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	if (ring != &dev_priv->ring[RCS])
		return 0;

	if (to_id == DEFAULT_CONTEXT_ID) {
		to = ring->default_context;
	} else {
		if (file == NULL)
			return -EINVAL;

		to = i915_gem_context_get(file->driver_priv, to_id);
		if (to == NULL)
			return -ENOENT;
	}

	return do_switch(to);
}
int i915_gem_context_create_ioctl(DRM_IOCTL_ARGS)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	if (dev_priv->hw_contexts_disabled)
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = create_hw_context(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (ctx == NULL)
		return (-ENOMEM);

	args->ctx_id = ctx->id;
	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);

	return 0;
}

int i915_gem_context_destroy_ioctl(DRM_IOCTL_ARGS)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_hw_context *ctx;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

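	/* The default context is never installed in the idr (ids are handed
	 * out starting at DEFAULT_CONTEXT_ID + 1), so an attempt to destroy
	 * DEFAULT_CONTEXT_ID fails the lookup below and returns -ENOENT.
	 */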
	ctx = i915_gem_context_get(file_priv, args->ctx_id);
	if (!ctx) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	idr_remove(&ctx->file_priv->context_idr, ctx->id);
	i915_gem_context_unreference(ctx);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
	return 0;
}

void i915_gem_context_reference(struct i915_hw_context *ctx)
{
	kref_get(&ctx->ref);
}

void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}