1 /*
2 * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
3 */
4
5 /*
6 * Copyright (c) 2009, 2013, Intel Corporation.
7 * All Rights Reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 * IN THE SOFTWARE.
27 *
28 * Authors:
29 * Eric Anholt <eric@anholt.net>
30 *
31 */
32
33 #include "drmP.h"
34 #include "drm.h"
35 #include "drm_mm.h"
36 #include "i915_drm.h"
37 #include "i915_drv.h"
38 #include "intel_drv.h"
39
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42 static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
43 unsigned alignment,
44 bool map_and_fenceable,
45 bool nonblocking);
46 static int i915_gem_phys_pwrite(struct drm_device *dev,
47 struct drm_i915_gem_object *obj,
48 struct drm_i915_gem_pwrite *args,
49 struct drm_file *file);
50 static void i915_gem_write_fence(struct drm_device *dev, int reg,
51 struct drm_i915_gem_object *obj);
52 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
53 struct drm_i915_fence_reg *fence,
54 bool enable);
55
56 int i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
57
58 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
59 {
60 if (obj->tiling_mode)
61 i915_gem_release_mmap(obj);
62
63 /* As we do not have an associated fence register, we will force
64 * a tiling change if we ever need to acquire one.
65 */
66 obj->fence_dirty = false;
67 obj->fence_reg = I915_FENCE_REG_NONE;
68 }
69
70 /* some bookkeeping */
71 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
72 size_t size)
73 {
74 dev_priv->mm.object_count++;
75 dev_priv->mm.object_memory += size;
76 }
77
78 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
79 size_t size)
80 {
81 dev_priv->mm.object_count--;
82 dev_priv->mm.object_memory -= size;
83 }
84
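/* Wait (up to 10 seconds) for any pending GPU reset to complete before the
 * caller takes the struct mutex, so callers do not race with a reset in
 * progress. Returns 0 on success or if the GPU is terminally wedged.
 */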
85 static int
86 i915_gem_wait_for_error(struct i915_gpu_error *error)
87 {
88
89 #define EXIT_COND (!i915_reset_in_progress(error) || \
90 i915_terminally_wedged(error))
91 if (EXIT_COND)
92 return 0;
93
94 /*
95 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
96 * userspace. If it takes that long something really bad is going on and
97 * we should simply try to bail out and fail as gracefully as possible.
98 */
99 if (wait_for(EXIT_COND, 10*1000)) {
100 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
101 return -EIO;
102 }
103 #undef EXIT_COND
104
105 return 0;
106 }
107
108 int
109 i915_mutex_lock_interruptible(struct drm_device *dev)
110 {
111 struct drm_i915_private *dev_priv = dev->dev_private;
112 int ret;
113
114 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
115 if (ret)
116 return ret;
117
118 /* FIXME: should use mutex_lock_interruptible() here */
119 mutex_lock(&dev->struct_mutex);
120
121 WARN_ON(i915_verify_lists(dev));
122 return 0;
123 }
124
125 static inline bool
126 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
127 {
128 return obj->gtt_space && !obj->active;
129 }
130
131 int
132 /* LINTED */
133 i915_gem_init_ioctl(DRM_IOCTL_ARGS)
134 {
135 struct drm_i915_gem_init *args = data;
136 struct drm_i915_private *dev_priv = dev->dev_private;
137
138 if (drm_core_check_feature(dev, DRIVER_MODESET))
139 return -ENODEV;
140
141 if (args->gtt_start >= args->gtt_end ||
142 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
143 return -EINVAL;
144
145 /* GEM with user mode setting was never supported on ilk and later. */
146 if (INTEL_INFO(dev)->gen >= 5)
147 return -ENODEV;
148
149 mutex_lock(&dev->struct_mutex);
150 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
151 args->gtt_end);
152 dev_priv->gtt.mappable_end = args->gtt_end;
153 mutex_unlock(&dev->struct_mutex);
154
155 return 0;
156 }
157
158 int
159 /* LINTED */
160 i915_gem_get_aperture_ioctl(DRM_IOCTL_ARGS)
161 {
162 struct drm_i915_private *dev_priv = dev->dev_private;
163 struct drm_i915_gem_get_aperture *args = data;
164 struct drm_i915_gem_object *obj;
165 size_t pinned;
166
167 pinned = 0;
168 mutex_lock(&dev->struct_mutex);
169 list_for_each_entry(obj, struct drm_i915_gem_object, &dev_priv->mm.bound_list, global_list)
170 if (obj->pin_count)
171 pinned += obj->gtt_space->size;
172 mutex_unlock(&dev->struct_mutex);
173
174 args->aper_size = dev_priv->gtt.total;
175 args->aper_available_size = args->aper_size - pinned;
176
177 return 0;
178 }
179
180 void *i915_gem_object_alloc(struct drm_device *dev)
181 {
182 return NULL;
183 }
184
185 void i915_gem_object_free(struct drm_i915_gem_object *obj)
186 {
187 }
188
189 static int
190 i915_gem_create(struct drm_file *file,
191 struct drm_device *dev,
192 uint64_t size,
193 uint32_t *handle_p)
194 {
195 struct drm_i915_gem_object *obj;
196 int ret;
197 u32 handle;
198
199 size = roundup(size, PAGE_SIZE);
200 if (size == 0)
201 return -EINVAL;
202
203 /* Allocate the new object */
204 obj = i915_gem_alloc_object(dev, size);
205 if (obj == NULL)
206 return -ENOMEM;
207
208 ret = drm_gem_handle_create(file, &obj->base, &handle);
209 if (ret) {
210 drm_gem_object_release(&obj->base);
211 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
212 i915_gem_object_free(obj);
213 return ret;
214 }
215
216 /* drop reference from allocate - handle holds it now */
217 drm_gem_object_unreference(&obj->base);
218
219 *handle_p = handle;
220 return 0;
221 }
222
223 int
224 i915_gem_dumb_create(struct drm_file *file,
225 struct drm_device *dev,
226 struct drm_mode_create_dumb *args)
227 {
228 /* have to work out size/pitch and return them */
229 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
230 args->size = args->pitch * args->height;
231 return i915_gem_create(file, dev,
232 args->size, &args->handle);
233 }
234
235 int i915_gem_dumb_destroy(struct drm_file *file,
236 struct drm_device *dev,
237 uint32_t handle)
238 {
239 return drm_gem_handle_delete(file, handle);
240 }
241 /**
242 * Creates a new mm object and returns a handle to it.
243 */
244 int
245 /* LINTED */
246 i915_gem_create_ioctl(DRM_IOCTL_ARGS)
247 {
248 struct drm_i915_gem_create *args = data;
249 return i915_gem_create(file, dev,
250 args->size, &args->handle);
251 }
252
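/* Copy between a GEM object page and user memory, compensating for the
 * bit-17 swizzling some chipsets apply: pages whose physical address has
 * bit 17 set are copied in 64-byte chunks with the offset XORed by 64.
 */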
253 static inline void
254 slow_shmem_bit17_copy(caddr_t gpu_page,
255 int gpu_offset,
256 uint32_t *cpu_page,
257 int cpu_offset,
258 int length,
259 int is_read)
260 {
261
262 int ret = 0;
263 /* Use the unswizzled path if this page isn't affected. */
264 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
265 if (is_read)
266 ret = DRM_COPY_TO_USER(cpu_page + cpu_offset,
267 gpu_page + gpu_offset, length);
268 else
269 ret = DRM_COPY_FROM_USER(gpu_page + gpu_offset,
270 cpu_page + cpu_offset, length);
271 if (ret)
272 DRM_ERROR("slow_shmem_bit17_copy unswizzled path failed, ret = %d", ret);
273 return;
274 }
275
276 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
277 * XORing with the other bits (A9 for Y, A9 and A10 for X)
278 */
279 while (length > 0) {
280 int cacheline_end = ALIGN(gpu_offset + 1, 64);
281 int this_length = min(cacheline_end - gpu_offset, length);
282 int swizzled_gpu_offset = gpu_offset ^ 64;
283
284 if (is_read) {
285 ret = DRM_COPY_TO_USER(cpu_page + cpu_offset,
286 gpu_page + swizzled_gpu_offset,
287 this_length);
288 } else {
289 ret = DRM_COPY_FROM_USER(gpu_page + swizzled_gpu_offset,
290 cpu_page + cpu_offset,
291 this_length);
292 }
293 cpu_offset += this_length;
294 gpu_offset += this_length;
295 length -= this_length;
296 }
297 if (ret)
298 DRM_ERROR("slow_shmem_bit17_copy failed, ret = %d", ret);
299
300 }
301
302 int
303 /* LINTED */
304 i915_gem_shmem_pread(struct drm_device *dev,
305 struct drm_i915_gem_object *obj,
306 struct drm_i915_gem_pread *args,
307 struct drm_file *file_priv)
308 {
309 ssize_t remain, page_length;
310 uint32_t offset;
311 uint64_t first_data_page;
312 int shmem_page_index, shmem_page_offset;
313 int data_page_index, data_page_offset;
314 int ret = 0;
315 uint64_t data_ptr = args->data_ptr;
316 int do_bit17_swizzling;
317 int needs_clflush = 0;
318 uint32_t *user_data = (uint32_t *)(uintptr_t)args->data_ptr;
319
320 remain = args->size;
321
322 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
323
324 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
325 /* If we're not in the cpu read domain, set ourself into the gtt
326 * read domain and manually flush cachelines (if required). This
327 * optimizes for the case when the gpu will dirty the data
328 * anyway again before the next pread happens. */
329 if (obj->cache_level == I915_CACHE_NONE)
330 needs_clflush = 1;
331 if (obj->gtt_space) {
332 ret = i915_gem_object_set_to_gtt_domain(obj, false);
333 if (ret)
334 return ret;
335 }
336 }
337
338 ret = i915_gem_object_get_pages(obj);
339 if (ret)
340 return ret;
341
342 i915_gem_object_pin_pages(obj);
343
344 first_data_page = data_ptr / PAGE_SIZE;
345
346 offset = args->offset;
347
348 if (needs_clflush)
349 i915_gem_clflush_object(obj);
350
351 if (do_bit17_swizzling) {
352 while (remain > 0) {
353 /* Operation in this page
354 *
355 * shmem_page_index = page number within shmem file
356 * shmem_page_offset = offset within page in shmem file
357 * data_page_index = page number in get_user_pages return
358 * data_page_offset = offset within data_page_index page.
359 * page_length = bytes to copy for this page
360 */
361 shmem_page_index = offset / DRM_PAGE_SIZE;
362 shmem_page_offset = offset & ~DRM_PAGE_MASK;
363 data_page_index = data_ptr / DRM_PAGE_SIZE - first_data_page;
364 data_page_offset = data_ptr & ~DRM_PAGE_MASK;
365
366 page_length = remain;
367 if ((shmem_page_offset + page_length) > DRM_PAGE_SIZE)
368 page_length = PAGE_SIZE - shmem_page_offset;
369 if ((data_page_offset + page_length) > DRM_PAGE_SIZE)
370 page_length = PAGE_SIZE - data_page_offset;
371
372 slow_shmem_bit17_copy(obj->page_list[shmem_page_index],
373 shmem_page_offset,
374 user_data + data_page_index * DRM_PAGE_SIZE,
375 data_page_offset,
376 page_length,
377 1);
378
379 remain -= page_length;
380 data_ptr += page_length;
381 offset += page_length;
382 }
383 } else {
384 ret = DRM_COPY_TO_USER((caddr_t)user_data,
385 obj->base.kaddr + args->offset,
386 args->size);
387 if (ret)
388 DRM_ERROR("shmem_pread_copy failed, ret = %d", ret);
389 }
390 i915_gem_object_unpin_pages(obj);
391 return ret;
392 }
393
394 /**
395 * Reads data from the object referenced by handle.
396 *
397 * On error, the contents of *data are undefined.
398 */
399 int
400 /* LINTED */
401 i915_gem_pread_ioctl(DRM_IOCTL_ARGS)
402 {
403 struct drm_i915_gem_pread *args = data;
404 struct drm_i915_gem_object *obj;
405 int ret = 0;
406
407 if (args->size == 0)
408 return 0;
409
410 ret = i915_mutex_lock_interruptible(dev);
411 if (ret)
412 return ret;
413
414 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
415 if (&obj->base == NULL) {
416 ret = -ENOENT;
417 goto unlock;
418 }
419
420 /* Bounds check source. */
421 if (args->offset > obj->base.size ||
422 args->size > obj->base.size - args->offset) {
423 ret = -EINVAL;
424 goto out;
425 }
426
427 /* prime objects have no backing filp to GEM pread/pwrite
428 * pages from.
429 */
430 ret = i915_gem_shmem_pread(dev, obj, args, file);
431
432 TRACE_GEM_OBJ_HISTORY(obj, "pread");
433
434 out:
435 drm_gem_object_unreference(&obj->base);
436 unlock:
437 mutex_unlock(&dev->struct_mutex);
438 return ret;
439 }
440
441 static int
442 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
443 struct drm_i915_gem_object *obj,
444 struct drm_i915_gem_pwrite *args,
445 /* LINTED */
446 struct drm_file *file_priv)
447 {
448 uint32_t *user_data;
449 int ret = 0;
450 ret = i915_gem_object_pin(obj, 0, true, true);
451 if (ret)
452 goto out;
453
454 ret = i915_gem_object_set_to_gtt_domain(obj, true);
455 if (ret)
456 goto out_unpin;
457
458 ret = i915_gem_object_put_fence(obj);
459 if (ret)
460 goto out_unpin;
461 user_data = (uint32_t *)(uintptr_t)args->data_ptr;
462
463 ret = DRM_COPY_FROM_USER(obj->base.kaddr + args->offset, user_data, args->size);
464 if (ret) {
465 DRM_ERROR("copy_from_user failed, ret = %d", ret);
466 goto out_unpin;
467 }
468
469 out_unpin:
470 i915_gem_object_unpin(obj);
471 out:
472 return ret;
473 }
474
475 int
476 i915_gem_shmem_pwrite(struct drm_device *dev,
477 struct drm_i915_gem_object *obj,
478 struct drm_i915_gem_pwrite *args,
479 /* LINTED */
480 struct drm_file *file_priv)
481 {
482 ssize_t remain, page_length;
483 uint32_t offset;
484 uint64_t first_data_page;
485 int shmem_page_index, shmem_page_offset;
486 int data_page_index, data_page_offset;
487 int ret = 0;
488 uint64_t data_ptr = args->data_ptr;
489 int needs_clflush_after = 0;
490 int needs_clflush_before = 0;
491 int do_bit17_swizzling;
492 uint32_t *user_data = (uint32_t *)(uintptr_t)args->data_ptr;
493
494 remain = args->size;
495
496 /* Pin the user pages containing the data. We can't fault while
497 * holding the struct mutex, and all of the pwrite implementations
498 * want to hold it while dereferencing the user data.
499 */
500 first_data_page = data_ptr / PAGE_SIZE;
501
502 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
503
504 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
505 /* If we're not in the cpu write domain, set ourself into the gtt
506 * write domain and manually flush cachelines (if required). This
507 * optimizes for the case when the gpu will use the data
508 * right away and we therefore have to clflush anyway. */
509 if (obj->cache_level == I915_CACHE_NONE)
510 needs_clflush_after = 1;
511 if (obj->gtt_space) {
512 ret = i915_gem_object_set_to_gtt_domain(obj, true);
513 if (ret)
514 return ret;
515 }
516 }
517 /* Same trick applies to invalidating partially written cachelines before
518 * writing. */
519 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
520 && obj->cache_level == I915_CACHE_NONE)
521 needs_clflush_before = 1;
522
523 ret = i915_gem_object_get_pages(obj);
524 if (ret)
525 return ret;
526
527 i915_gem_object_pin_pages(obj);
528
529 if (needs_clflush_before)
530 i915_gem_clflush_object(obj);
531
532 offset = args->offset;
533 obj->dirty = 1;
534
535 if (do_bit17_swizzling) {
536 while (remain > 0) {
537 /* Operation in this page
538 *
539 * shmem_page_index = page number within shmem file
540 * shmem_page_offset = offset within page in shmem file
541 * data_page_index = page number in get_user_pages return
542 * data_page_offset = offset within data_page_index page.
543 * page_length = bytes to copy for this page
544 */
545 shmem_page_index = offset / DRM_PAGE_SIZE;
546 shmem_page_offset = offset & ~DRM_PAGE_MASK;
547 data_page_index = data_ptr / DRM_PAGE_SIZE - first_data_page;
548 data_page_offset = data_ptr & ~DRM_PAGE_MASK;
549
550 page_length = remain;
551 if ((shmem_page_offset + page_length) > DRM_PAGE_SIZE)
552 page_length = PAGE_SIZE - shmem_page_offset;
553 if ((data_page_offset + page_length) > DRM_PAGE_SIZE)
554 page_length = PAGE_SIZE - data_page_offset;
555
556 slow_shmem_bit17_copy(obj->page_list[shmem_page_index],
557 shmem_page_offset,
558 user_data + data_page_index * DRM_PAGE_SIZE,
559 data_page_offset,
560 page_length,
561 0);
562
563 remain -= page_length;
564 data_ptr += page_length;
565 offset += page_length;
566 }
567 } else {
568 ret = DRM_COPY_FROM_USER(obj->base.kaddr + args->offset,
569 (caddr_t)user_data,
570 args->size);
571 if (ret)
572 DRM_ERROR("shmem_pwrite_copy failed, ret = %d", ret);
573 }
574
575 i915_gem_object_unpin_pages(obj);
576
577 if (needs_clflush_after)
578 i915_gem_chipset_flush(dev);
579 return ret;
580 }
581
582 /**
583 * Writes data to the object referenced by handle.
584 *
585 * On error, the contents of the buffer that were to be modified are undefined.
586 */
587 int
588 /* LINTED */
589 i915_gem_pwrite_ioctl(DRM_IOCTL_ARGS)
590 {
591 struct drm_i915_gem_pwrite *args = data;
592 struct drm_i915_gem_object *obj;
593 int ret;
594
595 if (args->size == 0)
596 return 0;
597
598 ret = i915_mutex_lock_interruptible(dev);
599 if (ret)
600 return ret;
601
602 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
603 if (&obj->base == NULL) {
604 ret = -ENOENT;
605 goto unlock;
606 }
607
608 /* Bounds check destination. */
609 if (args->offset > obj->base.size ||
610 args->size > obj->base.size - args->offset) {
611 ret = -EINVAL;
612 goto out;
613 }
614
615 /* prime objects have no backing filp to GEM pread/pwrite
616 * pages from.
617 */
618 TRACE_GEM_OBJ_HISTORY(obj, "pwrite");
619 ret = -EFAULT;
620 /* We can only do the GTT pwrite on untiled buffers, as otherwise
621 * it would end up going through the fenced access, and we'll get
622 * different detiling behavior between reading and writing.
623 * pread/pwrite currently are reading and writing from the CPU
624 * perspective, requiring manual detiling by the client.
625 */
626 if (obj->phys_obj) {
627 ret = i915_gem_phys_pwrite(dev, obj, args, file);
628 goto out;
629 }
630
631 if (obj->cache_level == I915_CACHE_NONE &&
632 obj->tiling_mode == I915_TILING_NONE &&
633 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
634 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
635 /* Note that the gtt paths might fail with non-page-backed user
636 * pointers (e.g. gtt mappings when moving data between
637 * textures). Fallback to the shmem path in that case. */
638
639 /* Flushing cursor object */
640 if (obj->is_cursor)
641 i915_gem_clflush_object(obj);
642 }
643
644 if (ret == -EFAULT || ret == -ENOSPC)
645 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
646
647 out:
648 drm_gem_object_unreference(&obj->base);
649 unlock:
650 mutex_unlock(&dev->struct_mutex);
651 return ret;
652 }
653
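/* Translate the current GPU reset state into an errno: -EAGAIN while a reset
 * is pending (for interruptible callers), -EIO if the caller cannot handle
 * -EAGAIN or the GPU is terminally wedged, and 0 otherwise.
 */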
654 int
655 i915_gem_check_wedge(struct i915_gpu_error *error,
656 bool interruptible)
657 {
658 if (i915_reset_in_progress(error)) {
659 /* Non-interruptible callers can't handle -EAGAIN, hence return
660 * -EIO unconditionally for these. */
661 if (!interruptible)
662 return -EIO;
663
664 /* Recovery complete, but the reset failed ... */
665 if (i915_terminally_wedged(error))
666 return -EIO;
667
668 return -EAGAIN;
669 }
670
671 return 0;
672 }
673
674 /*
675 * Compare seqno against outstanding lazy request. Emit a request if they are
676 * equal.
677 */
678 static int
679 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
680 {
681 int ret;
682
683 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
684
685 ret = 0;
686 if (seqno == ring->outstanding_lazy_request)
687 ret = i915_add_request(ring, NULL);
688
689 return ret;
690 }
691
692 /**
693 * __wait_seqno - wait until execution of seqno has finished
694 * @ring: the ring expected to report seqno
695 * @seqno: duh!
696 * @reset_counter: reset sequence associated with the given seqno
697 * @interruptible: do an interruptible wait (normally yes)
698 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
699 *
700 * Note: It is of utmost importance that the passed in seqno and reset_counter
701 * values have been read by the caller in an smp safe manner. Where read-side
702 * locks are involved, it is sufficient to read the reset_counter before
703 * unlocking the lock that protects the seqno. For lockless tricks, the
704 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
705 * inserted.
706 *
707 * Returns 0 if the seqno was found within the allotted time. Else returns the
708 * errno with remaining time filled in timeout argument.
709 */
710 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
711 unsigned reset_counter,
712 bool interruptible, clock_t timeout)
713 {
714 drm_i915_private_t *dev_priv = ring->dev->dev_private;
715 clock_t wait_time = timeout;
716 bool wait_forever = false;
717 int ret = 0, end = 0;
718
719 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
720 return 0;
721
722 if (wait_time == 0) {
723 wait_time = 3 * DRM_HZ;
724 }
725
726 if (!ring->irq_get(ring))
727 return -ENODEV;
728
729 #define EXIT_COND \
730 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
731 i915_reset_in_progress(&dev_priv->gpu_error) || \
732 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
733 do {
734 /* busy check is faster than cv wait on gen6+ */
735 if (IS_GEN6(ring->dev)) {
736 if (wait_for(EXIT_COND, jiffies_to_msecs(wait_time)))
737 ret = -EBUSY;
738 } else if (IS_GEN7(ring->dev) && !IS_HASWELL(ring->dev)) {
739 /*
740 * Frequent reads of the CS register may cause this GEN7 platform to hang,
741 * but they are crucial for working around the missed-IRQ issue.
742 * So the first wait busy-checks the seqno,
743 * and the second wait forces correct ordering
744 * between the irq and seqno writes, then checks again.
745 */
746 u32 *regs = ring->status_page.page_addr;
747 if (wait_for(i915_seqno_passed(regs[I915_GEM_HWS_INDEX],
748 seqno) || i915_reset_in_progress(&dev_priv->gpu_error) ||
749 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter),
750 2500)) {
751 if (wait_for(i915_seqno_passed(ring->get_seqno(ring, false),
752 seqno) || i915_reset_in_progress(&dev_priv->gpu_error) ||
753 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter),
754 500)) {
755 ret = -EBUSY;
756 }
757 }
758 } else {
759 DRM_WAIT(ret, &ring->irq_queue, EXIT_COND);
760 }
761
762 /* We need to check whether any gpu reset happened in between
763 * the caller grabbing the seqno and now ... */
764 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
765 ret = -EAGAIN;
766
767 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
768 * gone. */
769 end = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
770 if (end)
771 ret = end;
772 } while (end == 0 && wait_forever);
773
774
775 ring->irq_put(ring);
776 #undef EXIT_COND
777 if (ret) {
778 if ((gpu_dump > 0) && !IS_GEN7(ring->dev)) {
779 ring_dump(ring->dev, ring);
780 register_dump(ring->dev);
781 gtt_dump(ring->dev);
782 }
783 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
784 __func__, ret, seqno, ring->get_seqno(ring, true),
785 dev_priv->next_seqno);
786 }
787
788 return (ret);
789 }
790
791 /**
792 * Waits for a sequence number to be signaled, and cleans up the
793 * request and object lists appropriately for that event.
794 */
795 int
796 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
797 {
798 struct drm_device *dev = ring->dev;
799 struct drm_i915_private *dev_priv = dev->dev_private;
800 bool interruptible = dev_priv->mm.interruptible;
801 int ret;
802
803 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
804 BUG_ON(seqno == 0);
805
806 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
807 if (ret)
808 return ret;
809
810 ret = i915_gem_check_olr(ring, seqno);
811 if (ret)
812 return ret;
813
814 return __wait_seqno(ring, seqno,
815 atomic_read(&dev_priv->gpu_error.reset_counter),
816 interruptible, 0);
817 }
818
819 static int
820 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
821 struct intel_ring_buffer *ring)
822 {
823 i915_gem_retire_requests_ring(ring);
824
825 /* Manually manage the write flush as we may have not yet
826 * retired the buffer.
827 *
828 * Note that the last_write_seqno is always the earlier of
829 * the two (read/write) seqno, so if we have successfully waited,
830 * we know we have passed the last write.
831 */
832 obj->last_write_seqno = 0;
833 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
834
835 return 0;
836 }
837
838 /**
839 * Ensures that all rendering to the object has completed and the object is
840 * safe to unbind from the GTT or access from the CPU.
841 */
842 static int
843 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
844 bool readonly)
845 {
846 struct intel_ring_buffer *ring = obj->ring;
847 u32 seqno;
848 int ret;
849
850 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
851 if (seqno == 0)
852 return 0;
853
854 ret = i915_wait_seqno(ring, seqno);
855 if (ret)
856 return ret;
857
858 return i915_gem_object_wait_rendering__tail(obj, ring);
859 }
860
861 /* A nonblocking variant of the above wait. This is a highly dangerous routine
862 * as the object state may change during this call.
863 */
864 static int
865 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
866 bool readonly)
867 {
868 struct drm_device *dev = obj->base.dev;
869 struct drm_i915_private *dev_priv = dev->dev_private;
870 struct intel_ring_buffer *ring = obj->ring;
871 unsigned reset_counter;
872 u32 seqno;
873 int ret;
874
875 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
876 BUG_ON(!dev_priv->mm.interruptible);
877
878 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
879 if (seqno == 0)
880 return 0;
881
882 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
883 if (ret)
884 return ret;
885
886 ret = i915_gem_check_olr(ring, seqno);
887 if (ret)
888 return ret;
889
890 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
891 mutex_unlock(&dev->struct_mutex);
892 ret = __wait_seqno(ring, seqno, reset_counter, true, 0);
893 mutex_lock(&dev->struct_mutex);
894 if (ret)
895 return ret;
896
897 return i915_gem_object_wait_rendering__tail(obj, ring);
898 }
899
900 /**
901 * Called when user space prepares to use an object with the CPU, either
902 * through the mmap ioctl's mapping or a GTT mapping.
903 */
904 int
905 /* LINTED */
906 i915_gem_set_domain_ioctl(DRM_IOCTL_ARGS)
907 {
908 struct drm_i915_gem_set_domain *args = data;
909 struct drm_i915_gem_object *obj;
910 uint32_t read_domains = args->read_domains;
911 uint32_t write_domain = args->write_domain;
912 int ret;
913
914 /* Only handle setting domains to types used by the CPU. */
915 if (write_domain & I915_GEM_GPU_DOMAINS)
916 return -EINVAL;
917
918 if (read_domains & I915_GEM_GPU_DOMAINS)
919 return -EINVAL;
920
921 /* Having something in the write domain implies it's in the read
922 * domain, and only that read domain. Enforce that in the request.
923 */
924 if (write_domain != 0 && read_domains != write_domain)
925 return -EINVAL;
926
927 ret = i915_mutex_lock_interruptible(dev);
928 if (ret)
929 return ret;
930
931 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
932 if (&obj->base == NULL) {
933 ret = -ENOENT;
934 goto unlock;
935 }
936
937 /* Try to flush the object off the GPU without holding the lock.
938 * We will repeat the flush holding the lock in the normal manner
939 * to catch cases where we are gazumped.
940 */
941 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
942 if (ret)
943 goto unref;
944
945 if (read_domains & I915_GEM_DOMAIN_GTT) {
946 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
947
948 /* Silently promote "you're not bound, there was nothing to do"
949 * to success, since the client was just asking us to
950 * make sure everything was done.
951 */
952 if (ret == -EINVAL)
953 ret = 0;
954 } else {
955 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
956 }
957
958 unref:
959 drm_gem_object_unreference(&obj->base);
960 unlock:
961 mutex_unlock(&dev->struct_mutex);
962 return ret;
963 }
964
965 /**
966 * Called when user space has done writes to this buffer
967 */
968 int
969 /* LINTED */
970 i915_gem_sw_finish_ioctl(DRM_IOCTL_ARGS)
971 {
972 struct drm_i915_gem_sw_finish *args = data;
973 struct drm_i915_gem_object *obj;
974 int ret = 0;
975
976 ret = i915_mutex_lock_interruptible(dev);
977 if (ret)
978 return ret;
979
980 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
981 if (&obj->base == NULL) {
982 ret = -ENOENT;
983 goto unlock;
984 }
985
986 /* Pinned buffers may be scanout, so flush the cache */
987 if (obj->pin_count)
988 i915_gem_object_flush_cpu_write_domain(obj);
989
990 drm_gem_object_unreference(&obj->base);
991 unlock:
992 mutex_unlock(&dev->struct_mutex);
993 return ret;
994 }
995
996 /**
997 * Maps the contents of an object, returning the address it is mapped
998 * into.
999 *
1000 * While the mapping holds a reference on the contents of the object, it doesn't
1001 * imply a ref on the object itself.
1002 */
1003 int
1004 /* LINTED */
1005 i915_gem_mmap_ioctl(DRM_IOCTL_ARGS)
1006 {
1007 struct drm_i915_private *dev_priv = dev->dev_private;
1008 struct drm_i915_gem_mmap *args = data;
1009 struct drm_gem_object *obj;
1010 caddr_t vvaddr = NULL;
1011 int ret;
1012
1013 if (!(dev->driver->driver_features & DRIVER_GEM))
1014 return -ENODEV;
1015
1016 obj = drm_gem_object_lookup(dev, file, args->handle);
1017 if (obj == NULL)
1018 return -EBADF;
1019
1020 /* prime objects have no backing filp to GEM mmap
1021 * pages from.
1022 */
1023
1024 if (obj->size > dev_priv->gtt.mappable_end) {
1025 drm_gem_object_unreference_unlocked(obj);
1026 return -E2BIG;
1027 }
1028
1029 ret = ddi_devmap_segmap(dev_id, (off_t)obj->maplist.user_token,
1030 ttoproc(curthread)->p_as, &vvaddr, obj->maplist.map->size,
1031 PROT_ALL, PROT_ALL, MAP_SHARED, credp);
1032 if (ret)
1033 return ret;
1034
1035 mutex_lock(&dev->struct_mutex);
1036 drm_gem_object_unreference(obj);
1037 mutex_unlock(&dev->struct_mutex);
1038
1039 args->addr_ptr = (uint64_t)(uintptr_t)vvaddr;
1040
1041 return 0;
1042 }
1043
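/* Fault-in handler for GTT mmaps: on first access, pin the object into the
 * GTT, move it to the GTT domain, acquire a fence register if needed and
 * remap the mapping at the object's aperture address.
 */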
1044 void
1045 i915_gem_fault(struct drm_gem_object *obj)
1046 {
1047 struct drm_device *dev = obj->dev;
1048 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1049 uint64_t start;
1050 int ret = 0;
1051
1052 if (obj->maplist.map->gtt_mmap)
1053 return;
1054
1055 /* Now bind it into the GTT if needed */
1056 mutex_lock(&dev->struct_mutex);
1057
1058 TRACE_GEM_OBJ_HISTORY(obj_priv, "gfault");
1059
1060 /* Access to snoopable pages through the GTT is incoherent. */
1061 if (obj_priv->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1062 ret = -EINVAL;
1063 goto unlock;
1064 }
1065
1066 ret = i915_gem_object_pin(obj_priv, 0, true, false);
1067 if (ret)
1068 goto unlock;
1069
1070 ret = i915_gem_object_set_to_gtt_domain(obj_priv, 1);
1071 if (ret)
1072 goto unpin;
1073
1074 ret = i915_gem_object_get_fence(obj_priv);
1075 if (ret)
1076 goto unpin;
1077
1078 obj_priv->fault_mappable = true;
1079
1080 start = (dev->agp_aperbase + obj_priv->gtt_offset);
1081
1082 /* Finally, remap it using the new GTT offset */
1083 drm_gem_mmap(obj, start);
1084
1085 obj->maplist.map->gtt_mmap = 1;
1086
1087 unpin:
1088 i915_gem_object_unpin(obj_priv);
1089 unlock:
1090 mutex_unlock(&dev->struct_mutex);
1091 }
1092
1093 /**
1094 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1095 * @obj: obj in question
1096 *
1097 * GEM memory mapping works by handing back to userspace a fake mmap offset
1098 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1099 * up the object based on the offset and sets up the various memory mapping
1100 * structures.
1101 *
1102 * This routine allocates and attaches a fake offset for @obj.
1103 */
1104 static int
1105 i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
1106 {
1107 struct ddi_umem_cookie *umem_cookie = obj->base.maplist.map->umem_cookie;
1108 int ret;
1109
1110 if (obj->base.gtt_map_kaddr == NULL) {
1111 ret = drm_gem_create_mmap_offset(&obj->base);
1112 if (ret) {
1113 DRM_ERROR("failed to alloc kernel memory");
1114 return ret;
1115 }
1116 }
1117
1118 umem_cookie->cvaddr = obj->base.gtt_map_kaddr;
1119
1120 /* user_token is the fake offset
1121 * which create in drm_map_handle at alloc time
1122 */
1123 obj->mmap_offset = obj->base.maplist.user_token;
1124 obj->base.maplist.map->callback = 1;
1125
1126 return 0;
1127 }
1128
1129 /**
1130 * i915_gem_release_mmap - remove physical page mappings
1131 * @obj: obj in question
1132 *
1133 * Preserve the reservation of the mmapping with the DRM core code, but
1134 * relinquish ownership of the pages back to the system.
1135 *
1136 * It is vital that we remove the page mapping if we have mapped a tiled
1137 * object through the GTT and then lose the fence register due to
1138 * resource pressure. Similarly if the object has been moved out of the
1139 * aperture, then pages mapped into userspace must be revoked. Removing the
1140 * mapping will then trigger a page fault on the next user access, allowing
1141 * fixup by i915_gem_fault().
1142 */
1143 void
1144 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1145 {
1146 struct drm_device *dev = obj->base.dev;
1147 struct gem_map_list *entry, *temp;
1148
1149 if (obj->base.maplist.map->gtt_mmap) {
1150 mutex_lock(&dev->page_fault_lock);
1151 if (!list_empty(&obj->base.seg_list)) {
1152 list_for_each_entry_safe(entry, temp, struct gem_map_list, &obj->base.seg_list, head) {
1153 devmap_unload(entry->dhp, entry->mapoffset, entry->maplen);
1154 list_del(&entry->head);
1155 drm_free(entry, sizeof (struct gem_map_list), DRM_MEM_MAPS);
1156 }
1157 }
1158 mutex_unlock(&dev->page_fault_lock);
1159 drm_gem_release_mmap(&obj->base);
1160 obj->base.maplist.map->gtt_mmap = 0;
1161 }
1162 }
1163
1164 static void
1165 i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
1166 {
1167 drm_gem_free_mmap_offset(&obj->base);
1168 obj->mmap_offset = 0;
1169 }
1170
1171 uint32_t
1172 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1173 {
1174 uint32_t gtt_size;
1175
1176 if (INTEL_INFO(dev)->gen >= 4 ||
1177 tiling_mode == I915_TILING_NONE)
1178 return size;
1179
1180 /* Previous chips need a power-of-two fence region when tiling */
1181 if (INTEL_INFO(dev)->gen == 3)
1182 gtt_size = 1024*1024;
1183 else
1184 gtt_size = 512*1024;
1185
1186 while (gtt_size < size)
1187 gtt_size <<= 1;
1188
1189 return gtt_size;
1190 }
1191
1192 /**
1193 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1194 * @obj: object to check
1195 *
1196 * Return the required GTT alignment for an object, taking into account
1197 * potential fence register mapping if needed.
1198 */
1199 uint32_t
1200 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1201 int tiling_mode, bool fenced)
1202 {
1203 /*
1204 * Minimum alignment is 4k (GTT page size), but might be greater
1205 * if a fence register is needed for the object.
1206 */
1207 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1208 tiling_mode == I915_TILING_NONE)
1209 return 4096;
1210
1211 /*
1212 * Previous chips need to be aligned to the size of the smallest
1213 * fence register that can contain the object.
1214 */
1215 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1216 }
1217
1218
1219 int
1220 i915_gem_mmap_gtt(struct drm_file *file,
1221 struct drm_device *dev,
1222 uint32_t handle,
1223 uint64_t *offset)
1224 {
1225 struct drm_i915_private *dev_priv = dev->dev_private;
1226 struct drm_i915_gem_object *obj;
1227 int ret;
1228
1229 ret = i915_mutex_lock_interruptible(dev);
1230 if (ret)
1231 return ret;
1232
1233 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1234 if (&obj->base == NULL) {
1235 ret = -ENOENT;
1236 goto unlock;
1237 }
1238
1239 if (obj->base.size > dev_priv->gtt.mappable_end) {
1240 ret = -E2BIG;
1241 goto out;
1242 }
1243
1244 if (!obj->mmap_offset) {
1245 ret = i915_gem_create_mmap_offset(obj);
1246 if (ret)
1247 goto out;
1248 }
1249
1250 *offset = obj->mmap_offset;
1251
1252 out:
1253 drm_gem_object_unreference(&obj->base);
1254 unlock:
1255 mutex_unlock(&dev->struct_mutex);
1256 return ret;
1257 }
1258
1259 /**
1260 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1261 * @dev: DRM device
1262 * @data: GTT mapping ioctl data
1263 * @file_priv: GEM object info
1264 *
1265 * Simply returns the fake offset to userspace so it can mmap it.
1266 * The mmap call will end up in drm_gem_mmap(), which will set things
1267 * up so we can get faults in the handler above.
1268 *
1269 * The fault handler will take care of binding the object into the GTT
1270 * (since it may have been evicted to make room for something), allocating
1271 * a fence register, and mapping the appropriate aperture address into
1272 * userspace.
1273 */
1274 int
1275 /* LINTED */
1276 i915_gem_mmap_gtt_ioctl(DRM_IOCTL_ARGS)
1277 {
1278 struct drm_i915_gem_mmap_gtt *args = data;
1279
1280 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1281 }
1282
1283 static void
1284 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1285 {
1286 int ret;
1287 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1288 if (ret) {
1289 /* In the event of a disaster, abandon all caches and
1290 * hope for the best.
1291 */
1292 WARN_ON(ret != -EIO);
1293 i915_gem_clflush_object(obj);
1294 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1295 }
1296 if (i915_gem_object_needs_bit17_swizzle(obj))
1297 i915_gem_object_save_bit_17_swizzle(obj);
1298
1299 obj->dirty = 0;
1300
1301 kmem_free(obj->page_list,
1302 btop(obj->base.size) * sizeof(caddr_t));
1303 obj->page_list = NULL;
1304 }
1305
1306 static int
1307 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1308 {
1309 const struct drm_i915_gem_object_ops *ops = obj->ops;
1310
1311 if (obj->page_list == NULL)
1312 return 0;
1313
1314 BUG_ON(obj->gtt_space);
1315
1316 if (obj->pages_pin_count)
1317 return -EBUSY;
1318
1319 ops->put_pages(obj);
1320 obj->page_list = NULL;
1321
1322 list_del(&obj->global_list);
1323 return 0;
1324 }
1325
1326 static int
1327 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1328 {
1329 pgcnt_t np = btop(obj->base.size);
1330 caddr_t va;
1331 long i;
1332
1333 obj->page_list = kmem_zalloc(np * sizeof(caddr_t), KM_SLEEP);
1334 if (obj->page_list == NULL) {
1335 DRM_ERROR("Faled to allocate page list. size = %ld", np * sizeof(caddr_t));
1336 return -ENOMEM;
1337 }
1338
1339 for (i = 0, va = obj->base.kaddr; i < np; i++, va += PAGESIZE) {
1340 obj->page_list[i] = va;
1341 }
1342
1343 if (i915_gem_object_needs_bit17_swizzle(obj))
1344 i915_gem_object_do_bit_17_swizzle(obj);
1345 return 0;
1346 }
1347
1348
1349 /* Ensure that the associated pages are gathered from the backing storage
1350 * and pinned into our object. i915_gem_object_get_pages() may be called
1351 * multiple times before they are released by a single call to
1352 * i915_gem_object_put_pages() - once the pages are no longer referenced
1353 * either as a result of memory pressure (reaping pages under the shrinker)
1354 * or as the object is itself released.
1355 */
1356 int
1357 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1358 {
1359 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1360 const struct drm_i915_gem_object_ops *ops = obj->ops;
1361 int ret;
1362
1363 if (obj->page_list)
1364 return 0;
1365
1366 BUG_ON(obj->pages_pin_count);
1367
1368 ret = ops->get_pages(obj);
1369 if (ret)
1370 return ret;
1371
1372 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list, (caddr_t)obj);
1373 return 0;
1374 }
1375
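/* Mark the object as busy on the given ring: take an active reference,
 * record the read/write/fence seqnos and move the object onto the ring's
 * and the device's active lists.
 */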
1376 void
1377 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1378 struct intel_ring_buffer *ring)
1379 {
1380 struct drm_device *dev = obj->base.dev;
1381 struct drm_i915_private *dev_priv = dev->dev_private;
1382 u32 seqno = intel_ring_get_seqno(ring);
1383
1384 BUG_ON(ring == NULL);
1385 if (obj->ring != ring && obj->last_write_seqno) {
1386 /* Keep the seqno relative to the current ring */
1387 obj->last_write_seqno = seqno;
1388 }
1389 obj->ring = ring;
1390
1391 /* Add a reference if we're newly entering the active list. */
1392 if (!obj->active) {
1393 drm_gem_object_reference(&obj->base);
1394 obj->active = 1;
1395 }
1396
1397 /* Move from whatever list we were on to the tail of execution. */
1398 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list, (caddr_t)obj);
1399 list_move_tail(&obj->ring_list, &ring->active_list, (caddr_t)obj);
1400 obj->last_read_seqno = seqno;
1401
1402 if (obj->fenced_gpu_access) {
1403 obj->last_fenced_seqno = seqno;
1404
1405 /* Bump MRU to take account of the delayed flush */
1406 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1407 struct drm_i915_fence_reg *reg;
1408
1409 reg = &dev_priv->fence_regs[obj->fence_reg];
1410 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list, (caddr_t)reg);
1411 }
1412 }
1413 TRACE_GEM_OBJ_HISTORY(obj, "to active");
1414 }
1415
1416 static void
1417 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1418 {
1419 struct drm_device *dev = obj->base.dev;
1420 struct drm_i915_private *dev_priv = dev->dev_private;
1421
1422 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1423 BUG_ON(!obj->active);
1424
1425 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
1426
1427 list_del_init(&obj->ring_list);
1428 obj->ring = NULL;
1429
1430 obj->last_read_seqno = 0;
1431 obj->last_write_seqno = 0;
1432 obj->base.write_domain = 0;
1433
1434 obj->last_fenced_seqno = 0;
1435 obj->fenced_gpu_access = false;
1436
1437 obj->active = 0;
1438 TRACE_GEM_OBJ_HISTORY(obj, "to inactive");
1439 drm_gem_object_unreference(&obj->base);
1440
1441 WARN_ON(i915_verify_lists(dev));
1442 }
1443
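/* Idle all rings and retire outstanding requests, then reprogram every
 * ring's hardware seqno bookkeeping to the given value.
 */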
1444 static int
1445 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1446 {
1447 struct drm_i915_private *dev_priv = dev->dev_private;
1448 struct intel_ring_buffer *ring;
1449 int ret, i, j;
1450
1451 /* Carefully retire all requests without writing to the rings */
1452 for_each_ring(ring, dev_priv, i) {
1453 ret = intel_ring_idle(ring);
1454 if (ret)
1455 return ret;
1456 }
1457 i915_gem_retire_requests(dev);
1458
1459 /* Finally reset hw state */
1460 for_each_ring(ring, dev_priv, i) {
1461 intel_ring_init_seqno(ring, seqno);
1462
1463 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1464 ring->sync_seqno[j] = 0;
1465 }
1466
1467 return 0;
1468 }
1469
1470 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1471 {
1472 struct drm_i915_private *dev_priv = dev->dev_private;
1473 int ret;
1474
1475 if (seqno == 0)
1476 return -EINVAL;
1477
1478 /* HWS page needs to be set less than what we
1479 * will inject to ring
1480 */
1481 ret = i915_gem_init_seqno(dev, seqno - 1);
1482 if (ret)
1483 return ret;
1484
1485 /* Carefully set the last_seqno value so that wrap
1486 * detection still works
1487 */
1488 dev_priv->next_seqno = seqno;
1489 dev_priv->last_seqno = seqno - 1;
1490 if (dev_priv->last_seqno == 0)
1491 dev_priv->last_seqno--;
1492
1493 return 0;
1494 }
1495
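/* Hand out the next sequence number, never returning 0 (0 is reserved to
 * mean "no seqno"); when the counter wraps to 0 the seqno space is reset
 * via i915_gem_init_seqno().
 */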
1496 int
1497 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1498 {
1499 struct drm_i915_private *dev_priv = dev->dev_private;
1500
1501 /* reserve 0 for non-seqno */
1502 if (dev_priv->next_seqno == 0) {
1503 int ret = i915_gem_init_seqno(dev, 0);
1504 if (ret)
1505 return ret;
1506
1507 dev_priv->next_seqno = 1;
1508 }
1509
1510 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
1511 return 0;
1512 }
1513
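/* Emit a request on the ring: flush outstanding GPU caches, emit the request
 * via ring->add_request(), and record it so that retirement and hang
 * detection can track it. The new seqno is optionally returned via
 * out_seqno.
 */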
1514 int __i915_add_request(struct intel_ring_buffer *ring,
1515 struct drm_file *file,
1516 struct drm_i915_gem_object *obj,
1517 u32 *out_seqno)
1518 {
1519 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1520 struct drm_i915_gem_request *request;
1521 u32 request_ring_position, request_start;
1522 int was_empty;
1523 int ret;
1524
1525 request_start = intel_ring_get_tail(ring);
1526 /*
1527 * Emit any outstanding flushes - execbuf can fail to emit the flush
1528 * after having emitted the batchbuffer command. Hence we need to fix
1529 * things up similar to emitting the lazy request. The difference here
1530 * is that the flush _must_ happen before the next request, no matter
1531 * what.
1532 */
1533 ret = intel_ring_flush_all_caches(ring);
1534 if (ret)
1535 return ret;
1536
1537 request = kmalloc(sizeof(*request), GFP_KERNEL);
1538 if (request == NULL)
1539 return -ENOMEM;
1540
1541
1542 /* Record the position of the start of the request so that
1543 * should we detect the updated seqno part-way through the
1544 * GPU processing the request, we never over-estimate the
1545 * position of the head.
1546 */
1547 request_ring_position = intel_ring_get_tail(ring);
1548
1549 ret = ring->add_request(ring);
1550 if (ret) {
1551 kfree(request, sizeof(*request));
1552 return ret;
1553 }
1554
1555 request->seqno = intel_ring_get_seqno(ring);
1556 request->ring = ring;
1557 request->head = request_start;
1558 request->tail = request_ring_position;
1559 request->ctx = ring->last_context;
1560 request->batch_obj = obj;
1561
1562 /* Whilst this request exists, batch_obj will be on the
1563 * active_list, and so will hold the active reference. Only when this
1564 * request is retired will the batch_obj be moved onto the
1565 * inactive_list and lose its active reference. Hence we do not need
1566 * to explicitly hold another reference here.
1567 */
1568
1569 if (request->ctx)
1570 i915_gem_context_reference(request->ctx);
1571
1572 request->emitted_jiffies = jiffies;
1573 was_empty = list_empty(&ring->request_list);
1574 list_add_tail(&request->list, &ring->request_list, (caddr_t)request);
1575 request->file_priv = NULL;
1576
1577 if (file) {
1578 struct drm_i915_file_private *file_priv = file->driver_priv;
1579 if (file_priv->status == 1) {
1580 spin_lock(&file_priv->mm.lock);
1581 request->file_priv = file_priv;
1582 list_add_tail(&request->client_list,
1583 &file_priv->mm.request_list, (caddr_t)request);
1584 spin_unlock(&file_priv->mm.lock);
1585 }
1586 }
1587
1588 ring->outstanding_lazy_request = 0;
1589
1590 if (!dev_priv->mm.suspended && !dev_priv->gpu_hang) {
1591 if (i915_enable_hangcheck) {
1592 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
1593 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1594 }
1595 if (was_empty) {
1596 /* delay HZ ticks and then run the work directly (not inserted into a Linux workqueue) */
1597 test_set_timer(&dev_priv->mm.retire_timer, DRM_HZ);
1598 DRM_DEBUG("i915_gem: schedule_delayed_work");
1599 intel_mark_busy(dev_priv->dev);
1600 }
1601 }
1602
1603 if (out_seqno)
1604 *out_seqno = request->seqno;
1605
1606 return 0;
1607 }
1608
1609 static inline void
1610 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1611 {
1612 struct drm_i915_file_private *file_priv = request->file_priv;
1613
1614 if (!file_priv)
1615 return;
1616
1617 spin_lock(&file_priv->mm.lock);
1618 if (request->file_priv) {
1619 list_del(&request->client_list);
1620 request->file_priv = NULL;
1621 }
1622 spin_unlock(&file_priv->mm.lock);
1623 }
1624
1625 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
1626 {
1627 if (acthd >= obj->gtt_offset &&
1628 acthd < obj->gtt_offset + obj->base.size)
1629 return true;
1630
1631 return false;
1632 }
1633
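/* Check whether the masked hardware head pointer lies within a request,
 * handling the case where the request wraps around the end of the ring
 * (request_start > request_end).
 */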
1634 static bool i915_head_inside_request(const u32 acthd_unmasked,
1635 const u32 request_start,
1636 const u32 request_end)
1637 {
1638 const u32 acthd = acthd_unmasked & HEAD_ADDR;
1639
1640 if (request_start < request_end) {
1641 if (acthd >= request_start && acthd < request_end)
1642 return true;
1643 } else if (request_start > request_end) {
1644 if (acthd >= request_start || acthd < request_end)
1645 return true;
1646 }
1647
1648 return false;
1649 }
1650
1651 static bool i915_request_guilty(struct drm_i915_gem_request *request,
1652 const u32 acthd, bool *inside)
1653 {
1654 /* There is a possibility that unmasked head address
1655 * pointing inside the ring, matches the batch_obj address range.
1656 * However this is extremely unlikely.
1657 */
1658
1659 if (request->batch_obj) {
1660 if (i915_head_inside_object(acthd, request->batch_obj)) {
1661 *inside = true;
1662 return true;
1663 }
1664 }
1665
1666 if (i915_head_inside_request(acthd, request->head, request->tail)) {
1667 *inside = false;
1668 return true;
1669 }
1670
1671 return false;
1672 }
1673
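/* After a hang, decide whether this request was the guilty party (the GPU
 * head was inside its batch object or ring range) and update the
 * per-context or per-file hang statistics accordingly.
 */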
1674 static void i915_set_reset_status(struct intel_ring_buffer *ring,
1675 struct drm_i915_gem_request *request,
1676 u32 acthd)
1677 {
1678 struct i915_ctx_hang_stats *hs = NULL;
1679 bool inside, guilty;
1680
1681 /* Innocent until proven guilty */
1682 guilty = false;
1683
1684 if (ring->hangcheck.action != wait &&
1685 i915_request_guilty(request, acthd, &inside)) {
1686 DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
1687 ring->name,
1688 inside ? "inside" : "flushing",
1689 request->batch_obj ?
1690 request->batch_obj->gtt_offset : 0,
1691 request->ctx ? request->ctx->id : 0,
1692 acthd);
1693
1694 guilty = true;
1695 }
1696
1697 /* If contexts are disabled or this is the default context, use
1698 * file_priv->reset_state
1699 */
1700 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
1701 hs = &request->ctx->hang_stats;
1702 else if (request->file_priv)
1703 hs = &request->file_priv->hang_stats;
1704
1705 if (hs) {
1706 if (guilty)
1707 hs->batch_active++;
1708 else
1709 hs->batch_pending++;
1710 }
1711 }
1712
1713 static void i915_gem_free_request(struct drm_i915_gem_request *request)
1714 {
1715 list_del(&request->list);
1716 i915_gem_request_remove_from_client(request);
1717
1718 if (request->ctx)
1719 i915_gem_context_unreference(request->ctx);
1720
1721 kfree(request, sizeof(*request));
1722 }
1723
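/* On reset, record the guilty/innocent status of requests that had not yet
 * completed, free all outstanding requests, and move any objects left on
 * the ring's active list back to the inactive list.
 */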
1724 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1725 struct intel_ring_buffer *ring)
1726 {
1727 u32 completed_seqno;
1728 u32 acthd;
1729
1730 acthd = intel_ring_get_active_head(ring);
1731 completed_seqno = ring->get_seqno(ring, false);
1732
1733 while (!list_empty(&ring->request_list)) {
1734 struct drm_i915_gem_request *request;
1735
1736 request = list_first_entry(&ring->request_list,
1737 struct drm_i915_gem_request,
1738 list);
1739
1740 if (request->seqno > completed_seqno)
1741 i915_set_reset_status(ring, request, acthd);
1742
1743 i915_gem_free_request(request);
1744 }
1745
1746 while (!list_empty(&ring->active_list)) {
1747 struct drm_i915_gem_object *obj;
1748
1749 obj = list_first_entry(&ring->active_list,
1750 struct drm_i915_gem_object,
1751 ring_list);
1752
1753 i915_gem_object_move_to_inactive(obj);
1754 }
1755 }
1756
1757 void i915_gem_restore_fences(struct drm_device *dev)
1758 {
1759 struct drm_i915_private *dev_priv = dev->dev_private;
1760 int i;
1761
1762 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1763 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1764
1765 /*
1766 * Commit delayed tiling changes if we have an object still
1767 * attached to the fence, otherwise just clear the fence.
1768 */
1769 if (reg->obj) {
1770 i915_gem_object_update_fence(reg->obj, reg,
1771 reg->obj->tiling_mode);
1772 } else {
1773 i915_gem_write_fence(dev, i, NULL);
1774 }
1775 }
1776 }
1777
1778 void i915_gem_reset(struct drm_device *dev)
1779 {
1780 struct drm_i915_private *dev_priv = dev->dev_private;
1781 struct drm_i915_gem_object *obj;
1782 struct intel_ring_buffer *ring;
1783 int i;
1784
1785 for_each_ring(ring, dev_priv, i)
1786 i915_gem_reset_ring_lists(dev_priv, ring);
1787
1788 /* Move everything out of the GPU domains to ensure we do any
1789 * necessary invalidation upon reuse.
1790 */
1791 list_for_each_entry(obj, struct drm_i915_gem_object,
1792 &dev_priv->mm.inactive_list,
1793 mm_list)
1794 {
1795 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1796 }
1797
1798 i915_gem_restore_fences(dev);
1799 }
1800
1801 /**
1802 * This function clears the request list as sequence numbers are passed.
1803 */
1804 void
1805 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1806 {
1807 uint32_t seqno;
1808
1809 if (list_empty(&ring->request_list))
1810 return;
1811
1812 WARN_ON(i915_verify_lists(ring->dev));
1813
1814 seqno = ring->get_seqno(ring, true);
1815
1816 while (!list_empty(&ring->request_list)) {
1817 struct drm_i915_gem_request *request;
1818
1819 request = list_first_entry(&ring->request_list,
1820 struct drm_i915_gem_request,
1821 list);
1822
1823 if (!i915_seqno_passed(seqno, request->seqno))
1824 break;
1825
1826 /* We know the GPU must have read the request to have
1827 * sent us the seqno + interrupt, so use the position
1828 * of tail of the request to update the last known position
1829 * of the GPU head.
1830 */
1831 ring->last_retired_head = request->tail;
1832
1833 i915_gem_free_request(request);
1834 }
1835
1836 /* Move any buffers on the active list that are no longer referenced
1837 * by the ringbuffer to the flushing/inactive lists as appropriate.
1838 */
1839 while (!list_empty(&ring->active_list)) {
1840 struct drm_i915_gem_object *obj;
1841
1842 obj = list_first_entry(&ring->active_list,
1843 struct drm_i915_gem_object,
1844 ring_list);
1845
1846 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
1847 break;
1848
1849 i915_gem_object_move_to_inactive(obj);
1850 }
1851
1852 if (ring->trace_irq_seqno &&
1853 i915_seqno_passed(seqno, ring->trace_irq_seqno)) {
1854 ring->irq_put(ring);
1855 ring->trace_irq_seqno = 0;
1856 }
1857
1858 WARN_ON(i915_verify_lists(ring->dev));
1859 }
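/*
 * Illustrative note (an assumption about a helper defined elsewhere):
 * i915_seqno_passed() is conventionally a wrap-safe signed comparison,
 * roughly
 *
 *	(s32)(seqno - request->seqno) >= 0
 *
 * so retirement keeps working across a 32-bit seqno wrap, the same wrap
 * that i915_gem_object_sync() guards against further down.
 */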
1860
1861 void
i915_gem_retire_requests(struct drm_device * dev)1862 i915_gem_retire_requests(struct drm_device *dev)
1863 {
1864 drm_i915_private_t *dev_priv = dev->dev_private;
1865 struct intel_ring_buffer *ring;
1866 int i;
1867
1868 for_each_ring(ring, dev_priv, i)
1869 i915_gem_retire_requests_ring(ring);
1870 }
1871
1872 static void
i915_gem_retire_work_handler(struct work_struct * work)1873 i915_gem_retire_work_handler(struct work_struct *work)
1874 {
1875 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
1876 mm.retire_work);
1877 struct drm_device *dev = dev_priv->dev;
1878 struct intel_ring_buffer *ring;
1879 bool idle;
1880 int i;
1881
1882 /* Come back later if the device is busy... */
1883 if (!mutex_tryenter(&dev->struct_mutex)) {
1884 test_set_timer(&dev_priv->mm.retire_timer, DRM_HZ);
1885 return;
1886 }
1887
1888 i915_gem_retire_requests(dev);
1889
1890 /* Send a periodic flush down the ring so we don't hold onto GEM
1891 * objects indefinitely.
1892 */
1893 idle = true;
1894 for_each_ring(ring, dev_priv, i) {
1895 if (ring->gpu_caches_dirty)
1896 i915_add_request(ring, NULL);
1897
1898 idle &= list_empty(&ring->request_list);
1899 }
1900
1901 if (!dev_priv->mm.suspended && !idle && !dev_priv->gpu_hang)
1902 {
1903 DRM_DEBUG("i915_gem: schedule_delayed_work");
1904 test_set_timer(&dev_priv->mm.retire_timer, DRM_HZ);
1905 }
1906 if (idle)
1907 intel_mark_idle(dev);
1908 mutex_unlock(&dev->struct_mutex);
1909 }
1910
1911 void
i915_gem_retire_work_timer(void * device)1912 i915_gem_retire_work_timer(void *device)
1913 {
1914 struct drm_device *dev = (struct drm_device *)device;
1915 drm_i915_private_t *dev_priv = dev->dev_private;
1916 queue_work(dev_priv->wq, &dev_priv->mm.retire_work);
1917 }
1918
1919 /**
1920 * Ensures that an object will eventually get non-busy by flushing any required
1921  * write domains, emitting any outstanding lazy request and retiring any
1922 * completed requests.
1923 */
1924 static int
i915_gem_object_flush_active(struct drm_i915_gem_object * obj)1925 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
1926 {
1927 int ret;
1928
1929 if (obj->active) {
1930 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
1931 if (ret)
1932 return ret;
1933
1934 i915_gem_retire_requests_ring(obj->ring);
1935 }
1936
1937 return 0;
1938 }
1939
1940 /**
1941 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
1942 * @DRM_IOCTL_ARGS: standard ioctl arguments
1943 *
1944 * Returns 0 if successful, else an error is returned with the remaining time in
1945 * the timeout parameter.
1946 * -ETIME: object is still busy after timeout
1947 * -ERESTARTSYS: signal interrupted the wait
1948  * -ENOENT: object doesn't exist
1949 * Also possible, but rare:
1950 * -EAGAIN: GPU wedged
1951 * -ENOMEM: damn
1952 * -ENODEV: Internal IRQ fail
1953 * -E?: The add request failed
1954 *
1955 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
1956 * non-zero timeout parameter the wait ioctl will wait for the given number of
1957 * nanoseconds on an object becoming unbusy. Since the wait itself does so
1958 * without holding struct_mutex the object may become re-busied before this
1959  * function completes. A similar but shorter race condition exists in the busy
1960  * ioctl.
1961 */
1962 int
i915_gem_wait_ioctl(DRM_IOCTL_ARGS)1963 i915_gem_wait_ioctl(DRM_IOCTL_ARGS)
1964 {
1965 drm_i915_private_t *dev_priv = dev->dev_private;
1966 struct drm_i915_gem_wait *args = data;
1967 struct drm_i915_gem_object *obj;
1968 struct intel_ring_buffer *ring = NULL;
1969 clock_t timeout = 0;
1970 unsigned reset_counter;
1971 u32 seqno = 0;
1972 int ret = 0;
1973
1974 if (args->timeout_ns >= 0) {
1975 timeout = drv_usectohz(args->timeout_ns / 1000);
1976 }
1977
1978 ret = i915_mutex_lock_interruptible(dev);
1979 if (ret)
1980 return ret;
1981
1982 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
1983 if (&obj->base == NULL) {
1984 mutex_unlock(&dev->struct_mutex);
1985 return -ENOENT;
1986 }
1987
1988 /* Need to make sure the object gets inactive eventually. */
1989 ret = i915_gem_object_flush_active(obj);
1990 if (ret)
1991 goto out;
1992
1993 if (obj->active) {
1994 seqno = obj->last_read_seqno;
1995 ring = obj->ring;
1996 }
1997
1998 if (seqno == 0)
1999 goto out;
2000
2001 /* Do this after OLR check to make sure we make forward progress polling
2002 * on this IOCTL with a 0 timeout (like busy ioctl)
2003 */
2004 if (!args->timeout_ns) {
2005 ret = -ETIME;
2006 goto out;
2007 }
2008
2009 drm_gem_object_unreference(&obj->base);
2010 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2011 mutex_unlock(&dev->struct_mutex);
2012
2013 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2014 if (timeout) {
2015 args->timeout_ns = drv_hztousec(timeout) * 1000;
2016 }
2017 return ret;
2018
2019 out:
2020 drm_gem_object_unreference(&obj->base);
2021 mutex_unlock(&dev->struct_mutex);
2022 return ret;
2023 }
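/*
 * Hedged userspace sketch (struct and field names as referenced by the
 * handler above, values hypothetical): a zero timeout turns this wait
 * into a busy-style poll.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,	-- some GEM object handle
 *		.timeout_ns = 0,	-- poll; -ETIME means still busy
 *	};
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */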
2024
2025 /**
2026 * i915_gem_object_sync - sync an object to a ring.
2027 *
2028 * @obj: object which may be in use on another ring.
2029 * @to: ring we wish to use the object on. May be NULL.
2030 *
2031 * This code is meant to abstract object synchronization with the GPU.
2032 * Calling with NULL implies synchronizing the object with the CPU
2033 * rather than a particular GPU ring.
2034 *
2035 * Returns 0 if successful, else propagates up the lower layer error.
2036 */
2037 int
i915_gem_object_sync(struct drm_i915_gem_object * obj,struct intel_ring_buffer * to)2038 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2039 struct intel_ring_buffer *to)
2040 {
2041 struct intel_ring_buffer *from = obj->ring;
2042 u32 seqno;
2043 int ret, idx;
2044
2045 if (from == NULL || to == from)
2046 return 0;
2047
2048 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2049 return i915_gem_object_wait_rendering(obj, false);
2050
2051 idx = intel_ring_sync_index(from, to);
2052
2053 seqno = obj->last_read_seqno;
2054 if (seqno <= from->sync_seqno[idx])
2055 return 0;
2056
2057 ret = i915_gem_check_olr(obj->ring, seqno);
2058 if (ret)
2059 return ret;
2060
2061 ret = to->sync_to(to, from, seqno);
2062 if (!ret)
2063 /* We use last_read_seqno because sync_to()
2064 * might have just caused seqno wrap under
2065 * the radar.
2066 */
2067 from->sync_seqno[idx] = obj->last_read_seqno;
2068
2069 return ret;
2070 }
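/*
 * Usage note (a sketch of the expected caller pattern, not mandated by
 * this file): before ring B consumes a buffer last written by ring A,
 * the submission path calls
 *
 *	ret = i915_gem_object_sync(obj, B);
 *
 * which, per the code above, either queues a ring-to-ring semaphore wait
 * (when semaphores are enabled) or falls back to a CPU-side wait for
 * rendering to complete.
 */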
2071
i915_gem_object_finish_gtt(struct drm_i915_gem_object * obj)2072 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2073 {
2074
2075 /* Force a pagefault for domain tracking on next user access */
2076 i915_gem_release_mmap(obj);
2077
2078 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2079 return;
2080
2081 membar_producer();
2082 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2083 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2084 }
2085
2086 /**
2087 * Unbinds an object from the GTT aperture.
2088 */
2089 int
i915_gem_object_unbind(struct drm_i915_gem_object * obj,uint32_t type)2090 i915_gem_object_unbind(struct drm_i915_gem_object *obj, uint32_t type)
2091 {
2092 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2093 int ret;
2094
2095 if (obj->gtt_space == NULL)
2096 return 0;
2097
2098 if (obj->pin_count)
2099 return -EBUSY;
2100
2101 BUG_ON(obj->page_list == NULL);
2102
2103 ret = i915_gem_object_finish_gpu(obj);
2104 if (ret)
2105 return ret;
2106 /* Continue on if we fail due to EIO, the GPU is hung so we
2107 * should be safe and we need to cleanup or else we might
2108 * cause memory corruption through use-after-free.
2109 */
2110
2111 i915_gem_object_finish_gtt(obj);
2112
2113 /* release the fence reg _after_ flushing */
2114 ret = i915_gem_object_put_fence(obj);
2115 if (ret)
2116 return ret;
2117
2118 if (obj->has_global_gtt_mapping)
2119 i915_gem_gtt_unbind_object(obj, type);
2120 if (obj->has_aliasing_ppgtt_mapping) {
2121 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2122 obj->has_aliasing_ppgtt_mapping = 0;
2123 }
2124 i915_gem_gtt_finish_object(obj);
2125 i915_gem_object_unpin_pages(obj);
2126
2127 list_del(&obj->mm_list);
2128 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list, (caddr_t)obj);
2129 /* Avoid an unnecessary call to unbind on rebind. */
2130 obj->map_and_fenceable = true;
2131
2132 drm_mm_put_block(obj->gtt_space);
2133 obj->gtt_space = NULL;
2134 obj->gtt_offset = 0;
2135 TRACE_GEM_OBJ_HISTORY(obj, "unbind");
2136 return 0;
2137 }
2138
2139
i915_gpu_idle(struct drm_device * dev)2140 int i915_gpu_idle(struct drm_device *dev)
2141 {
2142 drm_i915_private_t *dev_priv = dev->dev_private;
2143 struct intel_ring_buffer *ring;
2144 int ret, i;
2145
2146 /* Flush everything onto the inactive list. */
2147 for_each_ring(ring, dev_priv, i) {
2148 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2149 if (ret)
2150 return ret;
2151
2152 ret = intel_ring_idle(ring);
2153 if (ret)
2154 return ret;
2155 }
2156
2157 return 0;
2158 }
2159
i965_write_fence_reg(struct drm_device * dev,int reg,struct drm_i915_gem_object * obj)2160 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2161 struct drm_i915_gem_object *obj)
2162 {
2163 drm_i915_private_t *dev_priv = dev->dev_private;
2164 int fence_reg;
2165 int fence_pitch_shift;
2166
2167 if (INTEL_INFO(dev)->gen >= 6) {
2168 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2169 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2170 } else {
2171 fence_reg = FENCE_REG_965_0;
2172 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2173 }
2174
2175 fence_reg += reg * 8;
2176
2177 /* To w/a incoherency with non-atomic 64-bit register updates,
2178 * we split the 64-bit update into two 32-bit writes. In order
2179 * for a partial fence not to be evaluated between writes, we
2180 * precede the update with write to turn off the fence register,
2181 * and only enable the fence as the last step.
2182 *
2183 * For extra levels of paranoia, we make sure each step lands
2184 * before applying the next step.
2185 */
2186 I915_WRITE(fence_reg, 0);
2187 POSTING_READ(fence_reg);
2188
2189 if (obj) {
2190 u32 size = obj->gtt_space->size;
2191 uint64_t val;
2192
2193 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2194 0xfffff000) << 32;
2195 val |= obj->gtt_offset & 0xfffff000;
2196 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2197 if (obj->tiling_mode == I915_TILING_Y)
2198 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2199 val |= I965_FENCE_REG_VALID;
2200
2201 I915_WRITE(fence_reg + 4, val >> 32);
2202 POSTING_READ(fence_reg + 4);
2203
2204 I915_WRITE(fence_reg + 0, val);
2205 POSTING_READ(fence_reg);
2206 } else {
2207 I915_WRITE(fence_reg + 4, 0);
2208 POSTING_READ(fence_reg + 4);
2209 }
2210 }
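/*
 * Worked example (hypothetical numbers, derived only from the encoding
 * above): an X-tiled object at gtt_offset 0x00100000, 1 MiB in size,
 * with a 512 byte stride on gen4 encodes as
 *
 *	upper 32 bits = (0x00100000 + 0x00100000 - 4096) & 0xfffff000
 *		      = 0x001ff000			(fence end)
 *	lower 32 bits = 0x00100000			(fence start)
 *		      | ((512 / 128) - 1) << fence_pitch_shift
 *		      | I965_FENCE_REG_VALID
 *
 * and the register is written disable/upper/lower, as above, so a
 * partially updated fence is never observed as valid.
 */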
2211
i915_write_fence_reg(struct drm_device * dev,int reg,struct drm_i915_gem_object * obj)2212 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2213 struct drm_i915_gem_object *obj)
2214 {
2215 drm_i915_private_t *dev_priv = dev->dev_private;
2216 u32 val;
2217
2218 if (obj) {
2219 u32 size = obj->gtt_space->size;
2220 int pitch_val;
2221 int tile_width;
2222
2223 		if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2224 (size & -size) != size ||
2225 (obj->gtt_offset & (size - 1)))
2226 DRM_ERROR("object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2227 obj->gtt_offset, obj->map_and_fenceable, size);
2228
2229 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2230 tile_width = 128;
2231 else
2232 tile_width = 512;
2233
2234 /* Note: pitch better be a power of two tile widths */
2235 pitch_val = obj->stride / tile_width;
2236 pitch_val = ffs(pitch_val) - 1;
2237
2238 val = obj->gtt_offset;
2239 if (obj->tiling_mode == I915_TILING_Y)
2240 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2241 val |= I915_FENCE_SIZE_BITS(size);
2242 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2243 val |= I830_FENCE_REG_VALID;
2244 } else
2245 val = 0;
2246
2247 if (reg < 8)
2248 reg = FENCE_REG_830_0 + reg * 4;
2249 else
2250 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2251
2252 I915_WRITE(reg, val);
2253 POSTING_READ(reg);
2254 }
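/*
 * Small arithmetic example for the pitch encoding above (illustrative
 * numbers): with a 2048 byte stride and 512 byte wide tiles,
 * pitch_val = ffs(2048 / 512) - 1 = ffs(4) - 1 = 2, i.e. the field
 * stores log2 of the stride in tile widths, which is why the stride
 * must be a power-of-two number of tile widths.
 */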
2255
i830_write_fence_reg(struct drm_device * dev,int reg,struct drm_i915_gem_object * obj)2256 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2257 struct drm_i915_gem_object *obj)
2258 {
2259 drm_i915_private_t *dev_priv = dev->dev_private;
2260 uint32_t val;
2261
2262 if (obj) {
2263 u32 size = obj->gtt_space->size;
2264 uint32_t pitch_val;
2265
2266 		if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2267 (size & -size) != size ||
2268 (obj->gtt_offset & (size - 1)))
2269 DRM_ERROR("object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2270 obj->gtt_offset, size);
2271
2272 pitch_val = obj->stride / 128;
2273 pitch_val = ffs(pitch_val) - 1;
2274
2275 val = obj->gtt_offset;
2276 if (obj->tiling_mode == I915_TILING_Y)
2277 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2278 val |= I830_FENCE_SIZE_BITS(size);
2279 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2280 val |= I830_FENCE_REG_VALID;
2281 } else
2282 val = 0;
2283
2284 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2285 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2286 }
2287
i915_gem_object_needs_mb(struct drm_i915_gem_object * obj)2288 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2289 {
2290 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2291 }
2292
i915_gem_write_fence(struct drm_device * dev,int reg,struct drm_i915_gem_object * obj)2293 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2294 struct drm_i915_gem_object *obj)
2295 {
2296 struct drm_i915_private *dev_priv = dev->dev_private;
2297
2298 /* Ensure that all CPU reads are completed before installing a fence
2299 * and all writes before removing the fence.
2300 */
2301 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2302 membar_producer();
2303
2304 	if (obj && (!obj->stride || !obj->tiling_mode))
2305 DRM_ERROR("bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2306 obj->stride, obj->tiling_mode);
2307
2308 switch (INTEL_INFO(dev)->gen) {
2309 case 7:
2310 case 6:
2311 case 5:
2312 case 4: i965_write_fence_reg(dev, reg, obj); break;
2313 case 3: i915_write_fence_reg(dev, reg, obj); break;
2314 case 2: i830_write_fence_reg(dev, reg, obj); break;
2315 default: BUG();
2316 }
2317
2318 /* And similarly be paranoid that no direct access to this region
2319 * is reordered to before the fence is installed.
2320 */
2321 if (i915_gem_object_needs_mb(obj))
2322 membar_producer();
2323 }
2324
fence_number(struct drm_i915_private * dev_priv,struct drm_i915_fence_reg * fence)2325 static inline int fence_number(struct drm_i915_private *dev_priv,
2326 struct drm_i915_fence_reg *fence)
2327 {
2328 return fence - dev_priv->fence_regs;
2329 }
2330
i915_gem_object_update_fence(struct drm_i915_gem_object * obj,struct drm_i915_fence_reg * fence,bool enable)2331 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2332 struct drm_i915_fence_reg *fence,
2333 bool enable)
2334 {
2335 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2336 int reg = fence_number(dev_priv, fence);
2337
2338 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2339
2340 if (enable) {
2341 obj->fence_reg = reg;
2342 fence->obj = obj;
2343 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list, (caddr_t)fence);
2344 } else {
2345 obj->fence_reg = I915_FENCE_REG_NONE;
2346 fence->obj = NULL;
2347 list_del_init(&fence->lru_list);
2348 }
2349 obj->fence_dirty = false;
2350 }
2351
2352 static int
i915_gem_object_wait_fence(struct drm_i915_gem_object * obj)2353 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2354 {
2355 if (obj->last_fenced_seqno) {
2356 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2357 if (ret)
2358 return ret;
2359
2360 obj->last_fenced_seqno = 0;
2361 }
2362
2363 obj->fenced_gpu_access = false;
2364 return 0;
2365 }
2366
2367 int
i915_gem_object_put_fence(struct drm_i915_gem_object * obj)2368 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2369 {
2370 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2371 struct drm_i915_fence_reg *fence;
2372 int ret;
2373
2374 ret = i915_gem_object_wait_fence(obj);
2375 if (ret)
2376 return ret;
2377
2378 if (obj->fence_reg == I915_FENCE_REG_NONE)
2379 return 0;
2380
2381 fence = &dev_priv->fence_regs[obj->fence_reg];
2382
2383 i915_gem_object_fence_lost(obj);
2384 i915_gem_object_update_fence(obj, fence, false);
2385
2386 return 0;
2387 }
2388
2389 static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device * dev)2390 i915_find_fence_reg(struct drm_device *dev)
2391 {
2392 struct drm_i915_private *dev_priv = dev->dev_private;
2393 struct drm_i915_fence_reg *reg, *avail;
2394 int i;
2395
2396 /* First try to find a free reg */
2397 avail = NULL;
2398 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2399 reg = &dev_priv->fence_regs[i];
2400 if (!reg->obj)
2401 return reg;
2402
2403 if (!reg->pin_count)
2404 avail = reg;
2405 }
2406
2407 if (avail == NULL)
2408 return NULL;
2409
2410 /* None available, try to steal one or wait for a user to finish */
2411 list_for_each_entry(reg, struct drm_i915_fence_reg, &dev_priv->mm.fence_list, lru_list) {
2412 if (reg->pin_count)
2413 continue;
2414
2415 return reg;
2416 }
2417
2418 return NULL;
2419 }
2420
2421 /**
2422 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2423 * @obj: object to map through a fence reg
2424 *
2425 * When mapping objects through the GTT, userspace wants to be able to write
2426 * to them without having to worry about swizzling if the object is tiled.
2427 * This function walks the fence regs looking for a free one for @obj,
2428 * stealing one if it can't find any.
2429 *
2430 * It then sets up the reg based on the object's properties: address, pitch
2431 * and tiling format.
2432 *
2433 * For an untiled surface, this removes any existing fence.
2434 */
2435 int
i915_gem_object_get_fence(struct drm_i915_gem_object * obj)2436 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2437 {
2438 struct drm_device *dev = obj->base.dev;
2439 struct drm_i915_private *dev_priv = dev->dev_private;
2440 bool enable = obj->tiling_mode != I915_TILING_NONE;
2441 struct drm_i915_fence_reg *reg;
2442 int ret;
2443
2444 /* Have we updated the tiling parameters upon the object and so
2445 * will need to serialise the write to the associated fence register?
2446 */
2447 if (obj->fence_dirty) {
2448 ret = i915_gem_object_wait_fence(obj);
2449 if (ret)
2450 return ret;
2451 }
2452
2453 /* Just update our place in the LRU if our fence is getting reused. */
2454 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2455 reg = &dev_priv->fence_regs[obj->fence_reg];
2456 if (!obj->fence_dirty) {
2457 			list_move_tail(&reg->lru_list,
2458 &dev_priv->mm.fence_list, (caddr_t)reg);
2459 return 0;
2460 }
2461 } else if (enable) {
2462 reg = i915_find_fence_reg(dev);
2463 if (reg == NULL)
2464 return -EDEADLK;
2465
2466 if (reg->obj) {
2467 struct drm_i915_gem_object *old = reg->obj;
2468
2469 ret = i915_gem_object_wait_fence(old);
2470 if (ret)
2471 return ret;
2472
2473 i915_gem_object_fence_lost(old);
2474 }
2475 } else
2476 return 0;
2477
2478 i915_gem_object_update_fence(obj, reg, enable);
2479
2480 return 0;
2481 }
2482
i915_gem_valid_gtt_space(struct drm_device * dev,struct drm_mm_node * gtt_space,unsigned long cache_level)2483 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2484 struct drm_mm_node *gtt_space,
2485 unsigned long cache_level)
2486 {
2487 struct drm_mm_node *other;
2488
2489 /* On non-LLC machines we have to be careful when putting differing
2490 * types of snoopable memory together to avoid the prefetcher
2491 	 * crossing memory domains and dying.
2492 */
2493 if (HAS_LLC(dev))
2494 return true;
2495
2496 if (gtt_space == NULL)
2497 return true;
2498
2499 	if (list_empty(&gtt_space->node_list))
2500 return true;
2501
2502 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2503 if (other->allocated && !other->hole_follows && other->color != cache_level)
2504 return false;
2505
2506 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2507 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2508 return false;
2509
2510 return true;
2511 }
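/*
 * Illustrative scenario (an assumption about when this check matters):
 * on a non-LLC part, an uncached (I915_CACHE_NONE) object that would sit
 * directly against a snooped neighbour, with no hole on either side,
 * fails the checks above; bind_to_gtt then rejects the freshly allocated
 * node and set_cache_level unbinds the object before recolouring it.
 */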
2512
i915_gem_verify_gtt(struct drm_device * dev)2513 static void i915_gem_verify_gtt(struct drm_device *dev)
2514 {
2515 #if WATCH_GTT
2516 struct drm_i915_private *dev_priv = dev->dev_private;
2517 struct drm_i915_gem_object *obj;
2518 int err = 0;
2519
2520 list_for_each_entry(obj, struct drm_i915_gem_object, &dev_priv->mm.gtt_list, global_list) {
2521 if (obj->gtt_space == NULL) {
2522 DRM_ERROR("object found on GTT list with no space reserved\n");
2523 err++;
2524 continue;
2525 }
2526
2527 if (obj->cache_level != obj->gtt_space->color) {
2528 DRM_ERROR("object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2529 obj->gtt_space->start,
2530 obj->gtt_space->start + obj->gtt_space->size,
2531 obj->cache_level,
2532 obj->gtt_space->color);
2533 err++;
2534 continue;
2535 }
2536
2537 if (!i915_gem_valid_gtt_space(dev,
2538 obj->gtt_space,
2539 obj->cache_level)) {
2540 DRM_ERROR("invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2541 obj->gtt_space->start,
2542 obj->gtt_space->start + obj->gtt_space->size,
2543 obj->cache_level);
2544 err++;
2545 continue;
2546 }
2547 }
2548
2549 WARN_ON(err);
2550 #endif
2551 }
2552
2553 /**
2554 * Finds free space in the GTT aperture and binds the object there.
2555 */
2556 static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object * obj,unsigned alignment,bool map_and_fenceable,bool nonblocking)2557 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2558 unsigned alignment,
2559 bool map_and_fenceable,
2560 bool nonblocking)
2561 {
2562 struct drm_device *dev = obj->base.dev;
2563 drm_i915_private_t *dev_priv = dev->dev_private;
2564 struct drm_mm_node *node;
2565 u32 size, fence_size, fence_alignment, unfenced_alignment;
2566 bool mappable, fenceable;
2567 size_t gtt_max = map_and_fenceable ?
2568 dev_priv->gtt.mappable_end : dev_priv->gtt.total;
2569 int ret;
2570
2571 fence_size = i915_gem_get_gtt_size(dev,
2572 obj->base.size,
2573 obj->tiling_mode);
2574 fence_alignment = i915_gem_get_gtt_alignment(dev,
2575 obj->base.size,
2576 obj->tiling_mode, true);
2577 unfenced_alignment =
2578 i915_gem_get_gtt_alignment(dev,
2579 obj->base.size,
2580 obj->tiling_mode, false);
2581
2582 if (alignment == 0)
2583 alignment = map_and_fenceable ? fence_alignment :
2584 unfenced_alignment;
2585 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2586 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2587 return -EINVAL;
2588 }
2589
2590 size = map_and_fenceable ? fence_size : obj->base.size;
2591
2592 /* If the object is bigger than the entire aperture, reject it early
2593 * before evicting everything in a vain attempt to find space.
2594 */
2595 if (obj->base.size > gtt_max) {
2596 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
2597 obj->base.size,
2598 map_and_fenceable ? "mappable" : "total",
2599 gtt_max);
2600 return -E2BIG;
2601 }
2602
2603 ret = i915_gem_object_get_pages(obj);
2604 if (ret)
2605 return ret;
2606
2607 i915_gem_object_pin_pages(obj);
2608
2609 node = kzalloc(sizeof(*node), GFP_KERNEL);
2610 if (node == NULL) {
2611 i915_gem_object_unpin_pages(obj);
2612 return -ENOMEM;
2613 }
2614
2615 search_free:
2616 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
2617 size, alignment,
2618 obj->cache_level, 0, gtt_max);
2619 if (ret) {
2620 ret = i915_gem_evict_something(dev, size, alignment,
2621 obj->cache_level,
2622 map_and_fenceable,
2623 nonblocking);
2624 if (ret == 0)
2625 goto search_free;
2626
2627 i915_gem_object_unpin_pages(obj);
2628 kfree(node, sizeof(*node));
2629 return ret;
2630 }
2631 	if (!i915_gem_valid_gtt_space(dev,
2632 					node,
2633 					obj->cache_level)) {
2634 i915_gem_object_unpin_pages(obj);
2635 drm_mm_put_block(node);
2636 return -EINVAL;
2637 }
2638
2639 ret = i915_gem_gtt_prepare_object(obj);
2640 if (ret) {
2641 i915_gem_object_unpin_pages(obj);
2642 drm_mm_put_block(node);
2643 return ret;
2644 }
2645
2646 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list, (caddr_t)obj);
2647 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
2648
2649 obj->gtt_space = node;
2650 obj->gtt_offset = node->start;
2651
2652 fenceable =
2653 node->size == fence_size &&
2654 (node->start & (fence_alignment - 1)) == 0;
2655
2656 mappable =
2657 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
2658
2659 obj->map_and_fenceable = mappable && fenceable;
2660
2661 TRACE_GEM_OBJ_HISTORY(obj, "bind gtt");
2662 i915_gem_verify_gtt(dev);
2663 return 0;
2664 }
2665
2666 void
i915_gem_clflush_object(struct drm_i915_gem_object * obj)2667 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2668 {
2669 /* If we don't have a page list set up, then we're not pinned
2670 * to GPU, and we can ignore the cache flush because it'll happen
2671 * again at bind time.
2672 */
2673 if (obj->page_list == NULL)
2674 return;
2675
2676 /*
2677 * Stolen memory is always coherent with the GPU as it is explicitly
2678 * marked as wc by the system, or the system is cache-coherent.
2679 */
2680 if (obj->stolen)
2681 return;
2682
2683 /* If the GPU is snooping the contents of the CPU cache,
2684 * we do not need to manually clear the CPU cache lines. However,
2685 * the caches are only snooped when the render cache is
2686 * flushed/invalidated. As we always have to emit invalidations
2687 * and flushes when moving into and out of the RENDER domain, correct
2688 * snooping behaviour occurs naturally as the result of our domain
2689 * tracking.
2690 */
2691 if (obj->cache_level != I915_CACHE_NONE)
2692 return;
2693
2694 drm_clflush_pages(obj->page_list, obj->base.size / PAGE_SIZE);
2695 TRACE_GEM_OBJ_HISTORY(obj, "clflush");
2696 }
2697
2698 /** Flushes the GTT write domain for the object if it's dirty. */
2699 static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object * obj)2700 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2701 {
2702 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2703 return;
2704
2705 /* No actual flushing is required for the GTT write domain. Writes
2706 * to it immediately go to main memory as far as we know, so there's
2707 * no chipset flush. It also doesn't land in render cache.
2708 *
2709 * However, we do have to enforce the order so that all writes through
2710 * the GTT land before any writes to the device, such as updates to
2711 * the GATT itself.
2712 */
2713 membar_producer();
2714
2715 obj->base.write_domain = 0;
2716 }
2717
2718 /** Flushes the CPU write domain for the object if it's dirty. */
2719 static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object * obj)2720 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2721 {
2722 struct drm_device *dev = obj->base.dev;
2723
2724 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2725 return;
2726
2727 i915_gem_clflush_object(obj);
2728 i915_gem_chipset_flush(dev);
2729 obj->base.write_domain = 0;
2730 }
2731
2732 /**
2733 * Moves a single object to the GTT read, and possibly write domain.
2734 *
2735 * This function returns when the move is complete, including waiting on
2736 * flushes to occur.
2737 */
2738 int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object * obj,int write)2739 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
2740 {
2741 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2742 int ret;
2743
2744 /* Not valid to be called on unbound objects. */
2745 if (obj->gtt_space == NULL)
2746 return -EINVAL;
2747
2748 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2749 return 0;
2750
2751 ret = i915_gem_object_wait_rendering(obj, !write);
2752 if (ret)
2753 return ret;
2754
2755 i915_gem_object_flush_cpu_write_domain(obj);
2756
2757 /* Serialise direct access to this object with the barriers for
2758 * coherent writes from the GPU, by effectively invalidating the
2759 * GTT domain upon first access.
2760 */
2761 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2762 membar_producer();
2763
2764 /* It should now be out of any other write domains, and we can update
2765 * the domain values for our changes.
2766 */
2767 /* GPU reset can handle this error */
2768 // BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2769 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2770 if (write) {
2771 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2772 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2773 obj->dirty = 1;
2774 }
2775
2776 /* And bump the LRU for this access */
2777 if (i915_gem_object_is_inactive(obj))
2778 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list, (caddr_t)obj);
2779
2780 return 0;
2781 }
2782
i915_gem_object_set_cache_level(struct drm_i915_gem_object * obj,enum i915_cache_level cache_level)2783 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2784 enum i915_cache_level cache_level)
2785 {
2786 struct drm_device *dev = obj->base.dev;
2787 drm_i915_private_t *dev_priv = dev->dev_private;
2788 int ret;
2789
2790 if (obj->cache_level == cache_level)
2791 return 0;
2792
2793 if (obj->pin_count) {
2794 DRM_DEBUG("can not change the cache level of pinned objects\n");
2795 return -EBUSY;
2796 }
2797
2798 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
2799 ret = i915_gem_object_unbind(obj, true);
2800 if (ret)
2801 return ret;
2802 }
2803
2804 if (obj->gtt_space) {
2805 ret = i915_gem_object_finish_gpu(obj);
2806 if (ret)
2807 return ret;
2808
2809 i915_gem_object_finish_gtt(obj);
2810
2811 /* Before SandyBridge, you could not use tiling or fence
2812 * registers with snooped memory, so relinquish any fences
2813 * currently pointing to our region in the aperture.
2814 */
2815 if (INTEL_INFO(dev)->gen < 6) {
2816 ret = i915_gem_object_put_fence(obj);
2817 if (ret)
2818 return ret;
2819 }
2820
2821 if (obj->has_global_gtt_mapping)
2822 i915_gem_gtt_bind_object(obj, cache_level);
2823 if (obj->has_aliasing_ppgtt_mapping)
2824 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2825 obj, cache_level);
2826
2827 obj->gtt_space->color = cache_level;
2828 }
2829
2830 if (cache_level == I915_CACHE_NONE) {
2831 /* If we're coming from LLC cached, then we haven't
2832 * actually been tracking whether the data is in the
2833 * CPU cache or not, since we only allow one bit set
2834 * in obj->write_domain and have been skipping the clflushes.
2835 * Just set it to the CPU cache for now.
2836 */
2837 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2838 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
2839
2840 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2841 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2842 }
2843
2844 obj->cache_level = cache_level;
2845 i915_gem_verify_gtt(dev);
2846 return 0;
2847 }
2848
i915_gem_get_caching_ioctl(DRM_IOCTL_ARGS)2849 int i915_gem_get_caching_ioctl(DRM_IOCTL_ARGS)
2850 {
2851 struct drm_i915_gem_caching *args = data;
2852 struct drm_i915_gem_object *obj;
2853 int ret;
2854
2855 ret = i915_mutex_lock_interruptible(dev);
2856 if (ret)
2857 return ret;
2858
2859 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2860 if (&obj->base == NULL) {
2861 ret = -ENOENT;
2862 goto unlock;
2863 }
2864
2865 args->caching = obj->cache_level != I915_CACHE_NONE;
2866
2867 drm_gem_object_unreference(&obj->base);
2868 unlock:
2869 mutex_unlock(&dev->struct_mutex);
2870 return ret;
2871 }
2872
i915_gem_set_caching_ioctl(DRM_IOCTL_ARGS)2873 int i915_gem_set_caching_ioctl(DRM_IOCTL_ARGS)
2874 {
2875 struct drm_i915_gem_caching *args = data;
2876 struct drm_i915_gem_object *obj;
2877 enum i915_cache_level level;
2878 int ret;
2879
2880 switch (args->caching) {
2881 case I915_CACHING_NONE:
2882 level = I915_CACHE_NONE;
2883 break;
2884 case I915_CACHING_CACHED:
2885 level = I915_CACHE_LLC;
2886 break;
2887 default:
2888 return -EINVAL;
2889 }
2890
2891 ret = i915_mutex_lock_interruptible(dev);
2892 if (ret)
2893 return ret;
2894
2895 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2896 if (&obj->base == NULL) {
2897 ret = -ENOENT;
2898 goto unlock;
2899 }
2900
2901 ret = i915_gem_object_set_cache_level(obj, level);
2902
2903 drm_gem_object_unreference(&obj->base);
2904 unlock:
2905 mutex_unlock(&dev->struct_mutex);
2906 return ret;
2907 }
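/*
 * Hedged userspace sketch (struct and field names as referenced by the
 * two handlers above, values hypothetical): switching a buffer to LLC
 * caching looks roughly like
 *
 *	struct drm_i915_gem_caching caching = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &caching);
 *
 * while the GET variant reports 0 or 1 through the same caching field.
 */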
2908
2909 /*
2910 * Prepare buffer for display plane (scanout, cursors, etc).
2911 * Can be called from an uninterruptible phase (modesetting) and allows
2912 * any flushes to be pipelined (for pageflips).
2913 */
2914 int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object * obj,u32 alignment,struct intel_ring_buffer * pipelined)2915 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2916 u32 alignment,
2917 struct intel_ring_buffer *pipelined)
2918 {
2919 /* LINTED */
2920 u32 old_read_domains, old_write_domain;
2921 int ret;
2922
2923 if (pipelined != obj->ring) {
2924 ret = i915_gem_object_sync(obj, pipelined);
2925 if (ret)
2926 return ret;
2927 }
2928
2929 /* The display engine is not coherent with the LLC cache on gen6. As
2930 * a result, we make sure that the pinning that is about to occur is
2931 * done with uncached PTEs. This is lowest common denominator for all
2932 * chipsets.
2933 *
2934 * However for gen6+, we could do better by using the GFDT bit instead
2935 * of uncaching, which would allow us to flush all the LLC-cached data
2936 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2937 */
2938 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2939 if (ret)
2940 return ret;
2941
2942 /* As the user may map the buffer once pinned in the display plane
2943 * (e.g. libkms for the bootup splash), we have to ensure that we
2944 * always use map_and_fenceable for all scanout buffers.
2945 */
2946 ret = i915_gem_object_pin(obj, alignment, true, false);
2947 if (ret)
2948 return ret;
2949
2950 i915_gem_object_flush_cpu_write_domain(obj);
2951
2952 old_write_domain = obj->base.write_domain;
2953 old_read_domains = obj->base.read_domains;
2954
2955 /* It should now be out of any other write domains, and we can update
2956 * the domain values for our changes.
2957 */
2958 obj->base.write_domain = 0;
2959 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2960
2961 return 0;
2962 }
2963
2964 int
i915_gem_object_finish_gpu(struct drm_i915_gem_object * obj)2965 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
2966 {
2967 int ret;
2968
2969 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
2970 return 0;
2971
2972 ret = i915_gem_object_wait_rendering(obj, false);
2973 if (ret)
2974 return ret;
2975
2976 /* Ensure that we invalidate the GPU's caches and TLBs. */
2977 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2978 return 0;
2979 }
2980
2981 /**
2982 * Moves a single object to the CPU read, and possibly write domain.
2983 *
2984 * This function returns when the move is complete, including waiting on
2985 * flushes to occur.
2986 */
2987 int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object * obj,bool write)2988 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
2989 {
2990 /* LINTED */
2991 uint32_t old_write_domain, old_read_domains;
2992 int ret;
2993
2994 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2995 return 0;
2996
2997 ret = i915_gem_object_wait_rendering(obj, !write);
2998 if (ret)
2999 return ret;
3000
3001 i915_gem_object_flush_gtt_write_domain(obj);
3002
3003 old_write_domain = obj->base.write_domain;
3004 old_read_domains = obj->base.read_domains;
3005
3006 /* Flush the CPU cache if it's still invalid. */
3007 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3008 i915_gem_clflush_object(obj);
3009
3010 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3011 }
3012
3013 /* It should now be out of any other write domains, and we can update
3014 * the domain values for our changes.
3015 */
3016 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3017
3018 /* If we're writing through the CPU, then the GPU read domains will
3019 * need to be invalidated at next use.
3020 */
3021 if (write) {
3022 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3023 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3024 }
3025
3026 return 0;
3027 }
3028
3029 /* Throttle our rendering by waiting until the ring has completed our requests
3030 * emitted over 20 msec ago.
3031 *
3032 * Note that if we were to use the current jiffies each time around the loop,
3033 * we wouldn't escape the function with any frames outstanding if the time to
3034 * render a frame was over 20ms.
3035 *
3036 * This should get us reasonable parallelism between CPU and GPU but also
3037 * relatively low latency when blocking on a particular request to finish.
3038 */
3039 static int
i915_gem_ring_throttle(struct drm_device * dev,struct drm_file * file)3040 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3041 {
3042 struct drm_i915_private *dev_priv = dev->dev_private;
3043 struct drm_i915_file_private *file_priv = file->driver_priv;
3044 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3045 struct drm_i915_gem_request *request;
3046 struct intel_ring_buffer *ring = NULL;
3047 unsigned reset_counter;
3048 u32 seqno = 0;
3049 int ret;
3050
3051 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3052 if (ret)
3053 return ret;
3054
3055 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3056 if (ret)
3057 return ret;
3058 spin_lock(&file_priv->mm.lock);
3059 list_for_each_entry(request, struct drm_i915_gem_request, &file_priv->mm.request_list, client_list) {
3060 if (time_after_eq(request->emitted_jiffies, recent_enough))
3061 break;
3062
3063 ring = request->ring;
3064 seqno = request->seqno;
3065 }
3066 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3067 spin_unlock(&file_priv->mm.lock);
3068
3069 if (seqno == 0)
3070 return 0;
3071
3072 ret = __wait_seqno(ring, seqno, reset_counter, true, 0);
3073 if (ret == 0)
3074 test_set_timer(&dev_priv->mm.retire_timer, 0);
3075
3076 return ret;
3077 }
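/*
 * Rough arithmetic behind the 20 msec window above (illustrative): with
 * the sliding recent_enough cutoff, a client whose frames take ~5 msec
 * can keep roughly 20 / 5 = 4 frames in flight before the throttle makes
 * it wait, so each client carries at most about 20 msec of queued work.
 */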
3078
3079 int
i915_gem_object_pin(struct drm_i915_gem_object * obj,uint32_t alignment,bool map_and_fenceable,bool nonblocking)3080 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3081 uint32_t alignment,
3082 bool map_and_fenceable,
3083 bool nonblocking)
3084 {
3085 int ret;
3086
3087 	if (obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)
3088 return -EBUSY;
3089
3090 if (obj->gtt_space != NULL) {
3091 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3092 (map_and_fenceable && !obj->map_and_fenceable)) {
3093 DRM_INFO("bo is already pinned with incorrect alignment:"
3094 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3095 " obj->map_and_fenceable=%d\n",
3096 obj->gtt_offset, alignment,
3097 map_and_fenceable,
3098 obj->map_and_fenceable);
3099 ret = i915_gem_object_unbind(obj, 1);
3100 if (ret)
3101 return ret;
3102 }
3103 }
3104
3105 if (obj->gtt_space == NULL) {
3106 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3107
3108 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3109 map_and_fenceable,
3110 nonblocking);
3111 if (ret)
3112 return ret;
3113
3114 if (!dev_priv->mm.aliasing_ppgtt)
3115 i915_gem_gtt_bind_object(obj, obj->cache_level);
3116 }
3117
3118 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3119 i915_gem_gtt_bind_object(obj, obj->cache_level);
3120
3121 obj->pin_count++;
3122 obj->pin_mappable |= map_and_fenceable;
3123
3124 return 0;
3125 }
3126
3127 void
i915_gem_object_unpin(struct drm_i915_gem_object * obj)3128 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3129 {
3130 BUG_ON(obj->pin_count == 0);
3131 BUG_ON(obj->gtt_space == NULL);
3132
3133 if (--obj->pin_count == 0)
3134 obj->pin_mappable = false;
3135 }
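/*
 * Note on the refcounting above (a sketch of the expected usage): pin
 * and unpin must be balanced. For example, the pin ioctl below only
 * calls i915_gem_object_pin() when user_pin_count goes 0 -> 1, and the
 * unpin ioctl only calls i915_gem_object_unpin() when it drops back to
 * 0, so the object cannot be unbound (i915_gem_object_unbind() returns
 * -EBUSY) while any client still holds a user pin.
 */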
3136
3137 int
3138 /* LINTED */
i915_gem_pin_ioctl(DRM_IOCTL_ARGS)3139 i915_gem_pin_ioctl(DRM_IOCTL_ARGS)
3140 {
3141 struct drm_i915_gem_pin *args = data;
3142 struct drm_i915_gem_object *obj;
3143 int ret;
3144
3145 ret = i915_mutex_lock_interruptible(dev);
3146 if (ret)
3147 return ret;
3148
3149 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3150 if (&obj->base == NULL) {
3151 ret = -ENOENT;
3152 goto unlock;
3153 }
3154
3155 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3156 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3157 args->handle);
3158 ret = -EINVAL;
3159 goto out;
3160 }
3161
3162 obj->user_pin_count++;
3163 obj->pin_filp = file;
3164 if (obj->user_pin_count == 1) {
3165 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3166 if (ret)
3167 goto out;
3168 }
3169
3170 /* XXX - flush the CPU caches for pinned objects
3171 * as the X server doesn't manage domains yet
3172 */
3173 i915_gem_object_flush_cpu_write_domain(obj);
3174 args->offset = obj->gtt_offset;
3175 out:
3176 drm_gem_object_unreference(&obj->base);
3177 unlock:
3178 mutex_unlock(&dev->struct_mutex);
3179 return ret;
3180 }
3181
3182 int
3183 /* LINTED */
i915_gem_unpin_ioctl(DRM_IOCTL_ARGS)3184 i915_gem_unpin_ioctl(DRM_IOCTL_ARGS)
3185 {
3186 struct drm_i915_gem_pin *args = data;
3187 struct drm_i915_gem_object *obj;
3188 int ret;
3189
3190 ret = i915_mutex_lock_interruptible(dev);
3191 if (ret)
3192 return ret;
3193
3194 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3195 if (&obj->base == NULL) {
3196 ret = -ENOENT;
3197 goto unlock;
3198 }
3199
3200 if (obj->pin_filp != file) {
3201 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3202 args->handle);
3203 ret = -EINVAL;
3204 goto out;
3205 }
3206 obj->user_pin_count--;
3207 if (obj->user_pin_count == 0) {
3208 obj->pin_filp = NULL;
3209 i915_gem_object_unpin(obj);
3210 }
3211
3212 out:
3213 drm_gem_object_unreference(&obj->base);
3214 unlock:
3215 mutex_unlock(&dev->struct_mutex);
3216 return ret;
3217 }
3218
3219 int
3220 /* LINTED */
i915_gem_busy_ioctl(DRM_IOCTL_ARGS)3221 i915_gem_busy_ioctl(DRM_IOCTL_ARGS)
3222 {
3223 struct drm_i915_gem_busy *args = data;
3224 struct drm_i915_gem_object *obj;
3225 int ret;
3226
3227 ret = i915_mutex_lock_interruptible(dev);
3228 if (ret)
3229 return ret;
3230
3231 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3232 if (&obj->base == NULL) {
3233 ret = -ENOENT;
3234 goto unlock;
3235 }
3236
3237 /* Count all active objects as busy, even if they are currently not used
3238 * by the gpu. Users of this interface expect objects to eventually
3239 * become non-busy without any further actions, therefore emit any
3240 * necessary flushes here.
3241 */
3242 ret = i915_gem_object_flush_active(obj);
3243
3244 args->busy = obj->active;
3245 if (obj->ring) {
3246 args->busy |= intel_ring_flag(obj->ring) << 16;
3247 }
3248
3249 drm_gem_object_unreference(&obj->base);
3250 unlock:
3251 mutex_unlock(&dev->struct_mutex);
3252 return ret;
3253 }
3254
3255 int
3256 /* LINTED */
i915_gem_throttle_ioctl(DRM_IOCTL_ARGS)3257 i915_gem_throttle_ioctl(DRM_IOCTL_ARGS)
3258 {
3259 return i915_gem_ring_throttle(dev, file);
3260 }
3261
3262 int
3263 /* LINTED */
i915_gem_madvise_ioctl(DRM_IOCTL_ARGS)3264 i915_gem_madvise_ioctl(DRM_IOCTL_ARGS)
3265 {
3266 struct drm_i915_gem_madvise *args = data;
3267
3268 	/* Don't enable buffer caching */
3269 args->retained = 0;
3270 return 0;
3271 }
3272
i915_gem_object_init(struct drm_i915_gem_object * obj,const struct drm_i915_gem_object_ops * ops)3273 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3274 const struct drm_i915_gem_object_ops *ops)
3275 {
3276 INIT_LIST_HEAD(&obj->mm_list);
3277 INIT_LIST_HEAD(&obj->global_list);
3278 INIT_LIST_HEAD(&obj->ring_list);
3279 INIT_LIST_HEAD(&obj->exec_list);
3280
3281 obj->ops = ops;
3282
3283 obj->fence_reg = I915_FENCE_REG_NONE;
3284 obj->madv = I915_MADV_WILLNEED;
3285 /* Avoid an unnecessary call to unbind on the first bind. */
3286 obj->map_and_fenceable = true;
3287
3288 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3289 }
3290
3291 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3292 .get_pages = i915_gem_object_get_pages_gtt,
3293 .put_pages = i915_gem_object_put_pages_gtt,
3294 };
3295
i915_gem_alloc_object(struct drm_device * dev,size_t size)3296 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3297 size_t size)
3298 {
3299 struct drm_i915_gem_object *obj;
3300 int gen;
3301
3302 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3303 if (obj == NULL)
3304 return NULL;
3305
3306 if (IS_G33(dev))
3307 gen = 33;
3308 else
3309 gen = INTEL_INFO(dev)->gen * 10;
3310
3311 if (drm_gem_object_init(dev, &obj->base, size, gen) != 0) {
3312 kfree(obj, sizeof(*obj));
3313 DRM_ERROR("failed to init gem object");
3314 return NULL;
3315 }
3316
3317
3318 i915_gem_object_init(obj, &i915_gem_object_ops);
3319
3320 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3321 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3322
3323 if (HAS_LLC(dev)) {
3324 /* On Gen6, we can have the GPU use the LLC (the CPU
3325 * cache) for about a 10% performance improvement
3326 * compared to uncached. Graphics requests other than
3327 * display scanout are coherent with the CPU in
3328 * accessing this cache. This means in this mode we
3329 * don't need to clflush on the CPU side, and on the
3330 * GPU side we only need to flush internal caches to
3331 * get data visible to the CPU.
3332 *
3333 * However, we maintain the display planes as UC, and so
3334 * need to rebind when first used as such.
3335 */
3336 obj->cache_level = I915_CACHE_LLC;
3337 } else
3338 obj->cache_level = I915_CACHE_NONE;
3339
3340 return obj;
3341 }
3342
i915_gem_init_object(struct drm_gem_object * obj)3343 int i915_gem_init_object(struct drm_gem_object *obj)
3344 {
3345 DRM_ERROR("i915_gem_init_object is not supported, BUG!");
3346 return 0;
3347 }
3348
i915_gem_free_object(struct drm_gem_object * gem_obj)3349 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3350 {
3351 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3352 struct drm_device *dev = obj->base.dev;
3353 drm_i915_private_t *dev_priv = dev->dev_private;
3354 int ret;
3355
3356 if (obj->phys_obj)
3357 i915_gem_detach_phys_object(dev, obj);
3358
3359 obj->pin_count = 0;
3360 ret = i915_gem_object_unbind(obj, 1);
3361 if (ret) {
3362 bool was_interruptible;
3363 was_interruptible = dev_priv->mm.interruptible;
3364 dev_priv->mm.interruptible = false;
3365
3366 WARN_ON(i915_gem_object_unbind(obj, 1));
3367
3368 dev_priv->mm.interruptible = was_interruptible;
3369 }
3370
3371 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
3372 * before progressing. */
3373 if (obj->stolen)
3374 i915_gem_object_unpin_pages(obj);
3375
3376 if (obj->pages_pin_count)
3377 obj->pages_pin_count = 0;
3378 i915_gem_object_put_pages(obj);
3379 if (obj->mmap_offset)
3380 i915_gem_free_mmap_offset(obj);
3381
3382 // if (obj->base.import_attach)
3383 // drm_prime_gem_destroy(&obj->base, NULL);
3384
3385 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3386
3387 if (obj->bit_17 != NULL)
3388 kfree(obj->bit_17, BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * sizeof(long));
3389 drm_gem_object_release(&obj->base);
3390 kfree(obj, sizeof(*obj));
3391 }
3392
3393 int
i915_gem_idle(struct drm_device * dev,uint32_t type)3394 i915_gem_idle(struct drm_device *dev, uint32_t type)
3395 {
3396 drm_i915_private_t *dev_priv = dev->dev_private;
3397 int ret;
3398
3399 mutex_lock(&dev->struct_mutex);
3400
3401 if (dev_priv->mm.suspended) {
3402 mutex_unlock(&dev->struct_mutex);
3403 return 0;
3404 }
3405
3406 ret = i915_gpu_idle(dev);
3407 if (ret) {
3408 mutex_unlock(&dev->struct_mutex);
3409 return ret;
3410 }
3411 i915_gem_retire_requests(dev);
3412
3413 /* Under UMS, be paranoid and evict. */
3414 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3415 i915_gem_evict_everything(dev);
3416
3417 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3418 * We need to replace this with a semaphore, or something.
3419 * And not confound mm.suspended!
3420 */
3421 dev_priv->mm.suspended = 1;
3422 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
3423
3424 i915_kernel_lost_context(dev);
3425 i915_gem_cleanup_ringbuffer(dev);
3426
3427 mutex_unlock(&dev->struct_mutex);
3428
3429 /* Cancel the retire work handler, wait for it to finish if running
3430 */
3431 del_timer_sync(&dev_priv->mm.retire_timer);
3432 cancel_delayed_work(dev_priv->wq);
3433
3434 return 0;
3435 }
3436
i915_gem_l3_remap(struct drm_device * dev)3437 void i915_gem_l3_remap(struct drm_device *dev)
3438 {
3439 drm_i915_private_t *dev_priv = dev->dev_private;
3440 u32 misccpctl;
3441 int i;
3442
3443 if (!HAS_L3_GPU_CACHE(dev))
3444 return;
3445
3446 if (!dev_priv->l3_parity.remap_info)
3447 return;
3448
3449 misccpctl = I915_READ(GEN7_MISCCPCTL);
3450 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3451 POSTING_READ(GEN7_MISCCPCTL);
3452
3453 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3454 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3455 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3456 DRM_DEBUG("0x%x was already programmed to %x\n",
3457 GEN7_L3LOG_BASE + i, remap);
3458 if (remap && !dev_priv->l3_parity.remap_info[i/4])
3459 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3460 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3461 }
3462
3463 /* Make sure all the writes land before disabling dop clock gating */
3464 POSTING_READ(GEN7_L3LOG_BASE);
3465
3466 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3467 }
3468
i915_gem_init_swizzling(struct drm_device * dev)3469 void i915_gem_init_swizzling(struct drm_device *dev)
3470 {
3471 drm_i915_private_t *dev_priv = dev->dev_private;
3472
3473 if (INTEL_INFO(dev)->gen < 5 ||
3474 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3475 return;
3476
3477 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3478 DISP_TILE_SURFACE_SWIZZLING);
3479
3480 if (IS_GEN5(dev))
3481 return;
3482
3483 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3484 if (IS_GEN6(dev))
3485 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3486 else if (IS_GEN7(dev))
3487 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3488 /* LINTED */
3489 else
3490 BUG();
3491 }
3492
3493 static bool
intel_enable_blt(struct drm_device * dev)3494 intel_enable_blt(struct drm_device *dev)
3495 {
3496 if (!HAS_BLT(dev))
3497 return false;
3498
3499 return true;
3500 }
3501
i915_gem_init_rings(struct drm_device * dev)3502 static int i915_gem_init_rings(struct drm_device *dev)
3503 {
3504 struct drm_i915_private *dev_priv = dev->dev_private;
3505 int ret;
3506
3507 ret = intel_init_render_ring_buffer(dev);
3508 if (ret)
3509 return ret;
3510
3511 if (HAS_BSD(dev)) {
3512 ret = intel_init_bsd_ring_buffer(dev);
3513 if (ret)
3514 goto cleanup_render_ring;
3515 }
3516
3517 if (intel_enable_blt(dev)) {
3518 ret = intel_init_blt_ring_buffer(dev);
3519 if (ret)
3520 goto cleanup_bsd_ring;
3521 }
3522
3523 if (HAS_VEBOX(dev)) {
3524 ret = intel_init_vebox_ring_buffer(dev);
3525 if (ret)
3526 goto cleanup_blt_ring;
3527 }
3528
3529
3530 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
3531 if (ret)
3532 goto cleanup_vebox_ring;
3533
3534 return 0;
3535
3536 cleanup_vebox_ring:
3537 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
3538 cleanup_blt_ring:
3539 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
3540 cleanup_bsd_ring:
3541 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3542 cleanup_render_ring:
3543 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3544
3545 return ret;
3546 }
3547
3548 int
i915_gem_init_hw(struct drm_device * dev)3549 i915_gem_init_hw(struct drm_device *dev)
3550 {
3551 drm_i915_private_t *dev_priv = dev->dev_private;
3552 int ret;
3553
3554 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3555 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3556
3557 if (HAS_PCH_NOP(dev)) {
3558 u32 temp = I915_READ(GEN7_MSG_CTL);
3559 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
3560 I915_WRITE(GEN7_MSG_CTL, temp);
3561 }
3562
3563 i915_gem_l3_remap(dev);
3564
3565 i915_gem_init_swizzling(dev);
3566
3567 ret = i915_gem_init_rings(dev);
3568 if (ret)
3569 return ret;
3570
3571 /*
3572 * XXX: There was some w/a described somewhere suggesting loading
3573 * contexts before PPGTT.
3574 */
3575 i915_gem_context_init(dev);
3576 if (dev_priv->mm.aliasing_ppgtt) {
3577 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
3578 if (ret) {
3579 i915_gem_cleanup_aliasing_ppgtt(dev);
3580 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
3581 }
3582 }
3583
3584 return 0;
3585 }
3586
i915_gem_init(struct drm_device * dev)3587 int i915_gem_init(struct drm_device *dev)
3588 {
3589 struct drm_i915_private *dev_priv = dev->dev_private;
3590 int ret;
3591 int size;
3592
3593 mutex_lock(&dev->struct_mutex);
3594
3595 if (IS_VALLEYVIEW(dev)) {
3596 /* VLVA0 (potential hack), BIOS isn't actually waking us */
3597 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
3598 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
3599 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
3600 }
3601
3602 ret = i915_gem_init_global_gtt(dev);
3603 if (ret) {
3604 mutex_unlock(&dev->struct_mutex);
3605 return ret;
3606 }
3607
3608 size = drm_getfb_size(dev);
3609 dev_priv->fbcon_obj = NULL;
3610 if (size > 0) {
3611 /* save original fb GTT */
3612 dev->old_gtt_size = size;
3613 dev->old_gtt = kmem_zalloc(dev->old_gtt_size, KM_NOSLEEP);
3614 intel_rw_gtt(dev, dev->old_gtt_size,
3615 0, (void *) dev->old_gtt, 0);
3616
3617 /*
3618 * Some BIOSes fail to initialise the GTT, which will cause DMA faults when
3619 * the IOMMU is enabled. We need to clear the whole GTT.
3620 */
3621 i915_clean_gtt(dev, size);
3622
3623 /* workaround: prealloc fb buffer, make sure the start address 0 */
3624 dev_priv->fbcon_obj = i915_gem_alloc_object(dev, size);
3625 if (!dev_priv->fbcon_obj) {
3626 DRM_ERROR("failed to allocate framebuffer");
3627 mutex_unlock(&dev->struct_mutex);
3628 i915_teardown_scratch_page(dev);
3629 return (-ENOMEM);
3630 }
3631
3632 /* copy old content to fb buffer */
3633 (void) memcpy(dev_priv->fbcon_obj->base.kaddr, dev->old_gtt, size);
3634
3635 /* Flush everything out, we'll be doing GTT only from now on */
3636 ret = intel_pin_and_fence_fb_obj(dev, dev_priv->fbcon_obj, false);
3637 if (ret) {
3638 DRM_ERROR("failed to pin fb ret %d", ret);
3639 mutex_unlock(&dev->struct_mutex);
3640 i915_teardown_scratch_page(dev);
3641 i915_gem_free_object(&dev_priv->fbcon_obj->base);
3642 return ret;
3643 }
3644 }
3645
3646 dev_priv->mm.interruptible = true;
3647
3648 ret = i915_gem_init_hw(dev);
3649 mutex_unlock(&dev->struct_mutex);
3650 if (ret) {
3651 i915_gem_cleanup_aliasing_ppgtt(dev);
3652 return ret;
3653 }
3654
3655 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3656 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3657 dev_priv->dri1.allow_batchbuffer = 1;
3658 return 0;
3659 }
3660
3661 void
i915_gem_cleanup_ringbuffer(struct drm_device * dev)3662 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3663 {
3664 drm_i915_private_t *dev_priv = dev->dev_private;
3665 struct intel_ring_buffer *ring;
3666 int i;
3667
3668 for_each_ring(ring, dev_priv, i)
3669 intel_cleanup_ring_buffer(ring);
3670 }
3671
int
/* LINTED */
i915_gem_entervt_ioctl(DRM_IOCTL_ARGS)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

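/*
 * Legacy (UMS) leavevt ioctl: uninstall the IRQ handler and idle the
 * GPU when the X server gives up the VT. A no-op under KMS.
 */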
int
/* LINTED */
i915_gem_leavevt_ioctl(DRM_IOCTL_ARGS)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	(void) drm_irq_uninstall(dev);
	return i915_gem_idle(dev, 0);
}

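/* Idle the hardware when the last DRM file handle is closed (UMS only). */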
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev, 1);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}

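/*
 * Per-device GEM state initialisation at load time: list heads, the
 * retire worker and timer, fence register bookkeeping and bit-6
 * swizzle detection.
 */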
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);

	INIT_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler);
	setup_timer(&dev_priv->mm.retire_timer, i915_gem_retire_work_timer,
	    (void *)dev);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
		    _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue, DRM_INTR_PRI(dev));
	dev_priv->mm.interruptible = true;
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
    int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff, 1);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj, sizeof (struct drm_i915_gem_phys_object));
	return ret;
}

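/* Release a single phys object, detaching any GEM object still bound to it. */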
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

	drm_pci_free(phys_obj->handle);
	kfree(phys_obj, sizeof (struct drm_i915_gem_phys_object));
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

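/* Free every allocated phys object slot. */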
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

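/*
 * Detach a GEM object from its phys backing store: copy the contents
 * back into the object's pages and drop the association.
 */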
void i915_gem_detach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj)
{
	int i, ret;
	int page_count;

	if (!obj->phys_obj)
		return;

	if (!obj->page_list) {
		ret = i915_gem_object_get_pages_gtt(obj);
		if (ret)
			goto out;
	}

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = obj->page_list[i];
		char *src = (caddr_t)(obj->phys_obj->handle->vaddr + (i * PAGE_SIZE));

		(void) memcpy(dst, src, PAGE_SIZE);
	}
	drm_clflush_pages(obj->page_list, page_count);
	i915_gem_chipset_flush(dev);

	i915_gem_object_put_pages_gtt(obj);
out:
	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

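/*
 * Attach a GEM object to a physically contiguous backing store (used
 * for cursor and overlay registers), creating the phys object on first
 * use and copying the object's current contents into it.
 */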
int
i915_gem_attach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj,
    int id,
    int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
		    obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %lu\n", id, obj->base.size);
			goto out;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	if (!obj->page_list) {
		ret = i915_gem_object_get_pages_gtt(obj);
		if (ret) {
			DRM_ERROR("failed to get page list\n");
			goto out;
		}
	}

	page_count = obj->base.size / PAGE_SIZE;

	/* copy the object's current page contents into the phys backing store */
	for (i = 0; i < page_count; i++) {
		char *src = obj->page_list[i];
		char *dst = (caddr_t)(obj->phys_obj->handle->vaddr + (i * PAGE_SIZE));
		(void) memcpy(dst, src, PAGE_SIZE);
	}

	i915_gem_object_put_pages_gtt(obj);

	return 0;
out:
	return ret;
}

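/*
 * pwrite path for phys objects: copy user data straight into the
 * contiguous backing store and flush the chipset caches.
 */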
static int
i915_gem_phys_pwrite(struct drm_device *dev,
    struct drm_i915_gem_object *obj,
    struct drm_i915_gem_pwrite *args,
    /* LINTED */
    struct drm_file *file_priv)
{
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = (void *)(uintptr_t)(obj->phys_obj->handle->vaddr + args->offset);

	DRM_DEBUG("obj_addr %p, %ld\n", obj_addr, args->size);
	ret = DRM_COPY_FROM_USER(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	i915_gem_chipset_flush(dev);
	return 0;
}

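/*
 * Called when a client's file handle is closed: idle the GPU and unlink
 * the client's outstanding requests so that later retirement never
 * touches the about-to-be-freed file_priv.
 */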
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	file_priv->status = 0;

	mutex_lock(&dev->struct_mutex);
	/* i915_gpu_idle() prints a warning on failure, so just ignore the return value */
	(void) i915_gpu_idle(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
		    struct drm_i915_gem_request,
		    client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}