xref: /gfx-drm/usr/src/uts/common/io/drm/drm_mm.c (revision 47dc10d7)
/*
 * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
 */

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright (c) 2009, 2013, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple: the free list is currently
 * just an unordered stack of free regions, so a smarter structure such as an
 * RB-tree could bring substantial performance gains, at least under heavy
 * fragmentation. Aligned allocations could also be made faster.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#define MM_UNUSED_TARGET 4

/*
 * Allocate a new node, falling back to the per-manager cache of unused
 * nodes (filled by drm_mm_pre_get) when kzalloc fails, e.g. in atomic
 * context.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * drm_mm:	memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes, (caddr_t)node);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
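
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * must take a block while holding a spinlock can refill the node cache
 * beforehand, so the atomic path never has to allocate. The lock and the
 * `my_mm` manager below are hypothetical.
 */
#if 0
static struct drm_mm_node *
example_atomic_get(struct drm_mm *my_mm, struct drm_mm_node *hole,
    unsigned long size, spinlock_t *hot_lock)
{
	struct drm_mm_node *node;

	if (drm_mm_pre_get(my_mm))	/* may sleep; call before locking */
		return NULL;

	spin_lock(hot_lock);
	node = drm_mm_get_block_atomic(hole, size, 0);	/* no sleeping here */
	spin_unlock(hot_lock);
	return node;
}
#endif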

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list, (caddr_t)node);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack, (caddr_t)node);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
					unsigned long start,
					unsigned long size,
					bool atomic)
{
	struct drm_mm_node *hole, *node;
	unsigned long end = start + size;
	unsigned long hole_start;
	unsigned long hole_end;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > start || hole_end < end)
			continue;

		node = drm_mm_kmalloc(mm, atomic);
		if (unlikely(node == NULL))
			return NULL;

		node->start = start;
		node->size = size;
		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list, (caddr_t)node);

		if (start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack, (caddr_t)node);
			node->hole_follows = 1;
		}

		return node;
	}

	DRM_ERROR("no hole found for block 0x%lx + 0x%lx\n", start, size);
	return NULL;
}
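
/*
 * Usage sketch (illustrative, with made-up addresses): drm_mm_create_block
 * reserves a caller-chosen range instead of letting the manager pick one,
 * e.g. for memory already set aside by firmware.
 */
#if 0
	struct drm_mm_node *stolen;

	/* Claim [0x100000, 0x180000); fails if no free hole covers it. */
	stolen = drm_mm_create_block(&my_mm, 0x100000, 0x80000, false);
	if (stolen == NULL)
		return -ENOSPC;
#endif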

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
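
/*
 * Usage sketch (illustrative): the insert_node API works on a drm_mm_node
 * embedded in a caller-owned object, which must be zeroed before insertion.
 * `struct my_obj` and the PAGE_SIZE alignment are assumptions made for the
 * example.
 */
#if 0
struct my_obj {
	struct drm_mm_node vma;	/* embedded; no drm_mm_kmalloc needed */
};

static int example_bind(struct drm_mm *my_mm, struct my_obj *obj,
    unsigned long size)
{
	(void) memset(&obj->vma, 0, sizeof (obj->vma));
	return drm_mm_insert_node(my_mm, &obj->vma, size, PAGE_SIZE);
}
#endif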

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list, (caddr_t)node);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack, (caddr_t)node);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
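
/*
 * Usage sketch (illustrative): the _in_range variant confines the search to
 * [start, end), e.g. to keep an object within the CPU-visible part of an
 * aperture. The 256MB bound and the names are made-up example values.
 */
#if 0
	ret = drm_mm_insert_node_in_range(&my_mm, &obj->vma, size,
	    PAGE_SIZE, 0, 256UL << 20);
	if (ret == -ENOSPC) {
		/* No large-enough hole below 256MB; evict and retry. */
	}
#endif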

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	/* LINTED */
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack, (caddr_t)prev_node);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack, (caddr_t)prev_node);

	list_del(&node->node_list);
	node->allocated = 0;
}

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes, (caddr_t)node);
		++mm->num_unused;
	} else
		kfree(node, sizeof(struct drm_mm_node));
	spin_unlock(&mm->unused_lock);
}

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}
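
/*
 * Worked example of the round-up above: with start = 0x1003 and
 * alignment = 0x1000, tmp = 3, so start becomes 0x1003 + (0x1000 - 3)
 * = 0x2000, the next 4K boundary. The hole then only qualifies if
 * end >= 0x2000 + size.
 */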

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
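
/*
 * Usage sketch (illustrative): swapping bookkeeping between two embedded
 * nodes, e.g. handing a mapped range from one object to its replacement
 * without a remove/insert cycle that could fail with -ENOSPC. The object
 * names are hypothetical.
 */
#if 0
	(void) memset(&new_obj->vma, 0, sizeof (new_obj->vma));
	drm_mm_replace_node(&old_obj->vma, &new_obj->vma);
	/* new_obj->vma now owns the range; old_obj->vma is unallocated. */
#endif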

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the free_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list, (caddr_t)node);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
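
/*
 * Usage sketch (illustrative) of the scan contract: walk an LRU adding
 * candidates until a hole is reported, then unwind in exactly the reverse
 * order, evicting only the nodes scan_remove_block flags. Because nodes are
 * prepended to the hypothetical scan_list, a forward walk of it yields the
 * required reverse-add order. Everything except the drm_mm_* calls is an
 * assumption; the list macros follow this file's illumos conventions.
 */
#if 0
	struct my_obj *obj, *tmp;
	struct list_head scan_list;

	INIT_LIST_HEAD(&scan_list);
	drm_mm_init_scan(&my_mm, size, PAGE_SIZE, 0);

	list_for_each_entry_safe(obj, tmp, struct my_obj, &lru, lru_link) {
		list_add(&obj->scan_link, &scan_list, (caddr_t)obj);
		if (drm_mm_scan_add_block(&obj->vma))
			break;	/* big enough hole found */
	}

	list_for_each_entry_safe(obj, tmp, struct my_obj, &scan_list,
	    scan_link) {
		list_del(&obj->scan_link);
		if (drm_mm_scan_remove_block(&obj->vma))
			evict(obj);	/* node lies inside the hole */
	}
#endif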

int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}

void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	mm->head_node.node_list.contain_ptr = (caddr_t)&mm->head_node;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack, (caddr_t)&mm->head_node);
	mm->color_adjust = NULL;
}
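
/*
 * Usage sketch (illustrative): typical manager lifecycle. drm_mm_init cannot
 * fail; every node must be removed again before drm_mm_takedown, or takedown
 * logs an error and bails out. `aperture_size` is a placeholder value.
 */
#if 0
	struct drm_mm my_mm;

	drm_mm_init(&my_mm, 0, aperture_size);	/* manage [0, aperture_size) */
	/* ... drm_mm_insert_node() / drm_mm_remove_node() ... */
	if (drm_mm_clean(&my_mm))
		drm_mm_takedown(&my_mm);
#endif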

void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, struct drm_mm_node, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry, sizeof(struct drm_mm_node));
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}

static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
				       const char *prefix)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		DRM_DEBUG("%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
		return hole_size;
	}

	return 0;
}

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, struct drm_mm_node, mm) {
		DRM_DEBUG("%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	DRM_DEBUG("%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}

bool drm_mm_initialized(struct drm_mm *mm)
{
	return (mm->hole_stack.next != NULL);
}

unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	BUG_ON(!hole_node->hole_follows);
	return __drm_mm_hole_node_start(hole_node);
}

unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *node;
	node = list_entry(hole_node->node_list.next,
			struct drm_mm_node, node_list);
	return node->start;
}

unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
						   unsigned long size,
						   unsigned alignment)
{
	return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
}

struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
							  unsigned long size,
							  unsigned alignment)
{
	return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
}

struct drm_mm_node *drm_mm_get_block_range(
						struct drm_mm_node *parent,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end)
{
	return drm_mm_get_block_range_generic(parent, size, alignment, 0,
					      start, end, 0);
}

struct drm_mm_node *drm_mm_get_block_atomic_range(
						struct drm_mm_node *parent,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end)
{
	return drm_mm_get_block_range_generic(parent, size, alignment, 0,
						start, end, 1);
}

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
						     unsigned long size,
						     unsigned alignment,
						     bool best_match)
{
	return drm_mm_search_free_generic(mm, size, alignment, 0, best_match);
}

struct drm_mm_node *drm_mm_search_free_in_range(
						const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						bool best_match)
{
	return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
						   start, end, best_match);
}