/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; this could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include <drm/drm_mm.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kmalloc(sizeof(*child), DRM_MEM_MM, M_ZERO | M_NOWAIT);
        else
                child = kmalloc(sizeof(*child), DRM_MEM_MM, M_ZERO | M_WAITOK);

        if (unlikely(child == NULL)) {
                /* Fall back to the cache of pre-allocated nodes. */
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child = list_entry(mm->unused_nodes.next,
                                           struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}
/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kmalloc(sizeof(*node), DRM_MEM_MM, M_ZERO | M_WAITOK);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
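/*
 * Illustrative usage sketch (not part of this file): drm_mm_pre_get() is
 * meant to be called while sleeping is still allowed, so that a subsequent
 * atomic allocation can be satisfied from the unused_nodes cache. The driver
 * lock and variable names below are hypothetical.
 *
 *      drm_mm_pre_get(mm);                     // may sleep, refills cache
 *
 *      spin_lock(&driver_lock);
 *      hole = drm_mm_search_free_generic(mm, size, align, 0, false);
 *      if (hole != NULL)
 *              node = drm_mm_get_block_generic(hole, size, align, 0, 1);
 *      spin_unlock(&driver_lock);
 */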
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);
        return next_node->start;
}
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
                                 unsigned long color)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             unsigned long color,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
                               unsigned long color)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
                       unsigned long size, unsigned alignment)
{
        return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);
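/*
 * Illustrative usage sketch (not part of this file): the drm_mm_insert_node()
 * family expects a zeroed struct drm_mm_node embedded in a driver object,
 * which avoids the separate node allocation done by drm_mm_get_block(). The
 * struct, field and helper names below are hypothetical.
 *
 *      struct my_buffer {
 *              struct drm_mm_node vram_node;   // must be zeroed before use
 *              unsigned long size;
 *      };
 *
 *      ret = drm_mm_insert_node(&dev_priv->vram_mm, &buf->vram_node,
 *                               buf->size, PAGE_SIZE);
 *      if (ret == -ENOSPC)
 *              ret = evict_and_retry(buf);     // hypothetical helper
 *
 *      // Teardown: the node is only unlinked, never freed, by the manager.
 *      drm_mm_remove_node(&buf->vram_node);
 */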
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long color,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (adj_start < start)
                adj_start = start;
        if (adj_end > end)
                adj_end = end;

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                   unsigned long size,
                                                   unsigned alignment,
                                                   unsigned long color,
                                                   unsigned long start,
                                                   unsigned long end,
                                                   int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
                                   start, end);
        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment,
                                        unsigned long color,
                                        unsigned long start, unsigned long end)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long start, unsigned long end)
{
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0,
                                                   start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
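/*
 * Illustrative usage sketch (not part of this file): range-restricted
 * insertion is typically used to keep an allocation inside a window such as
 * the CPU-mappable part of VRAM. The 256 MiB bound below is hypothetical.
 *
 *      ret = drm_mm_insert_node_in_range(&dev_priv->vram_mm, &buf->vram_node,
 *                                        buf->size, PAGE_SIZE,
 *                                        0, 256UL * 1024 * 1024);
 */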
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(drm_mm_hole_node_start(node)
                                == drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(drm_mm_hole_node_start(node)
                                != drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                drm_free(node, DRM_MEM_MM);
        spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }

        return end >= start + size;
}
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                               unsigned long size,
                                               unsigned alignment,
                                               unsigned long color,
                                               bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
                                                        bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
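/*
 * Illustrative usage sketch (not part of this file): drm_mm_replace_node()
 * hands an existing range over to a new embedded node without a remove +
 * insert cycle, e.g. when the driver object embedding the node is being
 * reallocated. The names below are hypothetical.
 *
 *      drm_mm_replace_node(&old_buf->vram_node, &new_buf->vram_node);
 *      // old_buf->vram_node is now marked unallocated and may be discarded.
 */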
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start, adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
        adj_end = hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color,
                                 &adj_start, &adj_end);

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_end = hole_end;
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        list_add(&node->node_list, &prev_node->node_list);

        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
                node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
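/*
 * Illustrative sketch of the eviction-scan protocol built from the three
 * functions above, roughly following the pattern used by drivers such as
 * i915. The object, list and link names are hypothetical.
 *
 *      drm_mm_init_scan(mm, size, alignment, color);
 *
 *      found = 0;
 *      list_for_each_entry(obj, &lru_list, lru_link) {
 *              list_add(&obj->scan_link, &scan_list);
 *              if (drm_mm_scan_add_block(&obj->mm_node)) {
 *                      found = 1;              // a large enough hole exists
 *                      break;
 *              }
 *      }
 *
 *      // scan_list is in reverse insertion order (list_add prepends), which
 *      // is exactly the removal order drm_mm_scan_remove_block() requires.
 *      list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *              if (drm_mm_scan_remove_block(&obj->mm_node) && found)
 *                      list_add(&obj->evict_link, &evict_list);
 *              list_del(&obj->scan_link);
 *      }
 *
 *      // Unbinding the nodes on evict_list frees the hole found above.
 */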
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_init(&mm->unused_lock);

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->color_adjust = NULL;

        return 0;
}
EXPORT_SYMBOL(drm_mm_init);
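/*
 * Illustrative lifecycle sketch (not part of this file); the 16 MiB range
 * starting at offset 0 is hypothetical:
 *
 *      struct drm_mm mm;
 *
 *      drm_mm_init(&mm, 0, 16UL * 1024 * 1024);
 *      // ... insert and remove nodes ...
 *      drm_mm_debug_table(&mm, "vram");        // optional state dump
 *      drm_mm_takedown(&mm);                   // all nodes must be gone first
 */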
void drm_mm_takedown(struct drm_mm *mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                drm_free(entry, DRM_MEM_MM);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                kprintf("%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                kprintf("%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        kprintf("%s 0x%08lx-0x%08lx: %8lu: free\n",
                                prefix, hole_start, hole_end,
                                hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        kprintf("%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                           hole_start, hole_end, hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                           entry->start, entry->start + entry->size,
                           entry->size);
                total_used += entry->size;
                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                   hole_start, hole_end, hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used,
                   total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif