/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
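/*
 * Illustrative usage sketch (not compiled; "mgr" and "node" are placeholder
 * names): a driver initializes one struct drm_mm per managed address space
 * and embeds struct drm_mm_node in its own buffer objects.
 *
 *	struct drm_mm mgr;
 *	struct drm_mm_node node = {};
 *	int ret;
 *
 *	drm_mm_init(&mgr, 0, 1024 * 1024);
 *	ret = drm_mm_insert_node_generic(&mgr, &node, 4096, 0, 0,
 *					 DRM_MM_SEARCH_DEFAULT);
 *	if (ret == 0)
 *		drm_mm_remove_node(&node);
 *	drm_mm_takedown(&mgr);
 */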
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
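/*
 * Illustrative sketch (not compiled): drm_mm_pre_get() tops up the reserve
 * while sleeping is still allowed, so a later allocation made under a
 * spinlock can pass atomic=1 (the last argument below) and be served from
 * the reserve if kzalloc(GFP_ATOMIC) fails. "mgr", "hole" and "lock" are
 * placeholder names.
 *
 *	drm_mm_pre_get(&mgr);
 *	spin_lock(&lock);
 *	node = drm_mm_get_block_generic(hole, 4096, 0, 0, 1);
 *	spin_unlock(&lock);
 */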
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	unsigned long end = node->start + node->size;
	unsigned long hole_start;
	unsigned long hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
	     node->start, node->size);
	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
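/*
 * Illustrative sketch (not compiled): drm_mm_reserve_node() is for ranges
 * whose placement is fixed up front, e.g. a buffer the firmware is already
 * scanning out. The caller fills in start/size and the allocator threads
 * the node into the matching hole. "mgr" and the offsets are placeholders.
 *
 *	struct drm_mm_node node = {};
 *	int ret;
 *
 *	node.start = 0x100000;
 *	node.size = 0x10000;
 *	ret = drm_mm_reserve_node(&mgr, &node);
 *	if (ret)
 *		return ret;	(-ENOSPC: the range is already in use)
 */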
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, flags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long color,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end,
					enum drm_mm_search_flags flags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, flags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
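/*
 * Illustrative sketch (not compiled): range-restricted insertion keeps an
 * allocation inside a window of the managed space, e.g. the CPU-mappable
 * part of a GTT. "mgr", "node" and the 256 MiB limit are placeholders.
 *
 *	ret = drm_mm_insert_node_in_range_generic(&mgr, &node, size,
 *						  PAGE_SIZE, 0,
 *						  0, 256 * 1024 * 1024,
 *						  DRM_MM_SEARCH_DEFAULT);
 */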
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
/**
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
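/*
 * Note on the search flags above: without DRM_MM_SEARCH_BEST the walk
 * returns the first hole that fits (first fit, cheapest); with it, every
 * hole is visited and the smallest one that still fits wins (best fit),
 * trading a full list walk for reduced fragmentation.
 */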
struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
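/*
 * Illustrative sketch (not compiled): drm_mm_replace_node() hands an
 * allocated range from one embedded node to another without a remove +
 * re-insert cycle, e.g. when swapping the tracking structure of a buffer
 * object. "old_bo" and "new_bo" are placeholder names.
 *
 *	drm_mm_replace_node(&old_bo->mm_node, &new_bo->mm_node);
 *	(old_bo->mm_node is now unallocated and may be reused or freed)
 */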
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
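/*
 * Illustrative eviction-loop sketch (not compiled) showing the intended
 * add/remove protocol. Objects are speculatively added until a hole is
 * found, then removed again in exact LIFO order; list_add() prepends, so
 * walking scan_list forward visits nodes in reverse order of addition.
 * Only blocks for which drm_mm_scan_remove_block() returns one overlap
 * the found hole and actually need to be evicted. "obj", "lru",
 * "scan_list" and "evict_list" are placeholder names.
 *
 *	drm_mm_init_scan(&mgr, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->mm_node))
 *			break;
 *	}
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *		list_del(&obj->scan_link);
 *		if (drm_mm_scan_remove_block(&obj->mm_node))
 *			list_add(&obj->evict_link, &evict_list);
 *	}
 */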
int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_init(&mm->unused_lock, "drmmminit");

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;

	printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
	       prefix, hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			       prefix, hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
			   hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
			   entry->start, entry->start + entry->size,
			   entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif