/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/dev/drm/drm_mm.c,v 1.1 2010/01/31 14:25:29 rnoland Exp $");
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; this could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
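/*
 * Typical call sequence, as an illustrative sketch only. The range, sizes,
 * alignment, and variable names below are invented for the example and are
 * not taken from any driver:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *block;
 *
 *	if (drm_mm_init(&mm, 0, 4096) != 0)	// manage the range [0, 4096)
 *		return;				// -ENOMEM on failure
 *	hole = drm_mm_search_free(&mm, 64, 32, 0);	// 64 units, 32-aligned
 *	if (hole != NULL)
 *		block = drm_mm_get_block_generic(hole, 64, 32, 0);
 *	// ... use the block ...
 *	drm_mm_put_block(block);	// merges back into free space
 *	drm_mm_takedown(&mm);		// legal only once everything is freed
 */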
#include "dev/drm/drmP.h"
#include "dev/drm/drm_mm.h"
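/*
 * Target depth of the unused_nodes cache that backs atomic allocations;
 * see drm_mm_kmalloc() and drm_mm_pre_get().
 */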
#define MM_UNUSED_TARGET 4
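/*
 * Return the size of the free block at the tail of the managed range,
 * or 0 if the tail block is allocated.
 */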
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}
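/*
 * Shrink the managed range from the tail by 'size' units; fails unless the
 * tail block is free and strictly larger than 'size'.
 */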
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}
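/*
 * Allocate a node, falling back to the cached unused_nodes list when an
 * atomic allocation fails.
 */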
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = malloc(sizeof(*child), DRM_MEM_MM, M_NOWAIT);
	else
		child = malloc(sizeof(*child), DRM_MEM_MM, M_WAITOK);

	if (unlikely(child == NULL)) {
		DRM_SPINLOCK(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		DRM_SPINUNLOCK(&mm->unused_lock);
	}
	return child;
}
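/*
 * Pre-fill the unused_nodes cache up to MM_UNUSED_TARGET so later atomic
 * allocations have nodes to fall back on. The lock is dropped around each
 * sleeping malloc() call.
 */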
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	DRM_SPINLOCK(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		DRM_SPINUNLOCK(&mm->unused_lock);
		node = malloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
		DRM_SPINLOCK(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			DRM_SPINUNLOCK(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	DRM_SPINUNLOCK(&mm->unused_lock);
	return 0;
}
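/*
 * Append a new free node covering [start, start + size) to the memory list
 * and the free stack.
 */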
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}
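/*
 * Grow the managed range by 'size' units, extending a free tail block in
 * place or appending a new free node after an allocated tail.
 */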
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}
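/*
 * Split an allocated block of 'size' units off the start of the free node
 * 'parent'; the parent keeps the remainder.
 */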
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}
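/*
 * Allocate 'size' units from the free node 'node'. If alignment requires it,
 * an alignment fragment is split off first and immediately returned to the
 * free stack.
 */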
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					free(next_node, DRM_MEM_MM);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			free(cur, DRM_MEM_MM);
	}
}
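/*
 * Scan the free stack for a block that can hold 'size' units at the given
 * alignment. Returns the first fit, or the smallest fit if best_match is set.
 */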
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Track the smallest block that still fits. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
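/* Nonzero iff the manager consists of a single (free) node. */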
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
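/* Initialize a manager over [start, start + size) as one free node. */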
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	DRM_SPININIT(&mm->unused_lock, "drm_unused");

	return drm_mm_create_tail_node(mm, start, size, 0);
}
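/*
 * Tear down a clean manager: free the single remaining node and drain the
 * unused_nodes cache. Complains and bails out if blocks are still allocated.
 */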
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	free(entry, DRM_MEM_MM);

	DRM_SPINLOCK(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		free(entry, DRM_MEM_MM);
		--mm->num_unused;
	}
	DRM_SPINUNLOCK(&mm->unused_lock);

	DRM_SPINUNINIT(&mm->unused_lock);

	KASSERT(mm->num_unused == 0, ("num_unused != 0"));
}