/* sys/dev/drm/drm_mm.c (dragonfly.git) */
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

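/*
 * Illustrative usage sketch (not compiled): the typical lifecycle of a
 * drm_mm range allocator with an embedded struct drm_mm_node. The names
 * my_buffer, my_vram_mm and the 16 MB range are assumptions for this
 * example only, not part of this file.
 */
#if 0
struct my_buffer {
	struct drm_mm_node node;	/* embedded allocator node */
	/* ... driver-private data ... */
};

static int my_buffer_example(struct drm_mm *my_vram_mm, struct my_buffer *buf)
{
	int ret;

	/* Manage a 16 MB range starting at offset 0. */
	drm_mm_init(my_vram_mm, 0, 16 * 1024 * 1024);

	/* The embedded node must be cleared before it is inserted. */
	memset(&buf->node, 0, sizeof(buf->node));

	/* First-fit allocation of 64 KB, 4 KB aligned. */
	ret = drm_mm_insert_node(my_vram_mm, &buf->node, 64 * 1024, 4096);
	if (ret)
		return ret;	/* -ENOSPC: no suitable hole */

	/* ... use buf->node.start as the object's offset ... */

	drm_mm_remove_node(&buf->node);
	drm_mm_takedown(my_vram_mm);
	return 0;
}
#endif
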
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), DRM_MEM_MM, M_ZERO | M_NOWAIT);
	else
		child = kmalloc(sizeof(*child), DRM_MEM_MM, M_ZERO | M_WAITOK);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * drm_mm:      memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), DRM_MEM_MM, M_ZERO | M_WAITOK);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
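
/*
 * Illustrative sketch (not compiled): drm_mm_pre_get() tops up the cache of
 * unused nodes while sleeping is still allowed, so that a later allocation
 * made under a spinlock can be satisfied atomically. my_lock, my_mm and
 * my_alloc_atomic are hypothetical names used only for this example.
 */
#if 0
static struct drm_mm_node *my_alloc_atomic(struct drm_mm *my_mm,
					   struct spinlock *my_lock,
					   unsigned long size)
{
	struct drm_mm_node *hole, *node = NULL;

	/* May sleep: refill the per-manager cache of spare nodes. */
	if (drm_mm_pre_get(my_mm))
		return NULL;

	spin_lock(my_lock);
	hole = drm_mm_search_free_generic(my_mm, size, 0, 0, false);
	if (hole)
		/* atomic == 1: never sleeps, falls back to the cache. */
		node = drm_mm_get_block_generic(hole, size, 0, 0, 1);
	spin_unlock(my_lock);

	return node;
}
#endif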

/*
 * A node with hole_follows set is immediately followed by free space, which
 * extends from the end of that node to the start of the next node on the
 * ordered node_list.
 */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
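
/*
 * Illustrative sketch (not compiled): unlike drm_mm_get_block_generic(),
 * drm_mm_insert_node_generic() takes a caller-provided node, typically
 * embedded in a driver object, and it must be cleared first. The names
 * my_obj_bind and my_mm are hypothetical.
 */
#if 0
static int my_obj_bind(struct drm_mm *my_mm, struct drm_mm_node *my_obj_node,
		       unsigned long size, unsigned long color)
{
	/* The node must be cleared before it is handed to the allocator. */
	memset(my_obj_node, 0, sizeof(*my_obj_node));

	/* 4 KB alignment; color is passed through to mm->color_adjust. */
	return drm_mm_insert_node_generic(my_mm, my_obj_node, size, 4096,
					  color);
}
#endif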

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
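
/*
 * Illustrative sketch (not compiled): a range-restricted insert, here used to
 * keep an object inside the first, CPU-mappable part of a managed range. The
 * 256 MB limit and the names my_mm/my_node are assumptions for this example.
 */
#if 0
static int my_obj_bind_mappable(struct drm_mm *my_mm,
				struct drm_mm_node *my_node,
				unsigned long size)
{
	unsigned long mappable_end = 256UL * 1024 * 1024;

	memset(my_node, 0, sizeof(*my_node));

	/* Only free space inside [0, mappable_end) is considered. */
	return drm_mm_insert_node_in_range(my_mm, my_node, size, 4096,
					   0, mappable_end);
}
#endif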

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		drm_free(node, DRM_MEM_MM);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
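
/*
 * Illustrative sketch (not compiled): nodes handed out by the
 * drm_mm_get_block* family are owned by the manager and must be released
 * with drm_mm_put_block(), not freed directly. my_mm and my_get_put_example
 * are hypothetical names.
 */
#if 0
static int my_get_put_example(struct drm_mm *my_mm)
{
	struct drm_mm_node *hole, *node;

	hole = drm_mm_search_free_generic(my_mm, 8192, 4096, 0, false);
	if (!hole)
		return -ENOSPC;

	node = drm_mm_get_block_generic(hole, 8192, 4096, 0, 0);
	if (!node)
		return -ENOMEM;

	/* ... use node->start ... */

	drm_mm_put_block(node);	/* returns the node to the manager's cache */
	return 0;
}
#endif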

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

/**
 * Search the list of free holes for one that can fit an allocation of the
 * given size, alignment and color. If best_match is false, the first suitable
 * hole is returned; otherwise all holes are examined and the best candidate
 * is returned.
 */
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

/**
 * Like drm_mm_search_free_generic(), but the search is restricted to the
 * given start/end range.
 */
struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
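
/*
 * Illustrative sketch (not compiled): drm_mm_replace_node() transfers an
 * existing allocation (same offset and size) from one embedded node to
 * another, avoiding a remove/re-insert cycle when an object's bookkeeping
 * structure is being swapped out. my_transfer_allocation and the node names
 * are hypothetical.
 */
#if 0
static void my_transfer_allocation(struct drm_mm_node *my_old_node,
				   struct drm_mm_node *my_new_node)
{
	/* my_new_node must not already carry an allocation. */
	memset(my_new_node, 0, sizeof(*my_new_node));

	drm_mm_replace_node(my_old_node, my_new_node);
	/* my_old_node is now unallocated and may be reused or freed. */
}
#endif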

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. See the usage sketch after drm_mm_scan_remove_block() below.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed from the scan list in the reverse order in which
 * they have been added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
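
/*
 * Illustrative sketch (not compiled): the eviction scan protocol. Objects are
 * taken from some driver LRU and added to the scan until a large enough hole
 * could be formed; then *all* scanned blocks are removed again, in reverse
 * order of addition, and those for which drm_mm_scan_remove_block() returns 1
 * must actually be evicted. struct my_obj, my_lru and my_evict() are
 * hypothetical names for this example.
 */
#if 0
struct my_obj {
	struct drm_mm_node node;
	struct list_head lru_link;	/* on the driver LRU */
	struct list_head scan_link;	/* temporary, for the scan */
};

static int my_evict_something(struct drm_mm *mm, struct list_head *my_lru,
			      unsigned long size, unsigned alignment)
{
	struct my_obj *obj, *next;
	struct list_head scan_list;
	int found = 0;

	INIT_LIST_HEAD(&scan_list);
	drm_mm_init_scan(mm, size, alignment, 0);

	/* Phase 1: feed LRU objects into the scan until a hole is found. */
	list_for_each_entry(obj, my_lru, lru_link) {
		/* list_add() prepends, so scan_list ends up in reverse order. */
		list_add(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = 1;
			break;
		}
	}

	/* Phase 2: every scanned block must be removed again, in reverse. */
	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
		int evict = drm_mm_scan_remove_block(&obj->node);

		list_del_init(&obj->scan_link);
		if (found && evict)
			my_evict(obj);	/* unbinds and frees obj->node */
	}

	return found ? 0 : -ENOSPC;
}
#endif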

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_init(&mm->unused_lock, "drmmminit");

	/*
	 * Clever trick to avoid a special case in the free hole tracking:
	 * head_node is a dummy node placed at start + size whose size wraps
	 * around to start - (start + size), so drm_mm_hole_node_start() on it
	 * evaluates to start and the initial hole covers the whole range.
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		drm_free(entry, DRM_MEM_MM);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		kprintf("%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		kprintf("%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			kprintf("%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	kprintf("%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif