[dragonfly.git] sys/dev/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple: the free list is currently
 * just an unordered stack of free regions, so there might be substantial
 * performance gains if a smarter structure, e.g. an RB-tree, were used
 * instead, at least under heavy fragmentation. Aligned allocations could
 * also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4
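
/*
 * Example of typical use, as an illustrative sketch only ('my_obj',
 * 'vram_size' and the surrounding driver code are hypothetical;
 * DRM_MM_SEARCH_DEFAULT is the plain-search flag from drm_mm.h):
 *
 *	struct my_obj {
 *		struct drm_mm_node node;	(embedded, must start zeroed)
 *	};
 *
 *	drm_mm_init(&mm, 0, vram_size);
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	ret = drm_mm_insert_node_generic(&mm, &obj->node, size, alignment,
 *					 0, DRM_MM_SEARCH_DEFAULT);
 *	if (ret)
 *		goto err;			(no hole large enough)
 *
 *	...
 *
 *	drm_mm_remove_node(&obj->node);
 *	drm_mm_takedown(&mm);
 */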

/*
 * Allocate a node, falling back to the cache of unused nodes if the
 * allocation fails (as it may when called from atomic context).
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kzalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kzalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/*
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager we are pre-allocating for
 *
 * Tops the cache of unused nodes up to MM_UNUSED_TARGET entries so that
 * subsequent atomic allocations can be served from it.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
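
/*
 * Example pairing of drm_mm_pre_get() with the atomic allocation path (an
 * illustrative sketch; 'dev_priv' and its spinlock are hypothetical):
 *
 *	if (drm_mm_pre_get(&dev_priv->mm))
 *		return -ENOMEM;
 *
 *	spin_lock(&dev_priv->mm_lock);
 *	hole = drm_mm_search_free_generic(&dev_priv->mm, size, 0, 0,
 *					  DRM_MM_SEARCH_DEFAULT);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, size, 0, 0, 1);
 *	spin_unlock(&dev_priv->mm_lock);
 */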

/*
 * Carve @node out of the hole tracked by @hole_node, honouring the
 * requested @alignment and @color, and update the hole bookkeeping.
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
                                 unsigned long color)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

/**
 * Insert a pre-initialized node at the exact range it already describes.
 * Useful for taking over ranges that are already in use, e.g. ones set up
 * by firmware. Returns -ENOSPC if no hole spans the requested range.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
        struct drm_mm_node *hole;
        unsigned long end;
        unsigned long hole_start;
        unsigned long hole_end;

        BUG_ON(node == NULL);
        end = node->start + node->size;

        /* Find the relevant hole to add our node to */
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
                if (hole_start > node->start || hole_end < end)
                        continue;

                node->mm = mm;
                node->allocated = 1;

                INIT_LIST_HEAD(&node->hole_stack);
                list_add(&node->node_list, &hole->node_list);

                if (node->start == hole_start) {
                        hole->hole_follows = 0;
                        list_del_init(&hole->hole_stack);
                }

                node->hole_follows = 0;
                if (end != hole_end) {
                        list_add(&node->hole_stack, &mm->hole_stack);
                        node->hole_follows = 1;
                }

                return 0;
        }

        WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
             node->start, node->size);
        return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
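
/*
 * Example use of drm_mm_reserve_node() (an illustrative sketch; 'fb_base'
 * and 'fb_size' stand in for a firmware-configured scanout range):
 *
 *	node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	if (!node)
 *		return -ENOMEM;
 *	node->start = fb_base;
 *	node->size = fb_size;
 *	ret = drm_mm_reserve_node(&mm, node);
 */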

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             unsigned long color,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
                               unsigned long color,
                               enum drm_mm_search_flags flags)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, flags);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

/*
 * Range-restricted variant of drm_mm_insert_helper(): the node is
 * additionally constrained to lie inside [start, end).
 */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long color,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (adj_start < start)
                adj_start = start;
        if (adj_end > end)
                adj_end = end;

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long color,
                                                unsigned long start,
                                                unsigned long end,
                                                int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
                                   start, end);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment, unsigned long color,
                                        unsigned long start, unsigned long end,
                                        enum drm_mm_search_flags flags)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, flags);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
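
/*
 * Example of a range-restricted insertion (an illustrative sketch;
 * 'mappable_end' is a hypothetical bound such as the CPU-visible part of
 * an aperture):
 *
 *	ret = drm_mm_insert_node_in_range_generic(&mm, &obj->node, size, 0,
 *						  0, 0, mappable_end,
 *						  DRM_MM_SEARCH_DEFAULT);
 */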

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        if (WARN_ON(!node->allocated))
                return;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(__drm_mm_hole_node_start(node) ==
                       __drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(__drm_mm_hole_node_start(node) !=
                       __drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                kfree(node);
        spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

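/*
 * check_free_hole() below first rounds the candidate start up to the
 * requested alignment before testing the fit. Worked through with example
 * numbers: start = 0x1003, end = 0x2000, size = 0x800, alignment = 0x100
 * gives tmp = 3, so start is rounded up to 0x1100, and the hole fits
 * because 0x2000 >= 0x1100 + 0x800.
 */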
static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }

        return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                               unsigned long size,
                                               unsigned alignment,
                                               unsigned long color,
                                               enum drm_mm_search_flags flags)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long adj_start;
        unsigned long adj_end;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
                                                        enum drm_mm_search_flags flags)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long adj_start;
        unsigned long adj_end;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
                if (adj_start < start)
                        adj_start = start;
                if (adj_end > end)
                        adj_end = end;

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start, adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
        adj_end = hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color,
                                 &adj_start, &adj_end);

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_end = hole_end;
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it is at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        list_add(&node->node_list, &prev_node->node_list);

        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
                node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
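
/*
 * Example of the eviction scan helpers above, modelled on how drivers
 * drive them from an LRU (an illustrative sketch; the 'lru' and
 * 'scan_list' lists and the 'obj' structure are hypothetical):
 *
 *	drm_mm_init_scan(&mm, size, alignment, 0);
 *
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			goto found;
 *	}
 *	return -ENOSPC;				(nothing suitable to evict)
 *
 * found:
 *	Because list_add() prepends, walking scan_list forward visits the
 *	nodes in exactly the reverse order they were added, as required:
 *
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		int evict = drm_mm_scan_remove_block(&obj->node);
 *		list_del(&obj->scan_link);
 *		if (evict)
 *			evict_object(obj);	(frees obj->node)
 *	}
 */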

/*
 * Returns non-zero if nothing is allocated: the only entry on the node
 * list is then the head node itself.
 */
int drm_mm_clean(struct drm_mm * mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_init(&mm->unused_lock, "drmmminit");

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
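        /*
         * The head node's size is negative modulo the word size, so with
         * e.g. start = 0 and size = 0x100000 we get head_node.start =
         * 0x100000 while head_node.start + head_node.size wraps back to
         * 0: the hole that follows the head node covers exactly the
         * managed range [start, start + size).
         */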
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm * mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                                prefix, hole_start, hole_end,
                                hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
        unsigned long hole_start, hole_end, hole_size;

        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
                return hole_size;
        }

        return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_dump_hole(m, &mm->head_node);

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;
                total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;

        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif