/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc.c 247849 2013-03-05 16:15:34Z kib $
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#include <sys/eventhandler.h>

#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

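/*
 * Batch size for pool refills and frees: one page worth of vm_page_t
 * pointers, so the temporary arrays used below always fit in a single page.
 */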
#define NUM_PAGES_TO_ALLOC              (PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION                16
#define FREE_ALL_PAGES                  (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL              1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @ttm_page_alloc_flags: Flags to pass to the page allocator.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
        struct lock             lock;
        bool                    fill_lock;
        bool                    dma32;
        struct pglist           list;
        int                     ttm_page_alloc_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only
 * place where they may change is the sysfs store. They won't have an
 * immediate effect anyway, so forcing serialization to access them is
 * pointless.
 */

struct ttm_pool_opts {
        unsigned        alloc_size;
        unsigned        max_size;
        unsigned        small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there
 * are some pages to free.
 * @small_allocation: Limit, in pages, below which an allocation is
 * considered small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
        unsigned int kobj_ref;
        eventhandler_tag lowmem_handler;
        struct ttm_pool_opts    options;

        union {
                struct ttm_page_pool    u_pools[NUM_POOLS];
                struct _utag {
                        struct ttm_page_pool    u_wc_pool;
                        struct ttm_page_pool    u_uc_pool;
                        struct ttm_page_pool    u_wc_pool_dma32;
                        struct ttm_page_pool    u_uc_pool_dma32;
                } _ut;
        } _u;
};

#define pools _u.u_pools
#define wc_pool _u._ut.u_wc_pool
#define uc_pool _u._ut.u_uc_pool
#define wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define uc_pool_dma32 _u._ut.u_uc_pool_dma32

MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");

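/*
 * Return a pool page to the system.  The page was allocated with
 * vm_page_alloc_contig() and marked fictitious while owned by TTM, so undo
 * that marking before handing it back to the VM.
 */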
static void
ttm_vm_page_free(vm_page_t m)
{

        KASSERT(m->object == NULL, ("ttm page %p is owned", m));
        KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
        KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
#if 0
        KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
        m->oflags |= VPO_UNMANAGED;
#endif
        m->flags &= ~PG_FICTITIOUS;
        vm_page_busy_wait(m, FALSE, "ttmvpf");
        vm_page_wakeup(m);
        vm_page_free_contig(m, PAGE_SIZE);
        /*
        vm_page_unwire(m, 0);
        vm_page_free(m);
        */
}

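/*
 * Translate a TTM caching state into the VM memory attribute that new pool
 * pages are allocated with.
 */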
static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

        switch (cstate) {
        case tt_uncached:
                return (VM_MEMATTR_UNCACHEABLE);
        case tt_wc:
                return (VM_MEMATTR_WRITE_COMBINING);
        case tt_cached:
                return (VM_MEMATTR_WRITE_BACK);
        }
        panic("caching state %d\n", cstate);
}

static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

        drm_free(m, M_TTM_POOLMGR);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
                struct attribute *attr, const char *buffer, size_t size)
{
        int chars;
        unsigned val;
        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert kb to number of pages */
        val = val / (PAGE_SIZE >> 10);

        if (attr == &ttm_page_pool_max)
                m->options.max_size = val;
        else if (attr == &ttm_page_pool_small)
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        pr_warn("Setting allocation size to larger than %lu is not recommended\n",
                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
                struct attribute *attr, char *buffer)
{
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif

static struct ttm_pool_manager *_manager;

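/*
 * Helpers that change the caching attribute of an array of pages.  On this
 * platform they only update the pmap memory attribute (and the AGP mapping
 * when TTM_HAS_AGP is defined); they always return 0.
 */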
static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
        vm_page_t m;
        int i;

        for (i = 0; i < addrinarray; i++) {
                m = pages[i];
#ifdef TTM_HAS_AGP
                unmap_page_from_agp(m);
#endif
                pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
        }
        return 0;
}

static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
        vm_page_t m;
        int i;

        for (i = 0; i < addrinarray; i++) {
                m = pages[i];
#ifdef TTM_HAS_AGP
                map_page_into_agp(pages[i]);
#endif
                pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
        }
        return 0;
}

static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
        vm_page_t m;
        int i;

        for (i = 0; i < addrinarray; i++) {
                m = pages[i];
#ifdef TTM_HAS_AGP
                map_page_into_agp(pages[i]);
#endif
                pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
        }
        return 0;
}

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
        unsigned i;

        /* Our VM handles vm memattr automatically on the page free. */
        if (set_pages_array_wb(pages, npages))
                kprintf("[TTM] Failed to set %d pages to wb!\n", npages);
        for (i = 0; i < npages; ++i)
                ttm_vm_page_free(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: pool to free the pages from
 * @nr_free: number of pages to free, or FREE_ALL_PAGES to free every page
 * in the pool
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        vm_page_t p, p1;
        vm_page_t *pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;
        unsigned i;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = kmalloc(npages_to_free * sizeof(vm_page_t),
            M_TEMP, M_WAITOK | M_ZERO);

restart:
        lockmgr(&pool->lock, LK_EXCLUSIVE);

        TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        for (i = 0; i < freed_pages; i++)
                                TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        lockmgr(&pool->lock, LK_RELEASE);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* free all so restart the processing */
                        if (nr_free)
                                goto restart;

                        /* Not allowed to fall through or break because the
                         * following context is inside the spinlock while we
                         * are outside of it here.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                for (i = 0; i < freed_pages; i++)
                        TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        lockmgr(&pool->lock, LK_RELEASE);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        drm_free(pages_to_free, M_TEMP);
        return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
        unsigned i;
        int total = 0;
        for (i = 0; i < NUM_POOLS; ++i)
                total += _manager->pools[i].npages;

        return total;
}

/**
 * Callback for the VM to request that the pools reduce the number of pages
 * held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
        static unsigned int start_pool = 0;
        unsigned i;
        unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
        struct ttm_page_pool *pool;
        int shrink_pages = 100; /* XXXKIB */

        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
        return ttm_pool_get_num_unused_pages();
}

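/*
 * Hook the pool shrinker into the kernel's low-memory event so pooled pages
 * are released when the system comes under memory pressure.
 */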
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

        manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
            ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

        EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

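/*
 * Apply the caching state requested by the caller to an array of freshly
 * allocated pages, logging a warning on failure.
 */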
static int ttm_set_pages_caching(vm_page_t *pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;
        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        kprintf("[TTM] Failed to set %d pages to uc!\n", cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        kprintf("[TTM] Failed to set %d pages to wc!\n", cpages);
                break;
        default:
                break;
        }
        return r;
}

/**
 * Free the pages that failed to change the caching state. If there are any
 * pages that have already changed their caching state, put them back into
 * the pool.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                vm_page_t *failed_pages, unsigned cpages)
{
        unsigned i;
        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                TAILQ_REMOVE(pages, failed_pages[i], pageq);
                ttm_vm_page_free(failed_pages[i]);
        }
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        vm_page_t *caching_array;
        vm_page_t p;
        int r = 0;
        unsigned i, cpages, aflags;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

        aflags = VM_ALLOC_NORMAL |
            ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
            VM_ALLOC_ZERO : 0);

        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages * sizeof(vm_page_t), M_TEMP,
            M_WAITOK | M_ZERO);

        for (i = 0, cpages = 0; i < count; ++i) {
                p = vm_page_alloc_contig(0,
                    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
                    VM_MAX_ADDRESS, PAGE_SIZE, 0,
                    1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
                if (!p) {
                        kprintf("[TTM] Unable to get page %u\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                                          cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }
#if 0
                p->oflags &= ~VPO_UNMANAGED;
#endif
                p->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM /* KIB: nop */
                /* gfp flags of highmem page should never be dma32 so we
                 * should be fine in such a case
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = p;
                        if (cpages == max_cpages) {

                                r = ttm_set_pages_caching(caching_array,
                                                cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                TAILQ_INSERT_HEAD(pages, p, pageq);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                                        ttm_flags, cstate,
                                        caching_array, cpages);
        }
out:
        drm_free(caching_array, M_TEMP);

        return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
    int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        vm_page_t p;
        int r;
        unsigned cpages = 0;
        /**
         * Only allow one pool fill operation at a time.
         * If the pool doesn't have enough pages for the allocation, new
         * pages are allocated from outside of the pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If the allocation request is small and there are not enough
         * pages in the pool we fill the pool up first. */
        if (count < _manager->options.small
                && count > pool->npages) {
                struct pglist new_pages;
                unsigned alloc_size = _manager->options.alloc_size;

                /**
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                lockmgr(&pool->lock, LK_RELEASE);

                TAILQ_INIT(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
                    ttm_flags, cstate, alloc_size);
                lockmgr(&pool->lock, LK_EXCLUSIVE);

                if (!r) {
                        TAILQ_CONCAT(&pool->list, &new_pages, pageq);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        kprintf("[TTM] Failed to fill pool (%p)\n", pool);
                        /* If we have any pages left put them to the pool. */
                        TAILQ_FOREACH(p, &new_pages, pageq) {
                                ++cpages;
                        }
                        TAILQ_CONCAT(&pool->list, &new_pages, pageq);
                        pool->npages += cpages;
                }

        }
        pool->fill_lock = false;
}

/**
 * Cut 'count' pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                                        struct pglist *pages,
                                        int ttm_flags,
                                        enum ttm_caching_state cstate,
                                        unsigned count)
{
        vm_page_t p;
        unsigned i;

        lockmgr(&pool->lock, LK_EXCLUSIVE);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                TAILQ_CONCAT(pages, &pool->list, pageq);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        for (i = 0; i < count; i++) {
                p = TAILQ_FIRST(&pool->list);
                TAILQ_REMOVE(&pool->list, p, pageq);
                TAILQ_INSERT_TAIL(pages, p, pageq);
        }
        pool->npages -= count;
        count = 0;
out:
        lockmgr(&pool->lock, LK_RELEASE);
        return count;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        unsigned i;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
                for (i = 0; i < npages; i++) {
                        if (pages[i]) {
                                ttm_vm_page_free(pages[i]);
                                pages[i] = NULL;
                        }
                }
                return;
        }

        lockmgr(&pool->lock, LK_EXCLUSIVE);
        for (i = 0; i < npages; i++) {
                if (pages[i]) {
                        TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
                        pages[i] = NULL;
                        pool->npages++;
                }
        }
        /* Check that we don't go over the pool limit */
        npages = 0;
        if (pool->npages > _manager->options.max_size) {
                npages = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC pages
                 * to reduce calls to set_memory_wb */
                if (npages < NUM_PAGES_TO_ALLOC)
                        npages = NUM_PAGES_TO_ALLOC;
        }
        lockmgr(&pool->lock, LK_RELEASE);
        if (npages)
                ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages array will hold npages correctly
 * cached pages.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
                         enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct pglist plist;
        vm_page_t p = NULL;
        int gfp_flags, aflags;
        unsigned count;
        int r;

        aflags = VM_ALLOC_NORMAL |
            ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);

        /* No pool for cached pages */
        if (pool == NULL) {
                for (r = 0; r < npages; ++r) {
                        p = vm_page_alloc_contig(0,
                            (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
                            VM_MAX_ADDRESS, PAGE_SIZE,
                            0, 1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
                        if (!p) {
                                kprintf("[TTM] Unable to allocate page\n");
                                return -ENOMEM;
                        }
#if 0
                        p->oflags &= ~VPO_UNMANAGED;
#endif
                        p->flags |= PG_FICTITIOUS;
                        pages[r] = p;
                }
                return 0;
        }

        /* combine the zero flag with the pool flags */
        gfp_flags = flags | pool->ttm_page_alloc_flags;

        /* First we take pages from the pool */
        TAILQ_INIT(&plist);
        npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
        count = 0;
        TAILQ_FOREACH(p, &plist, pageq) {
                pages[count++] = p;
        }

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                TAILQ_FOREACH(p, &plist, pageq) {
                        pmap_zero_page(VM_PAGE_TO_PHYS(p));
                }
        }

        /* If the pool didn't have enough pages allocate new ones. */
        if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
                TAILQ_INIT(&plist);
                r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
                    npages);
                TAILQ_FOREACH(p, &plist, pageq) {
                        pages[count++] = p;
                }
                if (r) {
                        /* If there are any pages in the list put them back
                         * into the pool. */
                        kprintf("[TTM] Failed to allocate extra pages for large request\n");
                        ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }

        return 0;
}

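/*
 * Initialize one page pool: its lock, free-page list, counters and the
 * allocation flags used when it is refilled.
 */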
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                                      char *name)
{
        lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
        pool->fill_lock = false;
        TAILQ_INIT(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->ttm_page_alloc_flags = flags;
        pool->name = name;
}

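/*
 * Create the global pool manager with its four pools (wc/uc, with and
 * without the DMA32 restriction), set the pool limits and register the
 * low-memory shrinker.
 */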
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

        if (_manager != NULL)
                kprintf("[TTM] manager != NULL\n");
        kprintf("[TTM] Initializing pool allocator\n");

        _manager = kmalloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);

        ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
        ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
            TTM_PAGE_FLAG_DMA32, "wc dma");
        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
            TTM_PAGE_FLAG_DMA32, "uc dma");

        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

        refcount_init(&_manager->kobj_ref, 1);
        ttm_pool_mm_shrink_init(_manager);

        return 0;
}

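/*
 * Tear the pool allocator down: unregister the shrinker, release every
 * pooled page and drop the manager reference.
 */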
void ttm_page_alloc_fini(void)
{
        int i;

        kprintf("[TTM] Finalizing pool allocator\n");
        ttm_pool_mm_shrink_fini(_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

        if (refcount_release(&_manager->kobj_ref))
                ttm_pool_kobj_release(_manager);
        _manager = NULL;
}

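/*
 * Allocate backing pages for every entry of a ttm_tt, accounting each page
 * with the global memory manager and swapping the contents back in when the
 * tt was previously swapped out.
 */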
int ttm_pool_populate(struct ttm_tt *ttm)
{
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        unsigned i;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_get_pages(&ttm->pages[i], 1,
                                    ttm->page_flags,
                                    ttm->caching_state);
                if (ret != 0) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }

                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                false, false);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }
        }

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return ret;
                }
        }

        ttm->state = tt_unbound;
        return 0;
}

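/*
 * Release the backing pages of a ttm_tt: undo the global memory accounting
 * and hand each page back to its pool (or free it for cached mappings).
 */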
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
        unsigned i;

        for (i = 0; i < ttm->num_pages; ++i) {
                if (ttm->pages[i]) {
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 ttm->pages[i]);
                        ttm_put_pages(&ttm->pages[i], 1,
                                      ttm->page_flags,
                                      ttm->caching_state);
                }
        }
        ttm->state = tt_unpopulated;
}

#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};
        if (!_manager) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                        h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager->pools[i];

                seq_printf(m, "%6s %12ld %13ld %8d\n",
                                p->name, p->nrefills,
                                p->nfrees, p->npages);
        }
        return 0;
}
#endif