/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc.c 247849 2013-03-05 16:15:34Z kib $
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <sys/eventhandler.h>

#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

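/*
 * NUM_PAGES_TO_ALLOC is sized so that a temporary array of that many
 * vm_page_t pointers fits in exactly one page; it bounds both the
 * allocation and the freeing batches below.
 */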
#define NUM_PAGES_TO_ALLOC              (PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION                16
#define FREE_ALL_PAGES                  (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL              1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @ttm_page_alloc_flags: Flags to pass to the page allocator.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
        struct lock             lock;
        bool                    fill_lock;
        bool                    dma32;
        struct pglist           list;
        int                     ttm_page_alloc_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only
 * place where they may change is the sysfs store. They won't have an
 * immediate effect anyway, so forcing serialized access to them is
 * pointless.
 */

struct ttm_pool_opts {
        unsigned        alloc_size;
        unsigned        max_size;
        unsigned        small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj_ref: Reference count for manager lifetime handling.
 * @lowmem_handler: vm_lowmem eventhandler tag used to shrink the pools.
 * @options: Limits for the pools; see struct ttm_pool_opts.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
        unsigned int kobj_ref;
        eventhandler_tag lowmem_handler;
        struct ttm_pool_opts    options;

        union {
                struct ttm_page_pool    u_pools[NUM_POOLS];
                struct _utag {
                        struct ttm_page_pool    u_wc_pool;
                        struct ttm_page_pool    u_uc_pool;
                        struct ttm_page_pool    u_wc_pool_dma32;
                        struct ttm_page_pool    u_uc_pool_dma32;
                } _ut;
        } _u;
};

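/*
 * Convenience aliases for the union above: the four pools can be reached
 * either through the u_pools[] array (used when iterating, e.g. by the
 * shrinker) or through the named members (used when initializing each
 * pool individually).
 */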
#define pools _u.u_pools
#define wc_pool _u._ut.u_wc_pool
#define uc_pool _u._ut.u_uc_pool
#define wc_pool_dma32 _u._ut.u_wc_pool_dma32
#define uc_pool_dma32 _u._ut.u_uc_pool_dma32

static void
ttm_vm_page_free(vm_page_t m)
{

        KASSERT(m->object == NULL, ("ttm page %p is owned", m));
        KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
        KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
#if 0
        KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
        m->oflags |= VPO_UNMANAGED;
#endif
        m->flags &= ~PG_FICTITIOUS;
        vm_page_busy_wait(m, FALSE, "ttmvpf");
        vm_page_wakeup(m);
        vm_page_free_contig(m, PAGE_SIZE);
        /*
        vm_page_unwire(m, 0);
        vm_page_free(m);
        */
}

static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

        switch (cstate) {
        case tt_uncached:
                return (VM_MEMATTR_UNCACHEABLE);
        case tt_wc:
                return (VM_MEMATTR_WRITE_COMBINING);
        case tt_cached:
                return (VM_MEMATTR_WRITE_BACK);
        }
        panic("caching state %d\n", cstate);
}

static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{
        kfree(m);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
                struct attribute *attr, const char *buffer, size_t size)
{
        int chars;
        unsigned val;
        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert kb to number of pages */
        val = val / (PAGE_SIZE >> 10);

        if (attr == &ttm_page_pool_max)
                m->options.max_size = val;
        else if (attr == &ttm_page_pool_small)
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        pr_warn("Setting allocation size to larger than %lu is not recommended\n",
                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
                struct attribute *attr, char *buffer)
{
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif

static struct ttm_pool_manager *_manager;

static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
        vm_page_t m;
        int i;

        for (i = 0; i < addrinarray; i++) {
                m = pages[i];
#ifdef TTM_HAS_AGP
                unmap_page_from_agp(m);
#endif
                pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
        }
        return 0;
}

static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
        vm_page_t m;
        int i;

        for (i = 0; i < addrinarray; i++) {
                m = pages[i];
#ifdef TTM_HAS_AGP
                map_page_into_agp(pages[i]);
#endif
                pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
        }
        return 0;
}

static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
        vm_page_t m;
        int i;

        for (i = 0; i < addrinarray; i++) {
                m = pages[i];
#ifdef TTM_HAS_AGP
                map_page_into_agp(pages[i]);
#endif
                pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
        }
        return 0;
}

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

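        /*
         * Pool index encoding: bit 0 selects the caching state (0 = wc,
         * 1 = uc) and bit 1 selects the DMA32 variant, matching the
         * wc, uc, wc dma32, uc dma32 layout of u_pools[].
         */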
        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager->pools[pool_index];
}

/* Set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
        unsigned i;

        /* Our VM handles vm memattr automatically on the page free. */
        if (set_pages_array_wb(pages, npages))
                pr_err("Failed to set %d pages to wb!\n", npages);
        for (i = 0; i < npages; ++i)
                ttm_vm_page_free(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we free at most
 * NUM_PAGES_TO_ALLOC pages in one go before releasing the lock.
 *
 * @pool: pool to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees everything
 * @return: number of requested pages left unfreed
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        vm_page_t p, p1;
        vm_page_t *pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;
        unsigned i;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = kmalloc(npages_to_free * sizeof(vm_page_t),
            M_TEMP, M_WAITOK | M_ZERO);

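        /*
         * Walk the free list from the tail, collecting at most
         * NUM_PAGES_TO_ALLOC pages per pass; the pool lock is dropped
         * for each batch while the caching attributes are being reset.
         */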
restart:
        lockmgr(&pool->lock, LK_EXCLUSIVE);

        TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        for (i = 0; i < freed_pages; i++)
                                TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        lockmgr(&pool->lock, LK_RELEASE);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* if there is anything left to free, restart the scan */
                        if (nr_free)
                                goto restart;

                        /* We must not fall through or break here: the code
                         * after the loop expects the pool lock to be held,
                         * and we have already dropped it.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                for (i = 0; i < freed_pages; i++)
                        TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        lockmgr(&pool->lock, LK_RELEASE);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        drm_free(pages_to_free, M_TEMP);
        return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
        unsigned i;
        int total = 0;
        for (i = 0; i < NUM_POOLS; ++i)
                total += _manager->pools[i].npages;

        return total;
}

/**
 * Callback for the VM to request that the pools reduce the number of pages
 * they hold.
 */
static int ttm_pool_mm_shrink(void *arg)
{
        static unsigned int start_pool = 0;
        unsigned i;
        unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
        struct ttm_page_pool *pool;
        int shrink_pages = 100; /* XXXKIB */

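        /*
         * Each invocation starts one pool further along, so repeated
         * low-memory events spread the freeing across all the pools
         * instead of always draining the first one.
         */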
        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
        return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

        manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
            ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

        EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(vm_page_t *pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;
        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to uc!\n", cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to wc!\n", cpages);
                break;
        default:
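                /* tt_cached needs no change; write-back is the default. */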
                break;
        }
        return r;
}

/**
 * Free the pages that failed to change caching state, removing them from
 * the pages list first so they are neither handed out nor pooled.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                vm_page_t *failed_pages, unsigned cpages)
{
        unsigned i;
        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                TAILQ_REMOVE(pages, failed_pages[i], pageq);
                ttm_vm_page_free(failed_pages[i]);
        }
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count according to the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        vm_page_t *caching_array;
        vm_page_t p;
        int r = 0;
        unsigned i, cpages, aflags;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

        aflags = VM_ALLOC_NORMAL |
            ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
            VM_ALLOC_ZERO : 0);

        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages * sizeof(vm_page_t), M_TEMP,
            M_WAITOK | M_ZERO);

        for (i = 0, cpages = 0; i < count; ++i) {
                p = vm_page_alloc_contig(0,
                    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
                    VM_MAX_ADDRESS, PAGE_SIZE, 0,
                    1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
                if (!p) {
                        pr_err("Unable to get page %u\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                                          cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }
#if 0
                p->oflags &= ~VPO_UNMANAGED;
#endif
                p->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM /* KIB: nop */
                /* gfp flags of highmem pages should never be dma32, so we
                 * should be fine in such a case
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = p;
                        if (cpages == max_cpages) {

                                r = ttm_set_pages_caching(caching_array,
                                                cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                TAILQ_INSERT_HEAD(pages, p, pageq);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                                        ttm_flags, cstate,
                                        caching_array, cpages);
        }
out:
        drm_free(caching_array, M_TEMP);

        return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
    int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        vm_page_t p;
        int r;
        unsigned cpages = 0;
        /**
         * Only allow one pool fill operation at a time.
         * If the pool doesn't have enough pages for the allocation, the new
         * pages are allocated from outside the pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If the allocation request is small and there are not enough
         * pages in the pool we fill the pool up first. */
        if (count < _manager->options.small
                && count > pool->npages) {
                struct pglist new_pages;
                unsigned alloc_size = _manager->options.alloc_size;

                /**
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                lockmgr(&pool->lock, LK_RELEASE);

                TAILQ_INIT(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
                    ttm_flags, cstate, alloc_size);
                lockmgr(&pool->lock, LK_EXCLUSIVE);

                if (!r) {
                        TAILQ_CONCAT(&pool->list, &new_pages, pageq);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        pr_err("Failed to fill pool (%p)\n", pool);
                        /* If any pages were allocated before the failure,
                         * put them in the pool. Count them before the
                         * concat empties new_pages. */
                        TAILQ_FOREACH(p, &new_pages, pageq) {
                                ++cpages;
                        }
                        TAILQ_CONCAT(&pool->list, &new_pages, pageq);
                        pool->npages += cpages;
                }

        }
        pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                                        struct pglist *pages,
                                        int ttm_flags,
                                        enum ttm_caching_state cstate,
                                        unsigned count)
{
        vm_page_t p;
        unsigned i;

        lockmgr(&pool->lock, LK_EXCLUSIVE);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                TAILQ_CONCAT(pages, &pool->list, pageq);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        for (i = 0; i < count; i++) {
                p = TAILQ_FIRST(&pool->list);
                TAILQ_REMOVE(&pool->list, p, pageq);
                TAILQ_INSERT_TAIL(pages, p, pageq);
        }
        pool->npages -= count;
        count = 0;
out:
        lockmgr(&pool->lock, LK_RELEASE);
        return count;
}

/* Put all pages in the pages array into the correct pool to await reuse */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
                          enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        unsigned i;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */
                for (i = 0; i < npages; i++) {
                        if (pages[i]) {
                                ttm_vm_page_free(pages[i]);
                                pages[i] = NULL;
                        }
                }
                return;
        }

        lockmgr(&pool->lock, LK_EXCLUSIVE);
        for (i = 0; i < npages; i++) {
                if (pages[i]) {
                        TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
                        pages[i] = NULL;
                        pool->npages++;
                }
        }
        /* Check that we don't go over the pool limit */
        npages = 0;
        if (pool->npages > _manager->options.max_size) {
                npages = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC pages
                 * to reduce calls to set_memory_wb */
                if (npages < NUM_PAGES_TO_ALLOC)
                        npages = NUM_PAGES_TO_ALLOC;
        }
        lockmgr(&pool->lock, LK_RELEASE);
        if (npages)
                ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages array will hold npages correctly cached pages.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
                         enum ttm_caching_state cstate)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct pglist plist;
        vm_page_t p = NULL;
        int gfp_flags, aflags;
        unsigned count;
        int r;

        aflags = VM_ALLOC_NORMAL |
            ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);

        /* No pool for cached pages */
        if (pool == NULL) {
                for (r = 0; r < npages; ++r) {
                        p = vm_page_alloc_contig(0,
                            (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
                            VM_MAX_ADDRESS, PAGE_SIZE,
                            0, 1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
                        if (!p) {
                                pr_err("Unable to allocate page\n");
                                return -ENOMEM;
                        }
#if 0
                        p->oflags &= ~VPO_UNMANAGED;
#endif
                        p->flags |= PG_FICTITIOUS;
                        pages[r] = p;
                }
                return 0;
        }

        /* combine the zero flag with the pool flags */
        gfp_flags = flags | pool->ttm_page_alloc_flags;

        /* First we take pages from the pool */
        TAILQ_INIT(&plist);
        npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
        count = 0;
        TAILQ_FOREACH(p, &plist, pageq) {
                pages[count++] = p;
        }

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                TAILQ_FOREACH(p, &plist, pageq) {
                        pmap_zero_page(VM_PAGE_TO_PHYS(p));
                }
        }

        /* If the pool didn't have enough pages, allocate new ones. */
        if (npages > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 */
                TAILQ_INIT(&plist);
                r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
                    npages);
                TAILQ_FOREACH(p, &plist, pageq) {
                        pages[count++] = p;
                }
                if (r) {
                        /* If there are any pages in the list, put them back
                         * into the pool. */
                        pr_err("Failed to allocate extra pages for large request\n");
                        ttm_put_pages(pages, count, flags, cstate);
                        return r;
                }
        }

        return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
                                      char *name)
{
        lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
        pool->fill_lock = false;
        TAILQ_INIT(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->ttm_page_alloc_flags = flags;
        pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
        WARN_ON(_manager);

        pr_info("Initializing pool allocator\n");

        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);

        ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
        ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
            TTM_PAGE_FLAG_DMA32, "wc dma");
        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
            TTM_PAGE_FLAG_DMA32, "uc dma");

        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

        refcount_init(&_manager->kobj_ref, 1);
        ttm_pool_mm_shrink_init(_manager);

        return 0;
}

void ttm_page_alloc_fini(void)
{
        int i;

        pr_info("Finalizing pool allocator\n");
        ttm_pool_mm_shrink_fini(_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

        if (refcount_release(&_manager->kobj_ref))
                ttm_pool_kobj_release(_manager);
        _manager = NULL;
}

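/*
 * ttm_pool_populate() and ttm_pool_unpopulate() are the entry points that
 * drivers typically wire into their ttm_tt populate/unpopulate backend
 * hooks; each page is also charged to the global memory accounting via
 * ttm_mem_global_alloc_page().
 */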
int ttm_pool_populate(struct ttm_tt *ttm)
{
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
        unsigned i;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_get_pages(&ttm->pages[i], 1,
                                    ttm->page_flags,
                                    ttm->caching_state);
                if (ret != 0) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }

                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                false, false);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
                }
        }

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return ret;
                }
        }

        ttm->state = tt_unbound;
        return 0;
}

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
        unsigned i;

        for (i = 0; i < ttm->num_pages; ++i) {
                if (ttm->pages[i]) {
                        ttm_mem_global_free_page(ttm->glob->mem_glob,
                                                 ttm->pages[i]);
                        ttm_put_pages(&ttm->pages[i], 1,
                                      ttm->page_flags,
                                      ttm->caching_state);
                }
        }
        ttm->state = tt_unpopulated;
}

#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};
        if (!_manager) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                        h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager->pools[i];

                seq_printf(m, "%6s %12ld %13ld %8d\n",
                                p->name, p->nrefills,
                                p->nfrees, p->npages);
        }
        return 0;
}
#endif