[dragonfly.git] sys/dev/drm/ttm/ttm_page_alloc.c (commit 742870a5559ab409c0b338e0d2c4c677b6a968bc)
1 /*
2  * Copyright (c) Red Hat Inc.
3
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sub license,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the
12  * next paragraph) shall be included in all copies or substantial portions
13  * of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Dave Airlie <airlied@redhat.com>
24  *          Jerome Glisse <jglisse@redhat.com>
25  *          Pauli Nieminen <suokkos@gmail.com>
26  */
27 /*
28  * Copyright (c) 2013 The FreeBSD Foundation
29  * All rights reserved.
30  *
31  * Portions of this software were developed by Konstantin Belousov
32  * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
33  *
34  * $FreeBSD: head/sys/dev/drm2/ttm/ttm_page_alloc.c 247849 2013-03-05 16:15:34Z kib $
35  */
36
37 /* Simple list-based uncached page pool
38  * - Pool collects recently freed pages for reuse
39  * - Use page->lru to keep a free list
40  * - doesn't track pages currently in use
41  */
42
43 #define pr_fmt(fmt) "[TTM] " fmt
44
45 #include <sys/eventhandler.h>
46
47 #include <drm/drmP.h>
48 #include <drm/ttm/ttm_bo_driver.h>
49 #include <drm/ttm/ttm_page_alloc.h>
50
51 #ifdef TTM_HAS_AGP
52 #include <asm/agp.h>
53 #endif
54
55 #define NUM_PAGES_TO_ALLOC              (PAGE_SIZE/sizeof(struct page *))
56 #define SMALL_ALLOCATION                16
57 #define FREE_ALL_PAGES                  (~0U)
58 /* times are in msecs */
59 #define PAGE_FREE_INTERVAL              1000
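
/*
 * Added note (not in the original source): with 4 KiB pages and 8-byte
 * pointers, NUM_PAGES_TO_ALLOC works out to 4096 / 8 = 512, so each refill
 * or free batch handles at most 512 pages (2 MiB).  A minimal sanity-check
 * sketch of that value, assuming the usual LP64 sizes:
 */
#if 0
/* holds on a 4 KiB-page LP64 build; illustrative only */
CTASSERT(NUM_PAGES_TO_ALLOC == 512);
#endif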
60
61 /**
62  * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
63  *
64  * @lock: Protects the shared pool from concurrent access. Must be used with
65  * irqsave/irqrestore variants because the pool allocator may be called from
66  * delayed work.
67  * @fill_lock: Prevent concurrent calls to fill.
68  * @list: Pool of free uc/wc pages for fast reuse.
69  * @gfp_flags: Flags to pass for alloc_page.
70  * @npages: Number of pages in pool.
71  */
72 struct ttm_page_pool {
73         struct lock             lock;
74         bool                    fill_lock;
75         bool                    dma32;
76         struct pglist           list;
77         int                     ttm_page_alloc_flags;
78         unsigned                npages;
79         char                    *name;
80         unsigned long           nfrees;
81         unsigned long           nrefills;
82 };
83
84 /**
85  * Limits for the pool. They are handled without locks because the only place
86  * they can change is the sysfs store. They don't take immediate effect anyway,
87  * so forcing serialized access to them is pointless.
88  */
89
90 struct ttm_pool_opts {
91         unsigned        alloc_size;
92         unsigned        max_size;
93         unsigned        small;
94 };
95
96 #define NUM_POOLS 4
97
98 /**
99  * struct ttm_pool_manager - Holds memory pools for fast allocation
100  *
101  * The manager is a read-only object for the pool code, so it doesn't need locking.
102  *
103  * @free_interval: minimum number of jiffies between freeing pages from a pool.
104  * @page_alloc_inited: reference counting for pool allocation.
105  * @work: Work that is used to shrink the pool. The work is only run when there
106  * are some pages to free.
107  * @small_allocation: Limit, in number of pages, below which an allocation counts as small.
108  *
109  * @pools: All pool objects in use.
110  **/
111 struct ttm_pool_manager {
112         unsigned int kobj_ref;
113         eventhandler_tag lowmem_handler;
114         struct ttm_pool_opts    options;
115
116         union {
117                 struct ttm_page_pool    u_pools[NUM_POOLS];
118                 struct _utag {
119                         struct ttm_page_pool    u_wc_pool;
120                         struct ttm_page_pool    u_uc_pool;
121                         struct ttm_page_pool    u_wc_pool_dma32;
122                         struct ttm_page_pool    u_uc_pool_dma32;
123                 } _ut;
124         } _u;
125 };
126
127 #define pools _u.u_pools
128 #define wc_pool _u._ut.u_wc_pool
129 #define uc_pool _u._ut.u_uc_pool
130 #define wc_pool_dma32 _u._ut.u_wc_pool_dma32
131 #define uc_pool_dma32 _u._ut.u_uc_pool_dma32
132
133 static vm_memattr_t
134 ttm_caching_state_to_vm(enum ttm_caching_state cstate)
135 {
136
137         switch (cstate) {
138         case tt_uncached:
139                 return (VM_MEMATTR_UNCACHEABLE);
140         case tt_wc:
141                 return (VM_MEMATTR_WRITE_COMBINING);
142         case tt_cached:
143                 return (VM_MEMATTR_WRITE_BACK);
144         }
145         panic("caching state %d\n", cstate);
146 }
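
/*
 * Added usage sketch (not in the original source): the mapping above is what
 * lets the pool hand a TTM caching state straight to vm_page_alloc_contig()
 * and pmap_page_set_memattr() later in this file, e.g.:
 */
#if 0
	KKASSERT(ttm_caching_state_to_vm(tt_wc) == VM_MEMATTR_WRITE_COMBINING);
	KKASSERT(ttm_caching_state_to_vm(tt_cached) == VM_MEMATTR_WRITE_BACK);
#endif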
147
148 static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
149 {
150         kfree(m);
151 }
152
153 #if 0
154 /* XXXKIB sysctl */
155 static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
156                 struct attribute *attr, const char *buffer, size_t size)
157 {
158         int chars;
159         unsigned val;
160         chars = sscanf(buffer, "%u", &val);
161         if (chars == 0)
162                 return size;
163
164         /* Convert kb to number of pages */
165         val = val / (PAGE_SIZE >> 10);
166
167         if (attr == &ttm_page_pool_max)
168                 m->options.max_size = val;
169         else if (attr == &ttm_page_pool_small)
170                 m->options.small = val;
171         else if (attr == &ttm_page_pool_alloc_size) {
172                 if (val > NUM_PAGES_TO_ALLOC*8) {
173                         pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
174                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
175                                NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
176                         return size;
177                 } else if (val > NUM_PAGES_TO_ALLOC) {
178                         pr_warn("Setting allocation size to larger than %lu is not recommended\n",
179                                 NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
180                 }
181                 m->options.alloc_size = val;
182         }
183
184         return size;
185 }
186
187 static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
188                 struct attribute *attr, char *buffer)
189 {
190         unsigned val = 0;
191
192         if (attr == &ttm_page_pool_max)
193                 val = m->options.max_size;
194         else if (attr == &ttm_page_pool_small)
195                 val = m->options.small;
196         else if (attr == &ttm_page_pool_alloc_size)
197                 val = m->options.alloc_size;
198
199         val = val * (PAGE_SIZE >> 10);
200
201         return snprintf(buffer, PAGE_SIZE, "%u\n", val);
202 }
203 #endif
204
205 static struct ttm_pool_manager *_manager;
206
207 static int set_pages_array_wb(struct page **pages, int addrinarray)
208 {
209         vm_page_t m;
210         int i;
211
212         for (i = 0; i < addrinarray; i++) {
213                 m = (struct vm_page *)pages[i];
214 #ifdef TTM_HAS_AGP
215                 unmap_page_from_agp(pages[i]);
216 #endif
217                 pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
218         }
219         return 0;
220 }
221
222 static int set_pages_array_wc(struct page **pages, int addrinarray)
223 {
224         vm_page_t m;
225         int i;
226
227         for (i = 0; i < addrinarray; i++) {
228                 m = (struct vm_page *)pages[i];
229 #ifdef TTM_HAS_AGP
230                 map_page_into_agp(pages[i]);
231 #endif
232                 pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
233         }
234         return 0;
235 }
236
237 static int set_pages_array_uc(struct page **pages, int addrinarray)
238 {
239         vm_page_t m;
240         int i;
241
242         for (i = 0; i < addrinarray; i++) {
243                 m = (struct vm_page *)pages[i];
244 #ifdef TTM_HAS_AGP
245                 map_page_into_agp(pages[i]);
246 #endif
247                 pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
248         }
249         return 0;
250 }
251
252 /**
253  * Select the right pool for the requested caching state and ttm flags. */
254 static struct ttm_page_pool *ttm_get_pool(int flags,
255                 enum ttm_caching_state cstate)
256 {
257         int pool_index;
258
259         if (cstate == tt_cached)
260                 return NULL;
261
262         if (cstate == tt_wc)
263                 pool_index = 0x0;
264         else
265                 pool_index = 0x1;
266
267         if (flags & TTM_PAGE_FLAG_DMA32)
268                 pool_index |= 0x2;
269
270         return &_manager->pools[pool_index];
271 }
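
/*
 * Added usage sketch (not in the original source): the two index bits
 * computed above select one of the four entries of _manager->pools[],
 * which the union at the top of this file also exposes by name:
 */
#if 0
	KKASSERT(ttm_get_pool(0, tt_wc) == &_manager->wc_pool);                  /* index 0 */
	KKASSERT(ttm_get_pool(0, tt_uncached) == &_manager->uc_pool);            /* index 1 */
	KKASSERT(ttm_get_pool(TTM_PAGE_FLAG_DMA32, tt_wc) ==
	    &_manager->wc_pool_dma32);                                           /* index 2 */
	KKASSERT(ttm_get_pool(TTM_PAGE_FLAG_DMA32, tt_uncached) ==
	    &_manager->uc_pool_dma32);                                           /* index 3 */
	KKASSERT(ttm_get_pool(0, tt_cached) == NULL);  /* cached pages are never pooled */
#endif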
272
273 /* set memory back to wb and free the pages. */
274 static void ttm_pages_put(struct page *pages[], unsigned npages)
275 {
276         unsigned i;
277         if (set_pages_array_wb(pages, npages))
278                 pr_err("Failed to set %d pages to wb!\n", npages);
279         for (i = 0; i < npages; ++i)
280                 __free_page(pages[i]);
281 }
282
283 static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
284                 unsigned freed_pages)
285 {
286         pool->npages -= freed_pages;
287         pool->nfrees += freed_pages;
288 }
289
290 /**
291  * Free pages from pool.
292  *
293  * To prevent hogging the ttm_swap process we only free up to
294  * NUM_PAGES_TO_ALLOC pages in one go.
295  *
296  * @pool: pool to free the pages from
297  * @nr_free: number of pages to free; FREE_ALL_PAGES frees all pages in the pool
298  **/
299 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
300 {
301         vm_page_t p, p1;
302         struct page **pages_to_free;
303         unsigned freed_pages = 0,
304                  npages_to_free = nr_free;
305         unsigned i;
306
307         if (NUM_PAGES_TO_ALLOC < nr_free)
308                 npages_to_free = NUM_PAGES_TO_ALLOC;
309
310         pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
311             M_TEMP, M_WAITOK | M_ZERO);
312
313 restart:
314         lockmgr(&pool->lock, LK_EXCLUSIVE);
315
316         TAILQ_FOREACH_REVERSE_MUTABLE(p, &pool->list, pglist, pageq, p1) {
317                 if (freed_pages >= npages_to_free)
318                         break;
319
320                 pages_to_free[freed_pages++] = (struct page *)p;
321                 /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
322                 if (freed_pages >= NUM_PAGES_TO_ALLOC) {
323                         /* remove range of pages from the pool */
324                         for (i = 0; i < freed_pages; i++)
325                                 TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
326
327                         ttm_pool_update_free_locked(pool, freed_pages);
328                         /**
329                          * Because changing page caching is costly
330                          * we unlock the pool to prevent stalling.
331                          */
332                         lockmgr(&pool->lock, LK_RELEASE);
333
334                         ttm_pages_put(pages_to_free, freed_pages);
335                         if (likely(nr_free != FREE_ALL_PAGES))
336                                 nr_free -= freed_pages;
337
338                         if (NUM_PAGES_TO_ALLOC >= nr_free)
339                                 npages_to_free = nr_free;
340                         else
341                                 npages_to_free = NUM_PAGES_TO_ALLOC;
342
343                         freed_pages = 0;
344
345                         /* free all so restart the processing */
346                         if (nr_free)
347                                 goto restart;
348
349                         /* Not allowed to fall through or break because
350                          * the code after the loop runs with the pool lock
351                          * held, while we have already dropped it here.
352                          */
353                         goto out;
354
355                 }
356         }
357
358         /* remove range of pages from the pool */
359         if (freed_pages) {
360                 for (i = 0; i < freed_pages; i++)
361                         TAILQ_REMOVE(&pool->list, (struct vm_page *)pages_to_free[i], pageq);
362
363                 ttm_pool_update_free_locked(pool, freed_pages);
364                 nr_free -= freed_pages;
365         }
366
367         lockmgr(&pool->lock, LK_RELEASE);
368
369         if (freed_pages)
370                 ttm_pages_put(pages_to_free, freed_pages);
371 out:
372         drm_free(pages_to_free, M_TEMP);
373         return nr_free;
374 }
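
/*
 * Added usage sketch (not in the original source): callers either trim a
 * pool by a bounded amount (the low-memory handler below) or drain it
 * completely (ttm_page_alloc_fini()).  The return value is how many of the
 * requested pages could not be freed:
 */
#if 0
	unsigned still_wanted;

	still_wanted = ttm_page_pool_free(pool, 100);    /* trim up to 100 pages */
	(void)ttm_page_pool_free(pool, FREE_ALL_PAGES);  /* drain everything */
#endif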
375
376 /* Get a good estimate of how many pages are free in the pools */
377 static int ttm_pool_get_num_unused_pages(void)
378 {
379         unsigned i;
380         int total = 0;
381         for (i = 0; i < NUM_POOLS; ++i)
382                 total += _manager->pools[i].npages;
383
384         return total;
385 }
386
387 /**
388  * Callback for the memory manager to request that the pools reduce the number of pages held.
389  */
390 static int ttm_pool_mm_shrink(void *arg)
391 {
392         static unsigned int start_pool = 0;
393         unsigned i;
394         unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
395         struct ttm_page_pool *pool;
396         int shrink_pages = 100; /* XXXKIB */
397
398         pool_offset = pool_offset % NUM_POOLS;
399         /* select start pool in round robin fashion */
400         for (i = 0; i < NUM_POOLS; ++i) {
401                 unsigned nr_free = shrink_pages;
402                 if (shrink_pages == 0)
403                         break;
404                 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
405                 shrink_pages = ttm_page_pool_free(pool, nr_free);
406         }
407         /* return estimated number of unused pages in pool */
408         return ttm_pool_get_num_unused_pages();
409 }
410
411 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
412 {
413
414         manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
415             ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
416 }
417
418 static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
419 {
420
421         EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
422 }
423
424 static int ttm_set_pages_caching(struct page **pages,
425                 enum ttm_caching_state cstate, unsigned cpages)
426 {
427         int r = 0;
428         /* Set page caching */
429         switch (cstate) {
430         case tt_uncached:
431                 r = set_pages_array_uc(pages, cpages);
432                 if (r)
433                         pr_err("Failed to set %d pages to uc!\n", cpages);
434                 break;
435         case tt_wc:
436                 r = set_pages_array_wc(pages, cpages);
437                 if (r)
438                         pr_err("Failed to set %d pages to wc!\n", cpages);
439                 break;
440         default:
441                 break;
442         }
443         return r;
444 }
445
446 /**
447  * Free the pages that failed to change caching state. If there are any
448  * pages that have already changed their caching state, put them back in
449  * the pool.
450  */
451 static void ttm_handle_caching_state_failure(struct pglist *pages,
452                 int ttm_flags, enum ttm_caching_state cstate,
453                 struct page **failed_pages, unsigned cpages)
454 {
455         unsigned i;
456         /* Failed pages have to be freed */
457         for (i = 0; i < cpages; ++i) {
458                 TAILQ_REMOVE(pages, (struct vm_page *)failed_pages[i], pageq);
459                 __free_page(failed_pages[i]);
460         }
461 }
462
463 /**
464  * Allocate new pages with correct caching.
465  *
466  * This function is reentrant if the caller updates count based on the number
467  * of pages returned in the pages list.
468  */
469 static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
470                 int ttm_flags, enum ttm_caching_state cstate, unsigned count)
471 {
472         struct page **caching_array;
473         struct vm_page *p;
474         int r = 0;
475         unsigned i, cpages, aflags;
476         unsigned max_cpages = min(count,
477                         (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));
478
479         aflags = VM_ALLOC_NORMAL |
480             ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
481             VM_ALLOC_ZERO : 0);
482         
483         /* allocate array for page caching change */
484         caching_array = kmalloc(max_cpages * sizeof(vm_page_t), M_TEMP,
485             M_WAITOK | M_ZERO);
486
487         for (i = 0, cpages = 0; i < count; ++i) {
488                 p = vm_page_alloc_contig(0,
489                     (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
490                     VM_MAX_ADDRESS, PAGE_SIZE, 0,
491                     1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
492                 if (!p) {
493                         pr_err("Unable to get page %u\n", i);
494
495                         /* store already allocated pages in the pool after
496                          * setting the caching state */
497                         if (cpages) {
498                                 r = ttm_set_pages_caching(caching_array,
499                                                           cstate, cpages);
500                                 if (r)
501                                         ttm_handle_caching_state_failure(pages,
502                                                 ttm_flags, cstate,
503                                                 caching_array, cpages);
504                         }
505                         r = -ENOMEM;
506                         goto out;
507                 }
508 #if 0
509                 p->oflags &= ~VPO_UNMANAGED;
510 #endif
511                 p->flags |= PG_FICTITIOUS;
512
513 #ifdef CONFIG_HIGHMEM /* KIB: nop */
514                 /* gfp flags of a highmem page should never be dma32, so
515                  * we should be fine in that case
516                  */
517                 if (!PageHighMem(p))
518 #endif
519                 {
520                         caching_array[cpages++] = (struct page *)p;
521                         if (cpages == max_cpages) {
522
523                                 r = ttm_set_pages_caching(caching_array,
524                                                 cstate, cpages);
525                                 if (r) {
526                                         ttm_handle_caching_state_failure(pages,
527                                                 ttm_flags, cstate,
528                                                 caching_array, cpages);
529                                         goto out;
530                                 }
531                                 cpages = 0;
532                         }
533                 }
534
535                 TAILQ_INSERT_HEAD(pages, p, pageq);
536         }
537
538         if (cpages) {
539                 r = ttm_set_pages_caching(caching_array, cstate, cpages);
540                 if (r)
541                         ttm_handle_caching_state_failure(pages,
542                                         ttm_flags, cstate,
543                                         caching_array, cpages);
544         }
545 out:
546         drm_free(caching_array, M_TEMP);
547
548         return r;
549 }
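
/*
 * Added usage sketch (not in the original source): even when the function
 * fails part-way, it has already linked the pages it did obtain onto the
 * caller's list, so a caller can count what it got and retry only for the
 * remainder, which is what the reentrancy note above refers to:
 */
#if 0
	struct pglist plist;
	vm_page_t p;
	unsigned want = 64, got = 0;

	TAILQ_INIT(&plist);
	if (ttm_alloc_new_pages(&plist, 0, 0, tt_wc, want) != 0) {
		TAILQ_FOREACH(p, &plist, pageq)
			got++;
		/* retry later for the remaining (want - got) pages */
	}
#endif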
550
551 /**
552  * Fill the given pool if there aren't enough pages and the requested number of
553  * pages is small.
554  */
555 static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
556     int ttm_flags, enum ttm_caching_state cstate, unsigned count)
557 {
558         vm_page_t p;
559         int r;
560         unsigned cpages = 0;
561         /**
562          * Only allow one pool fill operation at a time.
563          * If the pool doesn't have enough pages for the allocation, new
564          * pages are allocated from outside of the pool.
565          */
566         if (pool->fill_lock)
567                 return;
568
569         pool->fill_lock = true;
570
571         /* If the allocation request is small and there are not enough
572          * pages in the pool, we fill the pool up first. */
573         if (count < _manager->options.small
574                 && count > pool->npages) {
575                 struct pglist new_pages;
576                 unsigned alloc_size = _manager->options.alloc_size;
577
578                 /**
579                  * Can't change page caching if in irqsave context. We have to
580                  * drop the pool->lock.
581                  */
582                 lockmgr(&pool->lock, LK_RELEASE);
583
584                 TAILQ_INIT(&new_pages);
585                 r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
586                     ttm_flags, cstate, alloc_size);
587                 lockmgr(&pool->lock, LK_EXCLUSIVE);
588
589                 if (!r) {
590                         TAILQ_CONCAT(&pool->list, &new_pages, pageq);
591                         ++pool->nrefills;
592                         pool->npages += alloc_size;
593                 } else {
594                         pr_err("Failed to fill pool (%p)\n", pool);
595                         /* Count the pages we did get and put them in the pool. */
596                         TAILQ_FOREACH(p, &new_pages, pageq) {
597                                 ++cpages;
598                         }
599                         TAILQ_CONCAT(&pool->list, &new_pages, pageq);
600                         pool->npages += cpages;
601                 }
602
603         }
604         pool->fill_lock = false;
605 }
606
607 /**
608  * Cut 'count' number of pages from the pool and put them on the return list.
609  *
610  * @return count of pages still required to fulfill the request.
611  */
612 static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
613                                         struct pglist *pages,
614                                         int ttm_flags,
615                                         enum ttm_caching_state cstate,
616                                         unsigned count)
617 {
618         vm_page_t p;
619         unsigned i;
620
621         lockmgr(&pool->lock, LK_EXCLUSIVE);
622         ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);
623
624         if (count >= pool->npages) {
625                 /* take all pages from the pool */
626                 TAILQ_CONCAT(pages, &pool->list, pageq);
627                 count -= pool->npages;
628                 pool->npages = 0;
629                 goto out;
630         }
631         for (i = 0; i < count; i++) {
632                 p = TAILQ_FIRST(&pool->list);
633                 TAILQ_REMOVE(&pool->list, p, pageq);
634                 TAILQ_INSERT_TAIL(pages, p, pageq);
635         }
636         pool->npages -= count;
637         count = 0;
638 out:
639         lockmgr(&pool->lock, LK_RELEASE);
640         return count;
641 }
642
643 /* Put all pages in the pages list into the correct pool to wait for reuse */
644 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
645                           enum ttm_caching_state cstate)
646 {
647         struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
648         unsigned i;
649         struct vm_page *page;
650
651         if (pool == NULL) {
652                 /* No pool for this memory type so free the pages */
653                 for (i = 0; i < npages; i++) {
654                         if (pages[i]) {
655 #if 0
656                                 if (page_count(pages[i]) != 1)
657                                         pr_err("Erroneous page count. Leaking pages.\n");
658 #endif
659                                 __free_page(pages[i]);
660                                 pages[i] = NULL;
661                         }
662                 }
663                 return;
664         }
665
666         lockmgr(&pool->lock, LK_EXCLUSIVE);
667         for (i = 0; i < npages; i++) {
668                 if (pages[i]) {
669                         page = (struct vm_page *)pages[i];
670                         TAILQ_INSERT_TAIL(&pool->list, page, pageq);
671                         pages[i] = NULL;
672                         pool->npages++;
673                 }
674         }
675         /* Check that we don't go over the pool limit */
676         npages = 0;
677         if (pool->npages > _manager->options.max_size) {
678                 npages = pool->npages - _manager->options.max_size;
679                 /* free at least NUM_PAGES_TO_ALLOC pages to reduce
680                  * the number of calls to set_pages_array_wb */
681                 if (npages < NUM_PAGES_TO_ALLOC)
682                         npages = NUM_PAGES_TO_ALLOC;
683         }
684         lockmgr(&pool->lock, LK_RELEASE);
685         if (npages)
686                 ttm_page_pool_free(pool, npages);
687 }
688
689 /*
690  * On success the pages list will hold npages correctly
691  * cached pages.
692  */
693 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
694                          enum ttm_caching_state cstate)
695 {
696         struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
697         struct pglist plist;
698         struct vm_page *p = NULL;
699         int gfp_flags, aflags;
700         unsigned count;
701         int r;
702
703         aflags = VM_ALLOC_NORMAL |
704             ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);
705
706         /* No pool for cached pages */
707         if (pool == NULL) {
708                 for (r = 0; r < npages; ++r) {
709                         p = vm_page_alloc_contig(0,
710                             (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
711                             VM_MAX_ADDRESS, PAGE_SIZE,
712                             0, 1*PAGE_SIZE, ttm_caching_state_to_vm(cstate));
713                         if (!p) {
714                                 pr_err("Unable to allocate page\n");
715                                 return -ENOMEM;
716                         }
717 #if 0
718                         p->oflags &= ~VPO_UNMANAGED;
719 #endif
720                         p->flags |= PG_FICTITIOUS;
721                         pages[r] = (struct page *)p;
722                 }
723                 return 0;
724         }
725
726         /* combine zero flag to pool flags */
727         gfp_flags = flags | pool->ttm_page_alloc_flags;
728
729         /* First we take pages from the pool */
730         TAILQ_INIT(&plist);
731         npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
732         count = 0;
733         TAILQ_FOREACH(p, &plist, pageq) {
734                 pages[count++] = (struct page *)p;
735         }
736
737         /* clear the pages coming from the pool if requested */
738         if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
739                 TAILQ_FOREACH(p, &plist, pageq) {
740                         pmap_zero_page(VM_PAGE_TO_PHYS(p));
741                 }
742         }
743
744         /* If the pool didn't have enough pages, allocate new ones. */
745         if (npages > 0) {
746                 /* ttm_alloc_new_pages doesn't reference pool so we can run
747                  * multiple requests in parallel.
748                  **/
749                 TAILQ_INIT(&plist);
750                 r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
751                     npages);
752                 TAILQ_FOREACH(p, &plist, pageq) {
753                         pages[count++] = (struct page *)p;
754                 }
755                 if (r) {
756                         /* If there are any pages in the list put them back
757                          * in the pool. */
758                         pr_err("Failed to allocate extra pages for large request\n");
759                         ttm_put_pages(pages, count, flags, cstate);
760                         return r;
761                 }
762         }
763
764         return 0;
765 }
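
/*
 * Added usage sketch (not in the original source, hypothetical values): a
 * caller asking for zeroed, DMA32-capable write-combined pages and handing
 * them back when done.  ttm_pool_populate()/ttm_pool_unpopulate() below are
 * the real in-tree users of this pair.
 */
#if 0
	struct page *pages[16];
	int flags = TTM_PAGE_FLAG_DMA32 | TTM_PAGE_FLAG_ZERO_ALLOC;

	if (ttm_get_pages(pages, 16, flags, tt_wc) == 0) {
		/* ... map and use the pages ... */
		ttm_put_pages(pages, 16, flags, tt_wc);
	}
#endif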
766
767 static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
768                                       char *name)
769 {
770         lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE);
771         pool->fill_lock = false;
772         TAILQ_INIT(&pool->list);
773         pool->npages = pool->nfrees = 0;
774         pool->ttm_page_alloc_flags = flags;
775         pool->name = name;
776 }
777
778 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
779 {
780         WARN_ON(_manager);
781
782         pr_info("Initializing pool allocator\n");
783
784         _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
785
786         ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
787         ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
788         ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
789             TTM_PAGE_FLAG_DMA32, "wc dma");
790         ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
791             TTM_PAGE_FLAG_DMA32, "uc dma");
792
793         _manager->options.max_size = max_pages;
794         _manager->options.small = SMALL_ALLOCATION;
795         _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
796
797         refcount_init(&_manager->kobj_ref, 1);
798         ttm_pool_mm_shrink_init(_manager);
799
800         return 0;
801 }
802
803 void ttm_page_alloc_fini(void)
804 {
805         int i;
806
807         pr_info("Finalizing pool allocator\n");
808         ttm_pool_mm_shrink_fini(_manager);
809
810         for (i = 0; i < NUM_POOLS; ++i)
811                 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
812
813         if (refcount_release(&_manager->kobj_ref))
814                 ttm_pool_kobj_release(_manager);
815         _manager = NULL;
816 }
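
/*
 * Added usage sketch (not in the original source): the pool allocator is
 * initialized once with the global page limit and torn down symmetrically;
 * the 65536-page cap below is a hypothetical value for illustration.
 */
#if 0
	ttm_page_alloc_init(glob, 65536);  /* e.g. cap the pools at 256 MiB of 4 KiB pages */
	/* ... driver lifetime ... */
	ttm_page_alloc_fini();
#endif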
817
818 int ttm_pool_populate(struct ttm_tt *ttm)
819 {
820         struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
821         unsigned i;
822         int ret;
823
824         if (ttm->state != tt_unpopulated)
825                 return 0;
826
827         for (i = 0; i < ttm->num_pages; ++i) {
828                 ret = ttm_get_pages(&ttm->pages[i], 1,
829                                     ttm->page_flags,
830                                     ttm->caching_state);
831                 if (ret != 0) {
832                         ttm_pool_unpopulate(ttm);
833                         return -ENOMEM;
834                 }
835
836                 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
837                                                 false, false);
838                 if (unlikely(ret != 0)) {
839                         ttm_pool_unpopulate(ttm);
840                         return -ENOMEM;
841                 }
842         }
843
844         if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
845                 ret = ttm_tt_swapin(ttm);
846                 if (unlikely(ret != 0)) {
847                         ttm_pool_unpopulate(ttm);
848                         return ret;
849                 }
850         }
851
852         ttm->state = tt_unbound;
853         return 0;
854 }
855
856 void ttm_pool_unpopulate(struct ttm_tt *ttm)
857 {
858         unsigned i;
859
860         for (i = 0; i < ttm->num_pages; ++i) {
861                 if (ttm->pages[i]) {
862                         ttm_mem_global_free_page(ttm->glob->mem_glob,
863                                                  ttm->pages[i]);
864                         ttm_put_pages(&ttm->pages[i], 1,
865                                       ttm->page_flags,
866                                       ttm->caching_state);
867                 }
868         }
869         ttm->state = tt_unpopulated;
870 }
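
/*
 * Added usage sketch (not in the original source): drivers with no special
 * DMA requirements typically point their ttm_bo_driver populate hooks at
 * these two helpers; the field names below follow the ttm_bo_driver layout
 * assumed here and may differ between drivers.
 */
#if 0
static struct ttm_bo_driver example_bo_driver = {
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	/* ... other required hooks ... */
};
#endif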
871
872 #if 0
873 /* XXXKIB sysctl */
874 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
875 {
876         struct ttm_page_pool *p;
877         unsigned i;
878         char *h[] = {"pool", "refills", "pages freed", "size"};
879         if (!_manager) {
880                 seq_printf(m, "No pool allocator running.\n");
881                 return 0;
882         }
883         seq_printf(m, "%6s %12s %13s %8s\n",
884                         h[0], h[1], h[2], h[3]);
885         for (i = 0; i < NUM_POOLS; ++i) {
886                 p = &_manager->pools[i];
887
888                 seq_printf(m, "%6s %12ld %13ld %8d\n",
889                                 p->name, p->nrefills,
890                                 p->nfrees, p->npages);
891         }
892         return 0;
893 }
894 #endif