mm/memblock.c: remove unnecessary always-true comparison
[linux.git] / mm / hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/rmap.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 #include <linux/page-isolation.h>
25 #include <linux/jhash.h>
26
27 #include <asm/page.h>
28 #include <asm/pgtable.h>
29 #include <asm/tlb.h>
30
31 #include <linux/io.h>
32 #include <linux/hugetlb.h>
33 #include <linux/hugetlb_cgroup.h>
34 #include <linux/node.h>
35 #include "internal.h"
36
37 int hugepages_treat_as_movable;
38
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42 /*
43  * Minimum page order among possible hugepage sizes, set to a proper value
44  * at boot time.
45  */
46 static unsigned int minimum_order __read_mostly = UINT_MAX;
47
48 __initdata LIST_HEAD(huge_boot_pages);
49
50 /* for command line parsing */
51 static struct hstate * __initdata parsed_hstate;
52 static unsigned long __initdata default_hstate_max_huge_pages;
53 static unsigned long __initdata default_hstate_size;
54 static bool __initdata parsed_valid_hugepagesz = true;
55
56 /*
57  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58  * free_huge_pages, and surplus_huge_pages.
59  */
60 DEFINE_SPINLOCK(hugetlb_lock);
61
62 /*
63  * Serializes faults on the same logical page.  This is used to
64  * prevent spurious OOMs when the hugepage pool is fully utilized.
65  */
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
71
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73 {
74         bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76         spin_unlock(&spool->lock);
77
78         /* If no pages are used, and no other handles to the subpool
79          * remain, give up any reservations mased on minimum size and
80          * free the subpool */
81         if (free) {
82                 if (spool->min_hpages != -1)
83                         hugetlb_acct_memory(spool->hstate,
84                                                 -spool->min_hpages);
85                 kfree(spool);
86         }
87 }
88
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90                                                 long min_hpages)
91 {
92         struct hugepage_subpool *spool;
93
94         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95         if (!spool)
96                 return NULL;
97
98         spin_lock_init(&spool->lock);
99         spool->count = 1;
100         spool->max_hpages = max_hpages;
101         spool->hstate = h;
102         spool->min_hpages = min_hpages;
103
104         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105                 kfree(spool);
106                 return NULL;
107         }
108         spool->rsv_hpages = min_hpages;
109
110         return spool;
111 }
112
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
114 {
115         spin_lock(&spool->lock);
116         BUG_ON(!spool->count);
117         spool->count--;
118         unlock_or_release_subpool(spool);
119 }
120
121 /*
122  * Subpool accounting for allocating and reserving pages.
123  * Return -ENOMEM if there are not enough resources to satisfy the
124  * the request.  Otherwise, return the number of pages by which the
125  * global pools must be adjusted (upward).  The returned value may
126  * only be different than the passed value (delta) in the case where
127  * a subpool minimum size must be manitained.
128  */
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130                                       long delta)
131 {
132         long ret = delta;
133
134         if (!spool)
135                 return ret;
136
137         spin_lock(&spool->lock);
138
139         if (spool->max_hpages != -1) {          /* maximum size accounting */
140                 if ((spool->used_hpages + delta) <= spool->max_hpages)
141                         spool->used_hpages += delta;
142                 else {
143                         ret = -ENOMEM;
144                         goto unlock_ret;
145                 }
146         }
147
148         /* minimum size accounting */
149         if (spool->min_hpages != -1 && spool->rsv_hpages) {
150                 if (delta > spool->rsv_hpages) {
151                         /*
152                          * Asking for more reserves than those already taken on
153                          * behalf of subpool.  Return difference.
154                          */
155                         ret = delta - spool->rsv_hpages;
156                         spool->rsv_hpages = 0;
157                 } else {
158                         ret = 0;        /* reserves already accounted for */
159                         spool->rsv_hpages -= delta;
160                 }
161         }
162
163 unlock_ret:
164         spin_unlock(&spool->lock);
165         return ret;
166 }
167
168 /*
169  * Subpool accounting for freeing and unreserving pages.
170  * Return the number of global page reservations that must be dropped.
171  * The return value may only be different than the passed value (delta)
172  * in the case where a subpool minimum size must be maintained.
173  */
174 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
175                                        long delta)
176 {
177         long ret = delta;
178
179         if (!spool)
180                 return delta;
181
182         spin_lock(&spool->lock);
183
184         if (spool->max_hpages != -1)            /* maximum size accounting */
185                 spool->used_hpages -= delta;
186
187          /* minimum size accounting */
188         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
189                 if (spool->rsv_hpages + delta <= spool->min_hpages)
190                         ret = 0;
191                 else
192                         ret = spool->rsv_hpages + delta - spool->min_hpages;
193
194                 spool->rsv_hpages += delta;
195                 if (spool->rsv_hpages > spool->min_hpages)
196                         spool->rsv_hpages = spool->min_hpages;
197         }
198
199         /*
200          * If hugetlbfs_put_super couldn't free spool due to an outstanding
201          * quota reference, free it now.
202          */
203         unlock_or_release_subpool(spool);
204
205         return ret;
206 }
207
208 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
209 {
210         return HUGETLBFS_SB(inode->i_sb)->spool;
211 }
212
213 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
214 {
215         return subpool_inode(file_inode(vma->vm_file));
216 }
217
218 /*
219  * Region tracking -- allows tracking of reservations and instantiated pages
220  *                    across the pages in a mapping.
221  *
222  * The region data structures are embedded into a resv_map and protected
223  * by a resv_map's lock.  The set of regions within the resv_map represent
224  * reservations for huge pages, or huge pages that have already been
225  * instantiated within the map.  The from and to elements are huge page
226  * indicies into the associated mapping.  from indicates the starting index
227  * of the region.  to represents the first index past the end of  the region.
228  *
229  * For example, a file region structure with from == 0 and to == 4 represents
230  * four huge pages in a mapping.  It is important to note that the to element
231  * represents the first element past the end of the region. This is used in
232  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
233  *
234  * Interval notation of the form [from, to) will be used to indicate that
235  * the endpoint from is inclusive and to is exclusive.
236  */
237 struct file_region {
238         struct list_head link;
239         long from;
240         long to;
241 };
242
243 /*
244  * Add the huge page range represented by [f, t) to the reserve
245  * map.  In the normal case, existing regions will be expanded
246  * to accommodate the specified range.  Sufficient regions should
247  * exist for expansion due to the previous call to region_chg
248  * with the same range.  However, it is possible that region_del
249  * could have been called after region_chg and modifed the map
250  * in such a way that no region exists to be expanded.  In this
251  * case, pull a region descriptor from the cache associated with
252  * the map and use that for the new range.
253  *
254  * Return the number of new huge pages added to the map.  This
255  * number is greater than or equal to zero.
256  */
257 static long region_add(struct resv_map *resv, long f, long t)
258 {
259         struct list_head *head = &resv->regions;
260         struct file_region *rg, *nrg, *trg;
261         long add = 0;
262
263         spin_lock(&resv->lock);
264         /* Locate the region we are either in or before. */
265         list_for_each_entry(rg, head, link)
266                 if (f <= rg->to)
267                         break;
268
269         /*
270          * If no region exists which can be expanded to include the
271          * specified range, the list must have been modified by an
272          * interleving call to region_del().  Pull a region descriptor
273          * from the cache and use it for this range.
274          */
275         if (&rg->link == head || t < rg->from) {
276                 VM_BUG_ON(resv->region_cache_count <= 0);
277
278                 resv->region_cache_count--;
279                 nrg = list_first_entry(&resv->region_cache, struct file_region,
280                                         link);
281                 list_del(&nrg->link);
282
283                 nrg->from = f;
284                 nrg->to = t;
285                 list_add(&nrg->link, rg->link.prev);
286
287                 add += t - f;
288                 goto out_locked;
289         }
290
291         /* Round our left edge to the current segment if it encloses us. */
292         if (f > rg->from)
293                 f = rg->from;
294
295         /* Check for and consume any regions we now overlap with. */
296         nrg = rg;
297         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
298                 if (&rg->link == head)
299                         break;
300                 if (rg->from > t)
301                         break;
302
303                 /* If this area reaches higher then extend our area to
304                  * include it completely.  If this is not the first area
305                  * which we intend to reuse, free it. */
306                 if (rg->to > t)
307                         t = rg->to;
308                 if (rg != nrg) {
309                         /* Decrement return value by the deleted range.
310                          * Another range will span this area so that by
311                          * end of routine add will be >= zero
312                          */
313                         add -= (rg->to - rg->from);
314                         list_del(&rg->link);
315                         kfree(rg);
316                 }
317         }
318
319         add += (nrg->from - f);         /* Added to beginning of region */
320         nrg->from = f;
321         add += t - nrg->to;             /* Added to end of region */
322         nrg->to = t;
323
324 out_locked:
325         resv->adds_in_progress--;
326         spin_unlock(&resv->lock);
327         VM_BUG_ON(add < 0);
328         return add;
329 }
330
331 /*
332  * Examine the existing reserve map and determine how many
333  * huge pages in the specified range [f, t) are NOT currently
334  * represented.  This routine is called before a subsequent
335  * call to region_add that will actually modify the reserve
336  * map to add the specified range [f, t).  region_chg does
337  * not change the number of huge pages represented by the
338  * map.  However, if the existing regions in the map can not
339  * be expanded to represent the new range, a new file_region
340  * structure is added to the map as a placeholder.  This is
341  * so that the subsequent region_add call will have all the
342  * regions it needs and will not fail.
343  *
344  * Upon entry, region_chg will also examine the cache of region descriptors
345  * associated with the map.  If there are not enough descriptors cached, one
346  * will be allocated for the in progress add operation.
347  *
348  * Returns the number of huge pages that need to be added to the existing
349  * reservation map for the range [f, t).  This number is greater or equal to
350  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
351  * is needed and can not be allocated.
352  */
353 static long region_chg(struct resv_map *resv, long f, long t)
354 {
355         struct list_head *head = &resv->regions;
356         struct file_region *rg, *nrg = NULL;
357         long chg = 0;
358
359 retry:
360         spin_lock(&resv->lock);
361 retry_locked:
362         resv->adds_in_progress++;
363
364         /*
365          * Check for sufficient descriptors in the cache to accommodate
366          * the number of in progress add operations.
367          */
368         if (resv->adds_in_progress > resv->region_cache_count) {
369                 struct file_region *trg;
370
371                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
372                 /* Must drop lock to allocate a new descriptor. */
373                 resv->adds_in_progress--;
374                 spin_unlock(&resv->lock);
375
376                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
377                 if (!trg) {
378                         kfree(nrg);
379                         return -ENOMEM;
380                 }
381
382                 spin_lock(&resv->lock);
383                 list_add(&trg->link, &resv->region_cache);
384                 resv->region_cache_count++;
385                 goto retry_locked;
386         }
387
388         /* Locate the region we are before or in. */
389         list_for_each_entry(rg, head, link)
390                 if (f <= rg->to)
391                         break;
392
393         /* If we are below the current region then a new region is required.
394          * Subtle, allocate a new region at the position but make it zero
395          * size such that we can guarantee to record the reservation. */
396         if (&rg->link == head || t < rg->from) {
397                 if (!nrg) {
398                         resv->adds_in_progress--;
399                         spin_unlock(&resv->lock);
400                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
401                         if (!nrg)
402                                 return -ENOMEM;
403
404                         nrg->from = f;
405                         nrg->to   = f;
406                         INIT_LIST_HEAD(&nrg->link);
407                         goto retry;
408                 }
409
410                 list_add(&nrg->link, rg->link.prev);
411                 chg = t - f;
412                 goto out_nrg;
413         }
414
415         /* Round our left edge to the current segment if it encloses us. */
416         if (f > rg->from)
417                 f = rg->from;
418         chg = t - f;
419
420         /* Check for and consume any regions we now overlap with. */
421         list_for_each_entry(rg, rg->link.prev, link) {
422                 if (&rg->link == head)
423                         break;
424                 if (rg->from > t)
425                         goto out;
426
427                 /* We overlap with this area, if it extends further than
428                  * us then we must extend ourselves.  Account for its
429                  * existing reservation. */
430                 if (rg->to > t) {
431                         chg += rg->to - t;
432                         t = rg->to;
433                 }
434                 chg -= rg->to - rg->from;
435         }
436
437 out:
438         spin_unlock(&resv->lock);
439         /*  We already know we raced and no longer need the new region */
440         kfree(nrg);
441         return chg;
442 out_nrg:
443         spin_unlock(&resv->lock);
444         return chg;
445 }
446
447 /*
448  * Abort the in progress add operation.  The adds_in_progress field
449  * of the resv_map keeps track of the operations in progress between
450  * calls to region_chg and region_add.  Operations are sometimes
451  * aborted after the call to region_chg.  In such cases, region_abort
452  * is called to decrement the adds_in_progress counter.
453  *
454  * NOTE: The range arguments [f, t) are not needed or used in this
455  * routine.  They are kept to make reading the calling code easier as
456  * arguments will match the associated region_chg call.
457  */
458 static void region_abort(struct resv_map *resv, long f, long t)
459 {
460         spin_lock(&resv->lock);
461         VM_BUG_ON(!resv->region_cache_count);
462         resv->adds_in_progress--;
463         spin_unlock(&resv->lock);
464 }
465
466 /*
467  * Delete the specified range [f, t) from the reserve map.  If the
468  * t parameter is LONG_MAX, this indicates that ALL regions after f
469  * should be deleted.  Locate the regions which intersect [f, t)
470  * and either trim, delete or split the existing regions.
471  *
472  * Returns the number of huge pages deleted from the reserve map.
473  * In the normal case, the return value is zero or more.  In the
474  * case where a region must be split, a new region descriptor must
475  * be allocated.  If the allocation fails, -ENOMEM will be returned.
476  * NOTE: If the parameter t == LONG_MAX, then we will never split
477  * a region and possibly return -ENOMEM.  Callers specifying
478  * t == LONG_MAX do not need to check for -ENOMEM error.
479  */
480 static long region_del(struct resv_map *resv, long f, long t)
481 {
482         struct list_head *head = &resv->regions;
483         struct file_region *rg, *trg;
484         struct file_region *nrg = NULL;
485         long del = 0;
486
487 retry:
488         spin_lock(&resv->lock);
489         list_for_each_entry_safe(rg, trg, head, link) {
490                 /*
491                  * Skip regions before the range to be deleted.  file_region
492                  * ranges are normally of the form [from, to).  However, there
493                  * may be a "placeholder" entry in the map which is of the form
494                  * (from, to) with from == to.  Check for placeholder entries
495                  * at the beginning of the range to be deleted.
496                  */
497                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
498                         continue;
499
500                 if (rg->from >= t)
501                         break;
502
503                 if (f > rg->from && t < rg->to) { /* Must split region */
504                         /*
505                          * Check for an entry in the cache before dropping
506                          * lock and attempting allocation.
507                          */
508                         if (!nrg &&
509                             resv->region_cache_count > resv->adds_in_progress) {
510                                 nrg = list_first_entry(&resv->region_cache,
511                                                         struct file_region,
512                                                         link);
513                                 list_del(&nrg->link);
514                                 resv->region_cache_count--;
515                         }
516
517                         if (!nrg) {
518                                 spin_unlock(&resv->lock);
519                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
520                                 if (!nrg)
521                                         return -ENOMEM;
522                                 goto retry;
523                         }
524
525                         del += t - f;
526
527                         /* New entry for end of split region */
528                         nrg->from = t;
529                         nrg->to = rg->to;
530                         INIT_LIST_HEAD(&nrg->link);
531
532                         /* Original entry is trimmed */
533                         rg->to = f;
534
535                         list_add(&nrg->link, &rg->link);
536                         nrg = NULL;
537                         break;
538                 }
539
540                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
541                         del += rg->to - rg->from;
542                         list_del(&rg->link);
543                         kfree(rg);
544                         continue;
545                 }
546
547                 if (f <= rg->from) {    /* Trim beginning of region */
548                         del += t - rg->from;
549                         rg->from = t;
550                 } else {                /* Trim end of region */
551                         del += rg->to - f;
552                         rg->to = f;
553                 }
554         }
555
556         spin_unlock(&resv->lock);
557         kfree(nrg);
558         return del;
559 }
560
561 /*
562  * A rare out of memory error was encountered which prevented removal of
563  * the reserve map region for a page.  The huge page itself was free'ed
564  * and removed from the page cache.  This routine will adjust the subpool
565  * usage count, and the global reserve count if needed.  By incrementing
566  * these counts, the reserve map entry which could not be deleted will
567  * appear as a "reserved" entry instead of simply dangling with incorrect
568  * counts.
569  */
570 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
571 {
572         struct hugepage_subpool *spool = subpool_inode(inode);
573         long rsv_adjust;
574
575         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
576         if (restore_reserve && rsv_adjust) {
577                 struct hstate *h = hstate_inode(inode);
578
579                 hugetlb_acct_memory(h, 1);
580         }
581 }
582
583 /*
584  * Count and return the number of huge pages in the reserve map
585  * that intersect with the range [f, t).
586  */
587 static long region_count(struct resv_map *resv, long f, long t)
588 {
589         struct list_head *head = &resv->regions;
590         struct file_region *rg;
591         long chg = 0;
592
593         spin_lock(&resv->lock);
594         /* Locate each segment we overlap with, and count that overlap. */
595         list_for_each_entry(rg, head, link) {
596                 long seg_from;
597                 long seg_to;
598
599                 if (rg->to <= f)
600                         continue;
601                 if (rg->from >= t)
602                         break;
603
604                 seg_from = max(rg->from, f);
605                 seg_to = min(rg->to, t);
606
607                 chg += seg_to - seg_from;
608         }
609         spin_unlock(&resv->lock);
610
611         return chg;
612 }
613
614 /*
615  * Convert the address within this vma to the page offset within
616  * the mapping, in pagecache page units; huge pages here.
617  */
618 static pgoff_t vma_hugecache_offset(struct hstate *h,
619                         struct vm_area_struct *vma, unsigned long address)
620 {
621         return ((address - vma->vm_start) >> huge_page_shift(h)) +
622                         (vma->vm_pgoff >> huge_page_order(h));
623 }
624
625 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
626                                      unsigned long address)
627 {
628         return vma_hugecache_offset(hstate_vma(vma), vma, address);
629 }
630
631 /*
632  * Return the size of the pages allocated when backing a VMA. In the majority
633  * cases this will be same size as used by the page table entries.
634  */
635 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
636 {
637         struct hstate *hstate;
638
639         if (!is_vm_hugetlb_page(vma))
640                 return PAGE_SIZE;
641
642         hstate = hstate_vma(vma);
643
644         return 1UL << huge_page_shift(hstate);
645 }
646 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
647
648 /*
649  * Return the page size being used by the MMU to back a VMA. In the majority
650  * of cases, the page size used by the kernel matches the MMU size. On
651  * architectures where it differs, an architecture-specific version of this
652  * function is required.
653  */
654 #ifndef vma_mmu_pagesize
655 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
656 {
657         return vma_kernel_pagesize(vma);
658 }
659 #endif
660
661 /*
662  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
663  * bits of the reservation map pointer, which are always clear due to
664  * alignment.
665  */
666 #define HPAGE_RESV_OWNER    (1UL << 0)
667 #define HPAGE_RESV_UNMAPPED (1UL << 1)
668 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
669
670 /*
671  * These helpers are used to track how many pages are reserved for
672  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
673  * is guaranteed to have their future faults succeed.
674  *
675  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
676  * the reserve counters are updated with the hugetlb_lock held. It is safe
677  * to reset the VMA at fork() time as it is not in use yet and there is no
678  * chance of the global counters getting corrupted as a result of the values.
679  *
680  * The private mapping reservation is represented in a subtly different
681  * manner to a shared mapping.  A shared mapping has a region map associated
682  * with the underlying file, this region map represents the backing file
683  * pages which have ever had a reservation assigned which this persists even
684  * after the page is instantiated.  A private mapping has a region map
685  * associated with the original mmap which is attached to all VMAs which
686  * reference it, this region map represents those offsets which have consumed
687  * reservation ie. where pages have been instantiated.
688  */
689 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
690 {
691         return (unsigned long)vma->vm_private_data;
692 }
693
694 static void set_vma_private_data(struct vm_area_struct *vma,
695                                                         unsigned long value)
696 {
697         vma->vm_private_data = (void *)value;
698 }
699
700 struct resv_map *resv_map_alloc(void)
701 {
702         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
703         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
704
705         if (!resv_map || !rg) {
706                 kfree(resv_map);
707                 kfree(rg);
708                 return NULL;
709         }
710
711         kref_init(&resv_map->refs);
712         spin_lock_init(&resv_map->lock);
713         INIT_LIST_HEAD(&resv_map->regions);
714
715         resv_map->adds_in_progress = 0;
716
717         INIT_LIST_HEAD(&resv_map->region_cache);
718         list_add(&rg->link, &resv_map->region_cache);
719         resv_map->region_cache_count = 1;
720
721         return resv_map;
722 }
723
724 void resv_map_release(struct kref *ref)
725 {
726         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
727         struct list_head *head = &resv_map->region_cache;
728         struct file_region *rg, *trg;
729
730         /* Clear out any active regions before we release the map. */
731         region_del(resv_map, 0, LONG_MAX);
732
733         /* ... and any entries left in the cache */
734         list_for_each_entry_safe(rg, trg, head, link) {
735                 list_del(&rg->link);
736                 kfree(rg);
737         }
738
739         VM_BUG_ON(resv_map->adds_in_progress);
740
741         kfree(resv_map);
742 }
743
744 static inline struct resv_map *inode_resv_map(struct inode *inode)
745 {
746         return inode->i_mapping->private_data;
747 }
748
749 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
750 {
751         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
752         if (vma->vm_flags & VM_MAYSHARE) {
753                 struct address_space *mapping = vma->vm_file->f_mapping;
754                 struct inode *inode = mapping->host;
755
756                 return inode_resv_map(inode);
757
758         } else {
759                 return (struct resv_map *)(get_vma_private_data(vma) &
760                                                         ~HPAGE_RESV_MASK);
761         }
762 }
763
764 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
765 {
766         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
767         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
768
769         set_vma_private_data(vma, (get_vma_private_data(vma) &
770                                 HPAGE_RESV_MASK) | (unsigned long)map);
771 }
772
773 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
774 {
775         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
776         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
777
778         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
779 }
780
781 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
782 {
783         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
784
785         return (get_vma_private_data(vma) & flag) != 0;
786 }
787
788 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
789 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
790 {
791         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
792         if (!(vma->vm_flags & VM_MAYSHARE))
793                 vma->vm_private_data = (void *)0;
794 }
795
796 /* Returns true if the VMA has associated reserve pages */
797 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
798 {
799         if (vma->vm_flags & VM_NORESERVE) {
800                 /*
801                  * This address is already reserved by other process(chg == 0),
802                  * so, we should decrement reserved count. Without decrementing,
803                  * reserve count remains after releasing inode, because this
804                  * allocated page will go into page cache and is regarded as
805                  * coming from reserved pool in releasing step.  Currently, we
806                  * don't have any other solution to deal with this situation
807                  * properly, so add work-around here.
808                  */
809                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
810                         return true;
811                 else
812                         return false;
813         }
814
815         /* Shared mappings always use reserves */
816         if (vma->vm_flags & VM_MAYSHARE) {
817                 /*
818                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
819                  * be a region map for all pages.  The only situation where
820                  * there is no region map is if a hole was punched via
821                  * fallocate.  In this case, there really are no reverves to
822                  * use.  This situation is indicated if chg != 0.
823                  */
824                 if (chg)
825                         return false;
826                 else
827                         return true;
828         }
829
830         /*
831          * Only the process that called mmap() has reserves for
832          * private mappings.
833          */
834         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
835                 return true;
836
837         return false;
838 }
839
840 static void enqueue_huge_page(struct hstate *h, struct page *page)
841 {
842         int nid = page_to_nid(page);
843         list_move(&page->lru, &h->hugepage_freelists[nid]);
844         h->free_huge_pages++;
845         h->free_huge_pages_node[nid]++;
846 }
847
848 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
849 {
850         struct page *page;
851
852         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
853                 if (!is_migrate_isolate_page(page))
854                         break;
855         /*
856          * if 'non-isolated free hugepage' not found on the list,
857          * the allocation fails.
858          */
859         if (&h->hugepage_freelists[nid] == &page->lru)
860                 return NULL;
861         list_move(&page->lru, &h->hugepage_activelist);
862         set_page_refcounted(page);
863         h->free_huge_pages--;
864         h->free_huge_pages_node[nid]--;
865         return page;
866 }
867
868 /* Movability of hugepages depends on migration support. */
869 static inline gfp_t htlb_alloc_mask(struct hstate *h)
870 {
871         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
872                 return GFP_HIGHUSER_MOVABLE;
873         else
874                 return GFP_HIGHUSER;
875 }
876
877 static struct page *dequeue_huge_page_vma(struct hstate *h,
878                                 struct vm_area_struct *vma,
879                                 unsigned long address, int avoid_reserve,
880                                 long chg)
881 {
882         struct page *page = NULL;
883         struct mempolicy *mpol;
884         nodemask_t *nodemask;
885         struct zonelist *zonelist;
886         struct zone *zone;
887         struct zoneref *z;
888         unsigned int cpuset_mems_cookie;
889
890         /*
891          * A child process with MAP_PRIVATE mappings created by their parent
892          * have no page reserves. This check ensures that reservations are
893          * not "stolen". The child may still get SIGKILLed
894          */
895         if (!vma_has_reserves(vma, chg) &&
896                         h->free_huge_pages - h->resv_huge_pages == 0)
897                 goto err;
898
899         /* If reserves cannot be used, ensure enough pages are in the pool */
900         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
901                 goto err;
902
903 retry_cpuset:
904         cpuset_mems_cookie = read_mems_allowed_begin();
905         zonelist = huge_zonelist(vma, address,
906                                         htlb_alloc_mask(h), &mpol, &nodemask);
907
908         for_each_zone_zonelist_nodemask(zone, z, zonelist,
909                                                 MAX_NR_ZONES - 1, nodemask) {
910                 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
911                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
912                         if (page) {
913                                 if (avoid_reserve)
914                                         break;
915                                 if (!vma_has_reserves(vma, chg))
916                                         break;
917
918                                 SetPagePrivate(page);
919                                 h->resv_huge_pages--;
920                                 break;
921                         }
922                 }
923         }
924
925         mpol_cond_put(mpol);
926         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
927                 goto retry_cpuset;
928         return page;
929
930 err:
931         return NULL;
932 }
933
934 /*
935  * common helper functions for hstate_next_node_to_{alloc|free}.
936  * We may have allocated or freed a huge page based on a different
937  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
938  * be outside of *nodes_allowed.  Ensure that we use an allowed
939  * node for alloc or free.
940  */
941 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
942 {
943         nid = next_node_in(nid, *nodes_allowed);
944         VM_BUG_ON(nid >= MAX_NUMNODES);
945
946         return nid;
947 }
948
949 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
950 {
951         if (!node_isset(nid, *nodes_allowed))
952                 nid = next_node_allowed(nid, nodes_allowed);
953         return nid;
954 }
955
956 /*
957  * returns the previously saved node ["this node"] from which to
958  * allocate a persistent huge page for the pool and advance the
959  * next node from which to allocate, handling wrap at end of node
960  * mask.
961  */
962 static int hstate_next_node_to_alloc(struct hstate *h,
963                                         nodemask_t *nodes_allowed)
964 {
965         int nid;
966
967         VM_BUG_ON(!nodes_allowed);
968
969         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
970         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
971
972         return nid;
973 }
974
975 /*
976  * helper for free_pool_huge_page() - return the previously saved
977  * node ["this node"] from which to free a huge page.  Advance the
978  * next node id whether or not we find a free huge page to free so
979  * that the next attempt to free addresses the next node.
980  */
981 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
982 {
983         int nid;
984
985         VM_BUG_ON(!nodes_allowed);
986
987         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
988         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
989
990         return nid;
991 }
992
993 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
994         for (nr_nodes = nodes_weight(*mask);                            \
995                 nr_nodes > 0 &&                                         \
996                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
997                 nr_nodes--)
998
999 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1000         for (nr_nodes = nodes_weight(*mask);                            \
1001                 nr_nodes > 0 &&                                         \
1002                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1003                 nr_nodes--)
1004
1005 #if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
1006 static void destroy_compound_gigantic_page(struct page *page,
1007                                         unsigned int order)
1008 {
1009         int i;
1010         int nr_pages = 1 << order;
1011         struct page *p = page + 1;
1012
1013         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1014                 clear_compound_head(p);
1015                 set_page_refcounted(p);
1016         }
1017
1018         set_compound_order(page, 0);
1019         __ClearPageHead(page);
1020 }
1021
1022 static void free_gigantic_page(struct page *page, unsigned int order)
1023 {
1024         free_contig_range(page_to_pfn(page), 1 << order);
1025 }
1026
1027 static int __alloc_gigantic_page(unsigned long start_pfn,
1028                                 unsigned long nr_pages)
1029 {
1030         unsigned long end_pfn = start_pfn + nr_pages;
1031         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1032 }
1033
1034 static bool pfn_range_valid_gigantic(struct zone *z,
1035                         unsigned long start_pfn, unsigned long nr_pages)
1036 {
1037         unsigned long i, end_pfn = start_pfn + nr_pages;
1038         struct page *page;
1039
1040         for (i = start_pfn; i < end_pfn; i++) {
1041                 if (!pfn_valid(i))
1042                         return false;
1043
1044                 page = pfn_to_page(i);
1045
1046                 if (page_zone(page) != z)
1047                         return false;
1048
1049                 if (PageReserved(page))
1050                         return false;
1051
1052                 if (page_count(page) > 0)
1053                         return false;
1054
1055                 if (PageHuge(page))
1056                         return false;
1057         }
1058
1059         return true;
1060 }
1061
1062 static bool zone_spans_last_pfn(const struct zone *zone,
1063                         unsigned long start_pfn, unsigned long nr_pages)
1064 {
1065         unsigned long last_pfn = start_pfn + nr_pages - 1;
1066         return zone_spans_pfn(zone, last_pfn);
1067 }
1068
1069 static struct page *alloc_gigantic_page(int nid, unsigned int order)
1070 {
1071         unsigned long nr_pages = 1 << order;
1072         unsigned long ret, pfn, flags;
1073         struct zone *z;
1074
1075         z = NODE_DATA(nid)->node_zones;
1076         for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1077                 spin_lock_irqsave(&z->lock, flags);
1078
1079                 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1080                 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1081                         if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
1082                                 /*
1083                                  * We release the zone lock here because
1084                                  * alloc_contig_range() will also lock the zone
1085                                  * at some point. If there's an allocation
1086                                  * spinning on this lock, it may win the race
1087                                  * and cause alloc_contig_range() to fail...
1088                                  */
1089                                 spin_unlock_irqrestore(&z->lock, flags);
1090                                 ret = __alloc_gigantic_page(pfn, nr_pages);
1091                                 if (!ret)
1092                                         return pfn_to_page(pfn);
1093                                 spin_lock_irqsave(&z->lock, flags);
1094                         }
1095                         pfn += nr_pages;
1096                 }
1097
1098                 spin_unlock_irqrestore(&z->lock, flags);
1099         }
1100
1101         return NULL;
1102 }
1103
1104 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1105 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1106
1107 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1108 {
1109         struct page *page;
1110
1111         page = alloc_gigantic_page(nid, huge_page_order(h));
1112         if (page) {
1113                 prep_compound_gigantic_page(page, huge_page_order(h));
1114                 prep_new_huge_page(h, page, nid);
1115         }
1116
1117         return page;
1118 }
1119
1120 static int alloc_fresh_gigantic_page(struct hstate *h,
1121                                 nodemask_t *nodes_allowed)
1122 {
1123         struct page *page = NULL;
1124         int nr_nodes, node;
1125
1126         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1127                 page = alloc_fresh_gigantic_page_node(h, node);
1128                 if (page)
1129                         return 1;
1130         }
1131
1132         return 0;
1133 }
1134
1135 static inline bool gigantic_page_supported(void) { return true; }
1136 #else
1137 static inline bool gigantic_page_supported(void) { return false; }
1138 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1139 static inline void destroy_compound_gigantic_page(struct page *page,
1140                                                 unsigned int order) { }
1141 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1142                                         nodemask_t *nodes_allowed) { return 0; }
1143 #endif
1144
1145 static void update_and_free_page(struct hstate *h, struct page *page)
1146 {
1147         int i;
1148
1149         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1150                 return;
1151
1152         h->nr_huge_pages--;
1153         h->nr_huge_pages_node[page_to_nid(page)]--;
1154         for (i = 0; i < pages_per_huge_page(h); i++) {
1155                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1156                                 1 << PG_referenced | 1 << PG_dirty |
1157                                 1 << PG_active | 1 << PG_private |
1158                                 1 << PG_writeback);
1159         }
1160         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1161         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1162         set_page_refcounted(page);
1163         if (hstate_is_gigantic(h)) {
1164                 destroy_compound_gigantic_page(page, huge_page_order(h));
1165                 free_gigantic_page(page, huge_page_order(h));
1166         } else {
1167                 __free_pages(page, huge_page_order(h));
1168         }
1169 }
1170
1171 struct hstate *size_to_hstate(unsigned long size)
1172 {
1173         struct hstate *h;
1174
1175         for_each_hstate(h) {
1176                 if (huge_page_size(h) == size)
1177                         return h;
1178         }
1179         return NULL;
1180 }
1181
1182 /*
1183  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1184  * to hstate->hugepage_activelist.)
1185  *
1186  * This function can be called for tail pages, but never returns true for them.
1187  */
1188 bool page_huge_active(struct page *page)
1189 {
1190         VM_BUG_ON_PAGE(!PageHuge(page), page);
1191         return PageHead(page) && PagePrivate(&page[1]);
1192 }
1193
1194 /* never called for tail page */
1195 static void set_page_huge_active(struct page *page)
1196 {
1197         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1198         SetPagePrivate(&page[1]);
1199 }
1200
1201 static void clear_page_huge_active(struct page *page)
1202 {
1203         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1204         ClearPagePrivate(&page[1]);
1205 }
1206
1207 void free_huge_page(struct page *page)
1208 {
1209         /*
1210          * Can't pass hstate in here because it is called from the
1211          * compound page destructor.
1212          */
1213         struct hstate *h = page_hstate(page);
1214         int nid = page_to_nid(page);
1215         struct hugepage_subpool *spool =
1216                 (struct hugepage_subpool *)page_private(page);
1217         bool restore_reserve;
1218
1219         set_page_private(page, 0);
1220         page->mapping = NULL;
1221         VM_BUG_ON_PAGE(page_count(page), page);
1222         VM_BUG_ON_PAGE(page_mapcount(page), page);
1223         restore_reserve = PagePrivate(page);
1224         ClearPagePrivate(page);
1225
1226         /*
1227          * A return code of zero implies that the subpool will be under its
1228          * minimum size if the reservation is not restored after page is free.
1229          * Therefore, force restore_reserve operation.
1230          */
1231         if (hugepage_subpool_put_pages(spool, 1) == 0)
1232                 restore_reserve = true;
1233
1234         spin_lock(&hugetlb_lock);
1235         clear_page_huge_active(page);
1236         hugetlb_cgroup_uncharge_page(hstate_index(h),
1237                                      pages_per_huge_page(h), page);
1238         if (restore_reserve)
1239                 h->resv_huge_pages++;
1240
1241         if (h->surplus_huge_pages_node[nid]) {
1242                 /* remove the page from active list */
1243                 list_del(&page->lru);
1244                 update_and_free_page(h, page);
1245                 h->surplus_huge_pages--;
1246                 h->surplus_huge_pages_node[nid]--;
1247         } else {
1248                 arch_clear_hugepage_flags(page);
1249                 enqueue_huge_page(h, page);
1250         }
1251         spin_unlock(&hugetlb_lock);
1252 }
1253
1254 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1255 {
1256         INIT_LIST_HEAD(&page->lru);
1257         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1258         spin_lock(&hugetlb_lock);
1259         set_hugetlb_cgroup(page, NULL);
1260         h->nr_huge_pages++;
1261         h->nr_huge_pages_node[nid]++;
1262         spin_unlock(&hugetlb_lock);
1263         put_page(page); /* free it into the hugepage allocator */
1264 }
1265
1266 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1267 {
1268         int i;
1269         int nr_pages = 1 << order;
1270         struct page *p = page + 1;
1271
1272         /* we rely on prep_new_huge_page to set the destructor */
1273         set_compound_order(page, order);
1274         __ClearPageReserved(page);
1275         __SetPageHead(page);
1276         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1277                 /*
1278                  * For gigantic hugepages allocated through bootmem at
1279                  * boot, it's safer to be consistent with the not-gigantic
1280                  * hugepages and clear the PG_reserved bit from all tail pages
1281                  * too.  Otherwse drivers using get_user_pages() to access tail
1282                  * pages may get the reference counting wrong if they see
1283                  * PG_reserved set on a tail page (despite the head page not
1284                  * having PG_reserved set).  Enforcing this consistency between
1285                  * head and tail pages allows drivers to optimize away a check
1286                  * on the head page when they need know if put_page() is needed
1287                  * after get_user_pages().
1288                  */
1289                 __ClearPageReserved(p);
1290                 set_page_count(p, 0);
1291                 set_compound_head(p, page);
1292         }
1293         atomic_set(compound_mapcount_ptr(page), -1);
1294 }
1295
1296 /*
1297  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1298  * transparent huge pages.  See the PageTransHuge() documentation for more
1299  * details.
1300  */
1301 int PageHuge(struct page *page)
1302 {
1303         if (!PageCompound(page))
1304                 return 0;
1305
1306         page = compound_head(page);
1307         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1308 }
1309 EXPORT_SYMBOL_GPL(PageHuge);
1310
1311 /*
1312  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1313  * normal or transparent huge pages.
1314  */
1315 int PageHeadHuge(struct page *page_head)
1316 {
1317         if (!PageHead(page_head))
1318                 return 0;
1319
1320         return get_compound_page_dtor(page_head) == free_huge_page;
1321 }
1322
1323 pgoff_t __basepage_index(struct page *page)
1324 {
1325         struct page *page_head = compound_head(page);
1326         pgoff_t index = page_index(page_head);
1327         unsigned long compound_idx;
1328
1329         if (!PageHuge(page_head))
1330                 return page_index(page);
1331
1332         if (compound_order(page_head) >= MAX_ORDER)
1333                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1334         else
1335                 compound_idx = page - page_head;
1336
1337         return (index << compound_order(page_head)) + compound_idx;
1338 }
1339
1340 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1341 {
1342         struct page *page;
1343
1344         page = __alloc_pages_node(nid,
1345                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1346                                                 __GFP_REPEAT|__GFP_NOWARN,
1347                 huge_page_order(h));
1348         if (page) {
1349                 prep_new_huge_page(h, page, nid);
1350         }
1351
1352         return page;
1353 }
1354
1355 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1356 {
1357         struct page *page;
1358         int nr_nodes, node;
1359         int ret = 0;
1360
1361         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1362                 page = alloc_fresh_huge_page_node(h, node);
1363                 if (page) {
1364                         ret = 1;
1365                         break;
1366                 }
1367         }
1368
1369         if (ret)
1370                 count_vm_event(HTLB_BUDDY_PGALLOC);
1371         else
1372                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1373
1374         return ret;
1375 }
1376
1377 /*
1378  * Free huge page from pool from next node to free.
1379  * Attempt to keep persistent huge pages more or less
1380  * balanced over allowed nodes.
1381  * Called with hugetlb_lock locked.
1382  */
1383 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1384                                                          bool acct_surplus)
1385 {
1386         int nr_nodes, node;
1387         int ret = 0;
1388
1389         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1390                 /*
1391                  * If we're returning unused surplus pages, only examine
1392                  * nodes with surplus pages.
1393                  */
1394                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1395                     !list_empty(&h->hugepage_freelists[node])) {
1396                         struct page *page =
1397                                 list_entry(h->hugepage_freelists[node].next,
1398                                           struct page, lru);
1399                         list_del(&page->lru);
1400                         h->free_huge_pages--;
1401                         h->free_huge_pages_node[node]--;
1402                         if (acct_surplus) {
1403                                 h->surplus_huge_pages--;
1404                                 h->surplus_huge_pages_node[node]--;
1405                         }
1406                         update_and_free_page(h, page);
1407                         ret = 1;
1408                         break;
1409                 }
1410         }
1411
1412         return ret;
1413 }
1414
1415 /*
1416  * Dissolve a given free hugepage into free buddy pages. This function does
1417  * nothing for in-use (including surplus) hugepages.
1418  */
1419 static void dissolve_free_huge_page(struct page *page)
1420 {
1421         spin_lock(&hugetlb_lock);
1422         if (PageHuge(page) && !page_count(page)) {
1423                 struct hstate *h = page_hstate(page);
1424                 int nid = page_to_nid(page);
1425                 list_del(&page->lru);
1426                 h->free_huge_pages--;
1427                 h->free_huge_pages_node[nid]--;
1428                 update_and_free_page(h, page);
1429         }
1430         spin_unlock(&hugetlb_lock);
1431 }
1432
1433 /*
1434  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1435  * make specified memory blocks removable from the system.
1436  * Note that start_pfn should aligned with (minimum) hugepage size.
1437  */
1438 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1439 {
1440         unsigned long pfn;
1441
1442         if (!hugepages_supported())
1443                 return;
1444
1445         VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1446         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1447                 dissolve_free_huge_page(pfn_to_page(pfn));
1448 }
1449
1450 /*
1451  * There are 3 ways this can get called:
1452  * 1. With vma+addr: we use the VMA's memory policy
1453  * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
1454  *    page from any node, and let the buddy allocator itself figure
1455  *    it out.
1456  * 3. With !vma, but nid!=NUMA_NO_NODE.  We allocate a huge page
1457  *    strictly from 'nid'
1458  */
1459 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1460                 struct vm_area_struct *vma, unsigned long addr, int nid)
1461 {
1462         int order = huge_page_order(h);
1463         gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1464         unsigned int cpuset_mems_cookie;
1465
1466         /*
1467          * We need a VMA to get a memory policy.  If we do not
1468          * have one, we use the 'nid' argument.
1469          *
1470          * The mempolicy stuff below has some non-inlined bits
1471          * and calls ->vm_ops.  That makes it hard to optimize at
1472          * compile-time, even when NUMA is off and it does
1473          * nothing.  This helps the compiler optimize it out.
1474          */
1475         if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1476                 /*
1477                  * If a specific node is requested, make sure to
1478                  * get memory from there, but only when a node
1479                  * is explicitly specified.
1480                  */
1481                 if (nid != NUMA_NO_NODE)
1482                         gfp |= __GFP_THISNODE;
1483                 /*
1484                  * Make sure to call something that can handle
1485                  * nid=NUMA_NO_NODE
1486                  */
1487                 return alloc_pages_node(nid, gfp, order);
1488         }
1489
1490         /*
1491          * OK, so we have a VMA.  Fetch the mempolicy and try to
1492          * allocate a huge page with it.  We will only reach this
1493          * when CONFIG_NUMA=y.
1494          */
1495         do {
1496                 struct page *page;
1497                 struct mempolicy *mpol;
1498                 struct zonelist *zl;
1499                 nodemask_t *nodemask;
1500
1501                 cpuset_mems_cookie = read_mems_allowed_begin();
1502                 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1503                 mpol_cond_put(mpol);
1504                 page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1505                 if (page)
1506                         return page;
1507         } while (read_mems_allowed_retry(cpuset_mems_cookie));
1508
1509         return NULL;
1510 }
1511
1512 /*
1513  * There are two ways to allocate a huge page:
1514  * 1. When you have a VMA and an address (like a fault)
1515  * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1516  *
1517  * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
1518  * this case which signifies that the allocation should be done with
1519  * respect for the VMA's memory policy.
1520  *
1521  * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1522  * implies that memory policies will not be taken in to account.
1523  */
1524 static struct page *__alloc_buddy_huge_page(struct hstate *h,
1525                 struct vm_area_struct *vma, unsigned long addr, int nid)
1526 {
1527         struct page *page;
1528         unsigned int r_nid;
1529
1530         if (hstate_is_gigantic(h))
1531                 return NULL;
1532
1533         /*
1534          * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1535          * This makes sure the caller is picking _one_ of the modes with which
1536          * we can call this function, not both.
1537          */
1538         if (vma || (addr != -1)) {
1539                 VM_WARN_ON_ONCE(addr == -1);
1540                 VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1541         }
1542         /*
1543          * Assume we will successfully allocate the surplus page to
1544          * prevent racing processes from causing the surplus to exceed
1545          * overcommit
1546          *
1547          * This however introduces a different race, where a process B
1548          * tries to grow the static hugepage pool while alloc_pages() is
1549          * called by process A. B will only examine the per-node
1550          * counters in determining if surplus huge pages can be
1551          * converted to normal huge pages in adjust_pool_surplus(). A
1552          * won't be able to increment the per-node counter, until the
1553          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1554          * no more huge pages can be converted from surplus to normal
1555          * state (and doesn't try to convert again). Thus, we have a
1556          * case where a surplus huge page exists, the pool is grown, and
1557          * the surplus huge page still exists after, even though it
1558          * should just have been converted to a normal huge page. This
1559          * does not leak memory, though, as the hugepage will be freed
1560          * once it is out of use. It also does not allow the counters to
1561          * go out of whack in adjust_pool_surplus() as we don't modify
1562          * the node values until we've gotten the hugepage and only the
1563          * per-node value is checked there.
1564          */
1565         spin_lock(&hugetlb_lock);
1566         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1567                 spin_unlock(&hugetlb_lock);
1568                 return NULL;
1569         } else {
1570                 h->nr_huge_pages++;
1571                 h->surplus_huge_pages++;
1572         }
1573         spin_unlock(&hugetlb_lock);
1574
1575         page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1576
1577         spin_lock(&hugetlb_lock);
1578         if (page) {
1579                 INIT_LIST_HEAD(&page->lru);
1580                 r_nid = page_to_nid(page);
1581                 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1582                 set_hugetlb_cgroup(page, NULL);
1583                 /*
1584                  * We incremented the global counters already
1585                  */
1586                 h->nr_huge_pages_node[r_nid]++;
1587                 h->surplus_huge_pages_node[r_nid]++;
1588                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1589         } else {
1590                 h->nr_huge_pages--;
1591                 h->surplus_huge_pages--;
1592                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1593         }
1594         spin_unlock(&hugetlb_lock);
1595
1596         return page;
1597 }
1598
1599 /*
1600  * Allocate a huge page from 'nid'.  Note, 'nid' may be
1601  * NUMA_NO_NODE, which means that it may be allocated
1602  * anywhere.
1603  */
1604 static
1605 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1606 {
1607         unsigned long addr = -1;
1608
1609         return __alloc_buddy_huge_page(h, NULL, addr, nid);
1610 }
1611
1612 /*
1613  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1614  */
1615 static
1616 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1617                 struct vm_area_struct *vma, unsigned long addr)
1618 {
1619         return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1620 }
1621
1622 /*
1623  * This allocation function is useful in the context where vma is irrelevant.
1624  * E.g. soft-offlining uses this function because it only cares physical
1625  * address of error page.
1626  */
1627 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1628 {
1629         struct page *page = NULL;
1630
1631         spin_lock(&hugetlb_lock);
1632         if (h->free_huge_pages - h->resv_huge_pages > 0)
1633                 page = dequeue_huge_page_node(h, nid);
1634         spin_unlock(&hugetlb_lock);
1635
1636         if (!page)
1637                 page = __alloc_buddy_huge_page_no_mpol(h, nid);
1638
1639         return page;
1640 }
1641
1642 /*
1643  * Increase the hugetlb pool such that it can accommodate a reservation
1644  * of size 'delta'.
1645  */
1646 static int gather_surplus_pages(struct hstate *h, int delta)
1647 {
1648         struct list_head surplus_list;
1649         struct page *page, *tmp;
1650         int ret, i;
1651         int needed, allocated;
1652         bool alloc_ok = true;
1653
1654         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1655         if (needed <= 0) {
1656                 h->resv_huge_pages += delta;
1657                 return 0;
1658         }
1659
1660         allocated = 0;
1661         INIT_LIST_HEAD(&surplus_list);
1662
1663         ret = -ENOMEM;
1664 retry:
1665         spin_unlock(&hugetlb_lock);
1666         for (i = 0; i < needed; i++) {
1667                 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1668                 if (!page) {
1669                         alloc_ok = false;
1670                         break;
1671                 }
1672                 list_add(&page->lru, &surplus_list);
1673         }
1674         allocated += i;
1675
1676         /*
1677          * After retaking hugetlb_lock, we need to recalculate 'needed'
1678          * because either resv_huge_pages or free_huge_pages may have changed.
1679          */
1680         spin_lock(&hugetlb_lock);
1681         needed = (h->resv_huge_pages + delta) -
1682                         (h->free_huge_pages + allocated);
1683         if (needed > 0) {
1684                 if (alloc_ok)
1685                         goto retry;
1686                 /*
1687                  * We were not able to allocate enough pages to
1688                  * satisfy the entire reservation so we free what
1689                  * we've allocated so far.
1690                  */
1691                 goto free;
1692         }
1693         /*
1694          * The surplus_list now contains _at_least_ the number of extra pages
1695          * needed to accommodate the reservation.  Add the appropriate number
1696          * of pages to the hugetlb pool and free the extras back to the buddy
1697          * allocator.  Commit the entire reservation here to prevent another
1698          * process from stealing the pages as they are added to the pool but
1699          * before they are reserved.
1700          */
1701         needed += allocated;
1702         h->resv_huge_pages += delta;
1703         ret = 0;
1704
1705         /* Free the needed pages to the hugetlb pool */
1706         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1707                 if ((--needed) < 0)
1708                         break;
1709                 /*
1710                  * This page is now managed by the hugetlb allocator and has
1711                  * no users -- drop the buddy allocator's reference.
1712                  */
1713                 put_page_testzero(page);
1714                 VM_BUG_ON_PAGE(page_count(page), page);
1715                 enqueue_huge_page(h, page);
1716         }
1717 free:
1718         spin_unlock(&hugetlb_lock);
1719
1720         /* Free unnecessary surplus pages to the buddy allocator */
1721         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1722                 put_page(page);
1723         spin_lock(&hugetlb_lock);
1724
1725         return ret;
1726 }
1727
1728 /*
1729  * When releasing a hugetlb pool reservation, any surplus pages that were
1730  * allocated to satisfy the reservation must be explicitly freed if they were
1731  * never used.
1732  * Called with hugetlb_lock held.
1733  */
1734 static void return_unused_surplus_pages(struct hstate *h,
1735                                         unsigned long unused_resv_pages)
1736 {
1737         unsigned long nr_pages;
1738
1739         /* Uncommit the reservation */
1740         h->resv_huge_pages -= unused_resv_pages;
1741
1742         /* Cannot return gigantic pages currently */
1743         if (hstate_is_gigantic(h))
1744                 return;
1745
1746         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1747
1748         /*
1749          * We want to release as many surplus pages as possible, spread
1750          * evenly across all nodes with memory. Iterate across these nodes
1751          * until we can no longer free unreserved surplus pages. This occurs
1752          * when the nodes with surplus pages have no free pages.
1753          * free_pool_huge_page() will balance the the freed pages across the
1754          * on-line nodes with memory and will handle the hstate accounting.
1755          */
1756         while (nr_pages--) {
1757                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1758                         break;
1759                 cond_resched_lock(&hugetlb_lock);
1760         }
1761 }
1762
1763
1764 /*
1765  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1766  * are used by the huge page allocation routines to manage reservations.
1767  *
1768  * vma_needs_reservation is called to determine if the huge page at addr
1769  * within the vma has an associated reservation.  If a reservation is
1770  * needed, the value 1 is returned.  The caller is then responsible for
1771  * managing the global reservation and subpool usage counts.  After
1772  * the huge page has been allocated, vma_commit_reservation is called
1773  * to add the page to the reservation map.  If the page allocation fails,
1774  * the reservation must be ended instead of committed.  vma_end_reservation
1775  * is called in such cases.
1776  *
1777  * In the normal case, vma_commit_reservation returns the same value
1778  * as the preceding vma_needs_reservation call.  The only time this
1779  * is not the case is if a reserve map was changed between calls.  It
1780  * is the responsibility of the caller to notice the difference and
1781  * take appropriate action.
1782  */
1783 enum vma_resv_mode {
1784         VMA_NEEDS_RESV,
1785         VMA_COMMIT_RESV,
1786         VMA_END_RESV,
1787 };
1788 static long __vma_reservation_common(struct hstate *h,
1789                                 struct vm_area_struct *vma, unsigned long addr,
1790                                 enum vma_resv_mode mode)
1791 {
1792         struct resv_map *resv;
1793         pgoff_t idx;
1794         long ret;
1795
1796         resv = vma_resv_map(vma);
1797         if (!resv)
1798                 return 1;
1799
1800         idx = vma_hugecache_offset(h, vma, addr);
1801         switch (mode) {
1802         case VMA_NEEDS_RESV:
1803                 ret = region_chg(resv, idx, idx + 1);
1804                 break;
1805         case VMA_COMMIT_RESV:
1806                 ret = region_add(resv, idx, idx + 1);
1807                 break;
1808         case VMA_END_RESV:
1809                 region_abort(resv, idx, idx + 1);
1810                 ret = 0;
1811                 break;
1812         default:
1813                 BUG();
1814         }
1815
1816         if (vma->vm_flags & VM_MAYSHARE)
1817                 return ret;
1818         else
1819                 return ret < 0 ? ret : 0;
1820 }
1821
1822 static long vma_needs_reservation(struct hstate *h,
1823                         struct vm_area_struct *vma, unsigned long addr)
1824 {
1825         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1826 }
1827
1828 static long vma_commit_reservation(struct hstate *h,
1829                         struct vm_area_struct *vma, unsigned long addr)
1830 {
1831         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1832 }
1833
1834 static void vma_end_reservation(struct hstate *h,
1835                         struct vm_area_struct *vma, unsigned long addr)
1836 {
1837         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1838 }
1839
1840 struct page *alloc_huge_page(struct vm_area_struct *vma,
1841                                     unsigned long addr, int avoid_reserve)
1842 {
1843         struct hugepage_subpool *spool = subpool_vma(vma);
1844         struct hstate *h = hstate_vma(vma);
1845         struct page *page;
1846         long map_chg, map_commit;
1847         long gbl_chg;
1848         int ret, idx;
1849         struct hugetlb_cgroup *h_cg;
1850
1851         idx = hstate_index(h);
1852         /*
1853          * Examine the region/reserve map to determine if the process
1854          * has a reservation for the page to be allocated.  A return
1855          * code of zero indicates a reservation exists (no change).
1856          */
1857         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1858         if (map_chg < 0)
1859                 return ERR_PTR(-ENOMEM);
1860
1861         /*
1862          * Processes that did not create the mapping will have no
1863          * reserves as indicated by the region/reserve map. Check
1864          * that the allocation will not exceed the subpool limit.
1865          * Allocations for MAP_NORESERVE mappings also need to be
1866          * checked against any subpool limit.
1867          */
1868         if (map_chg || avoid_reserve) {
1869                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
1870                 if (gbl_chg < 0) {
1871                         vma_end_reservation(h, vma, addr);
1872                         return ERR_PTR(-ENOSPC);
1873                 }
1874
1875                 /*
1876                  * Even though there was no reservation in the region/reserve
1877                  * map, there could be reservations associated with the
1878                  * subpool that can be used.  This would be indicated if the
1879                  * return value of hugepage_subpool_get_pages() is zero.
1880                  * However, if avoid_reserve is specified we still avoid even
1881                  * the subpool reservations.
1882                  */
1883                 if (avoid_reserve)
1884                         gbl_chg = 1;
1885         }
1886
1887         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1888         if (ret)
1889                 goto out_subpool_put;
1890
1891         spin_lock(&hugetlb_lock);
1892         /*
1893          * glb_chg is passed to indicate whether or not a page must be taken
1894          * from the global free pool (global change).  gbl_chg == 0 indicates
1895          * a reservation exists for the allocation.
1896          */
1897         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1898         if (!page) {
1899                 spin_unlock(&hugetlb_lock);
1900                 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1901                 if (!page)
1902                         goto out_uncharge_cgroup;
1903                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1904                         SetPagePrivate(page);
1905                         h->resv_huge_pages--;
1906                 }
1907                 spin_lock(&hugetlb_lock);
1908                 list_move(&page->lru, &h->hugepage_activelist);
1909                 /* Fall through */
1910         }
1911         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1912         spin_unlock(&hugetlb_lock);
1913
1914         set_page_private(page, (unsigned long)spool);
1915
1916         map_commit = vma_commit_reservation(h, vma, addr);
1917         if (unlikely(map_chg > map_commit)) {
1918                 /*
1919                  * The page was added to the reservation map between
1920                  * vma_needs_reservation and vma_commit_reservation.
1921                  * This indicates a race with hugetlb_reserve_pages.
1922                  * Adjust for the subpool count incremented above AND
1923                  * in hugetlb_reserve_pages for the same page.  Also,
1924                  * the reservation count added in hugetlb_reserve_pages
1925                  * no longer applies.
1926                  */
1927                 long rsv_adjust;
1928
1929                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1930                 hugetlb_acct_memory(h, -rsv_adjust);
1931         }
1932         return page;
1933
1934 out_uncharge_cgroup:
1935         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1936 out_subpool_put:
1937         if (map_chg || avoid_reserve)
1938                 hugepage_subpool_put_pages(spool, 1);
1939         vma_end_reservation(h, vma, addr);
1940         return ERR_PTR(-ENOSPC);
1941 }
1942
1943 /*
1944  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1945  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1946  * where no ERR_VALUE is expected to be returned.
1947  */
1948 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1949                                 unsigned long addr, int avoid_reserve)
1950 {
1951         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1952         if (IS_ERR(page))
1953                 page = NULL;
1954         return page;
1955 }
1956
1957 int __weak alloc_bootmem_huge_page(struct hstate *h)
1958 {
1959         struct huge_bootmem_page *m;
1960         int nr_nodes, node;
1961
1962         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1963                 void *addr;
1964
1965                 addr = memblock_virt_alloc_try_nid_nopanic(
1966                                 huge_page_size(h), huge_page_size(h),
1967                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1968                 if (addr) {
1969                         /*
1970                          * Use the beginning of the huge page to store the
1971                          * huge_bootmem_page struct (until gather_bootmem
1972                          * puts them into the mem_map).
1973                          */
1974                         m = addr;
1975                         goto found;
1976                 }
1977         }
1978         return 0;
1979
1980 found:
1981         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1982         /* Put them into a private list first because mem_map is not up yet */
1983         list_add(&m->list, &huge_boot_pages);
1984         m->hstate = h;
1985         return 1;
1986 }
1987
1988 static void __init prep_compound_huge_page(struct page *page,
1989                 unsigned int order)
1990 {
1991         if (unlikely(order > (MAX_ORDER - 1)))
1992                 prep_compound_gigantic_page(page, order);
1993         else
1994                 prep_compound_page(page, order);
1995 }
1996
1997 /* Put bootmem huge pages into the standard lists after mem_map is up */
1998 static void __init gather_bootmem_prealloc(void)
1999 {
2000         struct huge_bootmem_page *m;
2001
2002         list_for_each_entry(m, &huge_boot_pages, list) {
2003                 struct hstate *h = m->hstate;
2004                 struct page *page;
2005
2006 #ifdef CONFIG_HIGHMEM
2007                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2008                 memblock_free_late(__pa(m),
2009                                    sizeof(struct huge_bootmem_page));
2010 #else
2011                 page = virt_to_page(m);
2012 #endif
2013                 WARN_ON(page_count(page) != 1);
2014                 prep_compound_huge_page(page, h->order);
2015                 WARN_ON(PageReserved(page));
2016                 prep_new_huge_page(h, page, page_to_nid(page));
2017                 /*
2018                  * If we had gigantic hugepages allocated at boot time, we need
2019                  * to restore the 'stolen' pages to totalram_pages in order to
2020                  * fix confusing memory reports from free(1) and another
2021                  * side-effects, like CommitLimit going negative.
2022                  */
2023                 if (hstate_is_gigantic(h))
2024                         adjust_managed_page_count(page, 1 << h->order);
2025         }
2026 }
2027
2028 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2029 {
2030         unsigned long i;
2031
2032         for (i = 0; i < h->max_huge_pages; ++i) {
2033                 if (hstate_is_gigantic(h)) {
2034                         if (!alloc_bootmem_huge_page(h))
2035                                 break;
2036                 } else if (!alloc_fresh_huge_page(h,
2037                                          &node_states[N_MEMORY]))
2038                         break;
2039         }
2040         h->max_huge_pages = i;
2041 }
2042
2043 static void __init hugetlb_init_hstates(void)
2044 {
2045         struct hstate *h;
2046
2047         for_each_hstate(h) {
2048                 if (minimum_order > huge_page_order(h))
2049                         minimum_order = huge_page_order(h);
2050
2051                 /* oversize hugepages were init'ed in early boot */
2052                 if (!hstate_is_gigantic(h))
2053                         hugetlb_hstate_alloc_pages(h);
2054         }
2055         VM_BUG_ON(minimum_order == UINT_MAX);
2056 }
2057
2058 static char * __init memfmt(char *buf, unsigned long n)
2059 {
2060         if (n >= (1UL << 30))
2061                 sprintf(buf, "%lu GB", n >> 30);
2062         else if (n >= (1UL << 20))
2063                 sprintf(buf, "%lu MB", n >> 20);
2064         else
2065                 sprintf(buf, "%lu KB", n >> 10);
2066         return buf;
2067 }
2068
2069 static void __init report_hugepages(void)
2070 {
2071         struct hstate *h;
2072
2073         for_each_hstate(h) {
2074                 char buf[32];
2075                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2076                         memfmt(buf, huge_page_size(h)),
2077                         h->free_huge_pages);
2078         }
2079 }
2080
2081 #ifdef CONFIG_HIGHMEM
2082 static void try_to_free_low(struct hstate *h, unsigned long count,
2083                                                 nodemask_t *nodes_allowed)
2084 {
2085         int i;
2086
2087         if (hstate_is_gigantic(h))
2088                 return;
2089
2090         for_each_node_mask(i, *nodes_allowed) {
2091                 struct page *page, *next;
2092                 struct list_head *freel = &h->hugepage_freelists[i];
2093                 list_for_each_entry_safe(page, next, freel, lru) {
2094                         if (count >= h->nr_huge_pages)
2095                                 return;
2096                         if (PageHighMem(page))
2097                                 continue;
2098                         list_del(&page->lru);
2099                         update_and_free_page(h, page);
2100                         h->free_huge_pages--;
2101                         h->free_huge_pages_node[page_to_nid(page)]--;
2102                 }
2103         }
2104 }
2105 #else
2106 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2107                                                 nodemask_t *nodes_allowed)
2108 {
2109 }
2110 #endif
2111
2112 /*
2113  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2114  * balanced by operating on them in a round-robin fashion.
2115  * Returns 1 if an adjustment was made.
2116  */
2117 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2118                                 int delta)
2119 {
2120         int nr_nodes, node;
2121
2122         VM_BUG_ON(delta != -1 && delta != 1);
2123
2124         if (delta < 0) {
2125                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2126                         if (h->surplus_huge_pages_node[node])
2127                                 goto found;
2128                 }
2129         } else {
2130                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2131                         if (h->surplus_huge_pages_node[node] <
2132                                         h->nr_huge_pages_node[node])
2133                                 goto found;
2134                 }
2135         }
2136         return 0;
2137
2138 found:
2139         h->surplus_huge_pages += delta;
2140         h->surplus_huge_pages_node[node] += delta;
2141         return 1;
2142 }
2143
2144 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2145 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2146                                                 nodemask_t *nodes_allowed)
2147 {
2148         unsigned long min_count, ret;
2149
2150         if (hstate_is_gigantic(h) && !gigantic_page_supported())
2151                 return h->max_huge_pages;
2152
2153         /*
2154          * Increase the pool size
2155          * First take pages out of surplus state.  Then make up the
2156          * remaining difference by allocating fresh huge pages.
2157          *
2158          * We might race with __alloc_buddy_huge_page() here and be unable
2159          * to convert a surplus huge page to a normal huge page. That is
2160          * not critical, though, it just means the overall size of the
2161          * pool might be one hugepage larger than it needs to be, but
2162          * within all the constraints specified by the sysctls.
2163          */
2164         spin_lock(&hugetlb_lock);
2165         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2166                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2167                         break;
2168         }
2169
2170         while (count > persistent_huge_pages(h)) {
2171                 /*
2172                  * If this allocation races such that we no longer need the
2173                  * page, free_huge_page will handle it by freeing the page
2174                  * and reducing the surplus.
2175                  */
2176                 spin_unlock(&hugetlb_lock);
2177                 if (hstate_is_gigantic(h))
2178                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2179                 else
2180                         ret = alloc_fresh_huge_page(h, nodes_allowed);
2181                 spin_lock(&hugetlb_lock);
2182                 if (!ret)
2183                         goto out;
2184
2185                 /* Bail for signals. Probably ctrl-c from user */
2186                 if (signal_pending(current))
2187                         goto out;
2188         }
2189
2190         /*
2191          * Decrease the pool size
2192          * First return free pages to the buddy allocator (being careful
2193          * to keep enough around to satisfy reservations).  Then place
2194          * pages into surplus state as needed so the pool will shrink
2195          * to the desired size as pages become free.
2196          *
2197          * By placing pages into the surplus state independent of the
2198          * overcommit value, we are allowing the surplus pool size to
2199          * exceed overcommit. There are few sane options here. Since
2200          * __alloc_buddy_huge_page() is checking the global counter,
2201          * though, we'll note that we're not allowed to exceed surplus
2202          * and won't grow the pool anywhere else. Not until one of the
2203          * sysctls are changed, or the surplus pages go out of use.
2204          */
2205         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2206         min_count = max(count, min_count);
2207         try_to_free_low(h, min_count, nodes_allowed);
2208         while (min_count < persistent_huge_pages(h)) {
2209                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2210                         break;
2211                 cond_resched_lock(&hugetlb_lock);
2212         }
2213         while (count < persistent_huge_pages(h)) {
2214                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2215                         break;
2216         }
2217 out:
2218         ret = persistent_huge_pages(h);
2219         spin_unlock(&hugetlb_lock);
2220         return ret;
2221 }
2222
2223 #define HSTATE_ATTR_RO(_name) \
2224         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2225
2226 #define HSTATE_ATTR(_name) \
2227         static struct kobj_attribute _name##_attr = \
2228                 __ATTR(_name, 0644, _name##_show, _name##_store)
2229
2230 static struct kobject *hugepages_kobj;
2231 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2232
2233 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2234
2235 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2236 {
2237         int i;
2238
2239         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2240                 if (hstate_kobjs[i] == kobj) {
2241                         if (nidp)
2242                                 *nidp = NUMA_NO_NODE;
2243                         return &hstates[i];
2244                 }
2245
2246         return kobj_to_node_hstate(kobj, nidp);
2247 }
2248
2249 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2250                                         struct kobj_attribute *attr, char *buf)
2251 {
2252         struct hstate *h;
2253         unsigned long nr_huge_pages;
2254         int nid;
2255
2256         h = kobj_to_hstate(kobj, &nid);
2257         if (nid == NUMA_NO_NODE)
2258                 nr_huge_pages = h->nr_huge_pages;
2259         else
2260                 nr_huge_pages = h->nr_huge_pages_node[nid];
2261
2262         return sprintf(buf, "%lu\n", nr_huge_pages);
2263 }
2264
2265 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2266                                            struct hstate *h, int nid,
2267                                            unsigned long count, size_t len)
2268 {
2269         int err;
2270         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2271
2272         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2273                 err = -EINVAL;
2274                 goto out;
2275         }
2276
2277         if (nid == NUMA_NO_NODE) {
2278                 /*
2279                  * global hstate attribute
2280                  */
2281                 if (!(obey_mempolicy &&
2282                                 init_nodemask_of_mempolicy(nodes_allowed))) {
2283                         NODEMASK_FREE(nodes_allowed);
2284                         nodes_allowed = &node_states[N_MEMORY];
2285                 }
2286         } else if (nodes_allowed) {
2287                 /*
2288                  * per node hstate attribute: adjust count to global,
2289                  * but restrict alloc/free to the specified node.
2290                  */
2291                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2292                 init_nodemask_of_node(nodes_allowed, nid);
2293         } else
2294                 nodes_allowed = &node_states[N_MEMORY];
2295
2296         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2297
2298         if (nodes_allowed != &node_states[N_MEMORY])
2299                 NODEMASK_FREE(nodes_allowed);
2300
2301         return len;
2302 out:
2303         NODEMASK_FREE(nodes_allowed);
2304         return err;
2305 }
2306
2307 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2308                                          struct kobject *kobj, const char *buf,
2309                                          size_t len)
2310 {
2311         struct hstate *h;
2312         unsigned long count;
2313         int nid;
2314         int err;
2315
2316         err = kstrtoul(buf, 10, &count);
2317         if (err)
2318                 return err;
2319
2320         h = kobj_to_hstate(kobj, &nid);
2321         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2322 }
2323
2324 static ssize_t nr_hugepages_show(struct kobject *kobj,
2325                                        struct kobj_attribute *attr, char *buf)
2326 {
2327         return nr_hugepages_show_common(kobj, attr, buf);
2328 }
2329
2330 static ssize_t nr_hugepages_store(struct kobject *kobj,
2331                struct kobj_attribute *attr, const char *buf, size_t len)
2332 {
2333         return nr_hugepages_store_common(false, kobj, buf, len);
2334 }
2335 HSTATE_ATTR(nr_hugepages);
2336
2337 #ifdef CONFIG_NUMA
2338
2339 /*
2340  * hstate attribute for optionally mempolicy-based constraint on persistent
2341  * huge page alloc/free.
2342  */
2343 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2344                                        struct kobj_attribute *attr, char *buf)
2345 {
2346         return nr_hugepages_show_common(kobj, attr, buf);
2347 }
2348
2349 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2350                struct kobj_attribute *attr, const char *buf, size_t len)
2351 {
2352         return nr_hugepages_store_common(true, kobj, buf, len);
2353 }
2354 HSTATE_ATTR(nr_hugepages_mempolicy);
2355 #endif
2356
2357
2358 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2359                                         struct kobj_attribute *attr, char *buf)
2360 {
2361         struct hstate *h = kobj_to_hstate(kobj, NULL);
2362         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2363 }
2364
2365 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2366                 struct kobj_attribute *attr, const char *buf, size_t count)
2367 {
2368         int err;
2369         unsigned long input;
2370         struct hstate *h = kobj_to_hstate(kobj, NULL);
2371
2372         if (hstate_is_gigantic(h))
2373                 return -EINVAL;
2374
2375         err = kstrtoul(buf, 10, &input);
2376         if (err)
2377                 return err;
2378
2379         spin_lock(&hugetlb_lock);
2380         h->nr_overcommit_huge_pages = input;
2381         spin_unlock(&hugetlb_lock);
2382
2383         return count;
2384 }
2385 HSTATE_ATTR(nr_overcommit_hugepages);
2386
2387 static ssize_t free_hugepages_show(struct kobject *kobj,
2388                                         struct kobj_attribute *attr, char *buf)
2389 {
2390         struct hstate *h;
2391         unsigned long free_huge_pages;
2392         int nid;
2393
2394         h = kobj_to_hstate(kobj, &nid);
2395         if (nid == NUMA_NO_NODE)
2396                 free_huge_pages = h->free_huge_pages;
2397         else
2398                 free_huge_pages = h->free_huge_pages_node[nid];
2399
2400         return sprintf(buf, "%lu\n", free_huge_pages);
2401 }
2402 HSTATE_ATTR_RO(free_hugepages);
2403
2404 static ssize_t resv_hugepages_show(struct kobject *kobj,
2405                                         struct kobj_attribute *attr, char *buf)
2406 {
2407         struct hstate *h = kobj_to_hstate(kobj, NULL);
2408         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2409 }
2410 HSTATE_ATTR_RO(resv_hugepages);
2411
2412 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2413                                         struct kobj_attribute *attr, char *buf)
2414 {
2415         struct hstate *h;
2416         unsigned long surplus_huge_pages;
2417         int nid;
2418
2419         h = kobj_to_hstate(kobj, &nid);
2420         if (nid == NUMA_NO_NODE)
2421                 surplus_huge_pages = h->surplus_huge_pages;
2422         else
2423                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2424
2425         return sprintf(buf, "%lu\n", surplus_huge_pages);
2426 }
2427 HSTATE_ATTR_RO(surplus_hugepages);
2428
2429 static struct attribute *hstate_attrs[] = {
2430         &nr_hugepages_attr.attr,
2431         &nr_overcommit_hugepages_attr.attr,
2432         &free_hugepages_attr.attr,
2433         &resv_hugepages_attr.attr,
2434         &surplus_hugepages_attr.attr,
2435 #ifdef CONFIG_NUMA
2436         &nr_hugepages_mempolicy_attr.attr,
2437 #endif
2438         NULL,
2439 };
2440
2441 static struct attribute_group hstate_attr_group = {
2442         .attrs = hstate_attrs,
2443 };
2444
2445 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2446                                     struct kobject **hstate_kobjs,
2447                                     struct attribute_group *hstate_attr_group)
2448 {
2449         int retval;
2450         int hi = hstate_index(h);
2451
2452         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2453         if (!hstate_kobjs[hi])
2454                 return -ENOMEM;
2455
2456         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2457         if (retval)
2458                 kobject_put(hstate_kobjs[hi]);
2459
2460         return retval;
2461 }
2462
2463 static void __init hugetlb_sysfs_init(void)
2464 {
2465         struct hstate *h;
2466         int err;
2467
2468         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2469         if (!hugepages_kobj)
2470                 return;
2471
2472         for_each_hstate(h) {
2473                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2474                                          hstate_kobjs, &hstate_attr_group);
2475                 if (err)
2476                         pr_err("Hugetlb: Unable to add hstate %s", h->name);
2477         }
2478 }
2479
2480 #ifdef CONFIG_NUMA
2481
2482 /*
2483  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2484  * with node devices in node_devices[] using a parallel array.  The array
2485  * index of a node device or _hstate == node id.
2486  * This is here to avoid any static dependency of the node device driver, in
2487  * the base kernel, on the hugetlb module.
2488  */
2489 struct node_hstate {
2490         struct kobject          *hugepages_kobj;
2491         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2492 };
2493 static struct node_hstate node_hstates[MAX_NUMNODES];
2494
2495 /*
2496  * A subset of global hstate attributes for node devices
2497  */
2498 static struct attribute *per_node_hstate_attrs[] = {
2499         &nr_hugepages_attr.attr,
2500         &free_hugepages_attr.attr,
2501         &surplus_hugepages_attr.attr,
2502         NULL,
2503 };
2504
2505 static struct attribute_group per_node_hstate_attr_group = {
2506         .attrs = per_node_hstate_attrs,
2507 };
2508
2509 /*
2510  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2511  * Returns node id via non-NULL nidp.
2512  */
2513 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2514 {
2515         int nid;
2516
2517         for (nid = 0; nid < nr_node_ids; nid++) {
2518                 struct node_hstate *nhs = &node_hstates[nid];
2519                 int i;
2520                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2521                         if (nhs->hstate_kobjs[i] == kobj) {
2522                                 if (nidp)
2523                                         *nidp = nid;
2524                                 return &hstates[i];
2525                         }
2526         }
2527
2528         BUG();
2529         return NULL;
2530 }
2531
2532 /*
2533  * Unregister hstate attributes from a single node device.
2534  * No-op if no hstate attributes attached.
2535  */
2536 static void hugetlb_unregister_node(struct node *node)
2537 {
2538         struct hstate *h;
2539         struct node_hstate *nhs = &node_hstates[node->dev.id];
2540
2541         if (!nhs->hugepages_kobj)
2542                 return;         /* no hstate attributes */
2543
2544         for_each_hstate(h) {
2545                 int idx = hstate_index(h);
2546                 if (nhs->hstate_kobjs[idx]) {
2547                         kobject_put(nhs->hstate_kobjs[idx]);
2548                         nhs->hstate_kobjs[idx] = NULL;
2549                 }
2550         }
2551
2552         kobject_put(nhs->hugepages_kobj);
2553         nhs->hugepages_kobj = NULL;
2554 }
2555
2556
2557 /*
2558  * Register hstate attributes for a single node device.
2559  * No-op if attributes already registered.
2560  */
2561 static void hugetlb_register_node(struct node *node)
2562 {
2563         struct hstate *h;
2564         struct node_hstate *nhs = &node_hstates[node->dev.id];
2565         int err;
2566
2567         if (nhs->hugepages_kobj)
2568                 return;         /* already allocated */
2569
2570         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2571                                                         &node->dev.kobj);
2572         if (!nhs->hugepages_kobj)
2573                 return;
2574
2575         for_each_hstate(h) {
2576                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2577                                                 nhs->hstate_kobjs,
2578                                                 &per_node_hstate_attr_group);
2579                 if (err) {
2580                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2581                                 h->name, node->dev.id);
2582                         hugetlb_unregister_node(node);
2583                         break;
2584                 }
2585         }
2586 }
2587
2588 /*
2589  * hugetlb init time:  register hstate attributes for all registered node
2590  * devices of nodes that have memory.  All on-line nodes should have
2591  * registered their associated device by this time.
2592  */
2593 static void __init hugetlb_register_all_nodes(void)
2594 {
2595         int nid;
2596
2597         for_each_node_state(nid, N_MEMORY) {
2598                 struct node *node = node_devices[nid];
2599                 if (node->dev.id == nid)
2600                         hugetlb_register_node(node);
2601         }
2602
2603         /*
2604          * Let the node device driver know we're here so it can
2605          * [un]register hstate attributes on node hotplug.
2606          */
2607         register_hugetlbfs_with_node(hugetlb_register_node,
2608                                      hugetlb_unregister_node);
2609 }
2610 #else   /* !CONFIG_NUMA */
2611
2612 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2613 {
2614         BUG();
2615         if (nidp)
2616                 *nidp = -1;
2617         return NULL;
2618 }
2619
2620 static void hugetlb_register_all_nodes(void) { }
2621
2622 #endif
2623
2624 static int __init hugetlb_init(void)
2625 {
2626         int i;
2627
2628         if (!hugepages_supported())
2629                 return 0;
2630
2631         if (!size_to_hstate(default_hstate_size)) {
2632                 default_hstate_size = HPAGE_SIZE;
2633                 if (!size_to_hstate(default_hstate_size))
2634                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2635         }
2636         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2637         if (default_hstate_max_huge_pages) {
2638                 if (!default_hstate.max_huge_pages)
2639                         default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2640         }
2641
2642         hugetlb_init_hstates();
2643         gather_bootmem_prealloc();
2644         report_hugepages();
2645
2646         hugetlb_sysfs_init();
2647         hugetlb_register_all_nodes();
2648         hugetlb_cgroup_file_init();
2649
2650 #ifdef CONFIG_SMP
2651         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2652 #else
2653         num_fault_mutexes = 1;
2654 #endif
2655         hugetlb_fault_mutex_table =
2656                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2657         BUG_ON(!hugetlb_fault_mutex_table);
2658
2659         for (i = 0; i < num_fault_mutexes; i++)
2660                 mutex_init(&hugetlb_fault_mutex_table[i]);
2661         return 0;
2662 }
2663 subsys_initcall(hugetlb_init);
2664
2665 /* Should be called on processing a hugepagesz=... option */
2666 void __init hugetlb_bad_size(void)
2667 {
2668         parsed_valid_hugepagesz = false;
2669 }
2670
2671 void __init hugetlb_add_hstate(unsigned int order)
2672 {
2673         struct hstate *h;
2674         unsigned long i;
2675
2676         if (size_to_hstate(PAGE_SIZE << order)) {
2677                 pr_warn("hugepagesz= specified twice, ignoring\n");
2678                 return;
2679         }
2680         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2681         BUG_ON(order == 0);
2682         h = &hstates[hugetlb_max_hstate++];
2683         h->order = order;
2684         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2685         h->nr_huge_pages = 0;
2686         h->free_huge_pages = 0;
2687         for (i = 0; i < MAX_NUMNODES; ++i)
2688                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2689         INIT_LIST_HEAD(&h->hugepage_activelist);
2690         h->next_nid_to_alloc = first_memory_node;
2691         h->next_nid_to_free = first_memory_node;
2692         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2693                                         huge_page_size(h)/1024);
2694
2695         parsed_hstate = h;
2696 }
2697
2698 static int __init hugetlb_nrpages_setup(char *s)
2699 {
2700         unsigned long *mhp;
2701         static unsigned long *last_mhp;
2702
2703         if (!parsed_valid_hugepagesz) {
2704                 pr_warn("hugepages = %s preceded by "
2705                         "an unsupported hugepagesz, ignoring\n", s);
2706                 parsed_valid_hugepagesz = true;
2707                 return 1;
2708         }
2709         /*
2710          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2711          * so this hugepages= parameter goes to the "default hstate".
2712          */
2713         else if (!hugetlb_max_hstate)
2714                 mhp = &default_hstate_max_huge_pages;
2715         else
2716                 mhp = &parsed_hstate->max_huge_pages;
2717
2718         if (mhp == last_mhp) {
2719                 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2720                 return 1;
2721         }
2722
2723         if (sscanf(s, "%lu", mhp) <= 0)
2724                 *mhp = 0;
2725
2726         /*
2727          * Global state is always initialized later in hugetlb_init.
2728          * But we need to allocate >= MAX_ORDER hstates here early to still
2729          * use the bootmem allocator.
2730          */
2731         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2732                 hugetlb_hstate_alloc_pages(parsed_hstate);
2733
2734         last_mhp = mhp;
2735
2736         return 1;
2737 }
2738 __setup("hugepages=", hugetlb_nrpages_setup);
2739
2740 static int __init hugetlb_default_setup(char *s)
2741 {
2742         default_hstate_size = memparse(s, &s);
2743         return 1;
2744 }
2745 __setup("default_hugepagesz=", hugetlb_default_setup);
2746
2747 static unsigned int cpuset_mems_nr(unsigned int *array)
2748 {
2749         int node;
2750         unsigned int nr = 0;
2751
2752         for_each_node_mask(node, cpuset_current_mems_allowed)
2753                 nr += array[node];
2754
2755         return nr;
2756 }
2757
2758 #ifdef CONFIG_SYSCTL
2759 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2760                          struct ctl_table *table, int write,
2761                          void __user *buffer, size_t *length, loff_t *ppos)
2762 {
2763         struct hstate *h = &default_hstate;
2764         unsigned long tmp = h->max_huge_pages;
2765         int ret;
2766
2767         if (!hugepages_supported())
2768                 return -EOPNOTSUPP;
2769
2770         table->data = &tmp;
2771         table->maxlen = sizeof(unsigned long);
2772         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2773         if (ret)
2774                 goto out;
2775
2776         if (write)
2777                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2778                                                   NUMA_NO_NODE, tmp, *length);
2779 out:
2780         return ret;
2781 }
2782
2783 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2784                           void __user *buffer, size_t *length, loff_t *ppos)
2785 {
2786
2787         return hugetlb_sysctl_handler_common(false, table, write,
2788                                                         buffer, length, ppos);
2789 }
2790
2791 #ifdef CONFIG_NUMA
2792 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2793                           void __user *buffer, size_t *length, loff_t *ppos)
2794 {
2795         return hugetlb_sysctl_handler_common(true, table, write,
2796                                                         buffer, length, ppos);
2797 }
2798 #endif /* CONFIG_NUMA */
2799
2800 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2801                         void __user *buffer,
2802                         size_t *length, loff_t *ppos)
2803 {
2804         struct hstate *h = &default_hstate;
2805         unsigned long tmp;
2806         int ret;
2807
2808         if (!hugepages_supported())
2809                 return -EOPNOTSUPP;
2810
2811         tmp = h->nr_overcommit_huge_pages;
2812
2813         if (write && hstate_is_gigantic(h))
2814                 return -EINVAL;
2815
2816         table->data = &tmp;
2817         table->maxlen = sizeof(unsigned long);
2818         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2819         if (ret)
2820                 goto out;
2821
2822         if (write) {
2823                 spin_lock(&hugetlb_lock);
2824                 h->nr_overcommit_huge_pages = tmp;
2825                 spin_unlock(&hugetlb_lock);
2826         }
2827 out:
2828         return ret;
2829 }
2830
2831 #endif /* CONFIG_SYSCTL */
2832
2833 void hugetlb_report_meminfo(struct seq_file *m)
2834 {
2835         struct hstate *h = &default_hstate;
2836         if (!hugepages_supported())
2837                 return;
2838         seq_printf(m,
2839                         "HugePages_Total:   %5lu\n"
2840                         "HugePages_Free:    %5lu\n"
2841                         "HugePages_Rsvd:    %5lu\n"
2842                         "HugePages_Surp:    %5lu\n"
2843                         "Hugepagesize:   %8lu kB\n",
2844                         h->nr_huge_pages,
2845                         h->free_huge_pages,
2846                         h->resv_huge_pages,
2847                         h->surplus_huge_pages,
2848                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2849 }
2850
2851 int hugetlb_report_node_meminfo(int nid, char *buf)
2852 {
2853         struct hstate *h = &default_hstate;
2854         if (!hugepages_supported())
2855                 return 0;
2856         return sprintf(buf,
2857                 "Node %d HugePages_Total: %5u\n"
2858                 "Node %d HugePages_Free:  %5u\n"
2859                 "Node %d HugePages_Surp:  %5u\n",
2860                 nid, h->nr_huge_pages_node[nid],
2861                 nid, h->free_huge_pages_node[nid],
2862                 nid, h->surplus_huge_pages_node[nid]);
2863 }
2864
2865 void hugetlb_show_meminfo(void)
2866 {
2867         struct hstate *h;
2868         int nid;
2869
2870         if (!hugepages_supported())
2871                 return;
2872
2873         for_each_node_state(nid, N_MEMORY)
2874                 for_each_hstate(h)
2875                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2876                                 nid,
2877                                 h->nr_huge_pages_node[nid],
2878                                 h->free_huge_pages_node[nid],
2879                                 h->surplus_huge_pages_node[nid],
2880                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2881 }
2882
2883 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2884 {
2885         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2886                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
2887 }
2888
2889 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
2890 unsigned long hugetlb_total_pages(void)
2891 {
2892         struct hstate *h;
2893         unsigned long nr_total_pages = 0;
2894
2895         for_each_hstate(h)
2896                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2897         return nr_total_pages;
2898 }
2899
2900 static int hugetlb_acct_memory(struct hstate *h, long delta)
2901 {
2902         int ret = -ENOMEM;
2903
2904         spin_lock(&hugetlb_lock);
2905         /*
2906          * When cpuset is configured, it breaks the strict hugetlb page
2907          * reservation as the accounting is done on a global variable. Such
2908          * reservation is completely rubbish in the presence of cpuset because
2909          * the reservation is not checked against page availability for the
2910          * current cpuset. Application can still potentially OOM'ed by kernel
2911          * with lack of free htlb page in cpuset that the task is in.
2912          * Attempt to enforce strict accounting with cpuset is almost
2913          * impossible (or too ugly) because cpuset is too fluid that
2914          * task or memory node can be dynamically moved between cpusets.
2915          *
2916          * The change of semantics for shared hugetlb mapping with cpuset is
2917          * undesirable. However, in order to preserve some of the semantics,
2918          * we fall back to check against current free page availability as
2919          * a best attempt and hopefully to minimize the impact of changing
2920          * semantics that cpuset has.
2921          */
2922         if (delta > 0) {
2923                 if (gather_surplus_pages(h, delta) < 0)
2924                         goto out;
2925
2926                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2927                         return_unused_surplus_pages(h, delta);
2928                         goto out;
2929                 }
2930         }
2931
2932         ret = 0;
2933         if (delta < 0)
2934                 return_unused_surplus_pages(h, (unsigned long) -delta);
2935
2936 out:
2937         spin_unlock(&hugetlb_lock);
2938         return ret;
2939 }
2940
2941 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2942 {
2943         struct resv_map *resv = vma_resv_map(vma);
2944
2945         /*
2946          * This new VMA should share its siblings reservation map if present.
2947          * The VMA will only ever have a valid reservation map pointer where
2948          * it is being copied for another still existing VMA.  As that VMA
2949          * has a reference to the reservation map it cannot disappear until
2950          * after this open call completes.  It is therefore safe to take a
2951          * new reference here without additional locking.
2952          */
2953         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2954                 kref_get(&resv->refs);
2955 }
2956
2957 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2958 {
2959         struct hstate *h = hstate_vma(vma);
2960         struct resv_map *resv = vma_resv_map(vma);
2961         struct hugepage_subpool *spool = subpool_vma(vma);
2962         unsigned long reserve, start, end;
2963         long gbl_reserve;
2964
2965         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2966                 return;
2967
2968         start = vma_hugecache_offset(h, vma, vma->vm_start);
2969         end = vma_hugecache_offset(h, vma, vma->vm_end);
2970
2971         reserve = (end - start) - region_count(resv, start, end);
2972
2973         kref_put(&resv->refs, resv_map_release);
2974
2975         if (reserve) {
2976                 /*
2977                  * Decrement reserve counts.  The global reserve count may be
2978                  * adjusted if the subpool has a minimum size.
2979                  */
2980                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2981                 hugetlb_acct_memory(h, -gbl_reserve);
2982         }
2983 }
2984
2985 /*
2986  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2987  * handle_mm_fault() to try to instantiate regular-sized pages in the
2988  * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
2989  * this far.
2990  */
2991 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2992 {
2993         BUG();
2994         return 0;
2995 }
2996
2997 const struct vm_operations_struct hugetlb_vm_ops = {
2998         .fault = hugetlb_vm_op_fault,
2999         .open = hugetlb_vm_op_open,
3000         .close = hugetlb_vm_op_close,
3001 };
3002
3003 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3004                                 int writable)
3005 {
3006         pte_t entry;
3007
3008         if (writable) {
3009                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3010                                          vma->vm_page_prot)));
3011         } else {
3012                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3013                                            vma->vm_page_prot));
3014         }
3015         entry = pte_mkyoung(entry);
3016         entry = pte_mkhuge(entry);
3017         entry = arch_make_huge_pte(entry, vma, page, writable);
3018
3019         return entry;
3020 }
3021
3022 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3023                                    unsigned long address, pte_t *ptep)
3024 {
3025         pte_t entry;
3026
3027         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3028         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3029                 update_mmu_cache(vma, address, ptep);
3030 }
3031
3032 static int is_hugetlb_entry_migration(pte_t pte)
3033 {
3034         swp_entry_t swp;
3035
3036         if (huge_pte_none(pte) || pte_present(pte))
3037                 return 0;
3038         swp = pte_to_swp_entry(pte);
3039         if (non_swap_entry(swp) && is_migration_entry(swp))
3040                 return 1;
3041         else
3042                 return 0;
3043 }
3044
3045 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3046 {
3047         swp_entry_t swp;
3048
3049         if (huge_pte_none(pte) || pte_present(pte))
3050                 return 0;
3051         swp = pte_to_swp_entry(pte);
3052         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3053                 return 1;
3054         else
3055                 return 0;
3056 }
3057
3058 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3059                             struct vm_area_struct *vma)
3060 {
3061         pte_t *src_pte, *dst_pte, entry;
3062         struct page *ptepage;
3063         unsigned long addr;
3064         int cow;
3065         struct hstate *h = hstate_vma(vma);
3066         unsigned long sz = huge_page_size(h);
3067         unsigned long mmun_start;       /* For mmu_notifiers */
3068         unsigned long mmun_end;         /* For mmu_notifiers */
3069         int ret = 0;
3070
3071         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3072
3073         mmun_start = vma->vm_start;
3074         mmun_end = vma->vm_end;
3075         if (cow)
3076                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3077
3078         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3079                 spinlock_t *src_ptl, *dst_ptl;
3080                 src_pte = huge_pte_offset(src, addr);
3081                 if (!src_pte)
3082                         continue;
3083                 dst_pte = huge_pte_alloc(dst, addr, sz);
3084                 if (!dst_pte) {
3085                         ret = -ENOMEM;
3086                         break;
3087                 }
3088
3089                 /* If the pagetables are shared don't copy or take references */
3090                 if (dst_pte == src_pte)
3091                         continue;
3092
3093                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3094                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3095                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3096                 entry = huge_ptep_get(src_pte);
3097                 if (huge_pte_none(entry)) { /* skip none entry */
3098                         ;
3099                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3100                                     is_hugetlb_entry_hwpoisoned(entry))) {
3101                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3102
3103                         if (is_write_migration_entry(swp_entry) && cow) {
3104                                 /*
3105                                  * COW mappings require pages in both
3106                                  * parent and child to be set to read.
3107                                  */
3108                                 make_migration_entry_read(&swp_entry);
3109                                 entry = swp_entry_to_pte(swp_entry);
3110                                 set_huge_pte_at(src, addr, src_pte, entry);
3111                         }
3112                         set_huge_pte_at(dst, addr, dst_pte, entry);
3113                 } else {
3114                         if (cow) {
3115                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3116                                 mmu_notifier_invalidate_range(src, mmun_start,
3117                                                                    mmun_end);
3118                         }
3119                         entry = huge_ptep_get(src_pte);
3120                         ptepage = pte_page(entry);
3121                         get_page(ptepage);
3122                         page_dup_rmap(ptepage, true);
3123                         set_huge_pte_at(dst, addr, dst_pte, entry);
3124                         hugetlb_count_add(pages_per_huge_page(h), dst);
3125                 }
3126                 spin_unlock(src_ptl);
3127                 spin_unlock(dst_ptl);
3128         }
3129
3130         if (cow)
3131                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3132
3133         return ret;
3134 }
3135
3136 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3137                             unsigned long start, unsigned long end,
3138                             struct page *ref_page)
3139 {
3140         int force_flush = 0;
3141         struct mm_struct *mm = vma->vm_mm;
3142         unsigned long address;
3143         pte_t *ptep;
3144         pte_t pte;
3145         spinlock_t *ptl;
3146         struct page *page;
3147         struct hstate *h = hstate_vma(vma);
3148         unsigned long sz = huge_page_size(h);
3149         const unsigned long mmun_start = start; /* For mmu_notifiers */
3150         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
3151
3152         WARN_ON(!is_vm_hugetlb_page(vma));
3153         BUG_ON(start & ~huge_page_mask(h));
3154         BUG_ON(end & ~huge_page_mask(h));
3155
3156         tlb_start_vma(tlb, vma);
3157         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3158         address = start;
3159 again:
3160         for (; address < end; address += sz) {
3161                 ptep = huge_pte_offset(mm, address);
3162                 if (!ptep)
3163                         continue;
3164
3165                 ptl = huge_pte_lock(h, mm, ptep);
3166                 if (huge_pmd_unshare(mm, &address, ptep))
3167                         goto unlock;
3168
3169                 pte = huge_ptep_get(ptep);
3170                 if (huge_pte_none(pte))
3171                         goto unlock;
3172
3173                 /*
3174                  * Migrating hugepage or HWPoisoned hugepage is already
3175                  * unmapped and its refcount is dropped, so just clear pte here.
3176                  */
3177                 if (unlikely(!pte_present(pte))) {
3178                         huge_pte_clear(mm, address, ptep);
3179                         goto unlock;
3180                 }
3181
3182                 page = pte_page(pte);
3183                 /*
3184                  * If a reference page is supplied, it is because a specific
3185                  * page is being unmapped, not a range. Ensure the page we
3186                  * are about to unmap is the actual page of interest.
3187                  */
3188                 if (ref_page) {
3189                         if (page != ref_page)
3190                                 goto unlock;
3191
3192                         /*
3193                          * Mark the VMA as having unmapped its page so that
3194                          * future faults in this VMA will fail rather than
3195                          * looking like data was lost
3196                          */
3197                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3198                 }
3199
3200                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3201                 tlb_remove_tlb_entry(tlb, ptep, address);
3202                 if (huge_pte_dirty(pte))
3203                         set_page_dirty(page);
3204
3205                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3206                 page_remove_rmap(page, true);
3207                 force_flush = !__tlb_remove_page(tlb, page);
3208                 if (force_flush) {
3209                         address += sz;
3210                         spin_unlock(ptl);
3211                         break;
3212                 }
3213                 /* Bail out after unmapping reference page if supplied */
3214                 if (ref_page) {
3215                         spin_unlock(ptl);
3216                         break;
3217                 }
3218 unlock:
3219                 spin_unlock(ptl);
3220         }
3221         /*
3222          * mmu_gather ran out of room to batch pages, we break out of
3223          * the PTE lock to avoid doing the potential expensive TLB invalidate
3224          * and page-free while holding it.
3225          */
3226         if (force_flush) {
3227                 force_flush = 0;
3228                 tlb_flush_mmu(tlb);
3229                 if (address < end && !ref_page)
3230                         goto again;
3231         }
3232         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3233         tlb_end_vma(tlb, vma);
3234 }
3235
3236 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3237                           struct vm_area_struct *vma, unsigned long start,
3238                           unsigned long end, struct page *ref_page)
3239 {
3240         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3241
3242         /*
3243          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3244          * test will fail on a vma being torn down, and not grab a page table
3245          * on its way out.  We're lucky that the flag has such an appropriate
3246          * name, and can in fact be safely cleared here. We could clear it
3247          * before the __unmap_hugepage_range above, but all that's necessary
3248          * is to clear it before releasing the i_mmap_rwsem. This works
3249          * because in the context this is called, the VMA is about to be
3250          * destroyed and the i_mmap_rwsem is held.
3251          */
3252         vma->vm_flags &= ~VM_MAYSHARE;
3253 }
3254
3255 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3256                           unsigned long end, struct page *ref_page)
3257 {
3258         struct mm_struct *mm;
3259         struct mmu_gather tlb;
3260
3261         mm = vma->vm_mm;
3262
3263         tlb_gather_mmu(&tlb, mm, start, end);
3264         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3265         tlb_finish_mmu(&tlb, start, end);
3266 }
3267
3268 /*
3269  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3270  * mappping it owns the reserve page for. The intention is to unmap the page
3271  * from other VMAs and let the children be SIGKILLed if they are faulting the
3272  * same region.
3273  */
3274 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3275                               struct page *page, unsigned long address)
3276 {
3277         struct hstate *h = hstate_vma(vma);
3278         struct vm_area_struct *iter_vma;
3279         struct address_space *mapping;
3280         pgoff_t pgoff;
3281
3282         /*
3283          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3284          * from page cache lookup which is in HPAGE_SIZE units.
3285          */
3286         address = address & huge_page_mask(h);
3287         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3288                         vma->vm_pgoff;
3289         mapping = file_inode(vma->vm_file)->i_mapping;
3290
3291         /*
3292          * Take the mapping lock for the duration of the table walk. As
3293          * this mapping should be shared between all the VMAs,
3294          * __unmap_hugepage_range() is called as the lock is already held
3295          */
3296         i_mmap_lock_write(mapping);
3297         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3298                 /* Do not unmap the current VMA */
3299                 if (iter_vma == vma)
3300                         continue;
3301
3302                 /*
3303                  * Shared VMAs have their own reserves and do not affect
3304                  * MAP_PRIVATE accounting but it is possible that a shared
3305                  * VMA is using the same page so check and skip such VMAs.
3306                  */
3307                 if (iter_vma->vm_flags & VM_MAYSHARE)
3308                         continue;
3309
3310                 /*
3311                  * Unmap the page from other VMAs without their own reserves.
3312                  * They get marked to be SIGKILLed if they fault in these
3313                  * areas. This is because a future no-page fault on this VMA
3314                  * could insert a zeroed page instead of the data existing
3315                  * from the time of fork. This would look like data corruption
3316                  */
3317                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3318                         unmap_hugepage_range(iter_vma, address,
3319                                              address + huge_page_size(h), page);
3320         }
3321         i_mmap_unlock_write(mapping);
3322 }
3323
3324 /*
3325  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3326  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3327  * cannot race with other handlers or page migration.
3328  * Keep the pte_same checks anyway to make transition from the mutex easier.
3329  */
3330 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3331                         unsigned long address, pte_t *ptep, pte_t pte,
3332                         struct page *pagecache_page, spinlock_t *ptl)
3333 {
3334         struct hstate *h = hstate_vma(vma);
3335         struct page *old_page, *new_page;
3336         int ret = 0, outside_reserve = 0;
3337         unsigned long mmun_start;       /* For mmu_notifiers */
3338         unsigned long mmun_end;         /* For mmu_notifiers */
3339
3340         old_page = pte_page(pte);
3341
3342 retry_avoidcopy:
3343         /* If no-one else is actually using this page, avoid the copy
3344          * and just make the page writable */
3345         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3346                 page_move_anon_rmap(old_page, vma, address);
3347                 set_huge_ptep_writable(vma, address, ptep);
3348                 return 0;
3349         }
3350
3351         /*
3352          * If the process that created a MAP_PRIVATE mapping is about to
3353          * perform a COW due to a shared page count, attempt to satisfy
3354          * the allocation without using the existing reserves. The pagecache
3355          * page is used to determine if the reserve at this address was
3356          * consumed or not. If reserves were used, a partial faulted mapping
3357          * at the time of fork() could consume its reserves on COW instead
3358          * of the full address range.
3359          */
3360         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3361                         old_page != pagecache_page)
3362                 outside_reserve = 1;
3363
3364         get_page(old_page);
3365
3366         /*
3367          * Drop page table lock as buddy allocator may be called. It will
3368          * be acquired again before returning to the caller, as expected.
3369          */
3370         spin_unlock(ptl);
3371         new_page = alloc_huge_page(vma, address, outside_reserve);
3372
3373         if (IS_ERR(new_page)) {
3374                 /*
3375                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3376                  * it is due to references held by a child and an insufficient
3377                  * huge page pool. To guarantee the original mappers
3378                  * reliability, unmap the page from child processes. The child
3379                  * may get SIGKILLed if it later faults.
3380                  */
3381                 if (outside_reserve) {
3382                         put_page(old_page);
3383                         BUG_ON(huge_pte_none(pte));
3384                         unmap_ref_private(mm, vma, old_page, address);
3385                         BUG_ON(huge_pte_none(pte));
3386                         spin_lock(ptl);
3387                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3388                         if (likely(ptep &&
3389                                    pte_same(huge_ptep_get(ptep), pte)))
3390                                 goto retry_avoidcopy;
3391                         /*
3392                          * race occurs while re-acquiring page table
3393                          * lock, and our job is done.
3394                          */
3395                         return 0;
3396                 }
3397
3398                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3399                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
3400                 goto out_release_old;
3401         }
3402
3403         /*
3404          * When the original hugepage is shared one, it does not have
3405          * anon_vma prepared.
3406          */
3407         if (unlikely(anon_vma_prepare(vma))) {
3408                 ret = VM_FAULT_OOM;
3409                 goto out_release_all;
3410         }
3411
3412         copy_user_huge_page(new_page, old_page, address, vma,
3413                             pages_per_huge_page(h));
3414         __SetPageUptodate(new_page);
3415         set_page_huge_active(new_page);
3416
3417         mmun_start = address & huge_page_mask(h);
3418         mmun_end = mmun_start + huge_page_size(h);
3419         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3420
3421         /*
3422          * Retake the page table lock to check for racing updates
3423          * before the page tables are altered
3424          */
3425         spin_lock(ptl);
3426         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3427         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3428                 ClearPagePrivate(new_page);
3429
3430                 /* Break COW */
3431                 huge_ptep_clear_flush(vma, address, ptep);
3432                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3433                 set_huge_pte_at(mm, address, ptep,
3434                                 make_huge_pte(vma, new_page, 1));
3435                 page_remove_rmap(old_page, true);
3436                 hugepage_add_new_anon_rmap(new_page, vma, address);
3437                 /* Make the old page be freed below */
3438                 new_page = old_page;
3439         }
3440         spin_unlock(ptl);
3441         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3442 out_release_all:
3443         put_page(new_page);
3444 out_release_old:
3445         put_page(old_page);
3446
3447         spin_lock(ptl); /* Caller expects lock to be held */
3448         return ret;
3449 }
3450
3451 /* Return the pagecache page at a given address within a VMA */
3452 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3453                         struct vm_area_struct *vma, unsigned long address)
3454 {
3455         struct address_space *mapping;
3456         pgoff_t idx;
3457
3458         mapping = vma->vm_file->f_mapping;
3459         idx = vma_hugecache_offset(h, vma, address);
3460
3461         return find_lock_page(mapping, idx);
3462 }
3463
3464 /*
3465  * Return whether there is a pagecache page to back given address within VMA.
3466  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3467  */
3468 static bool hugetlbfs_pagecache_present(struct hstate *h,
3469                         struct vm_area_struct *vma, unsigned long address)
3470 {
3471         struct address_space *mapping;
3472         pgoff_t idx;
3473         struct page *page;
3474
3475         mapping = vma->vm_file->f_mapping;
3476         idx = vma_hugecache_offset(h, vma, address);
3477
3478         page = find_get_page(mapping, idx);
3479         if (page)
3480                 put_page(page);
3481         return page != NULL;
3482 }
3483
3484 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3485                            pgoff_t idx)
3486 {
3487         struct inode *inode = mapping->host;
3488         struct hstate *h = hstate_inode(inode);
3489         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3490
3491         if (err)
3492                 return err;
3493         ClearPagePrivate(page);
3494
3495         spin_lock(&inode->i_lock);
3496         inode->i_blocks += blocks_per_huge_page(h);
3497         spin_unlock(&inode->i_lock);
3498         return 0;
3499 }
3500
3501 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3502                            struct address_space *mapping, pgoff_t idx,
3503                            unsigned long address, pte_t *ptep, unsigned int flags)
3504 {
3505         struct hstate *h = hstate_vma(vma);
3506         int ret = VM_FAULT_SIGBUS;
3507         int anon_rmap = 0;
3508         unsigned long size;
3509         struct page *page;
3510         pte_t new_pte;
3511         spinlock_t *ptl;
3512
3513         /*
3514          * Currently, we are forced to kill the process in the event the
3515          * original mapper has unmapped pages from the child due to a failed
3516          * COW. Warn that such a situation has occurred as it may not be obvious
3517          */
3518         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3519                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3520                            current->pid);
3521                 return ret;
3522         }
3523
3524         /*
3525          * Use page lock to guard against racing truncation
3526          * before we get page_table_lock.
3527          */
3528 retry:
3529         page = find_lock_page(mapping, idx);
3530         if (!page) {
3531                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3532                 if (idx >= size)
3533                         goto out;
3534                 page = alloc_huge_page(vma, address, 0);
3535                 if (IS_ERR(page)) {
3536                         ret = PTR_ERR(page);
3537                         if (ret == -ENOMEM)
3538                                 ret = VM_FAULT_OOM;
3539                         else
3540                                 ret = VM_FAULT_SIGBUS;
3541                         goto out;
3542                 }
3543                 clear_huge_page(page, address, pages_per_huge_page(h));
3544                 __SetPageUptodate(page);
3545                 set_page_huge_active(page);
3546
3547                 if (vma->vm_flags & VM_MAYSHARE) {
3548                         int err = huge_add_to_page_cache(page, mapping, idx);
3549                         if (err) {
3550                                 put_page(page);
3551                                 if (err == -EEXIST)
3552                                         goto retry;
3553                                 goto out;
3554                         }
3555                 } else {
3556                         lock_page(page);
3557                         if (unlikely(anon_vma_prepare(vma))) {
3558                                 ret = VM_FAULT_OOM;
3559                                 goto backout_unlocked;
3560                         }
3561                         anon_rmap = 1;
3562                 }
3563         } else {
3564                 /*
3565                  * If memory error occurs between mmap() and fault, some process
3566                  * don't have hwpoisoned swap entry for errored virtual address.
3567                  * So we need to block hugepage fault by PG_hwpoison bit check.
3568                  */
3569                 if (unlikely(PageHWPoison(page))) {
3570                         ret = VM_FAULT_HWPOISON |
3571                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3572                         goto backout_unlocked;
3573                 }
3574         }
3575
3576         /*
3577          * If we are going to COW a private mapping later, we examine the
3578          * pending reservations for this page now. This will ensure that
3579          * any allocations necessary to record that reservation occur outside
3580          * the spinlock.
3581          */
3582         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3583                 if (vma_needs_reservation(h, vma, address) < 0) {
3584                         ret = VM_FAULT_OOM;
3585                         goto backout_unlocked;
3586                 }
3587                 /* Just decrements count, does not deallocate */
3588                 vma_end_reservation(h, vma, address);
3589         }
3590
3591         ptl = huge_pte_lockptr(h, mm, ptep);
3592         spin_lock(ptl);
3593         size = i_size_read(mapping->host) >> huge_page_shift(h);
3594         if (idx >= size)
3595                 goto backout;
3596
3597         ret = 0;
3598         if (!huge_pte_none(huge_ptep_get(ptep)))
3599                 goto backout;
3600
3601         if (anon_rmap) {
3602                 ClearPagePrivate(page);
3603                 hugepage_add_new_anon_rmap(page, vma, address);
3604         } else
3605                 page_dup_rmap(page, true);
3606         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3607                                 && (vma->vm_flags & VM_SHARED)));
3608         set_huge_pte_at(mm, address, ptep, new_pte);
3609
3610         hugetlb_count_add(pages_per_huge_page(h), mm);
3611         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3612                 /* Optimization, do the COW without a second fault */
3613                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3614         }
3615
3616         spin_unlock(ptl);
3617         unlock_page(page);
3618 out:
3619         return ret;
3620
3621 backout:
3622         spin_unlock(ptl);
3623 backout_unlocked:
3624         unlock_page(page);
3625         put_page(page);
3626         goto out;
3627 }
3628
3629 #ifdef CONFIG_SMP
3630 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3631                             struct vm_area_struct *vma,
3632                             struct address_space *mapping,
3633                             pgoff_t idx, unsigned long address)
3634 {
3635         unsigned long key[2];
3636         u32 hash;
3637
3638         if (vma->vm_flags & VM_SHARED) {
3639                 key[0] = (unsigned long) mapping;
3640                 key[1] = idx;
3641         } else {
3642                 key[0] = (unsigned long) mm;
3643                 key[1] = address >> huge_page_shift(h);
3644         }
3645
3646         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3647
3648         return hash & (num_fault_mutexes - 1);
3649 }
3650 #else
3651 /*
3652  * For uniprocesor systems we always use a single mutex, so just
3653  * return 0 and avoid the hashing overhead.
3654  */
3655 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3656                             struct vm_area_struct *vma,
3657                             struct address_space *mapping,
3658                             pgoff_t idx, unsigned long address)
3659 {
3660         return 0;
3661 }
3662 #endif
3663
3664 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3665                         unsigned long address, unsigned int flags)
3666 {
3667         pte_t *ptep, entry;
3668         spinlock_t *ptl;
3669         int ret;
3670         u32 hash;
3671         pgoff_t idx;
3672         struct page *page = NULL;
3673         struct page *pagecache_page = NULL;
3674         struct hstate *h = hstate_vma(vma);
3675         struct address_space *mapping;
3676         int need_wait_lock = 0;
3677
3678         address &= huge_page_mask(h);
3679
3680         ptep = huge_pte_offset(mm, address);
3681         if (ptep) {
3682                 entry = huge_ptep_get(ptep);
3683                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3684                         migration_entry_wait_huge(vma, mm, ptep);
3685                         return 0;
3686                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3687                         return VM_FAULT_HWPOISON_LARGE |
3688                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3689         } else {
3690                 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3691                 if (!ptep)
3692                         return VM_FAULT_OOM;
3693         }
3694
3695         mapping = vma->vm_file->f_mapping;
3696         idx = vma_hugecache_offset(h, vma, address);
3697
3698         /*
3699          * Serialize hugepage allocation and instantiation, so that we don't
3700          * get spurious allocation failures if two CPUs race to instantiate
3701          * the same page in the page cache.
3702          */
3703         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3704         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3705
3706         entry = huge_ptep_get(ptep);
3707         if (huge_pte_none(entry)) {
3708                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3709                 goto out_mutex;
3710         }
3711
3712         ret = 0;
3713
3714         /*
3715          * entry could be a migration/hwpoison entry at this point, so this
3716          * check prevents the kernel from going below assuming that we have
3717          * a active hugepage in pagecache. This goto expects the 2nd page fault,
3718          * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
3719          * handle it.
3720          */
3721         if (!pte_present(entry))
3722                 goto out_mutex;
3723
3724         /*
3725          * If we are going to COW the mapping later, we examine the pending
3726          * reservations for this page now. This will ensure that any
3727          * allocations necessary to record that reservation occur outside the
3728          * spinlock. For private mappings, we also lookup the pagecache
3729          * page now as it is used to determine if a reservation has been
3730          * consumed.
3731          */
3732         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3733                 if (vma_needs_reservation(h, vma, address) < 0) {
3734                         ret = VM_FAULT_OOM;
3735                         goto out_mutex;
3736                 }
3737                 /* Just decrements count, does not deallocate */
3738                 vma_end_reservation(h, vma, address);
3739
3740                 if (!(vma->vm_flags & VM_MAYSHARE))
3741                         pagecache_page = hugetlbfs_pagecache_page(h,
3742                                                                 vma, address);
3743         }
3744
3745         ptl = huge_pte_lock(h, mm, ptep);
3746
3747         /* Check for a racing update before calling hugetlb_cow */
3748         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3749                 goto out_ptl;
3750
3751         /*
3752          * hugetlb_cow() requires page locks of pte_page(entry) and
3753          * pagecache_page, so here we need take the former one
3754          * when page != pagecache_page or !pagecache_page.
3755          */
3756         page = pte_page(entry);
3757         if (page != pagecache_page)
3758                 if (!trylock_page(page)) {
3759                         need_wait_lock = 1;
3760                         goto out_ptl;
3761                 }
3762
3763         get_page(page);
3764
3765         if (flags & FAULT_FLAG_WRITE) {
3766                 if (!huge_pte_write(entry)) {
3767                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3768                                         pagecache_page, ptl);
3769                         goto out_put_page;
3770                 }
3771                 entry = huge_pte_mkdirty(entry);
3772         }
3773         entry = pte_mkyoung(entry);
3774         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3775                                                 flags & FAULT_FLAG_WRITE))
3776                 update_mmu_cache(vma, address, ptep);
3777 out_put_page:
3778         if (page != pagecache_page)
3779                 unlock_page(page);
3780         put_page(page);
3781 out_ptl:
3782         spin_unlock(ptl);
3783
3784         if (pagecache_page) {
3785                 unlock_page(pagecache_page);
3786                 put_page(pagecache_page);
3787         }
3788 out_mutex:
3789         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3790         /*
3791          * Generally it's safe to hold refcount during waiting page lock. But
3792          * here we just wait to defer the next page fault to avoid busy loop and
3793          * the page is not used after unlocked before returning from the current
3794          * page fault. So we are safe from accessing freed page, even if we wait
3795          * here without taking refcount.
3796          */
3797         if (need_wait_lock)
3798                 wait_on_page_locked(page);
3799         return ret;
3800 }
3801
3802 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3803                          struct page **pages, struct vm_area_struct **vmas,
3804                          unsigned long *position, unsigned long *nr_pages,
3805                          long i, unsigned int flags)
3806 {
3807         unsigned long pfn_offset;
3808         unsigned long vaddr = *position;
3809         unsigned long remainder = *nr_pages;
3810         struct hstate *h = hstate_vma(vma);
3811
3812         while (vaddr < vma->vm_end && remainder) {
3813                 pte_t *pte;
3814                 spinlock_t *ptl = NULL;
3815                 int absent;
3816                 struct page *page;
3817
3818                 /*
3819                  * If we have a pending SIGKILL, don't keep faulting pages and
3820                  * potentially allocating memory.
3821                  */
3822                 if (unlikely(fatal_signal_pending(current))) {
3823                         remainder = 0;
3824                         break;
3825                 }
3826
3827                 /*
3828                  * Some archs (sparc64, sh*) have multiple pte_ts to
3829                  * each hugepage.  We have to make sure we get the
3830                  * first, for the page indexing below to work.
3831                  *
3832                  * Note that page table lock is not held when pte is null.
3833                  */
3834                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3835                 if (pte)
3836                         ptl = huge_pte_lock(h, mm, pte);
3837                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3838
3839                 /*
3840                  * When coredumping, it suits get_dump_page if we just return
3841                  * an error where there's an empty slot with no huge pagecache
3842                  * to back it.  This way, we avoid allocating a hugepage, and
3843                  * the sparse dumpfile avoids allocating disk blocks, but its
3844                  * huge holes still show up with zeroes where they need to be.
3845                  */
3846                 if (absent && (flags & FOLL_DUMP) &&
3847                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3848                         if (pte)
3849                                 spin_unlock(ptl);
3850                         remainder = 0;
3851                         break;
3852                 }
3853
3854                 /*
3855                  * We need call hugetlb_fault for both hugepages under migration
3856                  * (in which case hugetlb_fault waits for the migration,) and
3857                  * hwpoisoned hugepages (in which case we need to prevent the
3858                  * caller from accessing to them.) In order to do this, we use
3859                  * here is_swap_pte instead of is_hugetlb_entry_migration and
3860                  * is_hugetlb_entry_hwpoisoned. This is because it simply covers
3861                  * both cases, and because we can't follow correct pages
3862                  * directly from any kind of swap entries.
3863                  */
3864                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3865                     ((flags & FOLL_WRITE) &&
3866                       !huge_pte_write(huge_ptep_get(pte)))) {
3867                         int ret;
3868
3869                         if (pte)
3870                                 spin_unlock(ptl);
3871                         ret = hugetlb_fault(mm, vma, vaddr,
3872                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3873                         if (!(ret & VM_FAULT_ERROR))
3874                                 continue;
3875
3876                         remainder = 0;
3877                         break;
3878                 }
3879
3880                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3881                 page = pte_page(huge_ptep_get(pte));
3882 same_page:
3883                 if (pages) {
3884                         pages[i] = mem_map_offset(page, pfn_offset);
3885                         get_page(pages[i]);
3886                 }
3887
3888                 if (vmas)
3889                         vmas[i] = vma;
3890
3891                 vaddr += PAGE_SIZE;
3892                 ++pfn_offset;
3893                 --remainder;
3894                 ++i;
3895                 if (vaddr < vma->vm_end && remainder &&
3896                                 pfn_offset < pages_per_huge_page(h)) {
3897                         /*
3898                          * We use pfn_offset to avoid touching the pageframes
3899                          * of this compound page.
3900                          */
3901                         goto same_page;
3902                 }
3903                 spin_unlock(ptl);
3904         }
3905         *nr_pages = remainder;
3906         *position = vaddr;
3907
3908         return i ? i : -EFAULT;
3909 }
3910
3911 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3912                 unsigned long address, unsigned long end, pgprot_t newprot)
3913 {
3914         struct mm_struct *mm = vma->vm_mm;
3915         unsigned long start = address;
3916         pte_t *ptep;
3917         pte_t pte;
3918         struct hstate *h = hstate_vma(vma);
3919         unsigned long pages = 0;
3920
3921         BUG_ON(address >= end);
3922         flush_cache_range(vma, address, end);
3923
3924         mmu_notifier_invalidate_range_start(mm, start, end);
3925         i_mmap_lock_write(vma->vm_file->f_mapping);
3926         for (; address < end; address += huge_page_size(h)) {
3927                 spinlock_t *ptl;
3928                 ptep = huge_pte_offset(mm, address);
3929                 if (!ptep)
3930                         continue;
3931                 ptl = huge_pte_lock(h, mm, ptep);
3932                 if (huge_pmd_unshare(mm, &address, ptep)) {
3933                         pages++;
3934                         spin_unlock(ptl);
3935                         continue;
3936                 }
3937                 pte = huge_ptep_get(ptep);
3938                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3939                         spin_unlock(ptl);
3940                         continue;
3941                 }
3942                 if (unlikely(is_hugetlb_entry_migration(pte))) {
3943                         swp_entry_t entry = pte_to_swp_entry(pte);
3944
3945                         if (is_write_migration_entry(entry)) {
3946                                 pte_t newpte;
3947
3948                                 make_migration_entry_read(&entry);
3949                                 newpte = swp_entry_to_pte(entry);
3950                                 set_huge_pte_at(mm, address, ptep, newpte);
3951                                 pages++;
3952                         }
3953                         spin_unlock(ptl);
3954                         continue;
3955                 }
3956                 if (!huge_pte_none(pte)) {
3957                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3958                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3959                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3960                         set_huge_pte_at(mm, address, ptep, pte);
3961                         pages++;
3962                 }
3963                 spin_unlock(ptl);
3964         }
3965         /*
3966          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3967          * may have cleared our pud entry and done put_page on the page table:
3968          * once we release i_mmap_rwsem, another task can do the final put_page
3969          * and that page table be reused and filled with junk.
3970          */
3971         flush_tlb_range(vma, start, end);
3972         mmu_notifier_invalidate_range(mm, start, end);
3973         i_mmap_unlock_write(vma->vm_file->f_mapping);
3974         mmu_notifier_invalidate_range_end(mm, start, end);
3975
3976         return pages << h->order;
3977 }
3978
3979 int hugetlb_reserve_pages(struct inode *inode,
3980                                         long from, long to,
3981                                         struct vm_area_struct *vma,
3982                                         vm_flags_t vm_flags)
3983 {
3984         long ret, chg;
3985         struct hstate *h = hstate_inode(inode);
3986         struct hugepage_subpool *spool = subpool_inode(inode);
3987         struct resv_map *resv_map;
3988         long gbl_reserve;
3989
3990         /*
3991          * Only apply hugepage reservation if asked. At fault time, an
3992          * attempt will be made for VM_NORESERVE to allocate a page
3993          * without using reserves
3994          */
3995         if (vm_flags & VM_NORESERVE)
3996                 return 0;
3997
3998         /*
3999          * Shared mappings base their reservation on the number of pages that
4000          * are already allocated on behalf of the file. Private mappings need
4001          * to reserve the full area even if read-only as mprotect() may be
4002          * called to make the mapping read-write. Assume !vma is a shm mapping
4003          */
4004         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4005                 resv_map = inode_resv_map(inode);
4006
4007                 chg = region_chg(resv_map, from, to);
4008
4009         } else {
4010                 resv_map = resv_map_alloc();
4011                 if (!resv_map)
4012                         return -ENOMEM;
4013
4014                 chg = to - from;
4015
4016                 set_vma_resv_map(vma, resv_map);
4017                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4018         }
4019
4020         if (chg < 0) {
4021                 ret = chg;
4022                 goto out_err;
4023         }
4024
4025         /*
4026          * There must be enough pages in the subpool for the mapping. If
4027          * the subpool has a minimum size, there may be some global
4028          * reservations already in place (gbl_reserve).
4029          */
4030         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4031         if (gbl_reserve < 0) {
4032                 ret = -ENOSPC;
4033                 goto out_err;
4034         }
4035
4036         /*
4037          * Check enough hugepages are available for the reservation.
4038          * Hand the pages back to the subpool if there are not
4039          */
4040         ret = hugetlb_acct_memory(h, gbl_reserve);
4041         if (ret < 0) {
4042                 /* put back original number of pages, chg */
4043                 (void)hugepage_subpool_put_pages(spool, chg);
4044                 goto out_err;
4045         }
4046
4047         /*
4048          * Account for the reservations made. Shared mappings record regions
4049          * that have reservations as they are shared by multiple VMAs.
4050          * When the last VMA disappears, the region map says how much
4051          * the reservation was and the page cache tells how much of
4052          * the reservation was consumed. Private mappings are per-VMA and
4053          * only the consumed reservations are tracked. When the VMA
4054          * disappears, the original reservation is the VMA size and the
4055          * consumed reservations are stored in the map. Hence, nothing
4056          * else has to be done for private mappings here
4057          */
4058         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4059                 long add = region_add(resv_map, from, to);
4060
4061                 if (unlikely(chg > add)) {
4062                         /*
4063                          * pages in this range were added to the reserve
4064                          * map between region_chg and region_add.  This
4065                          * indicates a race with alloc_huge_page.  Adjust
4066                          * the subpool and reserve counts modified above
4067                          * based on the difference.
4068                          */
4069                         long rsv_adjust;
4070
4071                         rsv_adjust = hugepage_subpool_put_pages(spool,
4072                                                                 chg - add);
4073                         hugetlb_acct_memory(h, -rsv_adjust);
4074                 }
4075         }
4076         return 0;
4077 out_err:
4078         if (!vma || vma->vm_flags & VM_MAYSHARE)
4079                 region_abort(resv_map, from, to);
4080         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4081                 kref_put(&resv_map->refs, resv_map_release);
4082         return ret;
4083 }
4084
4085 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4086                                                                 long freed)
4087 {
4088         struct hstate *h = hstate_inode(inode);
4089         struct resv_map *resv_map = inode_resv_map(inode);
4090         long chg = 0;
4091         struct hugepage_subpool *spool = subpool_inode(inode);
4092         long gbl_reserve;
4093
4094         if (resv_map) {
4095                 chg = region_del(resv_map, start, end);
4096                 /*
4097                  * region_del() can fail in the rare case where a region
4098                  * must be split and another region descriptor can not be
4099                  * allocated.  If end == LONG_MAX, it will not fail.
4100                  */
4101                 if (chg < 0)
4102                         return chg;
4103         }
4104
4105         spin_lock(&inode->i_lock);
4106         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4107         spin_unlock(&inode->i_lock);
4108
4109         /*
4110          * If the subpool has a minimum size, the number of global
4111          * reservations to be released may be adjusted.
4112          */
4113         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4114         hugetlb_acct_memory(h, -gbl_reserve);
4115
4116         return 0;
4117 }
4118
4119 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4120 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4121                                 struct vm_area_struct *vma,
4122                                 unsigned long addr, pgoff_t idx)
4123 {
4124         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4125                                 svma->vm_start;
4126         unsigned long sbase = saddr & PUD_MASK;
4127         unsigned long s_end = sbase + PUD_SIZE;
4128
4129         /* Allow segments to share if only one is marked locked */
4130         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4131         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4132
4133         /*
4134          * match the virtual addresses, permission and the alignment of the
4135          * page table page.
4136          */
4137         if (pmd_index(addr) != pmd_index(saddr) ||
4138             vm_flags != svm_flags ||
4139             sbase < svma->vm_start || svma->vm_end < s_end)
4140                 return 0;
4141
4142         return saddr;
4143 }
4144
4145 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4146 {
4147         unsigned long base = addr & PUD_MASK;
4148         unsigned long end = base + PUD_SIZE;
4149
4150         /*
4151          * check on proper vm_flags and page table alignment
4152          */
4153         if (vma->vm_flags & VM_MAYSHARE &&
4154             vma->vm_start <= base && end <= vma->vm_end)
4155                 return true;
4156         return false;
4157 }
4158
4159 /*
4160  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4161  * and returns the corresponding pte. While this is not necessary for the
4162  * !shared pmd case because we can allocate the pmd later as well, it makes the
4163  * code much cleaner. pmd allocation is essential for the shared case because
4164  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4165  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4166  * bad pmd for sharing.
4167  */
4168 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4169 {
4170         struct vm_area_struct *vma = find_vma(mm, addr);
4171         struct address_space *mapping = vma->vm_file->f_mapping;
4172         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4173                         vma->vm_pgoff;
4174         struct vm_area_struct *svma;
4175         unsigned long saddr;
4176         pte_t *spte = NULL;
4177         pte_t *pte;
4178         spinlock_t *ptl;
4179
4180         if (!vma_shareable(vma, addr))
4181                 return (pte_t *)pmd_alloc(mm, pud, addr);
4182
4183         i_mmap_lock_write(mapping);
4184         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4185                 if (svma == vma)
4186                         continue;
4187
4188                 saddr = page_table_shareable(svma, vma, addr, idx);
4189                 if (saddr) {
4190                         spte = huge_pte_offset(svma->vm_mm, saddr);
4191                         if (spte) {
4192                                 mm_inc_nr_pmds(mm);
4193                                 get_page(virt_to_page(spte));
4194                                 break;
4195                         }
4196                 }
4197         }
4198
4199         if (!spte)
4200                 goto out;
4201
4202         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4203         spin_lock(ptl);
4204         if (pud_none(*pud)) {
4205                 pud_populate(mm, pud,
4206                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4207         } else {
4208                 put_page(virt_to_page(spte));
4209                 mm_inc_nr_pmds(mm);
4210         }
4211         spin_unlock(ptl);
4212 out:
4213         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4214         i_mmap_unlock_write(mapping);
4215         return pte;
4216 }
4217
4218 /*
4219  * unmap huge page backed by shared pte.
4220  *
4221  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
4222  * indicated by page_count > 1, unmap is achieved by clearing pud and
4223  * decrementing the ref count. If count == 1, the pte page is not shared.
4224  *
4225  * called with page table lock held.
4226  *
4227  * returns: 1 successfully unmapped a shared pte page
4228  *          0 the underlying pte page is not shared, or it is the last user
4229  */
4230 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4231 {
4232         pgd_t *pgd = pgd_offset(mm, *addr);
4233         pud_t *pud = pud_offset(pgd, *addr);
4234
4235         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4236         if (page_count(virt_to_page(ptep)) == 1)
4237                 return 0;
4238
4239         pud_clear(pud);
4240         put_page(virt_to_page(ptep));
4241         mm_dec_nr_pmds(mm);
4242         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4243         return 1;
4244 }
4245 #define want_pmd_share()        (1)
4246 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4247 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4248 {
4249         return NULL;
4250 }
4251
4252 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4253 {
4254         return 0;
4255 }
4256 #define want_pmd_share()        (0)
4257 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4258
4259 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4260 pte_t *huge_pte_alloc(struct mm_struct *mm,
4261                         unsigned long addr, unsigned long sz)
4262 {
4263         pgd_t *pgd;
4264         pud_t *pud;
4265         pte_t *pte = NULL;
4266
4267         pgd = pgd_offset(mm, addr);
4268         pud = pud_alloc(mm, pgd, addr);
4269         if (pud) {
4270                 if (sz == PUD_SIZE) {
4271                         pte = (pte_t *)pud;
4272                 } else {
4273                         BUG_ON(sz != PMD_SIZE);
4274                         if (want_pmd_share() && pud_none(*pud))
4275                                 pte = huge_pmd_share(mm, addr, pud);
4276                         else
4277                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4278                 }
4279         }
4280         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4281
4282         return pte;
4283 }
4284
4285 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4286 {
4287         pgd_t *pgd;
4288         pud_t *pud;
4289         pmd_t *pmd = NULL;
4290
4291         pgd = pgd_offset(mm, addr);
4292         if (pgd_present(*pgd)) {
4293                 pud = pud_offset(pgd, addr);
4294                 if (pud_present(*pud)) {
4295                         if (pud_huge(*pud))
4296                                 return (pte_t *)pud;
4297                         pmd = pmd_offset(pud, addr);
4298                 }
4299         }
4300         return (pte_t *) pmd;
4301 }
4302
4303 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4304
4305 /*
4306  * These functions are overwritable if your architecture needs its own
4307  * behavior.
4308  */
4309 struct page * __weak
4310 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4311                               int write)
4312 {
4313         return ERR_PTR(-EINVAL);
4314 }
4315
4316 struct page * __weak
4317 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4318                 pmd_t *pmd, int flags)
4319 {
4320         struct page *page = NULL;
4321         spinlock_t *ptl;
4322 retry:
4323         ptl = pmd_lockptr(mm, pmd);
4324         spin_lock(ptl);
4325         /*
4326          * make sure that the address range covered by this pmd is not
4327          * unmapped from other threads.
4328          */
4329         if (!pmd_huge(*pmd))
4330                 goto out;
4331         if (pmd_present(*pmd)) {
4332                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4333                 if (flags & FOLL_GET)
4334                         get_page(page);
4335         } else {
4336                 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4337                         spin_unlock(ptl);
4338                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4339                         goto retry;
4340                 }
4341                 /*
4342                  * hwpoisoned entry is treated as no_page_table in
4343                  * follow_page_mask().
4344                  */
4345         }
4346 out:
4347         spin_unlock(ptl);
4348         return page;
4349 }
4350
4351 struct page * __weak
4352 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4353                 pud_t *pud, int flags)
4354 {
4355         if (flags & FOLL_GET)
4356                 return NULL;
4357
4358         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4359 }
4360
4361 #ifdef CONFIG_MEMORY_FAILURE
4362
4363 /*
4364  * This function is called from memory failure code.
4365  * Assume the caller holds page lock of the head page.
4366  */
4367 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4368 {
4369         struct hstate *h = page_hstate(hpage);
4370         int nid = page_to_nid(hpage);
4371         int ret = -EBUSY;
4372
4373         spin_lock(&hugetlb_lock);
4374         /*
4375          * Just checking !page_huge_active is not enough, because that could be
4376          * an isolated/hwpoisoned hugepage (which have >0 refcount).
4377          */
4378         if (!page_huge_active(hpage) && !page_count(hpage)) {
4379                 /*
4380                  * Hwpoisoned hugepage isn't linked to activelist or freelist,
4381                  * but dangling hpage->lru can trigger list-debug warnings
4382                  * (this happens when we call unpoison_memory() on it),
4383                  * so let it point to itself with list_del_init().
4384                  */
4385                 list_del_init(&hpage->lru);
4386                 set_page_refcounted(hpage);
4387                 h->free_huge_pages--;
4388                 h->free_huge_pages_node[nid]--;
4389                 ret = 0;
4390         }
4391         spin_unlock(&hugetlb_lock);
4392         return ret;
4393 }
4394 #endif
4395
4396 bool isolate_huge_page(struct page *page, struct list_head *list)
4397 {
4398         bool ret = true;
4399
4400         VM_BUG_ON_PAGE(!PageHead(page), page);
4401         spin_lock(&hugetlb_lock);
4402         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4403                 ret = false;
4404                 goto unlock;
4405         }
4406         clear_page_huge_active(page);
4407         list_move_tail(&page->lru, list);
4408 unlock:
4409         spin_unlock(&hugetlb_lock);
4410         return ret;
4411 }
4412
4413 void putback_active_hugepage(struct page *page)
4414 {
4415         VM_BUG_ON_PAGE(!PageHead(page), page);
4416         spin_lock(&hugetlb_lock);
4417         set_page_huge_active(page);
4418         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4419         spin_unlock(&hugetlb_lock);
4420         put_page(page);
4421 }