mm/hugetlb: make pud_huge() and follow_huge_pud() aware of non-present pud entry
arch/x86/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a normal
 * hugetlb entry or a non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
        return !pmd_none(pmd) &&
                (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

/*
 * pud_huge() returns 1 if @pud is a hugetlb-related entry, that is, a normal
 * hugetlb entry or a non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, it returns 0.
 */
int pud_huge(pud_t pud)
{
        return !pud_none(pud) &&
                (pud_val(pud) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
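
As a concrete illustration of the check above: a present huge entry has both _PAGE_PRESENT and _PAGE_PSE set, a non-present migration or hwpoison entry has _PAGE_PRESENT clear, and an entry that merely points to a lower-level page table has only _PAGE_PRESENT set; only the last case fails the test. The following minimal userspace sketch mirrors the predicate with mock bit values (0x001 and 0x080, the x86 positions of _PAGE_PRESENT and _PAGE_PSE); the sample entry values are made up for illustration.

#include <stdio.h>

#define MOCK_PAGE_PRESENT 0x001UL       /* bit 0 on x86 */
#define MOCK_PAGE_PSE     0x080UL       /* bit 7 on x86 */

/* Mirrors the pmd_huge()/pud_huge() check for a non-none entry. */
static int mock_huge(unsigned long val)
{
        return val != 0 &&
                (val & (MOCK_PAGE_PRESENT | MOCK_PAGE_PSE)) != MOCK_PAGE_PRESENT;
}

int main(void)
{
        /* present + PSE: a normal huge mapping -> 1 */
        printf("present huge: %d\n", mock_huge(MOCK_PAGE_PRESENT | MOCK_PAGE_PSE));
        /* present, no PSE: points to a lower page table -> 0 */
        printf("page table:   %d\n", mock_huge(MOCK_PAGE_PRESENT));
        /* non-present but non-none: e.g. a migration swap entry -> 1 */
        printf("migration:    %d\n", mock_huge(0x200UL));
        return 0;
}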

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = get_mmap_base(1);

        /*
         * If the hint address is above DEFAULT_MAP_WINDOW, look for an
         * unmapped area in the full address space.
         */
        info.high_limit = in_32bit_syscall() ?
                task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
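
To make the align_mask arithmetic concrete: vm_unmapped_area() returns an address whose bits under info.align_mask equal info.align_offset, so selecting the page-sized bits below the huge-page boundary forces huge-page alignment. A minimal sketch, assuming 4 KiB base pages and a 2 MiB hstate (the x86-64 defaults):

#include <stdio.h>

int main(void)
{
        unsigned long page_mask = ~((1UL << 12) - 1);   /* PAGE_MASK for 4 KiB pages */
        unsigned long huge_mask = ~((1UL << 21) - 1);   /* huge_page_mask for 2 MiB  */
        unsigned long align_mask = page_mask & ~huge_mask;

        /* Prints 0x1ff000: bits 12-20 must be zero in the returned address,
         * i.e. the mapping starts on a 2 MiB boundary. */
        printf("align_mask = %#lx\n", align_mask);
        return 0;
}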

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = get_mmap_base(0);

        /*
         * If the hint address is above DEFAULT_MAP_WINDOW, look for an
         * unmapped area in the full address space.
         */
        if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE_LOW;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}
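
The fallback test works because vm_unmapped_area() returns either a page-aligned address or a negative errno cast to unsigned long; (unsigned long)-ENOMEM is 0xfffffffffffffff4 on 64-bit, so its low PAGE_SHIFT bits are nonzero and addr & ~PAGE_MASK catches it. A standalone sketch of the same trick, assuming 4 KiB pages:

#include <errno.h>
#include <stdio.h>

int main(void)
{
        unsigned long page_mask = ~((1UL << 12) - 1);   /* PAGE_MASK   */
        unsigned long addr = (unsigned long)-ENOMEM;    /* 0xfff...ff4 */

        /* A genuine mapping address is page aligned; set bits below
         * PAGE_SHIFT therefore mark an encoded negative errno. */
        if (addr & ~page_mask)
                printf("error return detected: %ld\n", (long)addr);
        return 0;
}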

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;

        if (len > TASK_SIZE)
                return -ENOMEM;

        /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr &= huge_page_mask(h);
                if (!mmap_address_hint_valid(addr, len))
                        goto get_unmapped_area;

                vma = find_vma(mm, addr);
                if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }

get_unmapped_area:
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
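
From userspace, this function is reached through mmap() on hugetlb-backed memory when no MAP_FIXED address is supplied. A minimal sketch, assuming a 2 MiB default huge page size and at least one page reserved beforehand (e.g. via /proc/sys/vm/nr_hugepages); note the huge-page-multiple length, matching the -EINVAL check above:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 2UL << 20;         /* one 2 MiB huge page */

        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");         /* e.g. no huge pages reserved */
                return 1;
        }
        memset(p, 0, len);              /* touch it to fault the page in */
        munmap(p, len);
        return 0;
}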
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
bool __init arch_hugetlb_valid_size(unsigned long size)
{
        if (size == PMD_SIZE)
                return true;
        else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
                return true;
        else
                return false;
}
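
This is the hook that validates the hugepagesz= boot parameter on x86-64: 2 MiB (PMD_SIZE) is always accepted, and 1 GiB (PUD_SIZE) only when the CPU advertises gigantic pages (the pdpe1gb flag in /proc/cpuinfo). Each accepted size is registered as an hstate and shows up under sysfs as a hugepages-<size>kB directory; a small sketch that lists what the running kernel registered:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
        DIR *d = opendir("/sys/kernel/mm/hugepages");
        struct dirent *e;

        if (!d) {
                perror("opendir");
                return 1;
        }
        while ((e = readdir(d)) != NULL)
                if (e->d_name[0] != '.')
                        printf("%s\n", e->d_name);      /* e.g. hugepages-2048kB */
        closedir(d);
        return 0;
}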

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
        /* With compaction or CMA we can allocate gigantic pages at runtime */
        if (boot_cpu_has(X86_FEATURE_GBPAGES))
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CONTIG_ALLOC */
#endif /* CONFIG_X86_64 */
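
With CONFIG_CONTIG_ALLOC, registering the 1 GiB hstate here means gigantic pages can also be allocated after boot rather than only from the kernel command line. A minimal sketch (run as root, assuming a GBPAGES-capable CPU so the hstate exists) that requests two gigantic pages through the sysfs knob; the allocation can still fall short if no contiguous memory is available:

#include <stdio.h>

int main(void)
{
        /* The 1 GiB hstate registered by gigantic_pages_init() above. */
        const char *path =
                "/sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");        /* needs root and a 1 GiB hstate */
                return 1;
        }
        fprintf(f, "2\n");              /* try to allocate two 1 GiB pages */
        fclose(f);
        return 0;
}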