mm/huge_memory.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Copyright (C) 2009  Red Hat, Inc.
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/mm.h>
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/coredump.h>
12 #include <linux/sched/numa_balancing.h>
13 #include <linux/highmem.h>
14 #include <linux/hugetlb.h>
15 #include <linux/mmu_notifier.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/shrinker.h>
19 #include <linux/mm_inline.h>
20 #include <linux/swapops.h>
21 #include <linux/backing-dev.h>
22 #include <linux/dax.h>
23 #include <linux/mm_types.h>
24 #include <linux/khugepaged.h>
25 #include <linux/freezer.h>
26 #include <linux/pfn_t.h>
27 #include <linux/mman.h>
28 #include <linux/memremap.h>
29 #include <linux/pagemap.h>
30 #include <linux/debugfs.h>
31 #include <linux/migrate.h>
32 #include <linux/hashtable.h>
33 #include <linux/userfaultfd_k.h>
34 #include <linux/page_idle.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/oom.h>
37 #include <linux/numa.h>
38 #include <linux/page_owner.h>
39 #include <linux/sched/sysctl.h>
40 #include <linux/memory-tiers.h>
41 #include <linux/compat.h>
42 #include <linux/pgalloc_tag.h>
43 #include <linux/pagewalk.h>
44
45 #include <asm/tlb.h>
46 #include <asm/pgalloc.h>
47 #include "internal.h"
48 #include "swap.h"
49
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/thp.h>
52
53 /*
54  * By default, transparent hugepage support is disabled in order to avoid
55  * risking an increased memory footprint for applications that are not
56  * guaranteed to benefit from it. When transparent hugepage support is
57  * enabled, it is for all mappings, and khugepaged scans all mappings.
58  * Defrag is invoked by khugepaged hugepage allocations and by page faults
59  * for all hugepage allocations.
60  */
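/*
 * These defaults can be changed at runtime through the sysfs knobs defined
 * further down in this file, e.g.:
 *
 *	echo madvise >/sys/kernel/mm/transparent_hugepage/enabled
 */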
61 unsigned long transparent_hugepage_flags __read_mostly =
62 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
63         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
64 #endif
65 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
66         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
67 #endif
68         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
69         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
70         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
71
72 static struct shrinker *deferred_split_shrinker;
73 static unsigned long deferred_split_count(struct shrinker *shrink,
74                                           struct shrink_control *sc);
75 static unsigned long deferred_split_scan(struct shrinker *shrink,
76                                          struct shrink_control *sc);
77 static bool split_underused_thp = true;
78
79 static atomic_t huge_zero_refcount;
80 struct folio *huge_zero_folio __read_mostly;
81 unsigned long huge_zero_pfn __read_mostly = ~0UL;
82 unsigned long huge_anon_orders_always __read_mostly;
83 unsigned long huge_anon_orders_madvise __read_mostly;
84 unsigned long huge_anon_orders_inherit __read_mostly;
85 static bool anon_orders_configured __initdata;
86
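/*
 * __thp_vma_allowable_orders - filter the requested THP orders for a VMA
 *
 * Returns the subset of @orders (a bitfield of page orders) that @vma can
 * actually use, taking the VMA type (anonymous, shmem, file, DAX), the
 * madvise/prctl settings, the sysfs policy (when TVA_ENFORCE_SYSFS is set)
 * and size/alignment constraints into account.  Returns 0 if no requested
 * order is allowed.
 */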
87 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
88                                          unsigned long vm_flags,
89                                          unsigned long tva_flags,
90                                          unsigned long orders)
91 {
92         bool smaps = tva_flags & TVA_SMAPS;
93         bool in_pf = tva_flags & TVA_IN_PF;
94         bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
95         unsigned long supported_orders;
96
97         /* Check the intersection of requested and supported orders. */
98         if (vma_is_anonymous(vma))
99                 supported_orders = THP_ORDERS_ALL_ANON;
100         else if (vma_is_special_huge(vma))
101                 supported_orders = THP_ORDERS_ALL_SPECIAL;
102         else
103                 supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
104
105         orders &= supported_orders;
106         if (!orders)
107                 return 0;
108
109         if (!vma->vm_mm)                /* vdso */
110                 return 0;
111
112         /*
113          * THP was explicitly disabled through madvise or prctl, or the
114          * architecture may disable THP for some mappings, for example,
115          * s390 kvm.
116          */
117         if ((vm_flags & VM_NOHUGEPAGE) ||
118             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
119                 return 0;
120         /*
121          * The hardware/firmware has marked hugepage support as disabled.
122          */
123         if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
124                 return 0;
125
126         /* khugepaged doesn't collapse DAX VMAs, but page faults are fine. */
127         if (vma_is_dax(vma))
128                 return in_pf ? orders : 0;
129
130         /*
131          * khugepaged does not work on special VMAs and hugetlb VMAs.
132          * This must be checked after dax since some dax mappings may have
133          * VM_MIXEDMAP set.
134          */
135         if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
136                 return 0;
137
138         /*
139          * Check alignment for file vma and size for both file and anon vma by
140          * filtering out the unsuitable orders.
141          *
142          * Skip the check for page fault. Huge fault does the check in fault
143          * handlers.
144          */
145         if (!in_pf) {
146                 int order = highest_order(orders);
147                 unsigned long addr;
148
149                 while (orders) {
150                         addr = vma->vm_end - (PAGE_SIZE << order);
151                         if (thp_vma_suitable_order(vma, addr, order))
152                                 break;
153                         order = next_order(&orders, order);
154                 }
155
156                 if (!orders)
157                         return 0;
158         }
159
160         /*
161          * Enabled via shmem mount options or sysfs settings.
162          * Must be done before the hugepage flags check since shmem has
163          * its own flags.
164          */
165         if (!in_pf && shmem_file(vma->vm_file))
166                 return shmem_allowable_huge_orders(file_inode(vma->vm_file),
167                                                    vma, vma->vm_pgoff, 0,
168                                                    !enforce_sysfs);
169
170         if (!vma_is_anonymous(vma)) {
171                 /*
172                  * Enforce sysfs THP requirements as necessary. Anonymous vmas
173                  * were already handled in thp_vma_allowable_orders().
174                  */
175                 if (enforce_sysfs &&
176                     (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
177                                                     !hugepage_global_always())))
178                         return 0;
179
180                 /*
181                  * Trust that ->huge_fault() handlers know what they are doing
182                  * in fault path.
183                  */
184                 if ((in_pf || smaps) && vma->vm_ops->huge_fault)
185                         return orders;
186                 /* Only regular files are valid in the collapse path */
187                 if ((!in_pf || smaps) && file_thp_enabled(vma))
188                         return orders;
189                 return 0;
190         }
191
192         if (vma_is_temporary_stack(vma))
193                 return 0;
194
195         /*
196          * THPeligible bit of smaps should show 1 for proper VMAs even
197          * though anon_vma is not initialized yet.
198          *
199          * Allow page faults since anon_vma may not be initialized until
200          * the first page fault.
201          */
202         if (!vma->anon_vma)
203                 return (smaps || in_pf) ? orders : 0;
204
205         return orders;
206 }
207
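/*
 * Lazily allocate the global huge zero folio on first use.  The refcount
 * starts at 2: one reference for the caller and one extra reference that
 * is only dropped by the shrinker.
 */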
208 static bool get_huge_zero_page(void)
209 {
210         struct folio *zero_folio;
211 retry:
212         if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
213                 return true;
214
215         zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
216                         HPAGE_PMD_ORDER);
217         if (!zero_folio) {
218                 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
219                 return false;
220         }
221         /* Ensure zero folio won't have large_rmappable flag set. */
222         folio_clear_large_rmappable(zero_folio);
223         preempt_disable();
224         if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
225                 preempt_enable();
226                 folio_put(zero_folio);
227                 goto retry;
228         }
229         WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
230
231         /* We take an additional reference here; it will be put back by the shrinker. */
232         atomic_set(&huge_zero_refcount, 2);
233         preempt_enable();
234         count_vm_event(THP_ZERO_PAGE_ALLOC);
235         return true;
236 }
237
238 static void put_huge_zero_page(void)
239 {
240         /*
241          * The counter should never reach zero here; only the shrinker
242          * can put the last reference.
243          */
244         BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
245 }
246
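/*
 * Take at most one huge zero folio reference per mm: MMF_HUGE_ZERO_PAGE
 * records that this mm already holds a reference, which is dropped again
 * in mm_put_huge_zero_folio().
 */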
247 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
248 {
249         if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
250                 return READ_ONCE(huge_zero_folio);
251
252         if (!get_huge_zero_page())
253                 return NULL;
254
255         if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
256                 put_huge_zero_page();
257
258         return READ_ONCE(huge_zero_folio);
259 }
260
261 void mm_put_huge_zero_folio(struct mm_struct *mm)
262 {
263         if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
264                 put_huge_zero_page();
265 }
266
267 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
268                                         struct shrink_control *sc)
269 {
270         /* We can free the zero page only when just the shrinker's reference remains */
271         return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
272 }
273
274 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
275                                        struct shrink_control *sc)
276 {
277         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
278                 struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
279                 BUG_ON(zero_folio == NULL);
280                 WRITE_ONCE(huge_zero_pfn, ~0UL);
281                 folio_put(zero_folio);
282                 return HPAGE_PMD_NR;
283         }
284
285         return 0;
286 }
287
288 static struct shrinker *huge_zero_page_shrinker;
289
290 #ifdef CONFIG_SYSFS
291 static ssize_t enabled_show(struct kobject *kobj,
292                             struct kobj_attribute *attr, char *buf)
293 {
294         const char *output;
295
296         if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
297                 output = "[always] madvise never";
298         else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
299                           &transparent_hugepage_flags))
300                 output = "always [madvise] never";
301         else
302                 output = "always madvise [never]";
303
304         return sysfs_emit(buf, "%s\n", output);
305 }
306
307 static ssize_t enabled_store(struct kobject *kobj,
308                              struct kobj_attribute *attr,
309                              const char *buf, size_t count)
310 {
311         ssize_t ret = count;
312
313         if (sysfs_streq(buf, "always")) {
314                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
315                 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
316         } else if (sysfs_streq(buf, "madvise")) {
317                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
318                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
319         } else if (sysfs_streq(buf, "never")) {
320                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
321                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
322         } else
323                 ret = -EINVAL;
324
325         if (ret > 0) {
326                 int err = start_stop_khugepaged();
327                 if (err)
328                         ret = err;
329         }
330         return ret;
331 }
332
333 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
334
335 ssize_t single_hugepage_flag_show(struct kobject *kobj,
336                                   struct kobj_attribute *attr, char *buf,
337                                   enum transparent_hugepage_flag flag)
338 {
339         return sysfs_emit(buf, "%d\n",
340                           !!test_bit(flag, &transparent_hugepage_flags));
341 }
342
343 ssize_t single_hugepage_flag_store(struct kobject *kobj,
344                                  struct kobj_attribute *attr,
345                                  const char *buf, size_t count,
346                                  enum transparent_hugepage_flag flag)
347 {
348         unsigned long value;
349         int ret;
350
351         ret = kstrtoul(buf, 10, &value);
352         if (ret < 0)
353                 return ret;
354         if (value > 1)
355                 return -EINVAL;
356
357         if (value)
358                 set_bit(flag, &transparent_hugepage_flags);
359         else
360                 clear_bit(flag, &transparent_hugepage_flags);
361
362         return count;
363 }
364
365 static ssize_t defrag_show(struct kobject *kobj,
366                            struct kobj_attribute *attr, char *buf)
367 {
368         const char *output;
369
370         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
371                      &transparent_hugepage_flags))
372                 output = "[always] defer defer+madvise madvise never";
373         else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
374                           &transparent_hugepage_flags))
375                 output = "always [defer] defer+madvise madvise never";
376         else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
377                           &transparent_hugepage_flags))
378                 output = "always defer [defer+madvise] madvise never";
379         else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
380                           &transparent_hugepage_flags))
381                 output = "always defer defer+madvise [madvise] never";
382         else
383                 output = "always defer defer+madvise madvise [never]";
384
385         return sysfs_emit(buf, "%s\n", output);
386 }
387
388 static ssize_t defrag_store(struct kobject *kobj,
389                             struct kobj_attribute *attr,
390                             const char *buf, size_t count)
391 {
392         if (sysfs_streq(buf, "always")) {
393                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
394                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
395                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
396                 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
397         } else if (sysfs_streq(buf, "defer+madvise")) {
398                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
399                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
400                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
401                 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
402         } else if (sysfs_streq(buf, "defer")) {
403                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
404                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
405                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
406                 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
407         } else if (sysfs_streq(buf, "madvise")) {
408                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
409                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
410                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
411                 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
412         } else if (sysfs_streq(buf, "never")) {
413                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
414                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
415                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
416                 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
417         } else
418                 return -EINVAL;
419
420         return count;
421 }
422 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
423
424 static ssize_t use_zero_page_show(struct kobject *kobj,
425                                   struct kobj_attribute *attr, char *buf)
426 {
427         return single_hugepage_flag_show(kobj, attr, buf,
428                                          TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
429 }
430 static ssize_t use_zero_page_store(struct kobject *kobj,
431                 struct kobj_attribute *attr, const char *buf, size_t count)
432 {
433         return single_hugepage_flag_store(kobj, attr, buf, count,
434                                  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
435 }
436 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
437
438 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
439                                    struct kobj_attribute *attr, char *buf)
440 {
441         return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
442 }
443 static struct kobj_attribute hpage_pmd_size_attr =
444         __ATTR_RO(hpage_pmd_size);
445
446 static ssize_t split_underused_thp_show(struct kobject *kobj,
447                             struct kobj_attribute *attr, char *buf)
448 {
449         return sysfs_emit(buf, "%d\n", split_underused_thp);
450 }
451
452 static ssize_t split_underused_thp_store(struct kobject *kobj,
453                              struct kobj_attribute *attr,
454                              const char *buf, size_t count)
455 {
456         int err = kstrtobool(buf, &split_underused_thp);
457
458         if (err < 0)
459                 return err;
460
461         return count;
462 }
463
464 static struct kobj_attribute split_underused_thp_attr = __ATTR(
465         shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
466
467 static struct attribute *hugepage_attr[] = {
468         &enabled_attr.attr,
469         &defrag_attr.attr,
470         &use_zero_page_attr.attr,
471         &hpage_pmd_size_attr.attr,
472 #ifdef CONFIG_SHMEM
473         &shmem_enabled_attr.attr,
474 #endif
475         &split_underused_thp_attr.attr,
476         NULL,
477 };
478
479 static const struct attribute_group hugepage_attr_group = {
480         .attrs = hugepage_attr,
481 };
482
483 static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
484 static void thpsize_release(struct kobject *kobj);
485 static DEFINE_SPINLOCK(huge_anon_orders_lock);
486 static LIST_HEAD(thpsize_list);
487
488 static ssize_t anon_enabled_show(struct kobject *kobj,
489                                  struct kobj_attribute *attr, char *buf)
490 {
491         int order = to_thpsize(kobj)->order;
492         const char *output;
493
494         if (test_bit(order, &huge_anon_orders_always))
495                 output = "[always] inherit madvise never";
496         else if (test_bit(order, &huge_anon_orders_inherit))
497                 output = "always [inherit] madvise never";
498         else if (test_bit(order, &huge_anon_orders_madvise))
499                 output = "always inherit [madvise] never";
500         else
501                 output = "always inherit madvise [never]";
502
503         return sysfs_emit(buf, "%s\n", output);
504 }
505
506 static ssize_t anon_enabled_store(struct kobject *kobj,
507                                   struct kobj_attribute *attr,
508                                   const char *buf, size_t count)
509 {
510         int order = to_thpsize(kobj)->order;
511         ssize_t ret = count;
512
513         if (sysfs_streq(buf, "always")) {
514                 spin_lock(&huge_anon_orders_lock);
515                 clear_bit(order, &huge_anon_orders_inherit);
516                 clear_bit(order, &huge_anon_orders_madvise);
517                 set_bit(order, &huge_anon_orders_always);
518                 spin_unlock(&huge_anon_orders_lock);
519         } else if (sysfs_streq(buf, "inherit")) {
520                 spin_lock(&huge_anon_orders_lock);
521                 clear_bit(order, &huge_anon_orders_always);
522                 clear_bit(order, &huge_anon_orders_madvise);
523                 set_bit(order, &huge_anon_orders_inherit);
524                 spin_unlock(&huge_anon_orders_lock);
525         } else if (sysfs_streq(buf, "madvise")) {
526                 spin_lock(&huge_anon_orders_lock);
527                 clear_bit(order, &huge_anon_orders_always);
528                 clear_bit(order, &huge_anon_orders_inherit);
529                 set_bit(order, &huge_anon_orders_madvise);
530                 spin_unlock(&huge_anon_orders_lock);
531         } else if (sysfs_streq(buf, "never")) {
532                 spin_lock(&huge_anon_orders_lock);
533                 clear_bit(order, &huge_anon_orders_always);
534                 clear_bit(order, &huge_anon_orders_inherit);
535                 clear_bit(order, &huge_anon_orders_madvise);
536                 spin_unlock(&huge_anon_orders_lock);
537         } else
538                 ret = -EINVAL;
539
540         if (ret > 0) {
541                 int err;
542
543                 err = start_stop_khugepaged();
544                 if (err)
545                         ret = err;
546         }
547         return ret;
548 }
549
550 static struct kobj_attribute anon_enabled_attr =
551         __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);
552
553 static struct attribute *anon_ctrl_attrs[] = {
554         &anon_enabled_attr.attr,
555         NULL,
556 };
557
558 static const struct attribute_group anon_ctrl_attr_grp = {
559         .attrs = anon_ctrl_attrs,
560 };
561
562 static struct attribute *file_ctrl_attrs[] = {
563 #ifdef CONFIG_SHMEM
564         &thpsize_shmem_enabled_attr.attr,
565 #endif
566         NULL,
567 };
568
569 static const struct attribute_group file_ctrl_attr_grp = {
570         .attrs = file_ctrl_attrs,
571 };
572
573 static struct attribute *any_ctrl_attrs[] = {
574         NULL,
575 };
576
577 static const struct attribute_group any_ctrl_attr_grp = {
578         .attrs = any_ctrl_attrs,
579 };
580
581 static const struct kobj_type thpsize_ktype = {
582         .release = &thpsize_release,
583         .sysfs_ops = &kobj_sysfs_ops,
584 };
585
586 DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
587
588 static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
589 {
590         unsigned long sum = 0;
591         int cpu;
592
593         for_each_possible_cpu(cpu) {
594                 struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
595
596                 sum += this->stats[order][item];
597         }
598
599         return sum;
600 }
601
602 #define DEFINE_MTHP_STAT_ATTR(_name, _index)                            \
603 static ssize_t _name##_show(struct kobject *kobj,                       \
604                         struct kobj_attribute *attr, char *buf)         \
605 {                                                                       \
606         int order = to_thpsize(kobj)->order;                            \
607                                                                         \
608         return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index));  \
609 }                                                                       \
610 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
611
612 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
613 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
614 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
615 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
616 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
617 #ifdef CONFIG_SHMEM
618 DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
619 DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
620 DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
621 #endif
622 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
623 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
624 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
625 DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
626 DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
627
628 static struct attribute *anon_stats_attrs[] = {
629         &anon_fault_alloc_attr.attr,
630         &anon_fault_fallback_attr.attr,
631         &anon_fault_fallback_charge_attr.attr,
632 #ifndef CONFIG_SHMEM
633         &swpout_attr.attr,
634         &swpout_fallback_attr.attr,
635 #endif
636         &split_deferred_attr.attr,
637         &nr_anon_attr.attr,
638         &nr_anon_partially_mapped_attr.attr,
639         NULL,
640 };
641
642 static struct attribute_group anon_stats_attr_grp = {
643         .name = "stats",
644         .attrs = anon_stats_attrs,
645 };
646
647 static struct attribute *file_stats_attrs[] = {
648 #ifdef CONFIG_SHMEM
649         &shmem_alloc_attr.attr,
650         &shmem_fallback_attr.attr,
651         &shmem_fallback_charge_attr.attr,
652 #endif
653         NULL,
654 };
655
656 static struct attribute_group file_stats_attr_grp = {
657         .name = "stats",
658         .attrs = file_stats_attrs,
659 };
660
661 static struct attribute *any_stats_attrs[] = {
662 #ifdef CONFIG_SHMEM
663         &swpout_attr.attr,
664         &swpout_fallback_attr.attr,
665 #endif
666         &split_attr.attr,
667         &split_failed_attr.attr,
668         NULL,
669 };
670
671 static struct attribute_group any_stats_attr_grp = {
672         .name = "stats",
673         .attrs = any_stats_attrs,
674 };
675
676 static int sysfs_add_group(struct kobject *kobj,
677                            const struct attribute_group *grp)
678 {
679         int ret = -ENOENT;
680
681         /*
682          * If the group is named, try to merge first, assuming the subdirectory
683          * was already created. This avoids the warning emitted by
684          * sysfs_create_group() if the directory already exists.
685          */
686         if (grp->name)
687                 ret = sysfs_merge_group(kobj, grp);
688         if (ret)
689                 ret = sysfs_create_group(kobj, grp);
690
691         return ret;
692 }
693
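/*
 * Create the /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB
 * directory for one THP order and populate it with the control and stats
 * attribute groups that apply to that order (anon and/or file backed).
 */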
694 static struct thpsize *thpsize_create(int order, struct kobject *parent)
695 {
696         unsigned long size = (PAGE_SIZE << order) / SZ_1K;
697         struct thpsize *thpsize;
698         int ret = -ENOMEM;
699
700         thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
701         if (!thpsize)
702                 goto err;
703
704         thpsize->order = order;
705
706         ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
707                                    "hugepages-%lukB", size);
708         if (ret) {
709                 kfree(thpsize);
710                 goto err;
711         }
712
713
714         ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
715         if (ret)
716                 goto err_put;
717
718         ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
719         if (ret)
720                 goto err_put;
721
722         if (BIT(order) & THP_ORDERS_ALL_ANON) {
723                 ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
724                 if (ret)
725                         goto err_put;
726
727                 ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
728                 if (ret)
729                         goto err_put;
730         }
731
732         if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
733                 ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
734                 if (ret)
735                         goto err_put;
736
737                 ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
738                 if (ret)
739                         goto err_put;
740         }
741
742         return thpsize;
743 err_put:
744         kobject_put(&thpsize->kobj);
745 err:
746         return ERR_PTR(ret);
747 }
748
749 static void thpsize_release(struct kobject *kobj)
750 {
751         kfree(to_thpsize(kobj));
752 }
753
754 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
755 {
756         int err;
757         struct thpsize *thpsize;
758         unsigned long orders;
759         int order;
760
761         /*
762          * Default to setting PMD-sized THP to inherit the global setting and
763          * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
764          * constant so we have to do this here.
765          */
766         if (!anon_orders_configured)
767                 huge_anon_orders_inherit = BIT(PMD_ORDER);
768
769         *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
770         if (unlikely(!*hugepage_kobj)) {
771                 pr_err("failed to create transparent hugepage kobject\n");
772                 return -ENOMEM;
773         }
774
775         err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
776         if (err) {
777                 pr_err("failed to register transparent hugepage group\n");
778                 goto delete_obj;
779         }
780
781         err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
782         if (err) {
783                 pr_err("failed to register khugepaged group\n");
784                 goto remove_hp_group;
785         }
786
787         orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
788         order = highest_order(orders);
789         while (orders) {
790                 thpsize = thpsize_create(order, *hugepage_kobj);
791                 if (IS_ERR(thpsize)) {
792                         pr_err("failed to create thpsize for order %d\n", order);
793                         err = PTR_ERR(thpsize);
794                         goto remove_all;
795                 }
796                 list_add(&thpsize->node, &thpsize_list);
797                 order = next_order(&orders, order);
798         }
799
800         return 0;
801
802 remove_all:
803         hugepage_exit_sysfs(*hugepage_kobj);
804         return err;
805 remove_hp_group:
806         sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
807 delete_obj:
808         kobject_put(*hugepage_kobj);
809         return err;
810 }
811
812 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
813 {
814         struct thpsize *thpsize, *tmp;
815
816         list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
817                 list_del(&thpsize->node);
818                 kobject_put(&thpsize->kobj);
819         }
820
821         sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
822         sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
823         kobject_put(hugepage_kobj);
824 }
825 #else
826 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
827 {
828         return 0;
829 }
830
831 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
832 {
833 }
834 #endif /* CONFIG_SYSFS */
835
836 static int __init thp_shrinker_init(void)
837 {
838         huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
839         if (!huge_zero_page_shrinker)
840                 return -ENOMEM;
841
842         deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
843                                                  SHRINKER_MEMCG_AWARE |
844                                                  SHRINKER_NONSLAB,
845                                                  "thp-deferred_split");
846         if (!deferred_split_shrinker) {
847                 shrinker_free(huge_zero_page_shrinker);
848                 return -ENOMEM;
849         }
850
851         huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
852         huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
853         shrinker_register(huge_zero_page_shrinker);
854
855         deferred_split_shrinker->count_objects = deferred_split_count;
856         deferred_split_shrinker->scan_objects = deferred_split_scan;
857         shrinker_register(deferred_split_shrinker);
858
859         return 0;
860 }
861
862 static void __init thp_shrinker_exit(void)
863 {
864         shrinker_free(huge_zero_page_shrinker);
865         shrinker_free(deferred_split_shrinker);
866 }
867
868 static int __init hugepage_init(void)
869 {
870         int err;
871         struct kobject *hugepage_kobj;
872
873         if (!has_transparent_hugepage()) {
874                 transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
875                 return -EINVAL;
876         }
877
878         /*
879          * hugepages can't be allocated by the buddy allocator
880          */
881         MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
882
883         err = hugepage_init_sysfs(&hugepage_kobj);
884         if (err)
885                 goto err_sysfs;
886
887         err = khugepaged_init();
888         if (err)
889                 goto err_slab;
890
891         err = thp_shrinker_init();
892         if (err)
893                 goto err_shrinker;
894
895         /*
896          * By default disable transparent hugepages on smaller systems,
897          * where the extra memory used could hurt more than TLB overhead
898          * is likely to save.  The admin can still enable it through /sys.
899          */
900         if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
901                 transparent_hugepage_flags = 0;
902                 return 0;
903         }
904
905         err = start_stop_khugepaged();
906         if (err)
907                 goto err_khugepaged;
908
909         return 0;
910 err_khugepaged:
911         thp_shrinker_exit();
912 err_shrinker:
913         khugepaged_destroy();
914 err_slab:
915         hugepage_exit_sysfs(hugepage_kobj);
916 err_sysfs:
917         return err;
918 }
919 subsys_initcall(hugepage_init);
920
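/*
 * Early parsing of the "transparent_hugepage=" boot parameter; the
 * accepted values mirror the sysfs "enabled" knob:
 *
 *	transparent_hugepage=always|madvise|never
 */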
921 static int __init setup_transparent_hugepage(char *str)
922 {
923         int ret = 0;
924         if (!str)
925                 goto out;
926         if (!strcmp(str, "always")) {
927                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
928                         &transparent_hugepage_flags);
929                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
930                           &transparent_hugepage_flags);
931                 ret = 1;
932         } else if (!strcmp(str, "madvise")) {
933                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
934                           &transparent_hugepage_flags);
935                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
936                         &transparent_hugepage_flags);
937                 ret = 1;
938         } else if (!strcmp(str, "never")) {
939                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
940                           &transparent_hugepage_flags);
941                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
942                           &transparent_hugepage_flags);
943                 ret = 1;
944         }
945 out:
946         if (!ret)
947                 pr_warn("transparent_hugepage= cannot parse, ignored\n");
948         return ret;
949 }
950 __setup("transparent_hugepage=", setup_transparent_hugepage);
951
952 static inline int get_order_from_str(const char *size_str)
953 {
954         unsigned long size;
955         char *endptr;
956         int order;
957
958         size = memparse(size_str, &endptr);
959
960         if (!is_power_of_2(size))
961                 goto err;
962         order = get_order(size);
963         if (BIT(order) & ~THP_ORDERS_ALL_ANON)
964                 goto err;
965
966         return order;
967 err:
968         pr_err("invalid size %s in thp_anon boot parameter\n", size_str);
969         return -EINVAL;
970 }
971
972 static char str_dup[PAGE_SIZE] __initdata;
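/*
 * "thp_anon=" takes a semicolon-separated list of <size-list>:<policy>
 * entries, where <size-list> is a comma-separated list of sizes or
 * <start>-<end> size ranges (parsed by memparse, so K/M/G suffixes work)
 * and <policy> is one of always, madvise, inherit or never.  For example,
 * with a 4K base page size:
 *
 *	thp_anon=16K-64K:always;128K:madvise;2M:never
 */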
973 static int __init setup_thp_anon(char *str)
974 {
975         char *token, *range, *policy, *subtoken;
976         unsigned long always, inherit, madvise;
977         char *start_size, *end_size;
978         int start, end, nr;
979         char *p;
980
981         if (!str || strlen(str) + 1 > PAGE_SIZE)
982                 goto err;
983         strcpy(str_dup, str);
984
985         always = huge_anon_orders_always;
986         madvise = huge_anon_orders_madvise;
987         inherit = huge_anon_orders_inherit;
988         p = str_dup;
989         while ((token = strsep(&p, ";")) != NULL) {
990                 range = strsep(&token, ":");
991                 policy = token;
992
993                 if (!policy)
994                         goto err;
995
996                 while ((subtoken = strsep(&range, ",")) != NULL) {
997                         if (strchr(subtoken, '-')) {
998                                 start_size = strsep(&subtoken, "-");
999                                 end_size = subtoken;
1000
1001                                 start = get_order_from_str(start_size);
1002                                 end = get_order_from_str(end_size);
1003                         } else {
1004                                 start = end = get_order_from_str(subtoken);
1005                         }
1006
1007                         if (start < 0 || end < 0 || start > end)
1008                                 goto err;
1009
1010                         nr = end - start + 1;
1011                         if (!strcmp(policy, "always")) {
1012                                 bitmap_set(&always, start, nr);
1013                                 bitmap_clear(&inherit, start, nr);
1014                                 bitmap_clear(&madvise, start, nr);
1015                         } else if (!strcmp(policy, "madvise")) {
1016                                 bitmap_set(&madvise, start, nr);
1017                                 bitmap_clear(&inherit, start, nr);
1018                                 bitmap_clear(&always, start, nr);
1019                         } else if (!strcmp(policy, "inherit")) {
1020                                 bitmap_set(&inherit, start, nr);
1021                                 bitmap_clear(&madvise, start, nr);
1022                                 bitmap_clear(&always, start, nr);
1023                         } else if (!strcmp(policy, "never")) {
1024                                 bitmap_clear(&inherit, start, nr);
1025                                 bitmap_clear(&madvise, start, nr);
1026                                 bitmap_clear(&always, start, nr);
1027                         } else {
1028                                 pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
1029                                 goto err;
1030                         }
1031                 }
1032         }
1033
1034         huge_anon_orders_always = always;
1035         huge_anon_orders_madvise = madvise;
1036         huge_anon_orders_inherit = inherit;
1037         anon_orders_configured = true;
1038         return 1;
1039
1040 err:
1041         pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
1042         return 0;
1043 }
1044 __setup("thp_anon=", setup_thp_anon);
1045
1046 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
1047 {
1048         if (likely(vma->vm_flags & VM_WRITE))
1049                 pmd = pmd_mkwrite(pmd, vma);
1050         return pmd;
1051 }
1052
1053 #ifdef CONFIG_MEMCG
1054 static inline
1055 struct deferred_split *get_deferred_split_queue(struct folio *folio)
1056 {
1057         struct mem_cgroup *memcg = folio_memcg(folio);
1058         struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
1059
1060         if (memcg)
1061                 return &memcg->deferred_split_queue;
1062         else
1063                 return &pgdat->deferred_split_queue;
1064 }
1065 #else
1066 static inline
1067 struct deferred_split *get_deferred_split_queue(struct folio *folio)
1068 {
1069         struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
1070
1071         return &pgdat->deferred_split_queue;
1072 }
1073 #endif
1074
1075 static inline bool is_transparent_hugepage(const struct folio *folio)
1076 {
1077         if (!folio_test_large(folio))
1078                 return false;
1079
1080         return is_huge_zero_folio(folio) ||
1081                 folio_test_large_rmappable(folio);
1082 }
1083
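/*
 * Try to get a mapping that can be aligned to @size (PMD_SIZE for THP):
 * request the wanted length plus @size of padding, then shift the result
 * so that the returned address has the same offset within a @size-sized
 * region as the file offset, which allows the mapping to use huge
 * entries.  Returns 0 when alignment is not attempted or fails, so the
 * caller falls back to a plain search.
 */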
1084 static unsigned long __thp_get_unmapped_area(struct file *filp,
1085                 unsigned long addr, unsigned long len,
1086                 loff_t off, unsigned long flags, unsigned long size,
1087                 vm_flags_t vm_flags)
1088 {
1089         loff_t off_end = off + len;
1090         loff_t off_align = round_up(off, size);
1091         unsigned long len_pad, ret, off_sub;
1092
1093         if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
1094                 return 0;
1095
1096         if (off_end <= off_align || (off_end - off_align) < size)
1097                 return 0;
1098
1099         len_pad = len + size;
1100         if (len_pad < len || (off + len_pad) < off)
1101                 return 0;
1102
1103         ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad,
1104                                            off >> PAGE_SHIFT, flags, vm_flags);
1105
1106         /*
1107          * The failure might be due to length padding. The caller will retry
1108          * without the padding.
1109          */
1110         if (IS_ERR_VALUE(ret))
1111                 return 0;
1112
1113         /*
1114          * Do not try to align to THP boundary if allocation at the address
1115          * hint succeeds.
1116          */
1117         if (ret == addr)
1118                 return addr;
1119
1120         off_sub = (off - ret) & (size - 1);
1121
1122         if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub)
1123                 return ret + size;
1124
1125         ret += off_sub;
1126         return ret;
1127 }
1128
1129 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
1130                 unsigned long len, unsigned long pgoff, unsigned long flags,
1131                 vm_flags_t vm_flags)
1132 {
1133         unsigned long ret;
1134         loff_t off = (loff_t)pgoff << PAGE_SHIFT;
1135
1136         ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
1137         if (ret)
1138                 return ret;
1139
1140         return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
1141                                             vm_flags);
1142 }
1143
1144 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
1145                 unsigned long len, unsigned long pgoff, unsigned long flags)
1146 {
1147         return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
1148 }
1149 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
1150
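/*
 * Install a freshly allocated PMD-sized anonymous folio at the faulting
 * address: charge it to the memcg, pre-allocate a page table for a
 * possible later split, zero the folio, map it and account it against
 * the mm.  Returns VM_FAULT_FALLBACK if the memcg charge fails so the
 * caller can fall back to small pages.
 */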
1151 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
1152                         struct page *page, gfp_t gfp)
1153 {
1154         struct vm_area_struct *vma = vmf->vma;
1155         struct folio *folio = page_folio(page);
1156         pgtable_t pgtable;
1157         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1158         vm_fault_t ret = 0;
1159
1160         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1161
1162         if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
1163                 folio_put(folio);
1164                 count_vm_event(THP_FAULT_FALLBACK);
1165                 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
1166                 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
1167                 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
1168                 return VM_FAULT_FALLBACK;
1169         }
1170         folio_throttle_swaprate(folio, gfp);
1171
1172         pgtable = pte_alloc_one(vma->vm_mm);
1173         if (unlikely(!pgtable)) {
1174                 ret = VM_FAULT_OOM;
1175                 goto release;
1176         }
1177
1178         folio_zero_user(folio, vmf->address);
1179         /*
1180          * The memory barrier inside __folio_mark_uptodate makes sure that
1181          * folio_zero_user writes become visible before the set_pmd_at()
1182          * write.
1183          */
1184         __folio_mark_uptodate(folio);
1185
1186         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1187         if (unlikely(!pmd_none(*vmf->pmd))) {
1188                 goto unlock_release;
1189         } else {
1190                 pmd_t entry;
1191
1192                 ret = check_stable_address_space(vma->vm_mm);
1193                 if (ret)
1194                         goto unlock_release;
1195
1196                 /* Deliver the page fault to userland */
1197                 if (userfaultfd_missing(vma)) {
1198                         spin_unlock(vmf->ptl);
1199                         folio_put(folio);
1200                         pte_free(vma->vm_mm, pgtable);
1201                         ret = handle_userfault(vmf, VM_UFFD_MISSING);
1202                         VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1203                         return ret;
1204                 }
1205
1206                 entry = mk_huge_pmd(page, vma->vm_page_prot);
1207                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1208                 folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
1209                 folio_add_lru_vma(folio, vma);
1210                 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1211                 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
1212                 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1213                 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1214                 mm_inc_nr_ptes(vma->vm_mm);
1215                 deferred_split_folio(folio, false);
1216                 spin_unlock(vmf->ptl);
1217                 count_vm_event(THP_FAULT_ALLOC);
1218                 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
1219                 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
1220         }
1221
1222         return 0;
1223 unlock_release:
1224         spin_unlock(vmf->ptl);
1225 release:
1226         if (pgtable)
1227                 pte_free(vma->vm_mm, pgtable);
1228         folio_put(folio);
1229         return ret;
1230
1231 }
1232
1233 /*
1234  * always: directly stall for all thp allocations
1235  * defer: wake kswapd and fail if not immediately available
1236  * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
1237  *                fail if not immediately available
1238  * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
1239  *          available
1240  * never: never stall for any thp allocation
1241  */
1242 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
1243 {
1244         const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
1245
1246         /* Always do synchronous compaction */
1247         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
1248                 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
1249
1250         /* Kick kcompactd and fail quickly */
1251         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
1252                 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
1253
1254         /* Synchronous compaction if madvised, otherwise kick kcompactd */
1255         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
1256                 return GFP_TRANSHUGE_LIGHT |
1257                         (vma_madvised ? __GFP_DIRECT_RECLAIM :
1258                                         __GFP_KSWAPD_RECLAIM);
1259
1260         /* Only do synchronous compaction if madvised */
1261         if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
1262                 return GFP_TRANSHUGE_LIGHT |
1263                        (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
1264
1265         return GFP_TRANSHUGE_LIGHT;
1266 }
1267
1268 /* Caller must hold page table lock. */
1269 static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
1270                 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
1271                 struct folio *zero_folio)
1272 {
1273         pmd_t entry;
1274         if (!pmd_none(*pmd))
1275                 return;
1276         entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
1277         entry = pmd_mkhuge(entry);
1278         pgtable_trans_huge_deposit(mm, pmd, pgtable);
1279         set_pmd_at(mm, haddr, pmd, entry);
1280         mm_inc_nr_ptes(mm);
1281 }
1282
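/*
 * Handle an anonymous fault at PMD granularity: use the huge zero folio
 * for read faults when allowed, otherwise allocate a PMD-sized folio and
 * map it via __do_huge_pmd_anonymous_page().  Returns VM_FAULT_FALLBACK
 * when the VMA is unsuitable or the allocation fails.
 */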
1283 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1284 {
1285         struct vm_area_struct *vma = vmf->vma;
1286         gfp_t gfp;
1287         struct folio *folio;
1288         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1289         vm_fault_t ret;
1290
1291         if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
1292                 return VM_FAULT_FALLBACK;
1293         ret = vmf_anon_prepare(vmf);
1294         if (ret)
1295                 return ret;
1296         khugepaged_enter_vma(vma, vma->vm_flags);
1297
1298         if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1299                         !mm_forbids_zeropage(vma->vm_mm) &&
1300                         transparent_hugepage_use_zero_page()) {
1301                 pgtable_t pgtable;
1302                 struct folio *zero_folio;
1303                 vm_fault_t ret;
1304
1305                 pgtable = pte_alloc_one(vma->vm_mm);
1306                 if (unlikely(!pgtable))
1307                         return VM_FAULT_OOM;
1308                 zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
1309                 if (unlikely(!zero_folio)) {
1310                         pte_free(vma->vm_mm, pgtable);
1311                         count_vm_event(THP_FAULT_FALLBACK);
1312                         return VM_FAULT_FALLBACK;
1313                 }
1314                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1315                 ret = 0;
1316                 if (pmd_none(*vmf->pmd)) {
1317                         ret = check_stable_address_space(vma->vm_mm);
1318                         if (ret) {
1319                                 spin_unlock(vmf->ptl);
1320                                 pte_free(vma->vm_mm, pgtable);
1321                         } else if (userfaultfd_missing(vma)) {
1322                                 spin_unlock(vmf->ptl);
1323                                 pte_free(vma->vm_mm, pgtable);
1324                                 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1325                                 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1326                         } else {
1327                                 set_huge_zero_folio(pgtable, vma->vm_mm, vma,
1328                                                    haddr, vmf->pmd, zero_folio);
1329                                 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1330                                 spin_unlock(vmf->ptl);
1331                         }
1332                 } else {
1333                         spin_unlock(vmf->ptl);
1334                         pte_free(vma->vm_mm, pgtable);
1335                 }
1336                 return ret;
1337         }
1338         gfp = vma_thp_gfp_mask(vma);
1339         folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
1340         if (unlikely(!folio)) {
1341                 count_vm_event(THP_FAULT_FALLBACK);
1342                 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
1343                 return VM_FAULT_FALLBACK;
1344         }
1345         return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
1346 }
1347
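/*
 * Map @pfn at @addr with a huge (devmap or special) PMD entry, or, on a
 * write to an already present mapping of the same pfn, just refresh the
 * access/dirty bits.  The optional @pgtable is deposited for
 * architectures that need one backing a huge PMD.
 */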
1348 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
1349                 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
1350                 pgtable_t pgtable)
1351 {
1352         struct mm_struct *mm = vma->vm_mm;
1353         pmd_t entry;
1354         spinlock_t *ptl;
1355
1356         ptl = pmd_lock(mm, pmd);
1357         if (!pmd_none(*pmd)) {
1358                 if (write) {
1359                         if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
1360                                 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
1361                                 goto out_unlock;
1362                         }
1363                         entry = pmd_mkyoung(*pmd);
1364                         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1365                         if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1366                                 update_mmu_cache_pmd(vma, addr, pmd);
1367                 }
1368
1369                 goto out_unlock;
1370         }
1371
1372         entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
1373         if (pfn_t_devmap(pfn))
1374                 entry = pmd_mkdevmap(entry);
1375         else
1376                 entry = pmd_mkspecial(entry);
1377         if (write) {
1378                 entry = pmd_mkyoung(pmd_mkdirty(entry));
1379                 entry = maybe_pmd_mkwrite(entry, vma);
1380         }
1381
1382         if (pgtable) {
1383                 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1384                 mm_inc_nr_ptes(mm);
1385                 pgtable = NULL;
1386         }
1387
1388         set_pmd_at(mm, addr, pmd, entry);
1389         update_mmu_cache_pmd(vma, addr, pmd);
1390
1391 out_unlock:
1392         spin_unlock(ptl);
1393         if (pgtable)
1394                 pte_free(mm, pgtable);
1395 }
1396
1397 /**
1398  * vmf_insert_pfn_pmd - insert a pmd size pfn
1399  * @vmf: Structure describing the fault
1400  * @pfn: pfn to insert
1401  * @write: whether it's a write fault
1402  *
1403  * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1404  *
1405  * Return: vm_fault_t value.
1406  */
1407 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
1408 {
1409         unsigned long addr = vmf->address & PMD_MASK;
1410         struct vm_area_struct *vma = vmf->vma;
1411         pgprot_t pgprot = vma->vm_page_prot;
1412         pgtable_t pgtable = NULL;
1413
1414         /*
1415          * If we had pmd_special, we could avoid all these restrictions,
1416          * but we need to be consistent with PTEs and architectures that
1417          * can't support a 'special' bit.
1418          */
1419         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1420                         !pfn_t_devmap(pfn));
1421         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1422                                                 (VM_PFNMAP|VM_MIXEDMAP));
1423         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1424
1425         if (addr < vma->vm_start || addr >= vma->vm_end)
1426                 return VM_FAULT_SIGBUS;
1427
1428         if (arch_needs_pgtable_deposit()) {
1429                 pgtable = pte_alloc_one(vma->vm_mm);
1430                 if (!pgtable)
1431                         return VM_FAULT_OOM;
1432         }
1433
1434         track_pfn_insert(vma, &pgprot, pfn);
1435
1436         insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
1437         return VM_FAULT_NOPAGE;
1438 }
1439 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
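/*
 * Editorial sketch (not part of this file): one plausible way a device
 * driver's ->huge_fault() handler might use vmf_insert_pfn_pmd() above,
 * loosely modelled on DAX-style device mappings.  The "my_dev" structure,
 * its base_phys field and the handler name are hypothetical; only
 * vmf_insert_pfn_pmd(), phys_to_pfn_t() and the other mm helpers named
 * here are real kernel APIs.
 *
 *	static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf,
 *					    unsigned int order)
 *	{
 *		struct my_dev *dev = vmf->vma->vm_private_data;	// hypothetical
 *		unsigned long pgoff = linear_page_index(vmf->vma,
 *						vmf->address & PMD_MASK);
 *		pfn_t pfn = phys_to_pfn_t(dev->base_phys +
 *					  ((phys_addr_t)pgoff << PAGE_SHIFT),
 *					  PFN_DEV | PFN_MAP);
 *
 *		if (order != HPAGE_PMD_ORDER)
 *			return VM_FAULT_FALLBACK;
 *		return vmf_insert_pfn_pmd(vmf, pfn,
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */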
1440
1441 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1442 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
1443 {
1444         if (likely(vma->vm_flags & VM_WRITE))
1445                 pud = pud_mkwrite(pud);
1446         return pud;
1447 }
1448
1449 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
1450                 pud_t *pud, pfn_t pfn, bool write)
1451 {
1452         struct mm_struct *mm = vma->vm_mm;
1453         pgprot_t prot = vma->vm_page_prot;
1454         pud_t entry;
1455         spinlock_t *ptl;
1456
1457         ptl = pud_lock(mm, pud);
1458         if (!pud_none(*pud)) {
1459                 if (write) {
1460                         if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn)))
1461                                 goto out_unlock;
1462                         entry = pud_mkyoung(*pud);
1463                         entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1464                         if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1465                                 update_mmu_cache_pud(vma, addr, pud);
1466                 }
1467                 goto out_unlock;
1468         }
1469
1470         entry = pud_mkhuge(pfn_t_pud(pfn, prot));
1471         if (pfn_t_devmap(pfn))
1472                 entry = pud_mkdevmap(entry);
1473         else
1474                 entry = pud_mkspecial(entry);
1475         if (write) {
1476                 entry = pud_mkyoung(pud_mkdirty(entry));
1477                 entry = maybe_pud_mkwrite(entry, vma);
1478         }
1479         set_pud_at(mm, addr, pud, entry);
1480         update_mmu_cache_pud(vma, addr, pud);
1481
1482 out_unlock:
1483         spin_unlock(ptl);
1484 }
1485
1486 /**
1487  * vmf_insert_pfn_pud - insert a pud size pfn
1488  * @vmf: Structure describing the fault
1489  * @pfn: pfn to insert
1490  * @write: whether it's a write fault
1491  *
1492  * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
1493  *
1494  * Return: vm_fault_t value.
1495  */
1496 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
1497 {
1498         unsigned long addr = vmf->address & PUD_MASK;
1499         struct vm_area_struct *vma = vmf->vma;
1500         pgprot_t pgprot = vma->vm_page_prot;
1501
1502         /*
1503          * If we had pud_special, we could avoid all these restrictions,
1504          * but we need to be consistent with PTEs and architectures that
1505          * can't support a 'special' bit.
1506          */
1507         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1508                         !pfn_t_devmap(pfn));
1509         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1510                                                 (VM_PFNMAP|VM_MIXEDMAP));
1511         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1512
1513         if (addr < vma->vm_start || addr >= vma->vm_end)
1514                 return VM_FAULT_SIGBUS;
1515
1516         track_pfn_insert(vma, &pgprot, pfn);
1517
1518         insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
1519         return VM_FAULT_NOPAGE;
1520 }
1521 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
1522 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1523
1524 void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1525                pmd_t *pmd, bool write)
1526 {
1527         pmd_t _pmd;
1528
1529         _pmd = pmd_mkyoung(*pmd);
1530         if (write)
1531                 _pmd = pmd_mkdirty(_pmd);
1532         if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1533                                   pmd, _pmd, write))
1534                 update_mmu_cache_pmd(vma, addr, pmd);
1535 }
1536
1537 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1538                 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
1539 {
1540         unsigned long pfn = pmd_pfn(*pmd);
1541         struct mm_struct *mm = vma->vm_mm;
1542         struct page *page;
1543         int ret;
1544
1545         assert_spin_locked(pmd_lockptr(mm, pmd));
1546
1547         if (flags & FOLL_WRITE && !pmd_write(*pmd))
1548                 return NULL;
1549
1550         if (pmd_present(*pmd) && pmd_devmap(*pmd))
1551                 /* pass */;
1552         else
1553                 return NULL;
1554
1555         if (flags & FOLL_TOUCH)
1556                 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1557
1558         /*
1559          * device mapped pages can only be returned if the
1560          * caller will manage the page reference count.
1561          */
1562         if (!(flags & (FOLL_GET | FOLL_PIN)))
1563                 return ERR_PTR(-EEXIST);
1564
1565         pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1566         *pgmap = get_dev_pagemap(pfn, *pgmap);
1567         if (!*pgmap)
1568                 return ERR_PTR(-EFAULT);
1569         page = pfn_to_page(pfn);
1570         ret = try_grab_folio(page_folio(page), 1, flags);
1571         if (ret)
1572                 page = ERR_PTR(ret);
1573
1574         return page;
1575 }
1576
1577 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1578                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1579                   struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1580 {
1581         spinlock_t *dst_ptl, *src_ptl;
1582         struct page *src_page;
1583         struct folio *src_folio;
1584         pmd_t pmd;
1585         pgtable_t pgtable = NULL;
1586         int ret = -ENOMEM;
1587
1588         pmd = pmdp_get_lockless(src_pmd);
1589         if (unlikely(pmd_special(pmd))) {
1590                 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1591                 src_ptl = pmd_lockptr(src_mm, src_pmd);
1592                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1593                 /*
1594                  * No need to recheck the pmd, it can't change with write
1595                  * mmap lock held here.
1596                  *
1597                  * Meanwhile, make sure this is not a CoW VMA with a writable
1598                  * mapping; otherwise either the anon page wrongly had the
1599                  * special bit applied, or we made the PRIVATE mapping able
1600                  * to wrongly write to the backing MMIO.
1601                  */
1602                 VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
1603                 goto set_pmd;
1604         }
1605
1606         /* Skip if it can be refilled on fault */
1607         if (!vma_is_anonymous(dst_vma))
1608                 return 0;
1609
1610         pgtable = pte_alloc_one(dst_mm);
1611         if (unlikely(!pgtable))
1612                 goto out;
1613
1614         dst_ptl = pmd_lock(dst_mm, dst_pmd);
1615         src_ptl = pmd_lockptr(src_mm, src_pmd);
1616         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1617
1618         ret = -EAGAIN;
1619         pmd = *src_pmd;
1620
1621 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1622         if (unlikely(is_swap_pmd(pmd))) {
1623                 swp_entry_t entry = pmd_to_swp_entry(pmd);
1624
1625                 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1626                 if (!is_readable_migration_entry(entry)) {
1627                         entry = make_readable_migration_entry(
1628                                                         swp_offset(entry));
1629                         pmd = swp_entry_to_pmd(entry);
1630                         if (pmd_swp_soft_dirty(*src_pmd))
1631                                 pmd = pmd_swp_mksoft_dirty(pmd);
1632                         if (pmd_swp_uffd_wp(*src_pmd))
1633                                 pmd = pmd_swp_mkuffd_wp(pmd);
1634                         set_pmd_at(src_mm, addr, src_pmd, pmd);
1635                 }
1636                 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1637                 mm_inc_nr_ptes(dst_mm);
1638                 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1639                 if (!userfaultfd_wp(dst_vma))
1640                         pmd = pmd_swp_clear_uffd_wp(pmd);
1641                 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1642                 ret = 0;
1643                 goto out_unlock;
1644         }
1645 #endif
1646
1647         if (unlikely(!pmd_trans_huge(pmd))) {
1648                 pte_free(dst_mm, pgtable);
1649                 goto out_unlock;
1650         }
1651         /*
1652          * When the page table lock is held, the huge zero pmd should not be
1653          * under splitting, since we don't split the page itself, only the pmd
1654          * into a page table.
1655          */
1656         if (is_huge_zero_pmd(pmd)) {
1657                 /*
1658                  * mm_get_huge_zero_folio() will never allocate a new
1659                  * folio here, since we already have a zero page to
1660                  * copy. It just takes a reference.
1661                  */
1662                 mm_get_huge_zero_folio(dst_mm);
1663                 goto out_zero_page;
1664         }
1665
1666         src_page = pmd_page(pmd);
1667         VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1668         src_folio = page_folio(src_page);
1669
1670         folio_get(src_folio);
1671         if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
1672                 /* The page may be pinned: split and retry the fault on PTEs. */
1673                 folio_put(src_folio);
1674                 pte_free(dst_mm, pgtable);
1675                 spin_unlock(src_ptl);
1676                 spin_unlock(dst_ptl);
1677                 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
1678                 return -EAGAIN;
1679         }
1680         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1681 out_zero_page:
1682         mm_inc_nr_ptes(dst_mm);
1683         pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1684         pmdp_set_wrprotect(src_mm, addr, src_pmd);
1685         if (!userfaultfd_wp(dst_vma))
1686                 pmd = pmd_clear_uffd_wp(pmd);
1687         pmd = pmd_wrprotect(pmd);
1688 set_pmd:
1689         pmd = pmd_mkold(pmd);
1690         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1691
1692         ret = 0;
1693 out_unlock:
1694         spin_unlock(src_ptl);
1695         spin_unlock(dst_ptl);
1696 out:
1697         return ret;
1698 }
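/*
 * Editorial sketch (not part of this file): a simplified, illustrative view
 * of how a caller such as copy_pmd_range() in mm/memory.c consumes the
 * return values of copy_huge_pmd() above; the real caller differs in
 * details.  -ENOMEM aborts the copy, 0 means the huge pmd was copied, and
 * -EAGAIN means the pmd was split and copying falls back to the pte level.
 *
 *	if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd) ||
 *	    pmd_devmap(*src_pmd)) {
 *		int err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
 *					addr, dst_vma, src_vma);
 *		if (err == -ENOMEM)
 *			return -ENOMEM;
 *		if (!err)
 *			continue;
 *		// otherwise fall through and copy at the pte level
 *	}
 */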
1699
1700 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1701 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1702                pud_t *pud, bool write)
1703 {
1704         pud_t _pud;
1705
1706         _pud = pud_mkyoung(*pud);
1707         if (write)
1708                 _pud = pud_mkdirty(_pud);
1709         if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1710                                   pud, _pud, write))
1711                 update_mmu_cache_pud(vma, addr, pud);
1712 }
1713
1714 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1715                   pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1716                   struct vm_area_struct *vma)
1717 {
1718         spinlock_t *dst_ptl, *src_ptl;
1719         pud_t pud;
1720         int ret;
1721
1722         dst_ptl = pud_lock(dst_mm, dst_pud);
1723         src_ptl = pud_lockptr(src_mm, src_pud);
1724         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1725
1726         ret = -EAGAIN;
1727         pud = *src_pud;
1728         if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1729                 goto out_unlock;
1730
1731         /*
1732          * TODO: once we support anonymous pages, use
1733          * folio_try_dup_anon_rmap_*() and split if duplicating fails.
1734          */
1735         if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
1736                 pudp_set_wrprotect(src_mm, addr, src_pud);
1737                 pud = pud_wrprotect(pud);
1738         }
1739         pud = pud_mkold(pud);
1740         set_pud_at(dst_mm, addr, dst_pud, pud);
1741
1742         ret = 0;
1743 out_unlock:
1744         spin_unlock(src_ptl);
1745         spin_unlock(dst_ptl);
1746         return ret;
1747 }
1748
1749 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1750 {
1751         bool write = vmf->flags & FAULT_FLAG_WRITE;
1752
1753         vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1754         if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1755                 goto unlock;
1756
1757         touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1758 unlock:
1759         spin_unlock(vmf->ptl);
1760 }
1761 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1762
1763 void huge_pmd_set_accessed(struct vm_fault *vmf)
1764 {
1765         bool write = vmf->flags & FAULT_FLAG_WRITE;
1766
1767         vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1768         if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1769                 goto unlock;
1770
1771         touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1772
1773 unlock:
1774         spin_unlock(vmf->ptl);
1775 }
1776
1777 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1778 {
1779         const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1780         struct vm_area_struct *vma = vmf->vma;
1781         struct folio *folio;
1782         struct page *page;
1783         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1784         pmd_t orig_pmd = vmf->orig_pmd;
1785
1786         vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1787         VM_BUG_ON_VMA(!vma->anon_vma, vma);
1788
1789         if (is_huge_zero_pmd(orig_pmd))
1790                 goto fallback;
1791
1792         spin_lock(vmf->ptl);
1793
1794         if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1795                 spin_unlock(vmf->ptl);
1796                 return 0;
1797         }
1798
1799         page = pmd_page(orig_pmd);
1800         folio = page_folio(page);
1801         VM_BUG_ON_PAGE(!PageHead(page), page);
1802
1803         /* Early check when only holding the PT lock. */
1804         if (PageAnonExclusive(page))
1805                 goto reuse;
1806
1807         if (!folio_trylock(folio)) {
1808                 folio_get(folio);
1809                 spin_unlock(vmf->ptl);
1810                 folio_lock(folio);
1811                 spin_lock(vmf->ptl);
1812                 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1813                         spin_unlock(vmf->ptl);
1814                         folio_unlock(folio);
1815                         folio_put(folio);
1816                         return 0;
1817                 }
1818                 folio_put(folio);
1819         }
1820
1821         /* Recheck after temporarily dropping the PT lock. */
1822         if (PageAnonExclusive(page)) {
1823                 folio_unlock(folio);
1824                 goto reuse;
1825         }
1826
1827         /*
1828          * See do_wp_page(): we can only reuse the folio exclusively if
1829          * there are no additional references. Note that we always drain
1830          * the LRU cache immediately after adding a THP.
1831          */
1832         if (folio_ref_count(folio) >
1833                         1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1834                 goto unlock_fallback;
1835         if (folio_test_swapcache(folio))
1836                 folio_free_swap(folio);
1837         if (folio_ref_count(folio) == 1) {
1838                 pmd_t entry;
1839
1840                 folio_move_anon_rmap(folio, vma);
1841                 SetPageAnonExclusive(page);
1842                 folio_unlock(folio);
1843 reuse:
1844                 if (unlikely(unshare)) {
1845                         spin_unlock(vmf->ptl);
1846                         return 0;
1847                 }
1848                 entry = pmd_mkyoung(orig_pmd);
1849                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1850                 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1851                         update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1852                 spin_unlock(vmf->ptl);
1853                 return 0;
1854         }
1855
1856 unlock_fallback:
1857         folio_unlock(folio);
1858         spin_unlock(vmf->ptl);
1859 fallback:
1860         __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1861         return VM_FAULT_FALLBACK;
1862 }
1863
1864 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1865                                            unsigned long addr, pmd_t pmd)
1866 {
1867         struct page *page;
1868
1869         if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1870                 return false;
1871
1872         /* Don't touch entries that are not even readable (NUMA hinting). */
1873         if (pmd_protnone(pmd))
1874                 return false;
1875
1876         /* Do we need write faults for softdirty tracking? */
1877         if (pmd_needs_soft_dirty_wp(vma, pmd))
1878                 return false;
1879
1880         /* Do we need write faults for uffd-wp tracking? */
1881         if (userfaultfd_huge_pmd_wp(vma, pmd))
1882                 return false;
1883
1884         if (!(vma->vm_flags & VM_SHARED)) {
1885                 /* See can_change_pte_writable(). */
1886                 page = vm_normal_page_pmd(vma, addr, pmd);
1887                 return page && PageAnon(page) && PageAnonExclusive(page);
1888         }
1889
1890         /* See can_change_pte_writable(). */
1891         return pmd_dirty(pmd);
1892 }
1893
1894 /* NUMA hinting page fault entry point for trans huge pmds */
1895 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1896 {
1897         struct vm_area_struct *vma = vmf->vma;
1898         struct folio *folio;
1899         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1900         int nid = NUMA_NO_NODE;
1901         int target_nid, last_cpupid;
1902         pmd_t pmd, old_pmd;
1903         bool writable = false;
1904         int flags = 0;
1905
1906         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1907         old_pmd = pmdp_get(vmf->pmd);
1908
1909         if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
1910                 spin_unlock(vmf->ptl);
1911                 return 0;
1912         }
1913
1914         pmd = pmd_modify(old_pmd, vma->vm_page_prot);
1915
1916         /*
1917          * Detect now whether the PMD could be writable; this information
1918          * is only valid while holding the PT lock.
1919          */
1920         writable = pmd_write(pmd);
1921         if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1922             can_change_pmd_writable(vma, vmf->address, pmd))
1923                 writable = true;
1924
1925         folio = vm_normal_folio_pmd(vma, haddr, pmd);
1926         if (!folio)
1927                 goto out_map;
1928
1929         nid = folio_nid(folio);
1930
1931         target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
1932                                         &last_cpupid);
1933         if (target_nid == NUMA_NO_NODE)
1934                 goto out_map;
1935         if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
1936                 flags |= TNF_MIGRATE_FAIL;
1937                 goto out_map;
1938         }
1939         /* The folio is isolated and isolation code holds a folio reference. */
1940         spin_unlock(vmf->ptl);
1941         writable = false;
1942
1943         if (!migrate_misplaced_folio(folio, vma, target_nid)) {
1944                 flags |= TNF_MIGRATED;
1945                 nid = target_nid;
1946                 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
1947                 return 0;
1948         }
1949
1950         flags |= TNF_MIGRATE_FAIL;
1951         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1952         if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
1953                 spin_unlock(vmf->ptl);
1954                 return 0;
1955         }
1956 out_map:
1957         /* Restore the PMD */
1958         pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
1959         pmd = pmd_mkyoung(pmd);
1960         if (writable)
1961                 pmd = pmd_mkwrite(pmd, vma);
1962         set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1963         update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1964         spin_unlock(vmf->ptl);
1965
1966         if (nid != NUMA_NO_NODE)
1967                 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
1968         return 0;
1969 }
1970
1971 /*
1972  * Return true if we do MADV_FREE successfully on the entire pmd page.
1973  * Otherwise, return false.
1974  */
1975 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1976                 pmd_t *pmd, unsigned long addr, unsigned long next)
1977 {
1978         spinlock_t *ptl;
1979         pmd_t orig_pmd;
1980         struct folio *folio;
1981         struct mm_struct *mm = tlb->mm;
1982         bool ret = false;
1983
1984         tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1985
1986         ptl = pmd_trans_huge_lock(pmd, vma);
1987         if (!ptl)
1988                 goto out_unlocked;
1989
1990         orig_pmd = *pmd;
1991         if (is_huge_zero_pmd(orig_pmd))
1992                 goto out;
1993
1994         if (unlikely(!pmd_present(orig_pmd))) {
1995                 VM_BUG_ON(thp_migration_supported() &&
1996                                   !is_pmd_migration_entry(orig_pmd));
1997                 goto out;
1998         }
1999
2000         folio = pmd_folio(orig_pmd);
2001         /*
2002          * If other processes are mapping this folio, we can't discard
2003          * the folio unless they all do MADV_FREE, so let's skip the folio.
2004          */
2005         if (folio_likely_mapped_shared(folio))
2006                 goto out;
2007
2008         if (!folio_trylock(folio))
2009                 goto out;
2010
2011         /*
2012          * If the user wants to discard only part of the THP, split it so that
2013          * MADV_FREE will deactivate only those pages.
2014          */
2015         if (next - addr != HPAGE_PMD_SIZE) {
2016                 folio_get(folio);
2017                 spin_unlock(ptl);
2018                 split_folio(folio);
2019                 folio_unlock(folio);
2020                 folio_put(folio);
2021                 goto out_unlocked;
2022         }
2023
2024         if (folio_test_dirty(folio))
2025                 folio_clear_dirty(folio);
2026         folio_unlock(folio);
2027
2028         if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
2029                 pmdp_invalidate(vma, addr, pmd);
2030                 orig_pmd = pmd_mkold(orig_pmd);
2031                 orig_pmd = pmd_mkclean(orig_pmd);
2032
2033                 set_pmd_at(mm, addr, pmd, orig_pmd);
2034                 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2035         }
2036
2037         folio_mark_lazyfree(folio);
2038         ret = true;
2039 out:
2040         spin_unlock(ptl);
2041 out_unlocked:
2042         return ret;
2043 }
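/*
 * Editorial sketch (not part of this file): how the path above is reached
 * from userspace.  A 2 MiB-aligned, THP-backed allocation is assumed;
 * madvise(MADV_FREE) over the full PMD range lets madvise_free_huge_pmd()
 * lazy-free the whole folio, while a sub-PMD range takes the split_folio()
 * branch above instead.
 *
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 2UL << 20;				// one PMD on x86-64
 *	void *buf = aligned_alloc(len, len);		// assume THP-backed
 *	...
 *	madvise(buf, len, MADV_FREE);			// whole-PMD lazy free
 *	madvise(buf, len / 2, MADV_FREE);		// sub-PMD: folio is split
 */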
2044
2045 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
2046 {
2047         pgtable_t pgtable;
2048
2049         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2050         pte_free(mm, pgtable);
2051         mm_dec_nr_ptes(mm);
2052 }
2053
2054 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2055                  pmd_t *pmd, unsigned long addr)
2056 {
2057         pmd_t orig_pmd;
2058         spinlock_t *ptl;
2059
2060         tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2061
2062         ptl = __pmd_trans_huge_lock(pmd, vma);
2063         if (!ptl)
2064                 return 0;
2065         /*
2066          * For architectures like ppc64 we look at the deposited pgtable
2067          * when calling pmdp_huge_get_and_clear. So do the
2068          * pgtable_trans_huge_withdraw after finishing the pmdp-related
2069          * operations.
2070          */
2071         orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
2072                                                 tlb->fullmm);
2073         arch_check_zapped_pmd(vma, orig_pmd);
2074         tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2075         if (vma_is_special_huge(vma)) {
2076                 if (arch_needs_pgtable_deposit())
2077                         zap_deposited_table(tlb->mm, pmd);
2078                 spin_unlock(ptl);
2079         } else if (is_huge_zero_pmd(orig_pmd)) {
2080                 zap_deposited_table(tlb->mm, pmd);
2081                 spin_unlock(ptl);
2082         } else {
2083                 struct folio *folio = NULL;
2084                 int flush_needed = 1;
2085
2086                 if (pmd_present(orig_pmd)) {
2087                         struct page *page = pmd_page(orig_pmd);
2088
2089                         folio = page_folio(page);
2090                         folio_remove_rmap_pmd(folio, page, vma);
2091                         WARN_ON_ONCE(folio_mapcount(folio) < 0);
2092                         VM_BUG_ON_PAGE(!PageHead(page), page);
2093                 } else if (thp_migration_supported()) {
2094                         swp_entry_t entry;
2095
2096                         VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
2097                         entry = pmd_to_swp_entry(orig_pmd);
2098                         folio = pfn_swap_entry_folio(entry);
2099                         flush_needed = 0;
2100                 } else
2101                         WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
2102
2103                 if (folio_test_anon(folio)) {
2104                         zap_deposited_table(tlb->mm, pmd);
2105                         add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
2106                 } else {
2107                         if (arch_needs_pgtable_deposit())
2108                                 zap_deposited_table(tlb->mm, pmd);
2109                         add_mm_counter(tlb->mm, mm_counter_file(folio),
2110                                        -HPAGE_PMD_NR);
2111                 }
2112
2113                 spin_unlock(ptl);
2114                 if (flush_needed)
2115                         tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
2116         }
2117         return 1;
2118 }
2119
2120 #ifndef pmd_move_must_withdraw
2121 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
2122                                          spinlock_t *old_pmd_ptl,
2123                                          struct vm_area_struct *vma)
2124 {
2125         /*
2126          * With the split pmd lock we also need to move the preallocated
2127          * PTE page table if new_pmd is on a different PMD page table.
2128          *
2129          * We also don't deposit and withdraw tables for file pages.
2130          */
2131         return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
2132 }
2133 #endif
2134
2135 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
2136 {
2137 #ifdef CONFIG_MEM_SOFT_DIRTY
2138         if (unlikely(is_pmd_migration_entry(pmd)))
2139                 pmd = pmd_swp_mksoft_dirty(pmd);
2140         else if (pmd_present(pmd))
2141                 pmd = pmd_mksoft_dirty(pmd);
2142 #endif
2143         return pmd;
2144 }
2145
2146 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
2147                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
2148 {
2149         spinlock_t *old_ptl, *new_ptl;
2150         pmd_t pmd;
2151         struct mm_struct *mm = vma->vm_mm;
2152         bool force_flush = false;
2153
2154         /*
2155          * The destination pmd shouldn't be established; free_pgtables()
2156          * should have released it. But move_page_tables() might have already
2157          * inserted a page table if racing against a shmem/file collapse.
2158          */
2159         if (!pmd_none(*new_pmd)) {
2160                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
2161                 return false;
2162         }
2163
2164         /*
2165          * We don't have to worry about the ordering of src and dst
2166          * ptlocks because exclusive mmap_lock prevents deadlock.
2167          */
2168         old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
2169         if (old_ptl) {
2170                 new_ptl = pmd_lockptr(mm, new_pmd);
2171                 if (new_ptl != old_ptl)
2172                         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
2173                 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
2174                 if (pmd_present(pmd))
2175                         force_flush = true;
2176                 VM_BUG_ON(!pmd_none(*new_pmd));
2177
2178                 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
2179                         pgtable_t pgtable;
2180                         pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2181                         pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
2182                 }
2183                 pmd = move_soft_dirty_pmd(pmd);
2184                 set_pmd_at(mm, new_addr, new_pmd, pmd);
2185                 if (force_flush)
2186                         flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
2187                 if (new_ptl != old_ptl)
2188                         spin_unlock(new_ptl);
2189                 spin_unlock(old_ptl);
2190                 return true;
2191         }
2192         return false;
2193 }
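/*
 * Editorial sketch (not part of this file): a userspace mremap() that can
 * reach move_huge_pmd() above, assuming "old" is a PMD-aligned, THP-backed
 * mapping and "new_addr" is likewise PMD-aligned; otherwise the kernel
 * falls back to moving page tables at a finer granularity.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	void *new = mremap(old, 2UL << 20, 2UL << 20,
 *			   MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
 */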
2194
2195 /*
2196  * Returns
2197  *  - 0 if PMD could not be locked
2198  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2199  *      or if prot_numa but THP migration is not supported
2200  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
2201  */
2202 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2203                     pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2204                     unsigned long cp_flags)
2205 {
2206         struct mm_struct *mm = vma->vm_mm;
2207         spinlock_t *ptl;
2208         pmd_t oldpmd, entry;
2209         bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
2210         bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2211         bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
2212         int ret = 1;
2213
2214         tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2215
2216         if (prot_numa && !thp_migration_supported())
2217                 return 1;
2218
2219         ptl = __pmd_trans_huge_lock(pmd, vma);
2220         if (!ptl)
2221                 return 0;
2222
2223 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2224         if (is_swap_pmd(*pmd)) {
2225                 swp_entry_t entry = pmd_to_swp_entry(*pmd);
2226                 struct folio *folio = pfn_swap_entry_folio(entry);
2227                 pmd_t newpmd;
2228
2229                 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
2230                 if (is_writable_migration_entry(entry)) {
2231                         /*
2232                          * A protection check is difficult, so
2233                          * just be safe and disable write.
2234                          */
2235                         if (folio_test_anon(folio))
2236                                 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2237                         else
2238                                 entry = make_readable_migration_entry(swp_offset(entry));
2239                         newpmd = swp_entry_to_pmd(entry);
2240                         if (pmd_swp_soft_dirty(*pmd))
2241                                 newpmd = pmd_swp_mksoft_dirty(newpmd);
2242                 } else {
2243                         newpmd = *pmd;
2244                 }
2245
2246                 if (uffd_wp)
2247                         newpmd = pmd_swp_mkuffd_wp(newpmd);
2248                 else if (uffd_wp_resolve)
2249                         newpmd = pmd_swp_clear_uffd_wp(newpmd);
2250                 if (!pmd_same(*pmd, newpmd))
2251                         set_pmd_at(mm, addr, pmd, newpmd);
2252                 goto unlock;
2253         }
2254 #endif
2255
2256         if (prot_numa) {
2257                 struct folio *folio;
2258                 bool toptier;
2259                 /*
2260                  * Avoid trapping faults against the zero page. The read-only
2261                  * data is likely to be read-cached on the local CPU and
2262                  * local/remote hits to the zero page are not interesting.
2263                  */
2264                 if (is_huge_zero_pmd(*pmd))
2265                         goto unlock;
2266
2267                 if (pmd_protnone(*pmd))
2268                         goto unlock;
2269
2270                 folio = pmd_folio(*pmd);
2271                 toptier = node_is_toptier(folio_nid(folio));
2272                 /*
2273                  * Skip scanning the top-tier node if normal NUMA
2274                  * balancing is disabled.
2275                  */
2276                 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
2277                     toptier)
2278                         goto unlock;
2279
2280                 if (folio_use_access_time(folio))
2281                         folio_xchg_access_time(folio,
2282                                                jiffies_to_msecs(jiffies));
2283         }
2284         /*
2285          * In the prot_numa case, we are under mmap_read_lock(mm). It's critical
2286          * not to clear the pmd intermittently, to avoid a race with MADV_DONTNEED,
2287          * which is also under mmap_read_lock(mm):
2288          *
2289          *      CPU0:                           CPU1:
2290          *                              change_huge_pmd(prot_numa=1)
2291          *                               pmdp_huge_get_and_clear_notify()
2292          * madvise_dontneed()
2293          *  zap_pmd_range()
2294          *   pmd_trans_huge(*pmd) == 0 (without ptl)
2295          *   // skip the pmd
2296          *                               set_pmd_at();
2297          *                               // pmd is re-established
2298          *
2299          * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
2300          * which may break userspace.
2301          *
2302          * pmdp_invalidate_ad() is required to make sure we don't miss
2303          * dirty/young flags set by hardware.
2304          */
2305         oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
2306
2307         entry = pmd_modify(oldpmd, newprot);
2308         if (uffd_wp)
2309                 entry = pmd_mkuffd_wp(entry);
2310         else if (uffd_wp_resolve)
2311                 /*
2312                  * Leave the write bit to be handled by PF interrupt
2313                  * Leave the write bit to be handled by the page fault
2314                  * handler, so that things like COW can be handled
2315                  * properly.
2316                 entry = pmd_clear_uffd_wp(entry);
2317
2318         /* See change_pte_range(). */
2319         if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2320             can_change_pmd_writable(vma, addr, entry))
2321                 entry = pmd_mkwrite(entry, vma);
2322
2323         ret = HPAGE_PMD_NR;
2324         set_pmd_at(mm, addr, pmd, entry);
2325
2326         if (huge_pmd_needs_flush(oldpmd, entry))
2327                 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
2328 unlock:
2329         spin_unlock(ptl);
2330         return ret;
2331 }
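/*
 * Editorial sketch (not part of this file): a simplified, illustrative view
 * of how a caller such as change_pmd_range() in mm/mprotect.c interprets the
 * return convention documented above; the accounting variables here are
 * placeholders and the real caller differs in details.
 *
 *	ret = change_huge_pmd(tlb, vma, pmd, addr, newprot, cp_flags);
 *	if (ret) {
 *		if (ret == HPAGE_PMD_NR)	// protections changed
 *			pages += HPAGE_PMD_NR;
 *		goto next;			// huge pmd was handled
 *	}
 *	// ret == 0: the huge pmd changed under us, fall through to the ptes
 */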
2332
2333 /*
2334  * Returns:
2335  *
2336  * - 0: if pud leaf changed from under us
2337  * - 1: if pud can be skipped
2338  * - HPAGE_PUD_NR: if pud was successfully processed
2339  */
2340 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2341 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2342                     pud_t *pudp, unsigned long addr, pgprot_t newprot,
2343                     unsigned long cp_flags)
2344 {
2345         struct mm_struct *mm = vma->vm_mm;
2346         pud_t oldpud, entry;
2347         spinlock_t *ptl;
2348
2349         tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
2350
2351         /* NUMA balancing doesn't apply to dax */
2352         if (cp_flags & MM_CP_PROT_NUMA)
2353                 return 1;
2354
2355         /*
2356          * Huge entries with userfault-wp only work with anonymous memory, while
2357          * we don't have anonymous PUDs yet.
2358          */
2359         if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
2360                 return 1;
2361
2362         ptl = __pud_trans_huge_lock(pudp, vma);
2363         if (!ptl)
2364                 return 0;
2365
2366         /*
2367          * Can't clear PUD or it can race with concurrent zapping.  See
2368          * change_huge_pmd().
2369          */
2370         oldpud = pudp_invalidate(vma, addr, pudp);
2371         entry = pud_modify(oldpud, newprot);
2372         set_pud_at(mm, addr, pudp, entry);
2373         tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
2374
2375         spin_unlock(ptl);
2376         return HPAGE_PUD_NR;
2377 }
2378 #endif
2379
2380 #ifdef CONFIG_USERFAULTFD
2381 /*
2382  * The PT lock for src_pmd and the dst_vma/src_vma locks (for reading) are taken
2383  * by the caller, but this function must release the page table lock before
2384  * returning. Just move the page from src_pmd to dst_pmd if possible.
2385  * Return zero if it succeeded in moving the page, -EAGAIN if it needs to be
2386  * repeated by the caller, or other errors in case of failure.
2387  */
2388 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2389                         struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2390                         unsigned long dst_addr, unsigned long src_addr)
2391 {
2392         pmd_t _dst_pmd, src_pmdval;
2393         struct page *src_page;
2394         struct folio *src_folio;
2395         struct anon_vma *src_anon_vma;
2396         spinlock_t *src_ptl, *dst_ptl;
2397         pgtable_t src_pgtable;
2398         struct mmu_notifier_range range;
2399         int err = 0;
2400
2401         src_pmdval = *src_pmd;
2402         src_ptl = pmd_lockptr(mm, src_pmd);
2403
2404         lockdep_assert_held(src_ptl);
2405         vma_assert_locked(src_vma);
2406         vma_assert_locked(dst_vma);
2407
2408         /* Sanity checks before the operation */
2409         if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2410             WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2411                 spin_unlock(src_ptl);
2412                 return -EINVAL;
2413         }
2414
2415         if (!pmd_trans_huge(src_pmdval)) {
2416                 spin_unlock(src_ptl);
2417                 if (is_pmd_migration_entry(src_pmdval)) {
2418                         pmd_migration_entry_wait(mm, &src_pmdval);
2419                         return -EAGAIN;
2420                 }
2421                 return -ENOENT;
2422         }
2423
2424         src_page = pmd_page(src_pmdval);
2425
2426         if (!is_huge_zero_pmd(src_pmdval)) {
2427                 if (unlikely(!PageAnonExclusive(src_page))) {
2428                         spin_unlock(src_ptl);
2429                         return -EBUSY;
2430                 }
2431
2432                 src_folio = page_folio(src_page);
2433                 folio_get(src_folio);
2434         } else
2435                 src_folio = NULL;
2436
2437         spin_unlock(src_ptl);
2438
2439         flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2440         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2441                                 src_addr + HPAGE_PMD_SIZE);
2442         mmu_notifier_invalidate_range_start(&range);
2443
2444         if (src_folio) {
2445                 folio_lock(src_folio);
2446
2447                 /*
2448                  * split_huge_page walks the anon_vma chain without the page
2449                  * lock. Serialize against it with the anon_vma lock; the page
2450                  * lock is not enough.
2451                  */
2452                 src_anon_vma = folio_get_anon_vma(src_folio);
2453                 if (!src_anon_vma) {
2454                         err = -EAGAIN;
2455                         goto unlock_folio;
2456                 }
2457                 anon_vma_lock_write(src_anon_vma);
2458         } else
2459                 src_anon_vma = NULL;
2460
2461         dst_ptl = pmd_lockptr(mm, dst_pmd);
2462         double_pt_lock(src_ptl, dst_ptl);
2463         if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2464                      !pmd_same(*dst_pmd, dst_pmdval))) {
2465                 err = -EAGAIN;
2466                 goto unlock_ptls;
2467         }
2468         if (src_folio) {
2469                 if (folio_maybe_dma_pinned(src_folio) ||
2470                     !PageAnonExclusive(&src_folio->page)) {
2471                         err = -EBUSY;
2472                         goto unlock_ptls;
2473                 }
2474
2475                 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2476                     WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2477                         err = -EBUSY;
2478                         goto unlock_ptls;
2479                 }
2480
2481                 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2482                 /* Folio got pinned from under us. Put it back and fail the move. */
2483                 if (folio_maybe_dma_pinned(src_folio)) {
2484                         set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2485                         err = -EBUSY;
2486                         goto unlock_ptls;
2487                 }
2488
2489                 folio_move_anon_rmap(src_folio, dst_vma);
2490                 src_folio->index = linear_page_index(dst_vma, dst_addr);
2491
2492                 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
2493                 /* Follow mremap() behavior and treat the entry as dirty after the move */
2494                 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2495         } else {
2496                 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2497                 _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
2498         }
2499         set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2500
2501         src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2502         pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2503 unlock_ptls:
2504         double_pt_unlock(src_ptl, dst_ptl);
2505         if (src_anon_vma) {
2506                 anon_vma_unlock_write(src_anon_vma);
2507                 put_anon_vma(src_anon_vma);
2508         }
2509 unlock_folio:
2510         /* unblock rmap walks */
2511         if (src_folio)
2512                 folio_unlock(src_folio);
2513         mmu_notifier_invalidate_range_end(&range);
2514         if (src_folio)
2515                 folio_put(src_folio);
2516         return err;
2517 }
2518 #endif /* CONFIG_USERFAULTFD */
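/*
 * Editorial sketch (not part of this file): the userspace side that ends up
 * in move_pages_huge_pmd() when a PMD-aligned, PMD-sized range is moved via
 * userfaultfd.  "uffd", "src" and "dst" are assumed to be an initialized
 * userfaultfd file descriptor and suitably registered, PMD-aligned mappings;
 * the struct fields follow the UFFDIO_MOVE uAPI as of this kernel.
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	struct uffdio_move move = {
 *		.src  = (unsigned long)src,
 *		.dst  = (unsigned long)dst,
 *		.len  = 2UL << 20,	// one PMD-sized THP on x86-64
 *		.mode = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_MOVE, &move) < 0)
 *		handle_error();		// -EAGAIN, -EBUSY etc. map to the paths above
 */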
2519
2520 /*
2521  * Returns the page table lock pointer if a given pmd maps a thp, NULL otherwise.
2522  *
2523  * Note that if it returns the page table lock pointer, this routine returns
2524  * without unlocking the page table lock, so callers must unlock it.
2525  */
2526 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2527 {
2528         spinlock_t *ptl;
2529         ptl = pmd_lock(vma->vm_mm, pmd);
2530         if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2531                         pmd_devmap(*pmd)))
2532                 return ptl;
2533         spin_unlock(ptl);
2534         return NULL;
2535 }
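/*
 * Editorial sketch (not part of this file): the calling pattern implied by
 * the comment above, mirroring zap_huge_pmd() and madvise_free_huge_pmd()
 * in this file.  The body of the critical section is a placeholder.
 *
 *	ptl = __pmd_trans_huge_lock(pmd, vma);
 *	if (!ptl)
 *		return 0;	// not a huge/swap pmd, fall back to the ptes
 *	// ... operate on *pmd while the page table lock is held ...
 *	spin_unlock(ptl);
 */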
2536
2537 /*
2538  * Returns the page table lock pointer if a given pud maps a thp, NULL otherwise.
2539  *
2540  * Note that if it returns the page table lock pointer, this routine returns
2541  * without unlocking the page table lock, so callers must unlock it.
2542  */
2543 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2544 {
2545         spinlock_t *ptl;
2546
2547         ptl = pud_lock(vma->vm_mm, pud);
2548         if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2549                 return ptl;
2550         spin_unlock(ptl);
2551         return NULL;
2552 }
2553
2554 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2555 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2556                  pud_t *pud, unsigned long addr)
2557 {
2558         spinlock_t *ptl;
2559         pud_t orig_pud;
2560
2561         ptl = __pud_trans_huge_lock(pud, vma);
2562         if (!ptl)
2563                 return 0;
2564
2565         orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2566         arch_check_zapped_pud(vma, orig_pud);
2567         tlb_remove_pud_tlb_entry(tlb, pud, addr);
2568         if (vma_is_special_huge(vma)) {
2569                 spin_unlock(ptl);
2570                 /* No zero page support yet */
2571         } else {
2572                 /* No support for anonymous PUD pages yet */
2573                 BUG();
2574         }
2575         return 1;
2576 }
2577
2578 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2579                 unsigned long haddr)
2580 {
2581         VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2582         VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2583         VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2584         VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2585
2586         count_vm_event(THP_SPLIT_PUD);
2587
2588         pudp_huge_clear_flush(vma, haddr, pud);
2589 }
2590
2591 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2592                 unsigned long address)
2593 {
2594         spinlock_t *ptl;
2595         struct mmu_notifier_range range;
2596
2597         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2598                                 address & HPAGE_PUD_MASK,
2599                                 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2600         mmu_notifier_invalidate_range_start(&range);
2601         ptl = pud_lock(vma->vm_mm, pud);
2602         if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2603                 goto out;
2604         __split_huge_pud_locked(vma, pud, range.start);
2605
2606 out:
2607         spin_unlock(ptl);
2608         mmu_notifier_invalidate_range_end(&range);
2609 }
2610 #else
2611 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2612                 unsigned long address)
2613 {
2614 }
2615 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2616
2617 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2618                 unsigned long haddr, pmd_t *pmd)
2619 {
2620         struct mm_struct *mm = vma->vm_mm;
2621         pgtable_t pgtable;
2622         pmd_t _pmd, old_pmd;
2623         unsigned long addr;
2624         pte_t *pte;
2625         int i;
2626
2627         /*
2628          * Leave the pmd empty until the ptes are filled. Note that it is fine
2629          * to delay the notification until mmu_notifier_invalidate_range_end(),
2630          * as we are replacing a write-protected zero pmd page with
2631          * write-protected zero pte pages.
2632          *
2633          * See Documentation/mm/mmu_notifier.rst
2634          */
2635         old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2636
2637         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2638         pmd_populate(mm, &_pmd, pgtable);
2639
2640         pte = pte_offset_map(&_pmd, haddr);
2641         VM_BUG_ON(!pte);
2642         for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2643                 pte_t entry;
2644
2645                 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
2646                 entry = pte_mkspecial(entry);
2647                 if (pmd_uffd_wp(old_pmd))
2648                         entry = pte_mkuffd_wp(entry);
2649                 VM_BUG_ON(!pte_none(ptep_get(pte)));
2650                 set_pte_at(mm, addr, pte, entry);
2651                 pte++;
2652         }
2653         pte_unmap(pte - 1);
2654         smp_wmb(); /* make pte visible before pmd */
2655         pmd_populate(mm, pmd, pgtable);
2656 }
2657
2658 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2659                 unsigned long haddr, bool freeze)
2660 {
2661         struct mm_struct *mm = vma->vm_mm;
2662         struct folio *folio;
2663         struct page *page;
2664         pgtable_t pgtable;
2665         pmd_t old_pmd, _pmd;
2666         bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2667         bool anon_exclusive = false, dirty = false;
2668         unsigned long addr;
2669         pte_t *pte;
2670         int i;
2671
2672         VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2673         VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2674         VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2675         VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2676                                 && !pmd_devmap(*pmd));
2677
2678         count_vm_event(THP_SPLIT_PMD);
2679
2680         if (!vma_is_anonymous(vma)) {
2681                 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2682                 /*
2683                  * We are going to unmap this huge page, so
2684                  * just go ahead and zap it.
2685                  */
2686                 if (arch_needs_pgtable_deposit())
2687                         zap_deposited_table(mm, pmd);
2688                 if (vma_is_special_huge(vma))
2689                         return;
2690                 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2691                         swp_entry_t entry;
2692
2693                         entry = pmd_to_swp_entry(old_pmd);
2694                         folio = pfn_swap_entry_folio(entry);
2695                 } else {
2696                         page = pmd_page(old_pmd);
2697                         folio = page_folio(page);
2698                         if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
2699                                 folio_mark_dirty(folio);
2700                         if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2701                                 folio_set_referenced(folio);
2702                         folio_remove_rmap_pmd(folio, page, vma);
2703                         folio_put(folio);
2704                 }
2705                 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
2706                 return;
2707         }
2708
2709         if (is_huge_zero_pmd(*pmd)) {
2710                 /*
2711                  * FIXME: Do we want to invalidate the secondary mmu by calling
2712                  * mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments
2713                  * below inside __split_huge_pmd().
2714                  *
2715                  * We are going from a write-protected huge zero page to
2716                  * write-protected small zero pages, so it does not seem useful
2717                  * to invalidate the secondary mmu at this time.
2718                  */
2719                 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2720         }
2721
2722         pmd_migration = is_pmd_migration_entry(*pmd);
2723         if (unlikely(pmd_migration)) {
2724                 swp_entry_t entry;
2725
2726                 old_pmd = *pmd;
2727                 entry = pmd_to_swp_entry(old_pmd);
2728                 page = pfn_swap_entry_to_page(entry);
2729                 write = is_writable_migration_entry(entry);
2730                 if (PageAnon(page))
2731                         anon_exclusive = is_readable_exclusive_migration_entry(entry);
2732                 young = is_migration_entry_young(entry);
2733                 dirty = is_migration_entry_dirty(entry);
2734                 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2735                 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2736         } else {
2737                 /*
2738                  * Up to this point the pmd is present and huge and userland has
2739                  * full access to the hugepage during the split (which
2740                  * happens in place). If we overwrite the pmd with the not-huge
2741                  * version pointing to the pte here (which of course we could if
2742                  * all CPUs were bug free), userland could trigger a small page
2743                  * size TLB miss on the small sized TLB while the hugepage TLB
2744                  * entry is still established in the huge TLB. Some CPUs don't
2745                  * like that. See
2746                  * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2747                  * 383 on page 105. Intel should be safe, but it also warns that
2748                  * it's only safe if the permission and cache attributes of the
2749                  * two entries loaded in the two TLBs are identical (which should
2750                  * be the case here). But it is generally safer to never allow
2751                  * small and huge TLB entries for the same virtual address to be
2752                  * loaded simultaneously. So instead of doing "pmd_populate();
2753                  * flush_pmd_tlb_range();" we first mark the current pmd
2754                  * notpresent (atomically because here the pmd_trans_huge must
2755                  * remain set at all times on the pmd until the split is
2756                  * complete for this pmd), then we flush the SMP TLB and finally
2757                  * we write the non-huge version of the pmd entry with
2758                  * pmd_populate.
2759                  */
2760                 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2761                 page = pmd_page(old_pmd);
2762                 folio = page_folio(page);
2763                 if (pmd_dirty(old_pmd)) {
2764                         dirty = true;
2765                         folio_set_dirty(folio);
2766                 }
2767                 write = pmd_write(old_pmd);
2768                 young = pmd_young(old_pmd);
2769                 soft_dirty = pmd_soft_dirty(old_pmd);
2770                 uffd_wp = pmd_uffd_wp(old_pmd);
2771
2772                 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2773                 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2774
2775                 /*
2776                  * Without "freeze", we'll simply split the PMD, propagating the
2777                  * PageAnonExclusive() flag for each PTE by setting it for
2778                  * each subpage -- no need to (temporarily) clear.
2779                  *
2780                  * With "freeze" we want to replace mapped pages by
2781                  * migration entries right away. This is only possible if we
2782                  * managed to clear PageAnonExclusive() -- see
2783                  * set_pmd_migration_entry().
2784                  *
2785                  * In case we cannot clear PageAnonExclusive(), split the PMD
2786                  * only and let try_to_migrate_one() fail later.
2787                  *
2788                  * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
2789                  */
2790                 anon_exclusive = PageAnonExclusive(page);
2791                 if (freeze && anon_exclusive &&
2792                     folio_try_share_anon_rmap_pmd(folio, page))
2793                         freeze = false;
2794                 if (!freeze) {
2795                         rmap_t rmap_flags = RMAP_NONE;
2796
2797                         folio_ref_add(folio, HPAGE_PMD_NR - 1);
2798                         if (anon_exclusive)
2799                                 rmap_flags |= RMAP_EXCLUSIVE;
2800                         folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2801                                                  vma, haddr, rmap_flags);
2802                 }
2803         }
2804
2805         /*
2806          * Withdraw the table only after we mark the pmd entry invalid.
2807          * This is critical for some architectures (e.g. Power).
2808          */
2809         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2810         pmd_populate(mm, &_pmd, pgtable);
2811
2812         pte = pte_offset_map(&_pmd, haddr);
2813         VM_BUG_ON(!pte);
2814
2815         /*
2816          * Note that NUMA hinting access restrictions are not transferred to
2817          * avoid any possibility of altering permissions across VMAs.
2818          */
2819         if (freeze || pmd_migration) {
2820                 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2821                         pte_t entry;
2822                         swp_entry_t swp_entry;
2823
2824                         if (write)
2825                                 swp_entry = make_writable_migration_entry(
2826                                                         page_to_pfn(page + i));
2827                         else if (anon_exclusive)
2828                                 swp_entry = make_readable_exclusive_migration_entry(
2829                                                         page_to_pfn(page + i));
2830                         else
2831                                 swp_entry = make_readable_migration_entry(
2832                                                         page_to_pfn(page + i));
2833                         if (young)
2834                                 swp_entry = make_migration_entry_young(swp_entry);
2835                         if (dirty)
2836                                 swp_entry = make_migration_entry_dirty(swp_entry);
2837                         entry = swp_entry_to_pte(swp_entry);
2838                         if (soft_dirty)
2839                                 entry = pte_swp_mksoft_dirty(entry);
2840                         if (uffd_wp)
2841                                 entry = pte_swp_mkuffd_wp(entry);
2842
2843                         VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2844                         set_pte_at(mm, addr, pte + i, entry);
2845                 }
2846         } else {
2847                 pte_t entry;
2848
2849                 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
2850                 if (write)
2851                         entry = pte_mkwrite(entry, vma);
2852                 if (!young)
2853                         entry = pte_mkold(entry);
2854                 /* NOTE: this may set soft-dirty too on some archs */
2855                 if (dirty)
2856                         entry = pte_mkdirty(entry);
2857                 if (soft_dirty)
2858                         entry = pte_mksoft_dirty(entry);
2859                 if (uffd_wp)
2860                         entry = pte_mkuffd_wp(entry);
2861
2862                 for (i = 0; i < HPAGE_PMD_NR; i++)
2863                         VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2864
2865                 set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
2866         }
2867         pte_unmap(pte);
2868
2869         if (!pmd_migration)
2870                 folio_remove_rmap_pmd(folio, page, vma);
2871         if (freeze)
2872                 put_page(page);
2873
2874         smp_wmb(); /* make pte visible before pmd */
2875         pmd_populate(mm, pmd, pgtable);
2876 }
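/*
 * In short: without "freeze", __split_huge_pmd_locked() above leaves the
 * same subpages mapped, just by HPAGE_PMD_NR ordinary PTEs instead of one
 * PMD, carrying over the write, young, dirty, soft-dirty and uffd-wp state.
 * With "freeze", the PTEs are filled with migration entries instead, which
 * are removed again via remap_page()/remove_migration_ptes() once the folio
 * split has finished (or consumed by migration proper).
 */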
2877
2878 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
2879                            pmd_t *pmd, bool freeze, struct folio *folio)
2880 {
2881         VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio));
2882         VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
2883         VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2884         VM_BUG_ON(freeze && !folio);
2885
2886         /*
2887          * When the caller requests to set up a migration entry, we
2888          * require a folio to check the PMD against. Otherwise, there
2889          * is a risk of replacing the wrong folio.
2890          */
2891         if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2892             is_pmd_migration_entry(*pmd)) {
2893                 if (folio && folio != pmd_folio(*pmd))
2894                         return;
2895                 __split_huge_pmd_locked(vma, pmd, address, freeze);
2896         }
2897 }
2898
2899 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2900                 unsigned long address, bool freeze, struct folio *folio)
2901 {
2902         spinlock_t *ptl;
2903         struct mmu_notifier_range range;
2904
2905         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2906                                 address & HPAGE_PMD_MASK,
2907                                 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2908         mmu_notifier_invalidate_range_start(&range);
2909         ptl = pmd_lock(vma->vm_mm, pmd);
2910         split_huge_pmd_locked(vma, range.start, pmd, freeze, folio);
2911         spin_unlock(ptl);
2912         mmu_notifier_invalidate_range_end(&range);
2913 }
2914
2915 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2916                 bool freeze, struct folio *folio)
2917 {
2918         pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2919
2920         if (!pmd)
2921                 return;
2922
2923         __split_huge_pmd(vma, pmd, address, freeze, folio);
2924 }
2925
2926 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2927 {
2928         /*
2929          * If the new address isn't hpage aligned and it could previously
2930          * contain a hugepage: check if we need to split a huge pmd.
2931          */
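        /*
         * For example (illustrative numbers, assuming 2 MiB PMDs, i.e.
         * HPAGE_PMD_SIZE == 0x200000): a new boundary at 0x1201000 is not
         * PMD-aligned, so if [0x1200000, 0x1400000) still lies within the
         * VMA it may contain a PMD mapping that crosses the new boundary
         * and must be split.
         */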
2932         if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2933             range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2934                          ALIGN(address, HPAGE_PMD_SIZE)))
2935                 split_huge_pmd_address(vma, address, false, NULL);
2936 }
2937
2938 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2939                              unsigned long start,
2940                              unsigned long end,
2941                              long adjust_next)
2942 {
2943         /* Check if we need to split start first. */
2944         split_huge_pmd_if_needed(vma, start);
2945
2946         /* Check if we need to split end next. */
2947         split_huge_pmd_if_needed(vma, end);
2948
2949         /*
2950          * If we're also updating the next vma vm_start,
2951          * check if we need to split it.
2952          */
2953         if (adjust_next > 0) {
2954                 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2955                 unsigned long nstart = next->vm_start;
2956                 nstart += adjust_next;
2957                 split_huge_pmd_if_needed(next, nstart);
2958         }
2959 }
2960
2961 static void unmap_folio(struct folio *folio)
2962 {
2963         enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
2964                 TTU_BATCH_FLUSH;
2965
2966         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2967
2968         if (folio_test_pmd_mappable(folio))
2969                 ttu_flags |= TTU_SPLIT_HUGE_PMD;
2970
2971         /*
2972          * Anon pages need migration entries to preserve them, but file
2973          * pages can simply be left unmapped, then faulted back on demand.
2974          * If that is ever changed (perhaps for mlock), update remap_page().
2975          */
2976         if (folio_test_anon(folio))
2977                 try_to_migrate(folio, ttu_flags);
2978         else
2979                 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2980
2981         try_to_unmap_flush();
2982 }
2983
2984 static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
2985                                             unsigned long addr, pmd_t *pmdp,
2986                                             struct folio *folio)
2987 {
2988         struct mm_struct *mm = vma->vm_mm;
2989         int ref_count, map_count;
2990         pmd_t orig_pmd = *pmdp;
2991
2992         if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
2993                 return false;
2994
2995         orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
2996
2997         /*
2998          * Syncing against concurrent GUP-fast:
2999          * - clear PMD; barrier; read refcount
3000          * - inc refcount; barrier; read PMD
3001          */
3002         smp_mb();
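        /*
         * Either this CPU observes the reference taken by a concurrent
         * GUP-fast and backs off below, or GUP-fast observes the cleared
         * PMD and drops its reference again.
         */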
3003
3004         ref_count = folio_ref_count(folio);
3005         map_count = folio_mapcount(folio);
3006
3007         /*
3008          * Order reads for folio refcount and dirty flag
3009          * (see comments in __remove_mapping()).
3010          */
3011         smp_rmb();
3012
3013         /*
3014          * If the folio or its PMD is redirtied at this point, or if there
3015          * are unexpected references, we give up on discarding this folio
3016          * and remap it instead.
3017          *
3018          * The only folio references must be the one from isolation plus the rmap(s).
3019          */
3020         if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
3021             ref_count != map_count + 1) {
3022                 set_pmd_at(mm, addr, pmdp, orig_pmd);
3023                 return false;
3024         }
3025
3026         folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma);
3027         zap_deposited_table(mm, pmdp);
3028         add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
3029         if (vma->vm_flags & VM_LOCKED)
3030                 mlock_drain_local();
3031         folio_put(folio);
3032
3033         return true;
3034 }
3035
3036 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
3037                            pmd_t *pmdp, struct folio *folio)
3038 {
3039         VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
3040         VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
3041         VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
3042
3043         if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
3044                 return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
3045
3046         return false;
3047 }
3048
3049 static void remap_page(struct folio *folio, unsigned long nr, int flags)
3050 {
3051         int i = 0;
3052
3053         /* If unmap_folio() uses try_to_migrate() on file, remove this check */
3054         if (!folio_test_anon(folio))
3055                 return;
3056         for (;;) {
3057                 remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
3058                 i += folio_nr_pages(folio);
3059                 if (i >= nr)
3060                         break;
3061                 folio = folio_next(folio);
3062         }
3063 }
3064
3065 static void lru_add_page_tail(struct folio *folio, struct page *tail,
3066                 struct lruvec *lruvec, struct list_head *list)
3067 {
3068         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3069         VM_BUG_ON_FOLIO(PageLRU(tail), folio);
3070         lockdep_assert_held(&lruvec->lru_lock);
3071
3072         if (list) {
3073                 /* page reclaim is reclaiming a huge page */
3074                 VM_WARN_ON(folio_test_lru(folio));
3075                 get_page(tail);
3076                 list_add_tail(&tail->lru, list);
3077         } else {
3078                 /* head is still on lru (and we have it frozen) */
3079                 VM_WARN_ON(!folio_test_lru(folio));
3080                 if (folio_test_unevictable(folio))
3081                         tail->mlock_count = 0;
3082                 else
3083                         list_add_tail(&tail->lru, &folio->lru);
3084                 SetPageLRU(tail);
3085         }
3086 }
3087
3088 static void __split_huge_page_tail(struct folio *folio, int tail,
3089                 struct lruvec *lruvec, struct list_head *list,
3090                 unsigned int new_order)
3091 {
3092         struct page *head = &folio->page;
3093         struct page *page_tail = head + tail;
3094         /*
3095          * Careful: new_folio is not a "real" folio before we cleared PageTail.
3096          * Don't pass it around before clear_compound_head().
3097          */
3098         struct folio *new_folio = (struct folio *)page_tail;
3099
3100         VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
3101
3102         /*
3103          * Clone page flags before unfreezing refcount.
3104          *
3105          * A flags change may follow a successful get_page_unless_zero(),
3106          * for example lock_page() setting PG_waiters.
3107          *
3108          * Note that for mapped sub-pages of an anonymous THP,
3109          * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
3110          * the migration entry instead from where remap_page() will restore it.
3111          * We can still have PG_anon_exclusive set on effectively unmapped and
3112          * unreferenced sub-pages of an anonymous THP: we can simply drop
3113          * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
3114          */
3115         page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
3116         page_tail->flags |= (head->flags &
3117                         ((1L << PG_referenced) |
3118                          (1L << PG_swapbacked) |
3119                          (1L << PG_swapcache) |
3120                          (1L << PG_mlocked) |
3121                          (1L << PG_uptodate) |
3122                          (1L << PG_active) |
3123                          (1L << PG_workingset) |
3124                          (1L << PG_locked) |
3125                          (1L << PG_unevictable) |
3126 #ifdef CONFIG_ARCH_USES_PG_ARCH_2
3127                          (1L << PG_arch_2) |
3128 #endif
3129 #ifdef CONFIG_ARCH_USES_PG_ARCH_3
3130                          (1L << PG_arch_3) |
3131 #endif
3132                          (1L << PG_dirty) |
3133                          LRU_GEN_MASK | LRU_REFS_MASK));
3134
3135         /* ->mapping in first and second tail page is replaced by other uses */
3136         VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
3137                         page_tail);
3138         page_tail->mapping = head->mapping;
3139         page_tail->index = head->index + tail;
3140
3141         /*
3142          * page->private should not be set in tail pages. Fix up and warn once
3143          * if private is unexpectedly set.
3144          */
3145         if (unlikely(page_tail->private)) {
3146                 VM_WARN_ON_ONCE_PAGE(true, page_tail);
3147                 page_tail->private = 0;
3148         }
3149         if (folio_test_swapcache(folio))
3150                 new_folio->swap.val = folio->swap.val + tail;
3151
3152         /* Page flags must be visible before we make the page non-compound. */
3153         smp_wmb();
3154
3155         /*
3156          * Clear PageTail before unfreezing page refcount.
3157          *
3158          * A put_page() may follow a successful get_page_unless_zero(),
3159          * and it needs a correct compound_head().
3160          */
3161         clear_compound_head(page_tail);
3162         if (new_order) {
3163                 prep_compound_page(page_tail, new_order);
3164                 folio_set_large_rmappable(new_folio);
3165         }
3166
3167         /* Finally unfreeze refcount. Additional reference from page cache. */
3168         page_ref_unfreeze(page_tail,
3169                 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
3170                              folio_nr_pages(new_folio) : 0));
3171
3172         if (folio_test_young(folio))
3173                 folio_set_young(new_folio);
3174         if (folio_test_idle(folio))
3175                 folio_set_idle(new_folio);
3176
3177         folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
3178
3179         /*
3180          * Always add to the tail because some iterators expect new
3181          * pages to show up after the currently processed elements,
3182          * e.g. migrate_pages().
3183          */
3184         lru_add_page_tail(folio, page_tail, lruvec, list);
3185 }
3186
3187 static void __split_huge_page(struct page *page, struct list_head *list,
3188                 pgoff_t end, unsigned int new_order)
3189 {
3190         struct folio *folio = page_folio(page);
3191         struct page *head = &folio->page;
3192         struct lruvec *lruvec;
3193         struct address_space *swap_cache = NULL;
3194         unsigned long offset = 0;
3195         int i, nr_dropped = 0;
3196         unsigned int new_nr = 1 << new_order;
3197         int order = folio_order(folio);
3198         unsigned int nr = 1 << order;
3199
3200         /* complete memcg work before adding pages to the LRU */
3201         split_page_memcg(head, order, new_order);
3202
3203         if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
3204                 offset = swap_cache_index(folio->swap);
3205                 swap_cache = swap_address_space(folio->swap);
3206                 xa_lock(&swap_cache->i_pages);
3207         }
3208
3209         /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
3210         lruvec = folio_lruvec_lock(folio);
3211
3212         ClearPageHasHWPoisoned(head);
3213
3214         for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
3215                 __split_huge_page_tail(folio, i, lruvec, list, new_order);
3216                 /* Some pages can be beyond EOF: drop them from page cache */
3217                 if (head[i].index >= end) {
3218                         struct folio *tail = page_folio(head + i);
3219
3220                         if (shmem_mapping(folio->mapping))
3221                                 nr_dropped++;
3222                         else if (folio_test_clear_dirty(tail))
3223                                 folio_account_cleaned(tail,
3224                                         inode_to_wb(folio->mapping->host));
3225                         __filemap_remove_folio(tail, NULL);
3226                         folio_put(tail);
3227                 } else if (!PageAnon(page)) {
3228                         __xa_store(&folio->mapping->i_pages, head[i].index,
3229                                         head + i, 0);
3230                 } else if (swap_cache) {
3231                         __xa_store(&swap_cache->i_pages, offset + i,
3232                                         head + i, 0);
3233                 }
3234         }
3235
3236         if (!new_order)
3237                 ClearPageCompound(head);
3238         else {
3239                 struct folio *new_folio = (struct folio *)head;
3240
3241                 folio_set_order(new_folio, new_order);
3242         }
3243         unlock_page_lruvec(lruvec);
3244         /* Caller disabled irqs, so they are still disabled here */
3245
3246         split_page_owner(head, order, new_order);
3247         pgalloc_tag_split(folio, order, new_order);
3248
3249         /* See comment in __split_huge_page_tail() */
3250         if (folio_test_anon(folio)) {
3251                 /* Additional pin to swap cache */
3252                 if (folio_test_swapcache(folio)) {
3253                         folio_ref_add(folio, 1 + new_nr);
3254                         xa_unlock(&swap_cache->i_pages);
3255                 } else {
3256                         folio_ref_inc(folio);
3257                 }
3258         } else {
3259                 /* Additional pin to page cache */
3260                 folio_ref_add(folio, 1 + new_nr);
3261                 xa_unlock(&folio->mapping->i_pages);
3262         }
3263         local_irq_enable();
3264
3265         if (nr_dropped)
3266                 shmem_uncharge(folio->mapping->host, nr_dropped);
3267         remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0);
3268
3269         /*
3270          * set page to its compound_head when split to non order-0 pages, so
3271          * we can skip unlocking it below, since PG_locked is transferred to
3272          * the compound_head of the page and the caller will unlock it.
3273          */
3274         if (new_order)
3275                 page = compound_head(page);
3276
3277         for (i = 0; i < nr; i += new_nr) {
3278                 struct page *subpage = head + i;
3279                 struct folio *new_folio = page_folio(subpage);
3280                 if (subpage == page)
3281                         continue;
3282                 folio_unlock(new_folio);
3283
3284                 /*
3285                  * Subpages may be freed if there wasn't any mapping,
3286                  * for example if add_to_swap() is running on an lru page that
3287                  * had its mapping zapped. And freeing these pages
3288                  * requires taking the lru_lock so we do the put_page
3289                  * of the tail pages after the split is complete.
3290                  */
3291                 free_page_and_swap_cache(subpage);
3292         }
3293 }
3294
3295 /* Racy check whether the huge page can be split */
3296 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
3297 {
3298         int extra_pins;
3299
3300         /* Additional pins from swap cache (anon) or page cache (file) */
3301         if (folio_test_anon(folio))
3302                 extra_pins = folio_test_swapcache(folio) ?
3303                                 folio_nr_pages(folio) : 0;
3304         else
3305                 extra_pins = folio_nr_pages(folio);
3306         if (pextra_pins)
3307                 *pextra_pins = extra_pins;
3308         return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
3309                                         caller_pins;
3310 }
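/*
 * For illustration (example numbers, not taken from the code above): an
 * anonymous PMD-mapped THP that is not in the swap cache and is mapped
 * exactly once has folio_mapcount() == 1 and folio_ref_count() == 2 (one
 * reference from the mapping plus the one held by the split caller), so
 * with caller_pins == 1 and extra_pins == 0 the check passes.  Any
 * additional reference, e.g. a GUP pin, raises the refcount and the split
 * is refused with -EAGAIN.
 */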
3311
3312 /*
3313  * This function splits a large folio into smaller folios of order @new_order.
3314  * @page can point to any page of the large folio to split. The split operation
3315  * does not change the position of @page.
3316  *
3317  * Prerequisites:
3318  *
3319  * 1) The caller must hold a reference on the @page's owning folio, also known
3320  *    as the large folio.
3321  *
3322  * 2) The large folio must be locked.
3323  *
3324  * 3) The folio must not be pinned. Any unexpected folio references, including
3325  *    GUP pins, will result in the folio not getting split; instead, the caller
3326  *    will receive an -EAGAIN.
3327  *
3328  * 4) @new_order > 1, usually. Splitting to order-1 folios is not supported
3329  *    for anonymous folios, because folio->_deferred_list, which is used by
3330  *    partially mapped folios, is stored in subpage 2, but an order-1 folio
3331  *    only has subpages 0 and 1. File-backed order-1 folios are supported,
3332  *    since they do not use _deferred_list.
3333  *
3334  * After splitting, the caller's folio reference will be transferred to @page,
3335  * resulting in a raised refcount of @page after this call. The other pages may
3336  * be freed if they are not mapped.
3337  *
3338  * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
3339  *
3340  * Pages in @new_order will inherit the mapping, flags, and so on from the
3341  * huge page.
3342  *
3343  * Returns 0 if the huge page was split successfully.
3344  *
3345  * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
3346  * the folio was concurrently removed from the page cache.
3347  *
3348  * Returns -EBUSY when trying to split the huge zeropage, if the folio is
3349  * under writeback, if fs-specific folio metadata cannot currently be
3350  * released, or if some unexpected race happened (e.g., anon VMA disappeared,
3351  * truncation).
3352  *
3353  * Callers should ensure that the order respects the address space mapping
3354  * min-order if one is set for non-anonymous folios.
3355  *
3356  * Returns -EINVAL when trying to split to an order that is incompatible
3357  * with the folio. Splitting to order 0 is compatible with all folios.
3358  */
3359 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
3360                                      unsigned int new_order)
3361 {
3362         struct folio *folio = page_folio(page);
3363         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3364         /* reset xarray order to new order after split */
3365         XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
3366         bool is_anon = folio_test_anon(folio);
3367         struct address_space *mapping = NULL;
3368         struct anon_vma *anon_vma = NULL;
3369         int order = folio_order(folio);
3370         int extra_pins, ret;
3371         pgoff_t end;
3372         bool is_hzp;
3373
3374         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3375         VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3376
3377         if (new_order >= folio_order(folio))
3378                 return -EINVAL;
3379
3380         if (is_anon) {
3381                 /* order-1 is not supported for anonymous THP. */
3382                 if (new_order == 1) {
3383                         VM_WARN_ONCE(1, "Cannot split to order-1 folio");
3384                         return -EINVAL;
3385                 }
3386         } else if (new_order) {
3387                 /* Split shmem folio to non-zero order not supported */
3388                 if (shmem_mapping(folio->mapping)) {
3389                         VM_WARN_ONCE(1,
3390                                 "Cannot split shmem folio to non-0 order");
3391                         return -EINVAL;
3392                 }
3393                 /*
3394                  * No split if the file system does not support large folios.
3395                  * Note that we might still have THPs in such mappings due to
3396                  * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
3397                  * does not actually support large folios properly.
3398                  */
3399                 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3400                     !mapping_large_folio_support(folio->mapping)) {
3401                         VM_WARN_ONCE(1,
3402                                 "Cannot split file folio to non-0 order");
3403                         return -EINVAL;
3404                 }
3405         }
3406
3407         /* Only swapping a whole PMD-mapped folio is supported */
3408         if (folio_test_swapcache(folio) && new_order)
3409                 return -EINVAL;
3410
3411         is_hzp = is_huge_zero_folio(folio);
3412         if (is_hzp) {
3413                 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
3414                 return -EBUSY;
3415         }
3416
3417         if (folio_test_writeback(folio))
3418                 return -EBUSY;
3419
3420         if (is_anon) {
3421                 /*
3422                  * The caller does not necessarily hold an mmap_lock that would
3423                  * prevent the anon_vma disappearing, so we first take a
3424                  * reference to it and then lock the anon_vma for write. This
3425                  * is similar to folio_lock_anon_vma_read except the write lock
3426                  * is taken to serialise against parallel split or collapse
3427                  * operations.
3428                  */
3429                 anon_vma = folio_get_anon_vma(folio);
3430                 if (!anon_vma) {
3431                         ret = -EBUSY;
3432                         goto out;
3433                 }
3434                 end = -1;
3435                 mapping = NULL;
3436                 anon_vma_lock_write(anon_vma);
3437         } else {
3438                 unsigned int min_order;
3439                 gfp_t gfp;
3440
3441                 mapping = folio->mapping;
3442
3443                 /* Truncated? */
3444                 if (!mapping) {
3445                         ret = -EBUSY;
3446                         goto out;
3447                 }
3448
3449                 min_order = mapping_min_folio_order(folio->mapping);
3450                 if (new_order < min_order) {
3451                         VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
3452                                      min_order);
3453                         ret = -EINVAL;
3454                         goto out;
3455                 }
3456
3457                 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3458                                                         GFP_RECLAIM_MASK);
3459
3460                 if (!filemap_release_folio(folio, gfp)) {
3461                         ret = -EBUSY;
3462                         goto out;
3463                 }
3464
3465                 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
3466                 if (xas_error(&xas)) {
3467                         ret = xas_error(&xas);
3468                         goto out;
3469                 }
3470
3471                 anon_vma = NULL;
3472                 i_mmap_lock_read(mapping);
3473
3474                 /*
3475                  * __split_huge_page() may need to trim off pages beyond EOF:
3476                  * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3477                  * which cannot be nested inside the page tree lock. So note
3478                  * end now: i_size itself may be changed at any moment, but
3479                  * folio lock is good enough to serialize the trimming.
3480                  */
3481                 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3482                 if (shmem_mapping(mapping))
3483                         end = shmem_fallocend(mapping->host, end);
3484         }
3485
3486         /*
3487          * Racy check if we can split the page, before unmap_folio()
3488          * splits PMDs.
3489          */
3490         if (!can_split_folio(folio, 1, &extra_pins)) {
3491                 ret = -EAGAIN;
3492                 goto out_unlock;
3493         }
3494
3495         unmap_folio(folio);
3496
3497         /* block interrupt reentry in xa_lock and spinlock */
3498         local_irq_disable();
3499         if (mapping) {
3500                 /*
3501                  * Check if the folio is present in page cache.
3502                  * We assume all tail pages are present too, if the folio is there.
3503                  */
3504                 xas_lock(&xas);
3505                 xas_reset(&xas);
3506                 if (xas_load(&xas) != folio)
3507                         goto fail;
3508         }
3509
3510         /* Prevent deferred_split_scan() touching ->_refcount */
3511         spin_lock(&ds_queue->split_queue_lock);
3512         if (folio_ref_freeze(folio, 1 + extra_pins)) {
3513                 if (folio_order(folio) > 1 &&
3514                     !list_empty(&folio->_deferred_list)) {
3515                         ds_queue->split_queue_len--;
3516                         if (folio_test_partially_mapped(folio)) {
3517                                 __folio_clear_partially_mapped(folio);
3518                                 mod_mthp_stat(folio_order(folio),
3519                                               MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3520                         }
3521                         /*
3522                          * Reinitialize page_deferred_list after removing the
3523                          * page from the split_queue, otherwise a subsequent
3524                          * split will see list corruption when checking the
3525                          * page_deferred_list.
3526                          */
3527                         list_del_init(&folio->_deferred_list);
3528                 }
3529                 spin_unlock(&ds_queue->split_queue_lock);
3530                 if (mapping) {
3531                         int nr = folio_nr_pages(folio);
3532
3533                         xas_split(&xas, folio, folio_order(folio));
3534                         if (folio_test_pmd_mappable(folio) &&
3535                             new_order < HPAGE_PMD_ORDER) {
3536                                 if (folio_test_swapbacked(folio)) {
3537                                         __lruvec_stat_mod_folio(folio,
3538                                                         NR_SHMEM_THPS, -nr);
3539                                 } else {
3540                                         __lruvec_stat_mod_folio(folio,
3541                                                         NR_FILE_THPS, -nr);
3542                                         filemap_nr_thps_dec(mapping);
3543                                 }
3544                         }
3545                 }
3546
3547                 if (is_anon) {
3548                         mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
3549                         mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order));
3550                 }
3551                 __split_huge_page(page, list, end, new_order);
3552                 ret = 0;
3553         } else {
3554                 spin_unlock(&ds_queue->split_queue_lock);
3555 fail:
3556                 if (mapping)
3557                         xas_unlock(&xas);
3558                 local_irq_enable();
3559                 remap_page(folio, folio_nr_pages(folio), 0);
3560                 ret = -EAGAIN;
3561         }
3562
3563 out_unlock:
3564         if (anon_vma) {
3565                 anon_vma_unlock_write(anon_vma);
3566                 put_anon_vma(anon_vma);
3567         }
3568         if (mapping)
3569                 i_mmap_unlock_read(mapping);
3570 out:
3571         xas_destroy(&xas);
3572         if (order == HPAGE_PMD_ORDER)
3573                 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3574         count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
3575         return ret;
3576 }
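/*
 * Minimal usage sketch of the interface above: pin the folio, lock it,
 * then split.  The helper name example_try_split() is hypothetical and
 * only illustrates the calling convention; it mirrors what
 * deferred_split_scan() and the debugfs code below do.
 */
static int __maybe_unused example_try_split(struct folio *folio)
{
        int ret = -EAGAIN;

        if (!folio_try_get(folio))      /* hold a reference across the split */
                return -EAGAIN;
        if (folio_test_large(folio) && folio_trylock(folio)) {
                /* 0 on success, -EAGAIN/-EBUSY/-EINVAL otherwise */
                ret = split_folio(folio);
                folio_unlock(folio);
        }
        folio_put(folio);
        return ret;
}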
3577
3578 int min_order_for_split(struct folio *folio)
3579 {
3580         if (folio_test_anon(folio))
3581                 return 0;
3582
3583         if (!folio->mapping) {
3584                 if (folio_test_pmd_mappable(folio))
3585                         count_vm_event(THP_SPLIT_PAGE_FAILED);
3586                 return -EBUSY;
3587         }
3588
3589         return mapping_min_folio_order(folio->mapping);
3590 }
3591
3592 int split_folio_to_list(struct folio *folio, struct list_head *list)
3593 {
3594         int ret = min_order_for_split(folio);
3595
3596         if (ret < 0)
3597                 return ret;
3598
3599         return split_huge_page_to_list_to_order(&folio->page, list, ret);
3600 }
3601
3602 void __folio_undo_large_rmappable(struct folio *folio)
3603 {
3604         struct deferred_split *ds_queue;
3605         unsigned long flags;
3606
3607         ds_queue = get_deferred_split_queue(folio);
3608         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3609         if (!list_empty(&folio->_deferred_list)) {
3610                 ds_queue->split_queue_len--;
3611                 if (folio_test_partially_mapped(folio)) {
3612                         __folio_clear_partially_mapped(folio);
3613                         mod_mthp_stat(folio_order(folio),
3614                                       MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3615                 }
3616                 list_del_init(&folio->_deferred_list);
3617         }
3618         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3619 }
3620
3621 /* partially_mapped=false won't clear PG_partially_mapped folio flag */
3622 void deferred_split_folio(struct folio *folio, bool partially_mapped)
3623 {
3624         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3625 #ifdef CONFIG_MEMCG
3626         struct mem_cgroup *memcg = folio_memcg(folio);
3627 #endif
3628         unsigned long flags;
3629
3630         /*
3631          * Order 1 folios have no space for a deferred list, but we also
3632          * won't waste much memory by not adding them to the deferred list.
3633          */
3634         if (folio_order(folio) <= 1)
3635                 return;
3636
3637         if (!partially_mapped && !split_underused_thp)
3638                 return;
3639
3640         /*
3641          * The try_to_unmap() in the page reclaim path might reach here too;
3642          * this may cause a race condition that corrupts the deferred split
3643          * queue. Also, if page reclaim is already handling the same folio,
3644          * it is unnecessary to handle it again in the shrinker.
3645          *
3646          * Check the swapcache flag to determine if the folio is being
3647          * handled by page reclaim since THP swap would add the folio into
3648          * swap cache before calling try_to_unmap().
3649          */
3650         if (folio_test_swapcache(folio))
3651                 return;
3652
3653         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3654         if (partially_mapped) {
3655                 if (!folio_test_partially_mapped(folio)) {
3656                         __folio_set_partially_mapped(folio);
3657                         if (folio_test_pmd_mappable(folio))
3658                                 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
3659                         count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
3660                         mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
3661
3662                 }
3663         } else {
3664                 /* partially mapped folios cannot become non-partially mapped */
3665                 VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
3666         }
3667         if (list_empty(&folio->_deferred_list)) {
3668                 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
3669                 ds_queue->split_queue_len++;
3670 #ifdef CONFIG_MEMCG
3671                 if (memcg)
3672                         set_shrinker_bit(memcg, folio_nid(folio),
3673                                          deferred_split_shrinker->id);
3674 #endif
3675         }
3676         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3677 }
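/*
 * Lifecycle note: folios queued here are later processed by
 * deferred_split_scan() below.  A folio leaves the queue once it has been
 * split successfully, when its last reference is dropped, or when it is
 * not partially mapped and is no longer considered underused.
 */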
3678
3679 static unsigned long deferred_split_count(struct shrinker *shrink,
3680                 struct shrink_control *sc)
3681 {
3682         struct pglist_data *pgdata = NODE_DATA(sc->nid);
3683         struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3684
3685 #ifdef CONFIG_MEMCG
3686         if (sc->memcg)
3687                 ds_queue = &sc->memcg->deferred_split_queue;
3688 #endif
3689         return READ_ONCE(ds_queue->split_queue_len);
3690 }
3691
3692 static bool thp_underused(struct folio *folio)
3693 {
3694         int num_zero_pages = 0, num_filled_pages = 0;
3695         void *kaddr;
3696         int i;
3697
3698         if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
3699                 return false;
3700
3701         for (i = 0; i < folio_nr_pages(folio); i++) {
3702                 kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
3703                 if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
3704                         num_zero_pages++;
3705                         if (num_zero_pages > khugepaged_max_ptes_none) {
3706                                 kunmap_local(kaddr);
3707                                 return true;
3708                         }
3709                 } else {
3710                         /*
3711                          * Another path for early exit once the number
3712                          * of non-zero filled pages exceeds the threshold.
3713                          */
3714                         num_filled_pages++;
3715                         if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
3716                                 kunmap_local(kaddr);
3717                                 return false;
3718                         }
3719                 }
3720                 kunmap_local(kaddr);
3721         }
3722         return false;
3723 }
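/*
 * Worked example (illustrative, assuming 4K pages and 2M THPs, i.e.
 * HPAGE_PMD_NR == 512): with the default khugepaged_max_ptes_none of 511
 * the early return above disables this heuristic entirely.  With
 * khugepaged_max_ptes_none == 255, a folio is reported underused as soon
 * as 256 zero-filled subpages have been seen, and the scan gives up as
 * "not underused" once 257 non-zero subpages have been found.
 */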
3724
3725 static unsigned long deferred_split_scan(struct shrinker *shrink,
3726                 struct shrink_control *sc)
3727 {
3728         struct pglist_data *pgdata = NODE_DATA(sc->nid);
3729         struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3730         unsigned long flags;
3731         LIST_HEAD(list);
3732         struct folio *folio, *next;
3733         int split = 0;
3734
3735 #ifdef CONFIG_MEMCG
3736         if (sc->memcg)
3737                 ds_queue = &sc->memcg->deferred_split_queue;
3738 #endif
3739
3740         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3741         /* Take a pin on all head pages to avoid freeing them under us */
3742         list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
3743                                                         _deferred_list) {
3744                 if (folio_try_get(folio)) {
3745                         list_move(&folio->_deferred_list, &list);
3746                 } else {
3747                         /* We lost race with folio_put() */
3748                         if (folio_test_partially_mapped(folio)) {
3749                                 __folio_clear_partially_mapped(folio);
3750                                 mod_mthp_stat(folio_order(folio),
3751                                               MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3752                         }
3753                         list_del_init(&folio->_deferred_list);
3754                         ds_queue->split_queue_len--;
3755                 }
3756                 if (!--sc->nr_to_scan)
3757                         break;
3758         }
3759         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3760
3761         list_for_each_entry_safe(folio, next, &list, _deferred_list) {
3762                 bool did_split = false;
3763                 bool underused = false;
3764
3765                 if (!folio_test_partially_mapped(folio)) {
3766                         underused = thp_underused(folio);
3767                         if (!underused)
3768                                 goto next;
3769                 }
3770                 if (!folio_trylock(folio))
3771                         goto next;
3772                 if (!split_folio(folio)) {
3773                         did_split = true;
3774                         if (underused)
3775                                 count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
3776                         split++;
3777                 }
3778                 folio_unlock(folio);
3779 next:
3780                 /*
3781                  * split_folio() removes folio from list on success.
3782                  * Only add back to the queue if folio is partially mapped.
3783                  * If thp_underused() returns false, or if split_folio() fails
3784                  * for a folio that was underused, then consider it used and
3785                  * don't add it back to the split_queue.
3786                  */
3787                 if (!did_split && !folio_test_partially_mapped(folio)) {
3788                         list_del_init(&folio->_deferred_list);
3789                         ds_queue->split_queue_len--;
3790                 }
3791                 folio_put(folio);
3792         }
3793
3794         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3795         list_splice_tail(&list, &ds_queue->split_queue);
3796         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3797
3798         /*
3799          * Stop the shrinker if we didn't split any page and the queue is empty.
3800          * This can happen if pages were freed under us.
3801          */
3802         if (!split && list_empty(&ds_queue->split_queue))
3803                 return SHRINK_STOP;
3804         return split;
3805 }
3806
3807 #ifdef CONFIG_DEBUG_FS
3808 static void split_huge_pages_all(void)
3809 {
3810         struct zone *zone;
3811         struct page *page;
3812         struct folio *folio;
3813         unsigned long pfn, max_zone_pfn;
3814         unsigned long total = 0, split = 0;
3815
3816         pr_debug("Split all THPs\n");
3817         for_each_zone(zone) {
3818                 if (!managed_zone(zone))
3819                         continue;
3820                 max_zone_pfn = zone_end_pfn(zone);
3821                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3822                         int nr_pages;
3823
3824                         page = pfn_to_online_page(pfn);
3825                         if (!page || PageTail(page))
3826                                 continue;
3827                         folio = page_folio(page);
3828                         if (!folio_try_get(folio))
3829                                 continue;
3830
3831                         if (unlikely(page_folio(page) != folio))
3832                                 goto next;
3833
3834                         if (zone != folio_zone(folio))
3835                                 goto next;
3836
3837                         if (!folio_test_large(folio)
3838                                 || folio_test_hugetlb(folio)
3839                                 || !folio_test_lru(folio))
3840                                 goto next;
3841
3842                         total++;
3843                         folio_lock(folio);
3844                         nr_pages = folio_nr_pages(folio);
3845                         if (!split_folio(folio))
3846                                 split++;
3847                         pfn += nr_pages - 1;
3848                         folio_unlock(folio);
3849 next:
3850                         folio_put(folio);
3851                         cond_resched();
3852                 }
3853         }
3854
3855         pr_debug("%lu of %lu THP split\n", split, total);
3856 }
3857
3858 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
3859 {
3860         return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
3861                     is_vm_hugetlb_page(vma);
3862 }
3863
3864 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
3865                                 unsigned long vaddr_end, unsigned int new_order)
3866 {
3867         int ret = 0;
3868         struct task_struct *task;
3869         struct mm_struct *mm;
3870         unsigned long total = 0, split = 0;
3871         unsigned long addr;
3872
3873         vaddr_start &= PAGE_MASK;
3874         vaddr_end &= PAGE_MASK;
3875
3876         task = find_get_task_by_vpid(pid);
3877         if (!task) {
3878                 ret = -ESRCH;
3879                 goto out;
3880         }
3881
3882         /* Find the mm_struct */
3883         mm = get_task_mm(task);
3884         put_task_struct(task);
3885
3886         if (!mm) {
3887                 ret = -EINVAL;
3888                 goto out;
3889         }
3890
3891         pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3892                  pid, vaddr_start, vaddr_end);
3893
3894         mmap_read_lock(mm);
3895         /*
3896          * always increase addr by PAGE_SIZE, since we could have a PTE page
3897          * table filled with PTE-mapped THPs, each of which is distinct.
3898          */
3899         for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3900                 struct vm_area_struct *vma = vma_lookup(mm, addr);
3901                 struct folio_walk fw;
3902                 struct folio *folio;
3903                 struct address_space *mapping;
3904                 unsigned int target_order = new_order;
3905
3906                 if (!vma)
3907                         break;
3908
3909                 /* skip special VMA and hugetlb VMA */
3910                 if (vma_not_suitable_for_thp_split(vma)) {
3911                         addr = vma->vm_end;
3912                         continue;
3913                 }
3914
3915                 folio = folio_walk_start(&fw, vma, addr, 0);
3916                 if (!folio)
3917                         continue;
3918
3919                 if (!is_transparent_hugepage(folio))
3920                         goto next;
3921
3922                 if (!folio_test_anon(folio)) {
3923                         mapping = folio->mapping;
3924                         target_order = max(new_order,
3925                                            mapping_min_folio_order(mapping));
3926                 }
3927
3928                 if (target_order >= folio_order(folio))
3929                         goto next;
3930
3931                 total++;
3932                 /*
3933                  * For folios with private, split_huge_page_to_list_to_order()
3934                  * will try to drop it before split and then check if the folio
3935                  * can be split or not. So skip the check here.
3936                  */
3937                 if (!folio_test_private(folio) &&
3938                     !can_split_folio(folio, 0, NULL))
3939                         goto next;
3940
3941                 if (!folio_trylock(folio))
3942                         goto next;
3943                 folio_get(folio);
3944                 folio_walk_end(&fw, vma);
3945
3946                 if (!folio_test_anon(folio) && folio->mapping != mapping)
3947                         goto unlock;
3948
3949                 if (!split_folio_to_order(folio, target_order))
3950                         split++;
3951
3952 unlock:
3953
3954                 folio_unlock(folio);
3955                 folio_put(folio);
3956
3957                 cond_resched();
3958                 continue;
3959 next:
3960                 folio_walk_end(&fw, vma);
3961                 cond_resched();
3962         }
3963         mmap_read_unlock(mm);
3964         mmput(mm);
3965
3966         pr_debug("%lu of %lu THP split\n", split, total);
3967
3968 out:
3969         return ret;
3970 }
3971
3972 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3973                                 pgoff_t off_end, unsigned int new_order)
3974 {
3975         struct filename *file;
3976         struct file *candidate;
3977         struct address_space *mapping;
3978         int ret = -EINVAL;
3979         pgoff_t index;
3980         int nr_pages = 1;
3981         unsigned long total = 0, split = 0;
3982         unsigned int min_order;
3983         unsigned int target_order;
3984
3985         file = getname_kernel(file_path);
3986         if (IS_ERR(file))
3987                 return ret;
3988
3989         candidate = file_open_name(file, O_RDONLY, 0);
3990         if (IS_ERR(candidate))
3991                 goto out;
3992
3993         pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3994                  file_path, off_start, off_end);
3995
3996         mapping = candidate->f_mapping;
3997         min_order = mapping_min_folio_order(mapping);
3998         target_order = max(new_order, min_order);
3999
4000         for (index = off_start; index < off_end; index += nr_pages) {
4001                 struct folio *folio = filemap_get_folio(mapping, index);
4002
4003                 nr_pages = 1;
4004                 if (IS_ERR(folio))
4005                         continue;
4006
4007                 if (!folio_test_large(folio))
4008                         goto next;
4009
4010                 total++;
4011                 nr_pages = folio_nr_pages(folio);
4012
4013                 if (target_order >= folio_order(folio))
4014                         goto next;
4015
4016                 if (!folio_trylock(folio))
4017                         goto next;
4018
4019                 if (folio->mapping != mapping)
4020                         goto unlock;
4021
4022                 if (!split_folio_to_order(folio, target_order))
4023                         split++;
4024
4025 unlock:
4026                 folio_unlock(folio);
4027 next:
4028                 folio_put(folio);
4029                 cond_resched();
4030         }
4031
4032         filp_close(candidate, NULL);
4033         ret = 0;
4034
4035         pr_debug("%lu of %lu file-backed THP split\n", split, total);
4036 out:
4037         putname(file);
4038         return ret;
4039 }
4040
4041 #define MAX_INPUT_BUF_SZ 255
4042
4043 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
4044                                 size_t count, loff_t *ppos)
4045 {
4046         static DEFINE_MUTEX(split_debug_mutex);
4047         ssize_t ret;
4048         /*
4049          * hold pid, start_vaddr, end_vaddr, new_order or
4050          * file_path, off_start, off_end, new_order
4051          */
4052         char input_buf[MAX_INPUT_BUF_SZ];
4053         int pid;
4054         unsigned long vaddr_start, vaddr_end;
4055         unsigned int new_order = 0;
4056
4057         ret = mutex_lock_interruptible(&split_debug_mutex);
4058         if (ret)
4059                 return ret;
4060
4061         ret = -EFAULT;
4062
4063         memset(input_buf, 0, MAX_INPUT_BUF_SZ);
4064         if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
4065                 goto out;
4066
4067         input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
4068
4069         if (input_buf[0] == '/') {
4070                 char *tok;
4071                 char *buf = input_buf;
4072                 char file_path[MAX_INPUT_BUF_SZ];
4073                 pgoff_t off_start = 0, off_end = 0;
4074                 size_t input_len = strlen(input_buf);
4075
4076                 tok = strsep(&buf, ",");
4077                 if (tok) {
4078                         strcpy(file_path, tok);
4079                 } else {
4080                         ret = -EINVAL;
4081                         goto out;
4082                 }
4083
4084                 ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order);
4085                 if (ret != 2 && ret != 3) {
4086                         ret = -EINVAL;
4087                         goto out;
4088                 }
4089                 ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order);
4090                 if (!ret)
4091                         ret = input_len;
4092
4093                 goto out;
4094         }
4095
4096         ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order);
4097         if (ret == 1 && pid == 1) {
4098                 split_huge_pages_all();
4099                 ret = strlen(input_buf);
4100                 goto out;
4101         } else if (ret != 3 && ret != 4) {
4102                 ret = -EINVAL;
4103                 goto out;
4104         }
4105
4106         ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order);
4107         if (!ret)
4108                 ret = strlen(input_buf);
4109 out:
4110         mutex_unlock(&split_debug_mutex);
4111         return ret;
4112
4113 }
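/*
 * Example usage of the debugfs interface parsed above (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   # split all THPs system-wide
 *   echo 1 > /sys/kernel/debug/split_huge_pages
 *
 *   # split THPs of <pid> within a virtual address range, optionally to a
 *   # non-zero order
 *   echo "<pid>,0x<vaddr_start>,0x<vaddr_end>[,<new_order>]" \
 *           > /sys/kernel/debug/split_huge_pages
 *
 *   # split file-backed THPs within a page-offset range of the given file
 *   echo "/path/to/file,0x<off_start>,0x<off_end>[,<new_order>]" \
 *           > /sys/kernel/debug/split_huge_pages
 */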
4114
4115 static const struct file_operations split_huge_pages_fops = {
4116         .owner   = THIS_MODULE,
4117         .write   = split_huge_pages_write,
4118         .llseek  = no_llseek,
4119 };
4120
4121 static int __init split_huge_pages_debugfs(void)
4122 {
4123         debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
4124                             &split_huge_pages_fops);
4125         return 0;
4126 }
4127 late_initcall(split_huge_pages_debugfs);
4128 #endif
4129
4130 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
4131 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
4132                 struct page *page)
4133 {
4134         struct folio *folio = page_folio(page);
4135         struct vm_area_struct *vma = pvmw->vma;
4136         struct mm_struct *mm = vma->vm_mm;
4137         unsigned long address = pvmw->address;
4138         bool anon_exclusive;
4139         pmd_t pmdval;
4140         swp_entry_t entry;
4141         pmd_t pmdswp;
4142
4143         if (!(pvmw->pmd && !pvmw->pte))
4144                 return 0;
4145
4146         flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
4147         pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
4148
4149         /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
4150         anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
4151         if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
4152                 set_pmd_at(mm, address, pvmw->pmd, pmdval);
4153                 return -EBUSY;
4154         }
4155
4156         if (pmd_dirty(pmdval))
4157                 folio_mark_dirty(folio);
4158         if (pmd_write(pmdval))
4159                 entry = make_writable_migration_entry(page_to_pfn(page));
4160         else if (anon_exclusive)
4161                 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
4162         else
4163                 entry = make_readable_migration_entry(page_to_pfn(page));
4164         if (pmd_young(pmdval))
4165                 entry = make_migration_entry_young(entry);
4166         if (pmd_dirty(pmdval))
4167                 entry = make_migration_entry_dirty(entry);
4168         pmdswp = swp_entry_to_pmd(entry);
4169         if (pmd_soft_dirty(pmdval))
4170                 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
4171         if (pmd_uffd_wp(pmdval))
4172                 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
4173         set_pmd_at(mm, address, pvmw->pmd, pmdswp);
4174         folio_remove_rmap_pmd(folio, page, vma);
4175         folio_put(folio);
4176         trace_set_migration_pmd(address, pmd_val(pmdswp));
4177
4178         return 0;
4179 }
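/*
 * The migration entry installed above is undone by remove_migration_pmd()
 * below: a writable entry restores write permission, both writable and
 * readable-exclusive entries restore PageAnonExclusive, and the young,
 * dirty, soft-dirty and uffd-wp state round-trips via the migration entry
 * and the pmd_swp_*() bits.
 */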
4180
4181 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
4182 {
4183         struct folio *folio = page_folio(new);
4184         struct vm_area_struct *vma = pvmw->vma;
4185         struct mm_struct *mm = vma->vm_mm;
4186         unsigned long address = pvmw->address;
4187         unsigned long haddr = address & HPAGE_PMD_MASK;
4188         pmd_t pmde;
4189         swp_entry_t entry;
4190
4191         if (!(pvmw->pmd && !pvmw->pte))
4192                 return;
4193
4194         entry = pmd_to_swp_entry(*pvmw->pmd);
4195         folio_get(folio);
4196         pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
4197         if (pmd_swp_soft_dirty(*pvmw->pmd))
4198                 pmde = pmd_mksoft_dirty(pmde);
4199         if (is_writable_migration_entry(entry))
4200                 pmde = pmd_mkwrite(pmde, vma);
4201         if (pmd_swp_uffd_wp(*pvmw->pmd))
4202                 pmde = pmd_mkuffd_wp(pmde);
4203         if (!is_migration_entry_young(entry))
4204                 pmde = pmd_mkold(pmde);
4205         /* NOTE: this may contain setting soft-dirty on some archs */
4206         if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
4207                 pmde = pmd_mkdirty(pmde);
4208
4209         if (folio_test_anon(folio)) {
4210                 rmap_t rmap_flags = RMAP_NONE;
4211
4212                 if (!is_readable_migration_entry(entry))
4213                         rmap_flags |= RMAP_EXCLUSIVE;
4214
4215                 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
4216         } else {
4217                 folio_add_file_rmap_pmd(folio, new, vma);
4218         }
4219         VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
4220         set_pmd_at(mm, haddr, pvmw->pmd, pmde);
4221
4222         /* No need to invalidate - it was non-present before */
4223         update_mmu_cache_pmd(vma, address, pvmw->pmd);
4224         trace_remove_migration_pmd(address, pmd_val(pmde));
4225 }
4226 #endif