// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/mm_types.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/compat.h>
#include <linux/pgalloc_tag.h>
#include <linux/pagewalk.h>

#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
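
/*
 * Illustrative note (added, not part of the original file): with the defaults
 * above, CONFIG_TRANSPARENT_HUGEPAGE_MADVISE produces the sysfs state
 * "always [madvise] never" in /sys/kernel/mm/transparent_hugepage/enabled,
 * while CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS selects "[always] madvise never";
 * defrag starts out as "madvise" and the huge zero page is enabled.
 */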

static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
					 struct shrink_control *sc);
static bool split_underused_thp = true;

static atomic_t huge_zero_refcount;
struct folio *huge_zero_folio __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;
static bool anon_orders_configured __initdata;

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders)
{
	bool smaps = tva_flags & TVA_SMAPS;
	bool in_pf = tva_flags & TVA_IN_PF;
	bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
	unsigned long supported_orders;

	/* Check the intersection of requested and supported orders. */
	if (vma_is_anonymous(vma))
		supported_orders = THP_ORDERS_ALL_ANON;
	else if (vma_is_special_huge(vma))
		supported_orders = THP_ORDERS_ALL_SPECIAL;
	else
		supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;

	orders &= supported_orders;
	if (!orders)
		return 0;

	if (!vma->vm_mm)		/* vdso */
		return 0;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return 0;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
		return 0;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf ? orders : 0;

	/*
	 * khugepaged special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
		return 0;

	/*
	 * Check alignment for file vma and size for both file and anon vma by
	 * filtering out the unsuitable orders.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers.
	 */
	if (!in_pf) {
		int order = highest_order(orders);
		unsigned long addr;

		while (orders) {
			addr = vma->vm_end - (PAGE_SIZE << order);
			if (thp_vma_suitable_order(vma, addr, order))
				break;
			order = next_order(&orders, order);
		}

		if (!orders)
			return 0;
	}

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
						   vma, vma->vm_pgoff, 0,
						   !enforce_sysfs);

	if (!vma_is_anonymous(vma)) {
		/*
		 * Enforce sysfs THP requirements as necessary. Anonymous vmas
		 * were already handled in thp_vma_allowable_orders().
		 */
		if (enforce_sysfs &&
		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
						    !hugepage_global_always())))
			return 0;

		/*
		 * Trust that ->huge_fault() handlers know what they are doing
		 * in fault path.
		 */
		if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
			return orders;
		/* Only regular file is valid in collapse path */
		if (((!in_pf || smaps)) && file_thp_enabled(vma))
			return orders;

		return 0;
	}

	if (vma_is_temporary_stack(vma))
		return 0;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf) ? orders : 0;

	return orders;
}
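
/*
 * Usage sketch (added, illustrative only): callers normally go through the
 * thp_vma_allowable_orders()/thp_vma_allowable_order() wrappers in huge_mm.h
 * rather than calling __thp_vma_allowable_orders() directly, e.g. to ask
 * whether a PMD-sized THP may be used at fault time:
 *
 *	if (thp_vma_allowable_order(vma, vma->vm_flags,
 *				    TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER))
 *		// try to fault in a PMD-mapped THP
 *
 * The exact wrapper names and arguments are as understood from huge_mm.h and
 * may differ between kernel versions.
 */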

static bool get_huge_zero_page(void)
{
	struct folio *zero_folio;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_folio) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	/* Ensure zero folio won't have large_rmappable flag set. */
	folio_clear_large_rmappable(zero_folio);
	if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
		folio_put(zero_folio);
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_folio);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_folio);
}

void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
		BUG_ON(zero_folio == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		folio_put(zero_folio);
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker *huge_zero_page_shrinker;

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();

		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
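
/*
 * Illustrative usage (added, not part of the original file): the knob above
 * appears under /sys/kernel/mm/transparent_hugepage/, e.g.
 *
 *	$ cat /sys/kernel/mm/transparent_hugepage/enabled
 *	always [madvise] never
 *	# echo never > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * The bracketed word marks the currently selected policy.
 */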

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
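
/*
 * Illustrative usage (added, not part of the original file):
 *
 *	$ cat /sys/kernel/mm/transparent_hugepage/defrag
 *	always defer defer+madvise [madvise] never
 *	# echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * See vma_thp_gfp_mask() below for how each policy maps to GFP flags at
 * fault time.
 */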

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static ssize_t split_underused_thp_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", split_underused_thp);
}

static ssize_t split_underused_thp_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	int err = kstrtobool(buf, &split_underused_thp);

	if (err < 0)
		return err;

	return count;
}

static struct kobj_attribute split_underused_thp_attr = __ATTR(
	shrink_underused, 0644, split_underused_thp_show, split_underused_thp_store);
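
/*
 * Illustrative note (added, not part of the original file): despite the C
 * identifiers above, the sysfs file created by this attribute is named
 * "shrink_underused", i.e.
 *
 *	# echo 0 > /sys/kernel/mm/transparent_hugepage/shrink_underused
 *
 * disables splitting of underused THPs by the deferred-split shrinker.
 */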

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	&split_underused_thp_attr.attr,
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
static void thpsize_release(struct kobject *kobj);
static DEFINE_SPINLOCK(huge_anon_orders_lock);
static LIST_HEAD(thpsize_list);

static ssize_t anon_enabled_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_anon_orders_always))
		output = "[always] inherit madvise never";
	else if (test_bit(order, &huge_anon_orders_inherit))
		output = "always [inherit] madvise never";
	else if (test_bit(order, &huge_anon_orders_madvise))
		output = "always inherit [madvise] never";
	else
		output = "always inherit madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t anon_enabled_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_always);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_inherit);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "madvise")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		set_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err;

		err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute anon_enabled_attr =
	__ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store);

static struct attribute *anon_ctrl_attrs[] = {
	&anon_enabled_attr.attr,
	NULL,
};

static const struct attribute_group anon_ctrl_attr_grp = {
	.attrs = anon_ctrl_attrs,
};

static struct attribute *file_ctrl_attrs[] = {
#ifdef CONFIG_SHMEM
	&thpsize_shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group file_ctrl_attr_grp = {
	.attrs = file_ctrl_attrs,
};

static struct attribute *any_ctrl_attrs[] = {
	NULL,
};

static const struct attribute_group any_ctrl_attr_grp = {
	.attrs = any_ctrl_attrs,
};

static const struct kobj_type thpsize_ktype = {
	.release = &thpsize_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};

static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mthp_stat *this = &per_cpu(mthp_stats, cpu);

		sum += this->stats[order][item];
	}

	return sum;
}

#define DEFINE_MTHP_STAT_ATTR(_name, _index)				\
static ssize_t _name##_show(struct kobject *kobj,			\
			struct kobj_attribute *attr, char *buf)		\
{									\
	int order = to_thpsize(kobj)->order;				\
									\
	return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index));	\
}									\
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
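
/*
 * Illustrative note (added, not part of the original file): each
 * DEFINE_MTHP_STAT_ATTR() use below creates one read-only per-size counter,
 * e.g. anon_fault_alloc becomes
 *
 *	/sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/stats/anon_fault_alloc
 *
 * whose value is the sum of the per-CPU mthp_stats counters for that order.
 */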

DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
#ifdef CONFIG_SHMEM
DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
#endif
DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);

static struct attribute *anon_stats_attrs[] = {
	&anon_fault_alloc_attr.attr,
	&anon_fault_fallback_attr.attr,
	&anon_fault_fallback_charge_attr.attr,
	&swpout_fallback_attr.attr,
	&split_deferred_attr.attr,
	&nr_anon_attr.attr,
	&nr_anon_partially_mapped_attr.attr,
	NULL,
};

static struct attribute_group anon_stats_attr_grp = {
	.name = "stats",
	.attrs = anon_stats_attrs,
};

static struct attribute *file_stats_attrs[] = {
#ifdef CONFIG_SHMEM
	&shmem_alloc_attr.attr,
	&shmem_fallback_attr.attr,
	&shmem_fallback_charge_attr.attr,
#endif
	NULL,
};

static struct attribute_group file_stats_attr_grp = {
	.name = "stats",
	.attrs = file_stats_attrs,
};

static struct attribute *any_stats_attrs[] = {
	&swpout_fallback_attr.attr,
	&split_failed_attr.attr,
	NULL,
};

static struct attribute_group any_stats_attr_grp = {
	.name = "stats",
	.attrs = any_stats_attrs,
};

static int sysfs_add_group(struct kobject *kobj,
			   const struct attribute_group *grp)
{
	int ret = -ENOENT;

	/*
	 * If the group is named, try to merge first, assuming the subdirectory
	 * was already created. This avoids the warning emitted by
	 * sysfs_create_group() if the directory already exists.
	 */
	if (grp->name)
		ret = sysfs_merge_group(kobj, grp);
	if (ret)
		ret = sysfs_create_group(kobj, grp);

	return ret;
}

static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
	unsigned long size = (PAGE_SIZE << order) / SZ_1K;
	struct thpsize *thpsize;
	int ret = -ENOMEM;

	thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
	if (!thpsize)
		goto err;

	thpsize->order = order;

	ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
				   "hugepages-%lukB", size);
	if (ret)
		goto err_put;

	ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp);
	if (ret)
		goto err_put;

	ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp);
	if (ret)
		goto err_put;

	if (BIT(order) & THP_ORDERS_ALL_ANON) {
		ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp);
		if (ret)
			goto err_put;

		ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp);
		if (ret)
			goto err_put;
	}

	if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) {
		ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp);
		if (ret)
			goto err_put;

		ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp);
		if (ret)
			goto err_put;
	}

	return thpsize;

err_put:
	kobject_put(&thpsize->kobj);
err:
	return ERR_PTR(ret);
}

static void thpsize_release(struct kobject *kobj)
{
	kfree(to_thpsize(kobj));
}

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;
	struct thpsize *thpsize;
	unsigned long orders;
	int order;

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
	 * constant so we have to do this here.
	 */
	if (!anon_orders_configured)
		huge_anon_orders_inherit = BIT(PMD_ORDER);

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
	order = highest_order(orders);
	while (orders) {
		thpsize = thpsize_create(order, *hugepage_kobj);
		if (IS_ERR(thpsize)) {
			pr_err("failed to create thpsize for order %d\n", order);
			err = PTR_ERR(thpsize);
			goto remove_all;
		}
		list_add(&thpsize->node, &thpsize_list);
		order = next_order(&orders, order);
	}

	return 0;

remove_all:
	hugepage_exit_sysfs(*hugepage_kobj);
	return err;
remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	struct thpsize *thpsize, *tmp;

	list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
		list_del(&thpsize->node);
		kobject_put(&thpsize->kobj);
	}

	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init thp_shrinker_init(void)
{
	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
	if (!huge_zero_page_shrinker)
		return -ENOMEM;

	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE |
						 SHRINKER_NONSLAB,
						 "thp-deferred_split");
	if (!deferred_split_shrinker) {
		shrinker_free(huge_zero_page_shrinker);
		return -ENOMEM;
	}

	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
	shrinker_register(huge_zero_page_shrinker);

	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);

	return 0;
}
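
/*
 * Descriptive note (added, not part of the original file): two shrinkers are
 * set up above. "thp-zero" releases the huge zero folio once only the
 * shrinker-held reference remains (see shrink_huge_zero_page_scan()), and
 * "thp-deferred_split" walks the deferred-split queues under memory pressure
 * to split partially-unmapped THPs (see deferred_split_scan()).
 */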

static void __init thp_shrinker_exit(void)
{
	shrinker_free(huge_zero_page_shrinker);
	shrinker_free(deferred_split_shrinker);
}

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = thp_shrinker_init();
	if (err)
		goto err_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;

err_khugepaged:
	thp_shrinker_exit();
err_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;

	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

static inline int get_order_from_str(const char *size_str)
{
	unsigned long size;
	char *endptr;
	int order;

	size = memparse(size_str, &endptr);

	if (!is_power_of_2(size))
		goto err;
	order = get_order(size);
	if (BIT(order) & ~THP_ORDERS_ALL_ANON)
		goto err;

	return order;
err:
	pr_err("invalid size %s in thp_anon boot parameter\n", size_str);
	return -EINVAL;
}

static char str_dup[PAGE_SIZE] __initdata;
static int __init setup_thp_anon(char *str)
{
	char *token, *range, *policy, *subtoken;
	unsigned long always, inherit, madvise;
	char *start_size, *end_size;
	int start, end, nr;
	char *p;

	if (!str || strlen(str) + 1 > PAGE_SIZE)
		goto err;
	strcpy(str_dup, str);

	always = huge_anon_orders_always;
	madvise = huge_anon_orders_madvise;
	inherit = huge_anon_orders_inherit;
	p = str_dup;
	while ((token = strsep(&p, ";")) != NULL) {
		range = strsep(&token, ":");
		policy = token;

		if (!policy)
			goto err;

		while ((subtoken = strsep(&range, ",")) != NULL) {
			if (strchr(subtoken, '-')) {
				start_size = strsep(&subtoken, "-");
				end_size = subtoken;

				start = get_order_from_str(start_size);
				end = get_order_from_str(end_size);
			} else {
				start = end = get_order_from_str(subtoken);
			}

			if (start < 0 || end < 0 || start > end)
				goto err;

			nr = end - start + 1;
			if (!strcmp(policy, "always")) {
				bitmap_set(&always, start, nr);
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
			} else if (!strcmp(policy, "madvise")) {
				bitmap_set(&madvise, start, nr);
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&always, start, nr);
			} else if (!strcmp(policy, "inherit")) {
				bitmap_set(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
				bitmap_clear(&always, start, nr);
			} else if (!strcmp(policy, "never")) {
				bitmap_clear(&inherit, start, nr);
				bitmap_clear(&madvise, start, nr);
				bitmap_clear(&always, start, nr);
			} else {
				pr_err("invalid policy %s in thp_anon boot parameter\n", policy);
				goto err;
			}
		}
	}

	huge_anon_orders_always = always;
	huge_anon_orders_madvise = madvise;
	huge_anon_orders_inherit = inherit;
	anon_orders_configured = true;
	return 1;

err:
	pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str);
	return 0;
}
__setup("thp_anon=", setup_thp_anon);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd, vma);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
#endif

static inline bool is_transparent_hugepage(const struct folio *folio)
{
	if (!folio_test_large(folio))
		return false;

	return is_huge_zero_folio(folio) ||
	       folio_test_large_rmappable(folio);
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size,
		vm_flags_t vm_flags)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret, off_sub;

	if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
		return 0;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad,
					   off >> PAGE_SHIFT, flags, vm_flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	off_sub = (off - ret) & (size - 1);

	if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub)
		return ret;

	ret += off_sub;
	return ret;
}

unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
	if (ret)
		return ret;

	return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
					    vm_flags);
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
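
/*
 * Illustrative example (added, not part of the original file): for a file
 * mapping with len = 4MB, __thp_get_unmapped_area() above asks for
 * len_pad = len + PMD_SIZE and then shifts the result by the sub-PMD
 * remainder, so that the returned address is congruent to the file offset
 * modulo PMD_SIZE and the range can be mapped with PMD-sized THPs.
 */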

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(page);
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	folio_throttle_swaprate(folio, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	folio_zero_user(folio, vmf->address);
	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * folio_zero_user writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			folio_put(folio);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(folio, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		deferred_split_folio(folio, false);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	folio_put(folio);
	return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}
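
/*
 * Illustrative example (added, not part of the original file): with defrag
 * set to "madvise", a fault in a VM_HUGEPAGE (MADV_HUGEPAGE) vma gets
 * GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM and may compact synchronously,
 * while any other vma gets plain GFP_TRANSHUGE_LIGHT and simply falls back
 * to small pages if no huge page is immediately available.
 */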

/* Caller must hold page table lock. */
static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct folio *zero_folio)
{
	pmd_t entry;

	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret;

	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
		return VM_FAULT_FALLBACK;
	ret = vmf_anon_prepare(vmf);
	if (ret)
		return ret;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct folio *zero_folio;
		vm_fault_t ret;

		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
		if (unlikely(!zero_folio)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_folio(pgtable, vma->vm_mm, vma,
						    haddr, vmf->pmd, zero_folio);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	else
		entry = pmd_mkspecial(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
			(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
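
/*
 * Usage sketch (added, illustrative only): a driver's ->huge_fault() handler
 * for a DAX/PFNMAP mapping might do roughly
 *
 *	pfn_t pfn = phys_to_pfn_t(phys, PFN_DEV | PFN_MAP);
 *
 *	if (order == PMD_ORDER)
 *		return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
 *
 * where "phys" and "order" come from the driver; the helper names are as
 * understood from pfn_t.h and may differ between kernel versions.
 */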

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t prot = vma->vm_page_prot;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (WARN_ON_ONCE(pud_pfn(*pud) != pfn_t_to_pfn(pfn)))
				goto out_unlock;
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	else
		entry = pud_mkspecial(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
			(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_folio(page_folio(page), 1, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	struct folio *src_folio;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	pmd = pmdp_get_lockless(src_pmd);
	if (unlikely(pmd_special(pmd))) {
		dst_ptl = pmd_lock(dst_mm, dst_pmd);
		src_ptl = pmd_lockptr(src_mm, src_pmd);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
		/*
		 * No need to recheck the pmd, it can't change with write
		 * mmap lock held here.
		 *
		 * Meanwhile, making sure it's not a CoW VMA with writable
		 * mapping, otherwise it means either the anon page wrongly
		 * applied special bit, or we made the PRIVATE mapping be
		 * able to wrongly write to the backend MMIO.
		 */
		VM_WARN_ON_ONCE(is_cow_mapping(src_vma->vm_flags) && pmd_write(pmd));
		goto set_pmd;
	}

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (!is_readable_migration_entry(entry)) {
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			if (pmd_swp_uffd_wp(*src_pmd))
				pmd = pmd_swp_mkuffd_wp(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		if (!userfaultfd_wp(dst_vma))
			pmd = pmd_swp_clear_uffd_wp(pmd);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		/*
		 * mm_get_huge_zero_folio() will never allocate a new
		 * folio here, since we already have a zero page to
		 * copy. It just takes a reference.
		 */
		mm_get_huge_zero_folio(dst_mm);
		goto out_zero_folio;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	src_folio = page_folio(src_page);

	folio_get(src_folio);
	if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
		/* Page maybe pinned: split and retry the fault on PTEs. */
		folio_put(src_folio);
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_folio:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	if (!userfaultfd_wp(dst_vma))
		pmd = pmd_clear_uffd_wp(pmd);
	pmd = pmd_wrprotect(pmd);
set_pmd:
	pmd = pmd_mkold(pmd);
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (write)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, write))
		update_mmu_cache_pud(vma, addr, pud);
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * TODO: once we support anonymous pages, use
	 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
	 */
	if (is_cow_mapping(vma->vm_flags) && pud_write(pud)) {
		pudp_set_wrprotect(src_mm, addr, src_pud);
		pud = pud_wrprotect(pud);
	}
	pud = pud_mkold(pud);
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void huge_pmd_set_accessed(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		goto unlock;

	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
unlock:
	spin_unlock(vmf->ptl);
}

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t orig_pmd = vmf->orig_pmd;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);

	if (is_huge_zero_pmd(orig_pmd))
		goto fallback;

	spin_lock(vmf->ptl);

	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	page = pmd_page(orig_pmd);
	folio = page_folio(page);
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/* Early check when only holding the PT lock. */
	if (PageAnonExclusive(page))
		goto reuse;

	if (!folio_trylock(folio)) {
		folio_get(folio);
		spin_unlock(vmf->ptl);
		folio_lock(folio);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			spin_unlock(vmf->ptl);
			folio_unlock(folio);
			folio_put(folio);
			return 0;
		}
		folio_put(folio);
	}

	/* Recheck after temporarily dropping the PT lock. */
	if (PageAnonExclusive(page)) {
		folio_unlock(folio);
		goto reuse;
	}

	/*
	 * See do_wp_page(): we can only reuse the folio exclusively if
	 * there are no additional references. Note that we always drain
	 * the LRU cache immediately after adding a THP.
	 */
	if (folio_ref_count(folio) >
			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
		goto unlock_fallback;
	if (folio_test_swapcache(folio))
		folio_free_swap(folio);
	if (folio_ref_count(folio) == 1) {
		pmd_t entry;

		folio_move_anon_rmap(folio, vma);
		SetPageAnonExclusive(page);
		folio_unlock(folio);
reuse:
		if (unlikely(unshare)) {
			spin_unlock(vmf->ptl);
			return 0;
		}

		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		spin_unlock(vmf->ptl);
		return 0;
	}

unlock_fallback:
	folio_unlock(folio);
	spin_unlock(vmf->ptl);
fallback:
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}
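
/*
 * Descriptive note (added, not part of the original file): the write-protect
 * fault path above reuses the THP in place only when this mapping holds the
 * sole reference (modulo the swapcache); otherwise it splits the PMD and the
 * retried fault goes through the regular 4K COW path, copying just the
 * touched page.
 */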

static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
					   unsigned long addr, pmd_t pmd)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable (NUMA hinting). */
	if (pmd_protnone(pmd))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (pmd_needs_soft_dirty_wp(vma, pmd))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_huge_pmd_wp(vma, pmd))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/* See can_change_pte_writable(). */
		page = vm_normal_page_pmd(vma, addr, pmd);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/* See can_change_pte_writable(). */
	return pmd_dirty(pmd);
}

/* NUMA hinting page fault entry point for trans huge pmds */
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int nid = NUMA_NO_NODE;
	int target_nid, last_cpupid;
	pmd_t pmd, old_pmd;
	bool writable = false;
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	old_pmd = pmdp_get(vmf->pmd);

	if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	pmd = pmd_modify(old_pmd, vma->vm_page_prot);

	/*
	 * Detect now whether the PMD could be writable; this information
	 * is only valid while holding the PT lock.
	 */
	writable = pmd_write(pmd);
	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
	    can_change_pmd_writable(vma, vmf->address, pmd))
		writable = true;

	folio = vm_normal_folio_pmd(vma, haddr, pmd);
	if (!folio)
		goto out_map;

	nid = folio_nid(folio);

	target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
					&last_cpupid);
	if (target_nid == NUMA_NO_NODE)
		goto out_map;
	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
		flags |= TNF_MIGRATE_FAIL;
		goto out_map;
	}
	/* The folio is isolated and isolation code holds a folio reference. */
	spin_unlock(vmf->ptl);
	writable = false;

	if (!migrate_misplaced_folio(folio, vma, target_nid)) {
		flags |= TNF_MIGRATED;
		nid = target_nid;
		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
		return 0;
	}

	flags |= TNF_MIGRATE_FAIL;
	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}
out_map:
	/* Restore the PMD */
	pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (writable)
		pmd = pmd_mkwrite(pmd, vma);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	spin_unlock(vmf->ptl);

	if (nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
	return 0;
}

/*
 * Return true if we do MADV_FREE successfully on entire pmd page.
 * Otherwise, return false.
 */
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct folio *folio;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	folio = pmd_folio(orig_pmd);
	/*
	 * If other processes are mapping this folio, we couldn't discard
	 * the folio unless they all do MADV_FREE so let's skip the folio.
	 */
	if (folio_likely_mapped_shared(folio))
		goto out;

	if (!folio_trylock(folio))
		goto out;

	/*
	 * If user want to discard part-pages of THP, split it so MADV_FREE
	 * will deactivate only them.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		folio_get(folio);
		spin_unlock(ptl);
		split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);
		goto out_unlocked;
	}

	if (folio_test_dirty(folio))
		folio_clear_dirty(folio);
	folio_unlock(folio);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	folio_mark_lazyfree(folio);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}
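
/*
 * Illustrative example (added, not part of the original file):
 * madvise(addr, len, MADV_FREE) over a fully covered PMD-mapped THP ends up
 * here and lazily frees the whole folio; if the range covers only part of
 * the THP, the folio is split first so only the requested subpages are
 * deactivated.
 */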
2045 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
2049 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2050 pte_free(mm, pgtable);
2054 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2055 pmd_t *pmd, unsigned long addr)
2060 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2062 ptl = __pmd_trans_huge_lock(pmd, vma);
2066 * For architectures like ppc64 we look at deposited pgtable
2067 * when calling pmdp_huge_get_and_clear. So do the
2068 * pgtable_trans_huge_withdraw after finishing pmdp related
2071 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
2073 arch_check_zapped_pmd(vma, orig_pmd);
2074 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2075 if (vma_is_special_huge(vma)) {
2076 if (arch_needs_pgtable_deposit())
2077 zap_deposited_table(tlb->mm, pmd);
2079 } else if (is_huge_zero_pmd(orig_pmd)) {
2080 zap_deposited_table(tlb->mm, pmd);
2083 struct folio *folio = NULL;
2084 int flush_needed = 1;
2086 if (pmd_present(orig_pmd)) {
2087 struct page *page = pmd_page(orig_pmd);
2089 folio = page_folio(page);
2090 folio_remove_rmap_pmd(folio, page, vma);
2091 WARN_ON_ONCE(folio_mapcount(folio) < 0);
2092 VM_BUG_ON_PAGE(!PageHead(page), page);
2093 } else if (thp_migration_supported()) {
2096 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
2097 entry = pmd_to_swp_entry(orig_pmd);
2098 folio = pfn_swap_entry_folio(entry);
2101 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
2103 if (folio_test_anon(folio)) {
2104 zap_deposited_table(tlb->mm, pmd);
2105 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
2107 if (arch_needs_pgtable_deposit())
2108 zap_deposited_table(tlb->mm, pmd);
2109 add_mm_counter(tlb->mm, mm_counter_file(folio),
2115 tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
2120 #ifndef pmd_move_must_withdraw
2121 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
2122 spinlock_t *old_pmd_ptl,
2123 struct vm_area_struct *vma)
2126 * With split pmd lock we also need to move preallocated
2127 * PTE page table if new_pmd is on different PMD page table.
2129 * We also don't deposit and withdraw tables for file pages.
2131 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
2135 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
2137 #ifdef CONFIG_MEM_SOFT_DIRTY
2138 if (unlikely(is_pmd_migration_entry(pmd)))
2139 pmd = pmd_swp_mksoft_dirty(pmd);
2140 else if (pmd_present(pmd))
2141 pmd = pmd_mksoft_dirty(pmd);
2146 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
2147 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
2149 spinlock_t *old_ptl, *new_ptl;
2151 struct mm_struct *mm = vma->vm_mm;
2152 bool force_flush = false;
2155 * The destination pmd shouldn't be established, free_pgtables()
2156 * should have released it; but move_page_tables() might have already
2157 * inserted a page table, if racing against shmem/file collapse.
2159 if (!pmd_none(*new_pmd)) {
2160 VM_BUG_ON(pmd_trans_huge(*new_pmd));
2165 * We don't have to worry about the ordering of src and dst
2166 * ptlocks because exclusive mmap_lock prevents deadlock.
2168 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
2170 new_ptl = pmd_lockptr(mm, new_pmd);
2171 if (new_ptl != old_ptl)
2172 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
2173 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
2174 if (pmd_present(pmd))
2176 VM_BUG_ON(!pmd_none(*new_pmd));
2178 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
2180 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2181 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
2183 pmd = move_soft_dirty_pmd(pmd);
2184 set_pmd_at(mm, new_addr, new_pmd, pmd);
2186 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
2187 if (new_ptl != old_ptl)
2188 spin_unlock(new_ptl);
2189 spin_unlock(old_ptl);
2197 * - 0 if PMD could not be locked
2198 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2199 * or if prot_numa but THP migration is not supported
2200 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
2202 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2203 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2204 unsigned long cp_flags)
2206 struct mm_struct *mm = vma->vm_mm;
2208 pmd_t oldpmd, entry;
2209 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
2210 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2211 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
2214 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2216 if (prot_numa && !thp_migration_supported())
2219 ptl = __pmd_trans_huge_lock(pmd, vma);
2223 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2224 if (is_swap_pmd(*pmd)) {
2225 swp_entry_t entry = pmd_to_swp_entry(*pmd);
2226 struct folio *folio = pfn_swap_entry_folio(entry);
2229 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
2230 if (is_writable_migration_entry(entry)) {
2232 * A protection check is difficult so
2233 * just be safe and disable write
2235 if (folio_test_anon(folio))
2236 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2238 entry = make_readable_migration_entry(swp_offset(entry));
2239 newpmd = swp_entry_to_pmd(entry);
2240 if (pmd_swp_soft_dirty(*pmd))
2241 newpmd = pmd_swp_mksoft_dirty(newpmd);
2247 newpmd = pmd_swp_mkuffd_wp(newpmd);
2248 else if (uffd_wp_resolve)
2249 newpmd = pmd_swp_clear_uffd_wp(newpmd);
2250 if (!pmd_same(*pmd, newpmd))
2251 set_pmd_at(mm, addr, pmd, newpmd);
2257 struct folio *folio;
2260 * Avoid trapping faults against the zero page. The read-only
2261 * data is likely to be read-cached on the local CPU and
2262 * local/remote hits to the zero page are not interesting.
2264 if (is_huge_zero_pmd(*pmd))
2267 if (pmd_protnone(*pmd))
2270 folio = pmd_folio(*pmd);
2271 toptier = node_is_toptier(folio_nid(folio));
* Skip scanning the top-tier node if normal NUMA
* balancing is disabled.
2276 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
2280 if (folio_use_access_time(folio))
2281 folio_xchg_access_time(folio,
2282 jiffies_to_msecs(jiffies));
2285 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
2286 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
2287 * which is also under mmap_read_lock(mm):
*	CPU 0:				CPU 1:
*				change_huge_pmd(prot_numa=1)
*				 pmdp_huge_get_and_clear_notify()
*	madvise_dontneed()
*	 zap_pmd_range()
*	  pmd_trans_huge(*pmd) == 0 (without ptl)
*	  // skip the pmd
*				 set_pmd_at();
*				 // pmd is re-established
*
* The race makes MADV_DONTNEED miss the huge pmd and not clear it,
* which may break userspace.
2302 * pmdp_invalidate_ad() is required to make sure we don't miss
2303 * dirty/young flags set by hardware.
2305 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
2307 entry = pmd_modify(oldpmd, newprot);
if (uffd_wp)
	entry = pmd_mkuffd_wp(entry);
2310 else if (uffd_wp_resolve)
* Leave the write bit to be handled by the PF interrupt
* handler, then things like COW can be handled properly.
entry = pmd_clear_uffd_wp(entry);
2318 /* See change_pte_range(). */
2319 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2320 can_change_pmd_writable(vma, addr, entry))
2321 entry = pmd_mkwrite(entry, vma);
2324 set_pmd_at(mm, addr, pmd, entry);
2326 if (huge_pmd_needs_flush(oldpmd, entry))
2327 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
* Returns:
*
* - 0: if the pud leaf changed from under us
* - 1: if the pud can be skipped
* - HPAGE_PUD_NR: if the pud was successfully processed
2340 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2341 int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2342 pud_t *pudp, unsigned long addr, pgprot_t newprot,
2343 unsigned long cp_flags)
2345 struct mm_struct *mm = vma->vm_mm;
2346 pud_t oldpud, entry;
2349 tlb_change_page_size(tlb, HPAGE_PUD_SIZE);
2351 /* NUMA balancing doesn't apply to dax */
if (cp_flags & MM_CP_PROT_NUMA)
	return 1;
* Huge entries with userfault-wp only work with anonymous memory, while
* we don't have anonymous PUDs yet.
if (WARN_ON_ONCE(cp_flags & MM_CP_UFFD_WP_ALL))
	return 1;
ptl = __pud_trans_huge_lock(pudp, vma);
if (!ptl)
	return 0;
2367 * Can't clear PUD or it can race with concurrent zapping. See
2368 * change_huge_pmd().
2370 oldpud = pudp_invalidate(vma, addr, pudp);
2371 entry = pud_modify(oldpud, newprot);
2372 set_pud_at(mm, addr, pudp, entry);
2373 tlb_flush_pud_range(tlb, addr, HPAGE_PUD_SIZE);
2376 return HPAGE_PUD_NR;
2380 #ifdef CONFIG_USERFAULTFD
* The PT lock for src_pmd and the locks on dst_vma/src_vma (taken for reading)
* are held by the caller, but this function must release the page table lock
* before returning. Just move the page from src_pmd to dst_pmd if possible.
* Return zero on success, -EAGAIN if the operation needs to be repeated by
* the caller, or another error code in case of failure.
2388 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2389 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2390 unsigned long dst_addr, unsigned long src_addr)
2392 pmd_t _dst_pmd, src_pmdval;
2393 struct page *src_page;
2394 struct folio *src_folio;
2395 struct anon_vma *src_anon_vma;
2396 spinlock_t *src_ptl, *dst_ptl;
2397 pgtable_t src_pgtable;
2398 struct mmu_notifier_range range;
2401 src_pmdval = *src_pmd;
2402 src_ptl = pmd_lockptr(mm, src_pmd);
2404 lockdep_assert_held(src_ptl);
2405 vma_assert_locked(src_vma);
2406 vma_assert_locked(dst_vma);
2408 /* Sanity checks before the operation */
2409 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2410 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2411 spin_unlock(src_ptl);
if (!pmd_trans_huge(src_pmdval)) {
	spin_unlock(src_ptl);
	if (is_pmd_migration_entry(src_pmdval)) {
		pmd_migration_entry_wait(mm, &src_pmdval);
		return -EAGAIN;
	}
	return -ENOENT;
}
2424 src_page = pmd_page(src_pmdval);
2426 if (!is_huge_zero_pmd(src_pmdval)) {
2427 if (unlikely(!PageAnonExclusive(src_page))) {
	spin_unlock(src_ptl);
	return -EBUSY;
2432 src_folio = page_folio(src_page);
2433 folio_get(src_folio);
2437 spin_unlock(src_ptl);
2439 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2440 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2441 src_addr + HPAGE_PMD_SIZE);
2442 mmu_notifier_invalidate_range_start(&range);
2445 folio_lock(src_folio);
2448 * split_huge_page walks the anon_vma chain without the page
2449 * lock. Serialize against it with the anon_vma lock, the page
2450 * lock is not enough.
2452 src_anon_vma = folio_get_anon_vma(src_folio);
2453 if (!src_anon_vma) {
2457 anon_vma_lock_write(src_anon_vma);
2459 src_anon_vma = NULL;
2461 dst_ptl = pmd_lockptr(mm, dst_pmd);
2462 double_pt_lock(src_ptl, dst_ptl);
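	/*
	 * Re-check both PMDs under the page table locks: if either changed
	 * while the locks were dropped, bail out and let the caller retry.
	 */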
2463 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2464 !pmd_same(*dst_pmd, dst_pmdval))) {
2469 if (folio_maybe_dma_pinned(src_folio) ||
2470 !PageAnonExclusive(&src_folio->page)) {
2475 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2476 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2481 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2482 /* Folio got pinned from under us. Put it back and fail the move. */
2483 if (folio_maybe_dma_pinned(src_folio)) {
2484 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2489 folio_move_anon_rmap(src_folio, dst_vma);
2490 src_folio->index = linear_page_index(dst_vma, dst_addr);
2492 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
/* Follow mremap() behavior and treat the entry as dirty after the move */
2494 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2496 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2497 _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
2499 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2501 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2502 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2504 double_pt_unlock(src_ptl, dst_ptl);
2506 anon_vma_unlock_write(src_anon_vma);
2507 put_anon_vma(src_anon_vma);
2510 /* unblock rmap walks */
2512 folio_unlock(src_folio);
2513 mmu_notifier_invalidate_range_end(&range);
2515 folio_put(src_folio);
2518 #endif /* CONFIG_USERFAULTFD */
* Returns the page table lock pointer if a given pmd maps a THP, NULL otherwise.
*
* Note that if it returns the page table lock pointer, this routine returns
* without unlocking the page table lock, so callers must unlock it.
2526 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2529 ptl = pmd_lock(vma->vm_mm, pmd);
if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
	   pmd_devmap(*pmd)))
	return ptl;
spin_unlock(ptl);
return NULL;
* Returns the page table lock pointer if a given pud maps a THP, NULL otherwise.
*
* Note that if it returns the page table lock pointer, this routine returns
* without unlocking the page table lock, so callers must unlock it.
2543 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2547 ptl = pud_lock(vma->vm_mm, pud);
if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
	return ptl;
spin_unlock(ptl);
return NULL;
2554 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2555 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2556 pud_t *pud, unsigned long addr)
ptl = __pud_trans_huge_lock(pud, vma);
if (!ptl)
	return 0;
2565 orig_pud = pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2566 arch_check_zapped_pud(vma, orig_pud);
2567 tlb_remove_pud_tlb_entry(tlb, pud, addr);
if (vma_is_special_huge(vma)) {
	spin_unlock(ptl);
	/* No zero page support yet */
} else {
	/* No support for anonymous PUD pages yet */
	BUG();
}
return 1;
2578 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2579 unsigned long haddr)
2581 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2582 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2583 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2584 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2586 count_vm_event(THP_SPLIT_PUD);
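	/*
	 * Huge PUDs only exist for special (DAX/PFN) mappings, so there is
	 * nothing to remap at a smaller granularity here: just clear the
	 * entry and let later faults repopulate the range.
	 */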
2588 pudp_huge_clear_flush(vma, haddr, pud);
2591 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2592 unsigned long address)
2595 struct mmu_notifier_range range;
2597 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2598 address & HPAGE_PUD_MASK,
2599 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2600 mmu_notifier_invalidate_range_start(&range);
2601 ptl = pud_lock(vma->vm_mm, pud);
2602 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2604 __split_huge_pud_locked(vma, pud, range.start);
2608 mmu_notifier_invalidate_range_end(&range);
2611 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2612 unsigned long address)
2615 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2617 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2618 unsigned long haddr, pmd_t *pmd)
2620 struct mm_struct *mm = vma->vm_mm;
2622 pmd_t _pmd, old_pmd;
* Leave the pmd empty until the pte entries are filled. Note that it is fine
* to delay notification until mmu_notifier_invalidate_range_end() as we are
* replacing a write-protected huge zero page with write-protected small
* zero-page ptes.
*
* See Documentation/mm/mmu_notifier.rst
2635 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2637 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
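	/*
	 * Populate a local, not-yet-visible pmd value with the deposited page
	 * table so we can fill it with zero-page ptes before publishing it
	 * below.
	 */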
2638 pmd_populate(mm, &_pmd, pgtable);
2640 pte = pte_offset_map(&_pmd, haddr);
2642 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2645 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
2646 entry = pte_mkspecial(entry);
2647 if (pmd_uffd_wp(old_pmd))
2648 entry = pte_mkuffd_wp(entry);
2649 VM_BUG_ON(!pte_none(ptep_get(pte)));
2650 set_pte_at(mm, addr, pte, entry);
2654 smp_wmb(); /* make pte visible before pmd */
2655 pmd_populate(mm, pmd, pgtable);
2658 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2659 unsigned long haddr, bool freeze)
2661 struct mm_struct *mm = vma->vm_mm;
2662 struct folio *folio;
2665 pmd_t old_pmd, _pmd;
2666 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2667 bool anon_exclusive = false, dirty = false;
2672 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2673 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2674 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2675 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2676 && !pmd_devmap(*pmd));
2678 count_vm_event(THP_SPLIT_PMD);
2680 if (!vma_is_anonymous(vma)) {
2681 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2683 * We are going to unmap this huge page. So
2684 * just go ahead and zap it
2686 if (arch_needs_pgtable_deposit())
2687 zap_deposited_table(mm, pmd);
if (vma_is_special_huge(vma))
	return;
2690 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2693 entry = pmd_to_swp_entry(old_pmd);
2694 folio = pfn_swap_entry_folio(entry);
2696 page = pmd_page(old_pmd);
2697 folio = page_folio(page);
2698 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
2699 folio_mark_dirty(folio);
2700 if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2701 folio_set_referenced(folio);
2702 folio_remove_rmap_pmd(folio, page, vma);
2705 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
2709 if (is_huge_zero_pmd(*pmd)) {
* FIXME: Do we want to invalidate the secondary mmu by calling
* mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments below
* inside __split_huge_pmd().
*
* We are going from a write-protected huge zero page to write-protected
* small zero pages, so it does not seem useful to invalidate the
* secondary mmu at this time.
2719 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2722 pmd_migration = is_pmd_migration_entry(*pmd);
2723 if (unlikely(pmd_migration)) {
	old_pmd = *pmd;
	entry = pmd_to_swp_entry(old_pmd);
2728 page = pfn_swap_entry_to_page(entry);
2729 write = is_writable_migration_entry(entry);
2731 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2732 young = is_migration_entry_young(entry);
2733 dirty = is_migration_entry_dirty(entry);
2734 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2735 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2738 * Up to this point the pmd is present and huge and userland has
2739 * the whole access to the hugepage during the split (which
2740 * happens in place). If we overwrite the pmd with the not-huge
2741 * version pointing to the pte here (which of course we could if
2742 * all CPUs were bug free), userland could trigger a small page
2743 * size TLB miss on the small sized TLB while the hugepage TLB
* entry is still established in the huge TLB. Some CPUs don't like
* that. See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf,
* Erratum 383 on page 105. Intel should be safe, but it also warns
* that it's only safe if the permission and cache attributes of the
* two entries loaded in the two TLBs are identical (which should be
* the case here). But it is generally safer to never allow small and
* huge TLB entries for the same virtual address to be loaded
* simultaneously. So instead of doing "pmd_populate();
* flush_pmd_tlb_range();" we first mark the current pmd notpresent
* (atomically, because pmd_trans_huge must remain set on the pmd at
* all times until the split is complete for this pmd), then we flush
* the SMP TLB and finally we write the non-huge version of the pmd
* entry with pmd_populate.
2760 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2761 page = pmd_page(old_pmd);
2762 folio = page_folio(page);
2763 if (pmd_dirty(old_pmd)) {
	dirty = true;
	folio_set_dirty(folio);
2767 write = pmd_write(old_pmd);
2768 young = pmd_young(old_pmd);
2769 soft_dirty = pmd_soft_dirty(old_pmd);
2770 uffd_wp = pmd_uffd_wp(old_pmd);
2772 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2773 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2776 * Without "freeze", we'll simply split the PMD, propagating the
2777 * PageAnonExclusive() flag for each PTE by setting it for
2778 * each subpage -- no need to (temporarily) clear.
2780 * With "freeze" we want to replace mapped pages by
2781 * migration entries right away. This is only possible if we
2782 * managed to clear PageAnonExclusive() -- see
2783 * set_pmd_migration_entry().
2785 * In case we cannot clear PageAnonExclusive(), split the PMD
2786 * only and let try_to_migrate_one() fail later.
2788 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
2790 anon_exclusive = PageAnonExclusive(page);
if (freeze && anon_exclusive &&
    folio_try_share_anon_rmap_pmd(folio, page))
	freeze = false;
2795 rmap_t rmap_flags = RMAP_NONE;
2797 folio_ref_add(folio, HPAGE_PMD_NR - 1);
if (anon_exclusive)
	rmap_flags |= RMAP_EXCLUSIVE;
2800 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2801 vma, haddr, rmap_flags);
* Withdraw the table only after we mark the pmd entry invalid.
* This is critical for some architectures (Power).
2809 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2810 pmd_populate(mm, &_pmd, pgtable);
2812 pte = pte_offset_map(&_pmd, haddr);
2816 * Note that NUMA hinting access restrictions are not transferred to
2817 * avoid any possibility of altering permissions across VMAs.
2819 if (freeze || pmd_migration) {
2820 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2822 swp_entry_t swp_entry;
	if (write)
		swp_entry = make_writable_migration_entry(
					page_to_pfn(page + i));
2827 else if (anon_exclusive)
2828 swp_entry = make_readable_exclusive_migration_entry(
2829 page_to_pfn(page + i));
2831 swp_entry = make_readable_migration_entry(
2832 page_to_pfn(page + i));
	if (young)
		swp_entry = make_migration_entry_young(swp_entry);
	if (dirty)
		swp_entry = make_migration_entry_dirty(swp_entry);
	entry = swp_entry_to_pte(swp_entry);
	if (soft_dirty)
		entry = pte_swp_mksoft_dirty(entry);
	if (uffd_wp)
		entry = pte_swp_mkuffd_wp(entry);
2843 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2844 set_pte_at(mm, addr, pte + i, entry);
2849 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
if (write)
	entry = pte_mkwrite(entry, vma);
if (!young)
	entry = pte_mkold(entry);
/* NOTE: this may set soft-dirty too on some archs */
if (dirty)
	entry = pte_mkdirty(entry);
if (soft_dirty)
	entry = pte_mksoft_dirty(entry);
if (uffd_wp)
	entry = pte_mkuffd_wp(entry);
2862 for (i = 0; i < HPAGE_PMD_NR; i++)
2863 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2865 set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
if (!pmd_migration)
	folio_remove_rmap_pmd(folio, page, vma);
if (freeze)
	put_page(page);
2874 smp_wmb(); /* make pte visible before pmd */
2875 pmd_populate(mm, pmd, pgtable);
2878 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
2879 pmd_t *pmd, bool freeze, struct folio *folio)
2881 VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio));
2882 VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
2883 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2884 VM_BUG_ON(freeze && !folio);
2887 * When the caller requests to set up a migration entry, we
2888 * require a folio to check the PMD against. Otherwise, there
2889 * is a risk of replacing the wrong folio.
2891 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2892 is_pmd_migration_entry(*pmd)) {
if (folio && folio != pmd_folio(*pmd))
	return;
2895 __split_huge_pmd_locked(vma, pmd, address, freeze);
2899 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2900 unsigned long address, bool freeze, struct folio *folio)
2903 struct mmu_notifier_range range;
2905 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2906 address & HPAGE_PMD_MASK,
2907 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2908 mmu_notifier_invalidate_range_start(&range);
2909 ptl = pmd_lock(vma->vm_mm, pmd);
2910 split_huge_pmd_locked(vma, range.start, pmd, freeze, folio);
2912 mmu_notifier_invalidate_range_end(&range);
2915 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2916 bool freeze, struct folio *folio)
pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);

if (!pmd)
	return;
2923 __split_huge_pmd(vma, pmd, address, freeze, folio);
2926 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
* If the new address isn't hpage aligned and it could previously
* contain a hugepage: check if we need to split a huge pmd.
2932 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2933 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2934 ALIGN(address, HPAGE_PMD_SIZE)))
2935 split_huge_pmd_address(vma, address, false, NULL);
2938 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2939 unsigned long start,
2943 /* Check if we need to split start first. */
2944 split_huge_pmd_if_needed(vma, start);
2946 /* Check if we need to split end next. */
2947 split_huge_pmd_if_needed(vma, end);
* If we're also updating the next vma's vm_start,
* check if we need to split it.
2953 if (adjust_next > 0) {
2954 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2955 unsigned long nstart = next->vm_start;
2956 nstart += adjust_next;
2957 split_huge_pmd_if_needed(next, nstart);
2961 static void unmap_folio(struct folio *folio)
2963 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
2966 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2968 if (folio_test_pmd_mappable(folio))
2969 ttu_flags |= TTU_SPLIT_HUGE_PMD;
2972 * Anon pages need migration entries to preserve them, but file
2973 * pages can simply be left unmapped, then faulted back on demand.
2974 * If that is ever changed (perhaps for mlock), update remap_page().
2976 if (folio_test_anon(folio))
2977 try_to_migrate(folio, ttu_flags);
2979 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2981 try_to_unmap_flush();
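/*
 * Try to free a clean, fully-unmapped anonymous PMD-mapped folio directly
 * instead of splitting it: this only succeeds when neither the folio nor its
 * pmd is dirty and the only remaining references are the isolation ref plus
 * the rmap(s).
 */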
2984 static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
2985 unsigned long addr, pmd_t *pmdp,
2986 struct folio *folio)
2988 struct mm_struct *mm = vma->vm_mm;
2989 int ref_count, map_count;
2990 pmd_t orig_pmd = *pmdp;
if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
	return false;
2995 orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
2998 * Syncing against concurrent GUP-fast:
2999 * - clear PMD; barrier; read refcount
3000 * - inc refcount; barrier; read PMD
smp_mb();

ref_count = folio_ref_count(folio);
map_count = folio_mapcount(folio);

/*
 * Order reads for folio refcount and dirty flag
 * (see comments in __remove_mapping()).
 */
smp_rmb();
* If the folio or its PMD is redirtied at this point, or if there
* are unexpected references, we give up on discarding this folio
* and remap it.
*
* The only folio refs must be the one from isolation plus the rmap(s).
3020 if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
3021 ref_count != map_count + 1) {
3022 set_pmd_at(mm, addr, pmdp, orig_pmd);
3026 folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma);
3027 zap_deposited_table(mm, pmdp);
3028 add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
3029 if (vma->vm_flags & VM_LOCKED)
	mlock_drain_local();
folio_put(folio);

return true;
3036 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
3037 pmd_t *pmdp, struct folio *folio)
3039 VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
3040 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
3041 VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
3043 if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
	return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);

return false;
3049 static void remap_page(struct folio *folio, unsigned long nr, int flags)
3053 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
3054 if (!folio_test_anon(folio))
3057 remove_migration_ptes(folio, folio, RMP_LOCKED | flags);
3058 i += folio_nr_pages(folio);
3061 folio = folio_next(folio);
3065 static void lru_add_page_tail(struct folio *folio, struct page *tail,
3066 struct lruvec *lruvec, struct list_head *list)
3068 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3069 VM_BUG_ON_FOLIO(PageLRU(tail), folio);
3070 lockdep_assert_held(&lruvec->lru_lock);
3073 /* page reclaim is reclaiming a huge page */
3074 VM_WARN_ON(folio_test_lru(folio));
3076 list_add_tail(&tail->lru, list);
3078 /* head is still on lru (and we have it frozen) */
3079 VM_WARN_ON(!folio_test_lru(folio));
3080 if (folio_test_unevictable(folio))
3081 tail->mlock_count = 0;
3083 list_add_tail(&tail->lru, &folio->lru);
3088 static void __split_huge_page_tail(struct folio *folio, int tail,
3089 struct lruvec *lruvec, struct list_head *list,
3090 unsigned int new_order)
3092 struct page *head = &folio->page;
3093 struct page *page_tail = head + tail;
3095 * Careful: new_folio is not a "real" folio before we cleared PageTail.
3096 * Don't pass it around before clear_compound_head().
3098 struct folio *new_folio = (struct folio *)page_tail;
3100 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
3103 * Clone page flags before unfreezing refcount.
3105 * After successful get_page_unless_zero() might follow flags change,
3106 * for example lock_page() which set PG_waiters.
3108 * Note that for mapped sub-pages of an anonymous THP,
3109 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
3110 * the migration entry instead from where remap_page() will restore it.
3111 * We can still have PG_anon_exclusive set on effectively unmapped and
3112 * unreferenced sub-pages of an anonymous THP: we can simply drop
3113 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
3115 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
3116 page_tail->flags |= (head->flags &
3117 ((1L << PG_referenced) |
3118 (1L << PG_swapbacked) |
3119 (1L << PG_swapcache) |
3120 (1L << PG_mlocked) |
3121 (1L << PG_uptodate) |
3123 (1L << PG_workingset) |
3125 (1L << PG_unevictable) |
3126 #ifdef CONFIG_ARCH_USES_PG_ARCH_2
3129 #ifdef CONFIG_ARCH_USES_PG_ARCH_3
3133 LRU_GEN_MASK | LRU_REFS_MASK));
3135 /* ->mapping in first and second tail page is replaced by other uses */
3136 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
3138 page_tail->mapping = head->mapping;
3139 page_tail->index = head->index + tail;
3142 * page->private should not be set in tail pages. Fix up and warn once
3143 * if private is unexpectedly set.
3145 if (unlikely(page_tail->private)) {
3146 VM_WARN_ON_ONCE_PAGE(true, page_tail);
3147 page_tail->private = 0;
3149 if (folio_test_swapcache(folio))
3150 new_folio->swap.val = folio->swap.val + tail;
/* Page flags must be visible before we make the page non-compound. */
smp_wmb();
3156 * Clear PageTail before unfreezing page refcount.
3158 * After successful get_page_unless_zero() might follow put_page()
3159 * which needs correct compound_head().
3161 clear_compound_head(page_tail);
if (new_order) {
	prep_compound_page(page_tail, new_order);
	folio_set_large_rmappable(new_folio);
}
3167 /* Finally unfreeze refcount. Additional reference from page cache. */
3168 page_ref_unfreeze(page_tail,
3169 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
3170 folio_nr_pages(new_folio) : 0));
3172 if (folio_test_young(folio))
3173 folio_set_young(new_folio);
3174 if (folio_test_idle(folio))
3175 folio_set_idle(new_folio);
3177 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
3180 * always add to the tail because some iterators expect new
* pages to show after the currently processed elements - e.g.
* migrate_pages().
3184 lru_add_page_tail(folio, page_tail, lruvec, list);
3187 static void __split_huge_page(struct page *page, struct list_head *list,
3188 pgoff_t end, unsigned int new_order)
3190 struct folio *folio = page_folio(page);
3191 struct page *head = &folio->page;
3192 struct lruvec *lruvec;
3193 struct address_space *swap_cache = NULL;
3194 unsigned long offset = 0;
3195 int i, nr_dropped = 0;
3196 unsigned int new_nr = 1 << new_order;
3197 int order = folio_order(folio);
3198 unsigned int nr = 1 << order;
/* Complete memcg work before adding pages to the LRU */
3201 split_page_memcg(head, order, new_order);
3203 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
3204 offset = swap_cache_index(folio->swap);
3205 swap_cache = swap_address_space(folio->swap);
3206 xa_lock(&swap_cache->i_pages);
3209 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
3210 lruvec = folio_lruvec_lock(folio);
3212 ClearPageHasHWPoisoned(head);
3214 for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
3215 __split_huge_page_tail(folio, i, lruvec, list, new_order);
3216 /* Some pages can be beyond EOF: drop them from page cache */
3217 if (head[i].index >= end) {
3218 struct folio *tail = page_folio(head + i);
			if (shmem_mapping(folio->mapping))
				nr_dropped++;
3222 else if (folio_test_clear_dirty(tail))
3223 folio_account_cleaned(tail,
3224 inode_to_wb(folio->mapping->host));
			__filemap_remove_folio(tail, NULL);
			folio_put(tail);
3227 } else if (!PageAnon(page)) {
			__xa_store(&folio->mapping->i_pages, head[i].index,
					head + i, 0);
3230 } else if (swap_cache) {
			__xa_store(&swap_cache->i_pages, offset + i,
					head + i, 0);
if (!new_order)
	ClearPageCompound(head);
else {
	struct folio *new_folio = (struct folio *)head;

	folio_set_order(new_folio, new_order);
}
3243 unlock_page_lruvec(lruvec);
3244 /* Caller disabled irqs, so they are still disabled here */
3246 split_page_owner(head, order, new_order);
3247 pgalloc_tag_split(folio, order, new_order);
3249 /* See comment in __split_huge_page_tail() */
3250 if (folio_test_anon(folio)) {
3251 /* Additional pin to swap cache */
3252 if (folio_test_swapcache(folio)) {
3253 folio_ref_add(folio, 1 + new_nr);
3254 xa_unlock(&swap_cache->i_pages);
3256 folio_ref_inc(folio);
3259 /* Additional pin to page cache */
3260 folio_ref_add(folio, 1 + new_nr);
3261 xa_unlock(&folio->mapping->i_pages);
if (nr_dropped)
	shmem_uncharge(folio->mapping->host, nr_dropped);
3267 remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0);
3270 * set page to its compound_head when split to non order-0 pages, so
3271 * we can skip unlocking it below, since PG_locked is transferred to
3272 * the compound_head of the page and the caller will unlock it.
if (new_order)
	page = compound_head(page);
3277 for (i = 0; i < nr; i += new_nr) {
3278 struct page *subpage = head + i;
3279 struct folio *new_folio = page_folio(subpage);
		if (subpage == page)
			continue;
3282 folio_unlock(new_folio);
* Subpages may be freed if there wasn't any mapping left,
* for example if add_to_swap() is running on an LRU page that
* had its mapping zapped. Freeing these pages requires
* taking the lru_lock, so we do the put_page of the tail
* pages after the split is complete.
3291 free_page_and_swap_cache(subpage);
3295 /* Racy check whether the huge page can be split */
3296 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
3300 /* Additional pins from page cache */
3301 if (folio_test_anon(folio))
3302 extra_pins = folio_test_swapcache(folio) ?
3303 folio_nr_pages(folio) : 0;
3305 extra_pins = folio_nr_pages(folio);
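	/*
	 * The folio can only be split when every remaining reference is
	 * accounted for: the mappings, the page/swap cache pins (extra_pins)
	 * and the caller's own pins.
	 */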
if (pextra_pins)
	*pextra_pins = extra_pins;
return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
				caller_pins;
3313 * This function splits a large folio into smaller folios of order @new_order.
3314 * @page can point to any page of the large folio to split. The split operation
3315 * does not change the position of @page.
3319 * 1) The caller must hold a reference on the @page's owning folio, also known
3320 * as the large folio.
3322 * 2) The large folio must be locked.
3324 * 3) The folio must not be pinned. Any unexpected folio references, including
3325 * GUP pins, will result in the folio not getting split; instead, the caller
3326 * will receive an -EAGAIN.
* 4) @new_order > 1, usually. Splitting to order 1 is not supported for
* anonymous (non-file-backed) folios, because folio->_deferred_list, which
* is used by partially mapped folios, is stored in subpage 2, but an order-1
* folio only has subpages 0 and 1. File-backed order-1 folios are supported,
* since they do not use _deferred_list.
3334 * After splitting, the caller's folio reference will be transferred to @page,
3335 * resulting in a raised refcount of @page after this call. The other pages may
3336 * be freed if they are not mapped.
3338 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
* Pages in @new_order will inherit the mapping, flags, and so on from the
* huge page.
3343 * Returns 0 if the huge page was split successfully.
3345 * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
3346 * the folio was concurrently removed from the page cache.
3348 * Returns -EBUSY when trying to split the huge zeropage, if the folio is
3349 * under writeback, if fs-specific folio metadata cannot currently be
* released, or if some unexpected race happened (e.g., anon VMA disappeared,
* truncation in progress, etc.).
3353 * Callers should ensure that the order respects the address space mapping
3354 * min-order if one is set for non-anonymous folios.
3356 * Returns -EINVAL when trying to split to an order that is incompatible
3357 * with the folio. Splitting to order 0 is compatible with all folios.
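*
* A minimal usage sketch, assuming the caller already holds a folio reference
* and the folio lock as described above:
*
*	err = split_huge_page_to_list_to_order(&folio->page, NULL, 0);
*
* (split_huge_page() and split_folio() are convenience wrappers around this
* function; split_folio_to_list() below first clamps the order via
* min_order_for_split().)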
3359 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
3360 unsigned int new_order)
3362 struct folio *folio = page_folio(page);
3363 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3364 /* reset xarray order to new order after split */
3365 XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
3366 bool is_anon = folio_test_anon(folio);
3367 struct address_space *mapping = NULL;
3368 struct anon_vma *anon_vma = NULL;
3369 int order = folio_order(folio);
3370 int extra_pins, ret;
3374 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3375 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
if (new_order >= folio_order(folio))
	return -EINVAL;
3381 /* order-1 is not supported for anonymous THP. */
3382 if (new_order == 1) {
	VM_WARN_ONCE(1, "Cannot split to order-1 folio");
	return -EINVAL;
}
3386 } else if (new_order) {
3387 /* Split shmem folio to non-zero order not supported */
3388 if (shmem_mapping(folio->mapping)) {
	VM_WARN_ONCE(1,
		"Cannot split shmem folio to non-0 order");
	return -EINVAL;
3394 * No split if the file system does not support large folio.
3395 * Note that we might still have THPs in such mappings due to
3396 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
3397 * does not actually support large folios properly.
3399 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3400 !mapping_large_folio_support(folio->mapping)) {
	VM_WARN_ONCE(1,
		"Cannot split file folio to non-0 order");
	return -EINVAL;
3407 /* Only swapping a whole PMD-mapped folio is supported */
if (folio_test_swapcache(folio) && new_order)
	return -EINVAL;
3411 is_hzp = is_huge_zero_folio(folio);
if (is_hzp) {
	pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
	return -EBUSY;
}
if (folio_test_writeback(folio))
	return -EBUSY;
* The caller does not necessarily hold an mmap_lock that would
* prevent the anon_vma from disappearing, so we first take a
* reference to it and then lock the anon_vma for write. This is
* similar to folio_lock_anon_vma_read(), except the write lock
* is taken to serialise against parallel split or collapse
* operations.
3429 anon_vma = folio_get_anon_vma(folio);
3436 anon_vma_lock_write(anon_vma);
3438 unsigned int min_order;
3441 mapping = folio->mapping;
3449 min_order = mapping_min_folio_order(folio->mapping);
3450 if (new_order < min_order) {
	VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
		     min_order);
	ret = -EINVAL;
	goto out;
gfp = current_gfp_context(mapping_gfp_mask(mapping) &
					GFP_RECLAIM_MASK);
if (!filemap_release_folio(folio, gfp)) {
	ret = -EBUSY;
	goto out;
}
3465 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
3466 if (xas_error(&xas)) {
3467 ret = xas_error(&xas);
3472 i_mmap_lock_read(mapping);
* __split_huge_page() may need to trim off pages beyond EOF:
3476 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3477 * which cannot be nested inside the page tree lock. So note
3478 * end now: i_size itself may be changed at any moment, but
3479 * folio lock is good enough to serialize the trimming.
3481 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3482 if (shmem_mapping(mapping))
3483 end = shmem_fallocend(mapping->host, end);
* Racy check whether we can split the page, before unmap_folio()
* unmaps it.
if (!can_split_folio(folio, 1, &extra_pins)) {
	ret = -EAGAIN;
	goto out_unlock;
}
3497 /* block interrupt reentry in xa_lock and spinlock */
3498 local_irq_disable();
3501 * Check if the folio is present in page cache.
* We assume all tail pages are present too, if the folio is there.
3506 if (xas_load(&xas) != folio)
3510 /* Prevent deferred_split_scan() touching ->_refcount */
3511 spin_lock(&ds_queue->split_queue_lock);
3512 if (folio_ref_freeze(folio, 1 + extra_pins)) {
3513 if (folio_order(folio) > 1 &&
3514 !list_empty(&folio->_deferred_list)) {
3515 ds_queue->split_queue_len--;
3516 if (folio_test_partially_mapped(folio)) {
3517 __folio_clear_partially_mapped(folio);
3518 mod_mthp_stat(folio_order(folio),
3519 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3522 * Reinitialize page_deferred_list after removing the
3523 * page from the split_queue, otherwise a subsequent
3524 * split will see list corruption when checking the
3525 * page_deferred_list.
3527 list_del_init(&folio->_deferred_list);
3529 spin_unlock(&ds_queue->split_queue_lock);
3531 int nr = folio_nr_pages(folio);
3533 xas_split(&xas, folio, folio_order(folio));
3534 if (folio_test_pmd_mappable(folio) &&
3535 new_order < HPAGE_PMD_ORDER) {
3536 if (folio_test_swapbacked(folio)) {
3537 __lruvec_stat_mod_folio(folio,
3538 NR_SHMEM_THPS, -nr);
				__lruvec_stat_mod_folio(folio,
						NR_FILE_THPS, -nr);
3542 filemap_nr_thps_dec(mapping);
3548 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
3549 mod_mthp_stat(new_order, MTHP_STAT_NR_ANON, 1 << (order - new_order));
3551 __split_huge_page(page, list, end, new_order);
3554 spin_unlock(&ds_queue->split_queue_lock);
3559 remap_page(folio, folio_nr_pages(folio), 0);
3565 anon_vma_unlock_write(anon_vma);
3566 put_anon_vma(anon_vma);
3569 i_mmap_unlock_read(mapping);
3572 if (order == HPAGE_PMD_ORDER)
3573 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3574 count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
3578 int min_order_for_split(struct folio *folio)
if (folio_test_anon(folio))
	return 0;
3583 if (!folio->mapping) {
3584 if (folio_test_pmd_mappable(folio))
		count_vm_event(THP_SPLIT_PAGE_FAILED);
	return -EBUSY;
}
3589 return mapping_min_folio_order(folio->mapping);
3592 int split_folio_to_list(struct folio *folio, struct list_head *list)
int ret = min_order_for_split(folio);

if (ret < 0)
	return ret;
3599 return split_huge_page_to_list_to_order(&folio->page, list, ret);
3602 void __folio_undo_large_rmappable(struct folio *folio)
3604 struct deferred_split *ds_queue;
3605 unsigned long flags;
3607 ds_queue = get_deferred_split_queue(folio);
3608 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3609 if (!list_empty(&folio->_deferred_list)) {
3610 ds_queue->split_queue_len--;
3611 if (folio_test_partially_mapped(folio)) {
3612 __folio_clear_partially_mapped(folio);
3613 mod_mthp_stat(folio_order(folio),
3614 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3616 list_del_init(&folio->_deferred_list);
3618 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3621 /* partially_mapped=false won't clear PG_partially_mapped folio flag */
3622 void deferred_split_folio(struct folio *folio, bool partially_mapped)
3624 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3626 struct mem_cgroup *memcg = folio_memcg(folio);
3628 unsigned long flags;
3631 * Order 1 folios have no space for a deferred list, but we also
3632 * won't waste much memory by not adding them to the deferred list.
if (folio_order(folio) <= 1)
	return;
if (!partially_mapped && !split_underused_thp)
	return;
* The try_to_unmap() in the page reclaim path might reach here too;
* this may cause a race condition that corrupts the deferred split
* queue. And if page reclaim is already handling the same folio, it is
* unnecessary to handle it again in the shrinker.
3646 * Check the swapcache flag to determine if the folio is being
3647 * handled by page reclaim since THP swap would add the folio into
3648 * swap cache before calling try_to_unmap().
if (folio_test_swapcache(folio))
	return;
3653 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3654 if (partially_mapped) {
3655 if (!folio_test_partially_mapped(folio)) {
3656 __folio_set_partially_mapped(folio);
3657 if (folio_test_pmd_mappable(folio))
3658 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
3659 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
3660 mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
3664 /* partially mapped folios cannot become non-partially mapped */
3665 VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
3667 if (list_empty(&folio->_deferred_list)) {
3668 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
3669 ds_queue->split_queue_len++;
		if (memcg)
			set_shrinker_bit(memcg, folio_nid(folio),
					 deferred_split_shrinker->id);
3676 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3679 static unsigned long deferred_split_count(struct shrinker *shrink,
3680 struct shrink_control *sc)
3682 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3683 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3687 ds_queue = &sc->memcg->deferred_split_queue;
3689 return READ_ONCE(ds_queue->split_queue_len);
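/*
 * Heuristic for the deferred-split shrinker: a THP is considered "underused"
 * when enough of its subpages are zero-filled, so splitting it (and letting
 * the zero pages be freed) is likely to reclaim memory.
 */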
3692 static bool thp_underused(struct folio *folio)
3694 int num_zero_pages = 0, num_filled_pages = 0;
if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
	return false;
3701 for (i = 0; i < folio_nr_pages(folio); i++) {
3702 kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
	num_zero_pages++;
	if (num_zero_pages > khugepaged_max_ptes_none) {
		kunmap_local(kaddr);
		return true;
	}
3711 * Another path for early exit once the number
3712 * of non-zero filled pages exceeds threshold.
num_filled_pages++;
if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
	kunmap_local(kaddr);
	return false;
}
3720 kunmap_local(kaddr);
3725 static unsigned long deferred_split_scan(struct shrinker *shrink,
3726 struct shrink_control *sc)
3728 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3729 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3730 unsigned long flags;
3732 struct folio *folio, *next;
3737 ds_queue = &sc->memcg->deferred_split_queue;
3740 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3741 /* Take pin on all head pages to avoid freeing them under us */
3742 list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
3744 if (folio_try_get(folio)) {
3745 list_move(&folio->_deferred_list, &list);
3747 /* We lost race with folio_put() */
3748 if (folio_test_partially_mapped(folio)) {
3749 __folio_clear_partially_mapped(folio);
3750 mod_mthp_stat(folio_order(folio),
3751 MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
3753 list_del_init(&folio->_deferred_list);
3754 ds_queue->split_queue_len--;
if (!--sc->nr_to_scan)
	break;
3759 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
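	/*
	 * Now that the candidates sit on a private list and split_queue_lock
	 * is dropped, try to split them. Splitting needs to lock each folio,
	 * so it cannot be done under the spinlock above.
	 */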
3761 list_for_each_entry_safe(folio, next, &list, _deferred_list) {
3762 bool did_split = false;
3763 bool underused = false;
3765 if (!folio_test_partially_mapped(folio)) {
3766 underused = thp_underused(folio);
3770 if (!folio_trylock(folio))
3772 if (!split_folio(folio)) {
	did_split = true;
	if (underused)
		count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
	split++;
3778 folio_unlock(folio);
3781 * split_folio() removes folio from list on success.
3782 * Only add back to the queue if folio is partially mapped.
3783 * If thp_underused returns false, or if split_folio fails
3784 * in the case it was underused, then consider it used and
3785 * don't add it back to split_queue.
3787 if (!did_split && !folio_test_partially_mapped(folio)) {
3788 list_del_init(&folio->_deferred_list);
3789 ds_queue->split_queue_len--;
3794 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3795 list_splice_tail(&list, &ds_queue->split_queue);
3796 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
* Stop the shrinker if we didn't split anything but the queue is now empty.
* This can happen if the pages were freed under us.
if (!split && list_empty(&ds_queue->split_queue))
	return SHRINK_STOP;
return split;
3807 #ifdef CONFIG_DEBUG_FS
3808 static void split_huge_pages_all(void)
3812 struct folio *folio;
3813 unsigned long pfn, max_zone_pfn;
3814 unsigned long total = 0, split = 0;
3816 pr_debug("Split all THPs\n");
3817 for_each_zone(zone) {
3818 if (!managed_zone(zone))
3820 max_zone_pfn = zone_end_pfn(zone);
3821 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3824 page = pfn_to_online_page(pfn);
3825 if (!page || PageTail(page))
3827 folio = page_folio(page);
if (!folio_try_get(folio))
	continue;
3831 if (unlikely(page_folio(page) != folio))
3834 if (zone != folio_zone(folio))
3837 if (!folio_test_large(folio)
3838 || folio_test_hugetlb(folio)
3839 || !folio_test_lru(folio))
3844 nr_pages = folio_nr_pages(folio);
if (!split_folio(folio))
	split++;
3847 pfn += nr_pages - 1;
3848 folio_unlock(folio);
3855 pr_debug("%lu of %lu THP split\n", split, total);
3858 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
3860 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
3861 is_vm_hugetlb_page(vma);
3864 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
3865 unsigned long vaddr_end, unsigned int new_order)
3868 struct task_struct *task;
3869 struct mm_struct *mm;
3870 unsigned long total = 0, split = 0;
3873 vaddr_start &= PAGE_MASK;
3874 vaddr_end &= PAGE_MASK;
3876 task = find_get_task_by_vpid(pid);
3882 /* Find the mm_struct */
3883 mm = get_task_mm(task);
3884 put_task_struct(task);
3891 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3892 pid, vaddr_start, vaddr_end);
3896 * always increase addr by PAGE_SIZE, since we could have a PTE page
3897 * table filled with PTE-mapped THPs, each of which is distinct.
3899 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3900 struct vm_area_struct *vma = vma_lookup(mm, addr);
3901 struct folio_walk fw;
3902 struct folio *folio;
3903 struct address_space *mapping;
3904 unsigned int target_order = new_order;
3909 /* skip special VMA and hugetlb VMA */
3910 if (vma_not_suitable_for_thp_split(vma)) {
3915 folio = folio_walk_start(&fw, vma, addr, 0);
3919 if (!is_transparent_hugepage(folio))
3922 if (!folio_test_anon(folio)) {
3923 mapping = folio->mapping;
3924 target_order = max(new_order,
3925 mapping_min_folio_order(mapping));
3928 if (target_order >= folio_order(folio))
3933 * For folios with private, split_huge_page_to_list_to_order()
3934 * will try to drop it before split and then check if the folio
3935 * can be split or not. So skip the check here.
3937 if (!folio_test_private(folio) &&
3938 !can_split_folio(folio, 0, NULL))
3941 if (!folio_trylock(folio))
3944 folio_walk_end(&fw, vma);
3946 if (!folio_test_anon(folio) && folio->mapping != mapping)
if (!split_folio_to_order(folio, target_order))
	split++;
3954 folio_unlock(folio);
3960 folio_walk_end(&fw, vma);
3963 mmap_read_unlock(mm);
3966 pr_debug("%lu of %lu THP split\n", split, total);
3972 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3973 pgoff_t off_end, unsigned int new_order)
3975 struct filename *file;
3976 struct file *candidate;
3977 struct address_space *mapping;
3981 unsigned long total = 0, split = 0;
3982 unsigned int min_order;
3983 unsigned int target_order;
3985 file = getname_kernel(file_path);
3989 candidate = file_open_name(file, O_RDONLY, 0);
3990 if (IS_ERR(candidate))
3993 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3994 file_path, off_start, off_end);
3996 mapping = candidate->f_mapping;
3997 min_order = mapping_min_folio_order(mapping);
3998 target_order = max(new_order, min_order);
4000 for (index = off_start; index < off_end; index += nr_pages) {
4001 struct folio *folio = filemap_get_folio(mapping, index);
4007 if (!folio_test_large(folio))
4011 nr_pages = folio_nr_pages(folio);
4013 if (target_order >= folio_order(folio))
4016 if (!folio_trylock(folio))
4019 if (folio->mapping != mapping)
if (!split_folio_to_order(folio, target_order))
	split++;
4026 folio_unlock(folio);
4032 filp_close(candidate, NULL);
4035 pr_debug("%lu of %lu file-backed THP split\n", split, total);
4041 #define MAX_INPUT_BUF_SZ 255
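/*
 * The split_huge_pages debugfs file accepts three input formats:
 *   "1"                                               - split all THPs in the system
 *   "<pid>,<vaddr_start>,<vaddr_end>[,<new_order>]"   - split THPs in a task's range
 *   "/path/to/file,<off_start>,<off_end>[,<new_order>]" - split file-backed THPs
 * Addresses and offsets are given in hex (0x...).
 */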
4043 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
4044 size_t count, loff_t *ppops)
4046 static DEFINE_MUTEX(split_debug_mutex);
4049 * hold pid, start_vaddr, end_vaddr, new_order or
4050 * file_path, off_start, off_end, new_order
4052 char input_buf[MAX_INPUT_BUF_SZ];
4054 unsigned long vaddr_start, vaddr_end;
4055 unsigned int new_order = 0;
4057 ret = mutex_lock_interruptible(&split_debug_mutex);
4063 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
4064 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
4067 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
4069 if (input_buf[0] == '/') {
4071 char *buf = input_buf;
4072 char file_path[MAX_INPUT_BUF_SZ];
4073 pgoff_t off_start = 0, off_end = 0;
4074 size_t input_len = strlen(input_buf);
4076 tok = strsep(&buf, ",");
if (tok) {
	strcpy(file_path, tok);
} else {
	ret = -EINVAL;
	goto out;
}
4084 ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order);
4085 if (ret != 2 && ret != 3) {
4089 ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order);
4096 ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order);
4097 if (ret == 1 && pid == 1) {
4098 split_huge_pages_all();
4099 ret = strlen(input_buf);
4101 } else if (ret != 3 && ret != 4) {
4106 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order);
if (!ret)
	ret = strlen(input_buf);
4110 mutex_unlock(&split_debug_mutex);
4115 static const struct file_operations split_huge_pages_fops = {
4116 .owner = THIS_MODULE,
4117 .write = split_huge_pages_write,
4118 .llseek = no_llseek,
4121 static int __init split_huge_pages_debugfs(void)
4123 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
4124 &split_huge_pages_fops);
4127 late_initcall(split_huge_pages_debugfs);
4130 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
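/*
 * Replace a present huge pmd with a pmd migration entry, preserving the
 * dirty/young/soft-dirty/uffd-wp bits so that remove_migration_pmd() can
 * restore them later.
 */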
4131 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
4134 struct folio *folio = page_folio(page);
4135 struct vm_area_struct *vma = pvmw->vma;
4136 struct mm_struct *mm = vma->vm_mm;
4137 unsigned long address = pvmw->address;
4138 bool anon_exclusive;
if (!(pvmw->pmd && !pvmw->pte))
	return 0;
4146 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
4147 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
4149 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
4150 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
4151 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
	set_pmd_at(mm, address, pvmw->pmd, pmdval);
	return -EBUSY;
}
4156 if (pmd_dirty(pmdval))
4157 folio_mark_dirty(folio);
4158 if (pmd_write(pmdval))
4159 entry = make_writable_migration_entry(page_to_pfn(page));
4160 else if (anon_exclusive)
4161 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
4163 entry = make_readable_migration_entry(page_to_pfn(page));
4164 if (pmd_young(pmdval))
4165 entry = make_migration_entry_young(entry);
4166 if (pmd_dirty(pmdval))
4167 entry = make_migration_entry_dirty(entry);
4168 pmdswp = swp_entry_to_pmd(entry);
4169 if (pmd_soft_dirty(pmdval))
4170 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
4171 if (pmd_uffd_wp(pmdval))
4172 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
4173 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
folio_remove_rmap_pmd(folio, page, vma);
put_page(page);
trace_set_migration_pmd(address, pmd_val(pmdswp));

return 0;
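/*
 * Re-establish a present huge pmd from the migration entry found by the
 * page_vma_mapped walk, restoring dirty/young/soft-dirty/uffd-wp state and
 * taking the new page's rmap.
 */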
4181 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
4183 struct folio *folio = page_folio(new);
4184 struct vm_area_struct *vma = pvmw->vma;
4185 struct mm_struct *mm = vma->vm_mm;
4186 unsigned long address = pvmw->address;
4187 unsigned long haddr = address & HPAGE_PMD_MASK;
if (!(pvmw->pmd && !pvmw->pte))
	return;
entry = pmd_to_swp_entry(*pvmw->pmd);
folio_get(folio);
4196 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
4197 if (pmd_swp_soft_dirty(*pvmw->pmd))
4198 pmde = pmd_mksoft_dirty(pmde);
4199 if (is_writable_migration_entry(entry))
4200 pmde = pmd_mkwrite(pmde, vma);
4201 if (pmd_swp_uffd_wp(*pvmw->pmd))
4202 pmde = pmd_mkuffd_wp(pmde);
4203 if (!is_migration_entry_young(entry))
4204 pmde = pmd_mkold(pmde);
4205 /* NOTE: this may contain setting soft-dirty on some archs */
4206 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
4207 pmde = pmd_mkdirty(pmde);
4209 if (folio_test_anon(folio)) {
4210 rmap_t rmap_flags = RMAP_NONE;
4212 if (!is_readable_migration_entry(entry))
4213 rmap_flags |= RMAP_EXCLUSIVE;
4215 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
4217 folio_add_file_rmap_pmd(folio, new, vma);
4219 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
4220 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
4222 /* No need to invalidate - it was non-present before */
4223 update_mmu_cache_pmd(vma, address, pvmw->pmd);
4224 trace_remove_migration_pmd(address, pmd_val(pmde));