1 // SPDX-License-Identifier: GPL-2.0-only
3 * Simple NUMA memory policy for the Linux kernel.
5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
8 * NUMA policy allows the user to give hints in which node(s) memory should be allocated.
11 * Support six policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter is used.
23 * Allocate memory interleaved over a set of nodes based on
24 * a set of weights (per-node), with normal fallback if it
25 * fails. Otherwise operates the same as interleave.
26 * Example: nodeset(0,1) & weights (2,1) - 2 pages allocated
27 * on node 0 for every 1 page allocated on node 1.
29 * bind Only allocate memory on a specific set of nodes, no fallback.
31 * FIXME: memory is allocated starting with the first node
32 * to the last. It would be better if bind would truly restrict
33 * the allocation to memory nodes instead
35 * preferred Try a specific node first before normal fallback.
36 * As a special case NUMA_NO_NODE here means do the allocation
37 * on the node of the local CPU. This is normally identical to default,
38 * but useful to set in a VMA when you have a non-default process policy.
41 * preferred many Try a set of nodes first before normal fallback. This is
42 * similar to preferred without the special case.
44 * default Allocate on the local node first, or when on a VMA
45 * use the process policy. This is what Linux always did
46 * in a NUMA aware kernel and still does by, ahem, default.
48 * The process policy is applied for most non interrupt memory allocations
49 * in that process' context. Interrupts ignore the policies and always
50 * try to allocate on the local CPU. The VMA policy is only applied for memory
51 * allocations for a VMA in the VM.
53 * Currently there are a few corner cases in swapping where the policy
54 * is not applied, but the majority should be handled. When process policy
55 * is used it is not remembered over swap outs/swap ins.
57 * Only the highest zone in the zone hierarchy gets policied. Allocations
58 * requesting a lower zone just use default policy. This implies that
59 * on systems with highmem, kernel lowmem allocations don't get policied.
60 * Same with GFP_DMA allocations.
62 * For shmem/tmpfs shared memory the policy is shared between
63 * all users and remembered even when nobody has memory mapped.
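 *
 * Illustrative userspace sketch (not kernel code): a task asking for its
 * future allocations to be interleaved across nodes 0 and 1 via
 * set_mempolicy(2). The wrapper and the MPOL_* constants are assumed to
 * come from libnuma's <numaif.h>; the node numbers are only an example and
 * error handling is minimal.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes)))
 *		perror("set_mempolicy");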
67 fix mmap readahead to honour policy and enable policy for any page cache object
69 statistics for bigpages
70 global policy for page cache? currently it uses process policy. Requires first item above.
72 handle mremap for shared memory (currently ignored for the policy)
74 make bind policy root only? It can trigger oom much faster and the
75 kernel is not always graceful with that.
78 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
80 #include <linux/mempolicy.h>
81 #include <linux/pagewalk.h>
82 #include <linux/highmem.h>
83 #include <linux/hugetlb.h>
84 #include <linux/kernel.h>
85 #include <linux/sched.h>
86 #include <linux/sched/mm.h>
87 #include <linux/sched/numa_balancing.h>
88 #include <linux/sched/task.h>
89 #include <linux/nodemask.h>
90 #include <linux/cpuset.h>
91 #include <linux/slab.h>
92 #include <linux/string.h>
93 #include <linux/export.h>
94 #include <linux/nsproxy.h>
95 #include <linux/interrupt.h>
96 #include <linux/init.h>
97 #include <linux/compat.h>
98 #include <linux/ptrace.h>
99 #include <linux/swap.h>
100 #include <linux/seq_file.h>
101 #include <linux/proc_fs.h>
102 #include <linux/migrate.h>
103 #include <linux/ksm.h>
104 #include <linux/rmap.h>
105 #include <linux/security.h>
106 #include <linux/syscalls.h>
107 #include <linux/ctype.h>
108 #include <linux/mm_inline.h>
109 #include <linux/mmu_notifier.h>
110 #include <linux/printk.h>
111 #include <linux/swapops.h>
113 #include <asm/tlbflush.h>
115 #include <linux/uaccess.h>
117 #include "internal.h"
120 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
121 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
122 #define MPOL_MF_WRLOCK (MPOL_MF_INTERNAL << 2) /* Write-lock walked vmas */
124 static struct kmem_cache *policy_cache;
125 static struct kmem_cache *sn_cache;
127 /* Highest zone. A specific allocation for a zone below that is not
129 enum zone_type policy_zone = 0;
132 * run-time system-wide default policy => local allocation
134 static struct mempolicy default_policy = {
135 .refcnt = ATOMIC_INIT(1), /* never free it */
139 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
142 * iw_table is the sysfs-set interleave weight table; a value of 0 denotes
143 * that the system-default value should be used. A NULL iw_table also denotes that
144 * system-default values should be used. Until the system-default table
145 * is implemented, the system-default is always 1.
147 * iw_table is RCU protected
149 static u8 __rcu *iw_table;
150 static DEFINE_MUTEX(iw_table_lock);
152 static u8 get_il_weight(int node)
158 table = rcu_dereference(iw_table);
159 /* if no iw_table, use system default */
160 weight = table ? table[node] : 1;
161 /* if value in iw_table is 0, use system default */
162 weight = weight ? weight : 1;
168 * numa_nearest_node - Find nearest node by state
169 * @node: Node id to start the search
170 * @state: State to filter the search
172 * Lookup the closest node by distance if @node is not in @state.
174 * Return: this @node if it is in state, otherwise the closest node by distance
176 int numa_nearest_node(int node, unsigned int state)
178 int min_dist = INT_MAX, dist, n, min_node;
180 if (state >= NR_NODE_STATES)
183 if (node == NUMA_NO_NODE || node_state(node, state))
187 for_each_node_state(n, state) {
188 dist = node_distance(node, n);
189 if (dist < min_dist) {
197 EXPORT_SYMBOL_GPL(numa_nearest_node);
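/*
 * Illustrative example: on a machine where node 2 is a CPU-only node with
 * no memory, numa_nearest_node(2, N_MEMORY) returns the memory node that
 * is closest to node 2 according to node_distance().
 */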
199 struct mempolicy *get_task_policy(struct task_struct *p)
201 struct mempolicy *pol = p->mempolicy;
207 node = numa_node_id();
208 if (node != NUMA_NO_NODE) {
209 pol = &preferred_node_policy[node];
210 /* preferred_node_policy is not initialised early in boot */
215 return &default_policy;
218 static const struct mempolicy_operations {
219 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
220 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
221 } mpol_ops[MPOL_MAX];
223 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
225 return pol->flags & MPOL_MODE_FLAGS;
228 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
229 const nodemask_t *rel)
232 nodes_fold(tmp, *orig, nodes_weight(*rel));
233 nodes_onto(*ret, tmp, *rel);
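/*
 * Worked example (illustrative): with MPOL_F_RELATIVE_NODES, a user
 * nodemask of {0,2} relative to an allowed mask of {4,5,6} is first folded
 * modulo nodes_weight({4,5,6}) == 3 (staying {0,2}) and then mapped onto
 * the allowed mask, yielding {4,6}, i.e. the 0th and 2nd set bits of {4,5,6}.
 */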
236 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
238 if (nodes_empty(*nodes))
244 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
246 if (nodes_empty(*nodes))
249 nodes_clear(pol->nodes);
250 node_set(first_node(*nodes), pol->nodes);
255 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
256 * any, for the new policy. mpol_new() has already validated the nodes
257 * parameter with respect to the policy mode and flags.
259 * Must be called holding task's alloc_lock to protect task's mems_allowed
260 * and mempolicy. May also be called holding the mmap_lock for write.
262 static int mpol_set_nodemask(struct mempolicy *pol,
263 const nodemask_t *nodes, struct nodemask_scratch *nsc)
268 * Default (pol==NULL) and local memory policies are not
269 * subject to any remapping. They also do not need any special
272 if (!pol || pol->mode == MPOL_LOCAL)
276 nodes_and(nsc->mask1,
277 cpuset_current_mems_allowed, node_states[N_MEMORY]);
281 if (pol->flags & MPOL_F_RELATIVE_NODES)
282 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
284 nodes_and(nsc->mask2, *nodes, nsc->mask1);
286 if (mpol_store_user_nodemask(pol))
287 pol->w.user_nodemask = *nodes;
289 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
291 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
296 * This function just creates a new policy, does some checks and simple
297 * initialization. You must invoke mpol_set_nodemask() to set nodes.
299 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
302 struct mempolicy *policy;
304 if (mode == MPOL_DEFAULT) {
305 if (nodes && !nodes_empty(*nodes))
306 return ERR_PTR(-EINVAL);
312 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
313 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
314 * All other modes require a valid pointer to a non-empty nodemask.
316 if (mode == MPOL_PREFERRED) {
317 if (nodes_empty(*nodes)) {
318 if (((flags & MPOL_F_STATIC_NODES) ||
319 (flags & MPOL_F_RELATIVE_NODES)))
320 return ERR_PTR(-EINVAL);
324 } else if (mode == MPOL_LOCAL) {
325 if (!nodes_empty(*nodes) ||
326 (flags & MPOL_F_STATIC_NODES) ||
327 (flags & MPOL_F_RELATIVE_NODES))
328 return ERR_PTR(-EINVAL);
329 } else if (nodes_empty(*nodes))
330 return ERR_PTR(-EINVAL);
332 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
334 return ERR_PTR(-ENOMEM);
335 atomic_set(&policy->refcnt, 1);
337 policy->flags = flags;
338 policy->home_node = NUMA_NO_NODE;
343 /* Slow path of a mpol destructor. */
344 void __mpol_put(struct mempolicy *pol)
346 if (!atomic_dec_and_test(&pol->refcnt))
348 kmem_cache_free(policy_cache, pol);
351 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
355 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
359 if (pol->flags & MPOL_F_STATIC_NODES)
360 nodes_and(tmp, pol->w.user_nodemask, *nodes);
361 else if (pol->flags & MPOL_F_RELATIVE_NODES)
362 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
364 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
366 pol->w.cpuset_mems_allowed = *nodes;
369 if (nodes_empty(tmp))
375 static void mpol_rebind_preferred(struct mempolicy *pol,
376 const nodemask_t *nodes)
378 pol->w.cpuset_mems_allowed = *nodes;
382 * mpol_rebind_policy - Migrate a policy to a different set of nodes
384 * Per-vma policies are protected by mmap_lock. Allocations using per-task
385 * policies are protected by task->mems_allowed_seq to prevent a premature
386 * OOM/allocation failure due to parallel nodemask modification.
388 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
390 if (!pol || pol->mode == MPOL_LOCAL)
392 if (!mpol_store_user_nodemask(pol) &&
393 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
396 mpol_ops[pol->mode].rebind(pol, newmask);
400 * Wrapper for mpol_rebind_policy() that just requires task
401 * pointer, and updates task mempolicy.
403 * Called with task's alloc_lock held.
405 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
407 mpol_rebind_policy(tsk->mempolicy, new);
411 * Rebind each vma in mm to new nodemask.
413 * Call holding a reference to mm. Takes mm->mmap_lock during call.
415 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
417 struct vm_area_struct *vma;
418 VMA_ITERATOR(vmi, mm, 0);
421 for_each_vma(vmi, vma) {
422 vma_start_write(vma);
423 mpol_rebind_policy(vma->vm_policy, new);
425 mmap_write_unlock(mm);
428 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
430 .rebind = mpol_rebind_default,
432 [MPOL_INTERLEAVE] = {
433 .create = mpol_new_nodemask,
434 .rebind = mpol_rebind_nodemask,
437 .create = mpol_new_preferred,
438 .rebind = mpol_rebind_preferred,
441 .create = mpol_new_nodemask,
442 .rebind = mpol_rebind_nodemask,
445 .rebind = mpol_rebind_default,
447 [MPOL_PREFERRED_MANY] = {
448 .create = mpol_new_nodemask,
449 .rebind = mpol_rebind_preferred,
451 [MPOL_WEIGHTED_INTERLEAVE] = {
452 .create = mpol_new_nodemask,
453 .rebind = mpol_rebind_nodemask,
457 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
458 unsigned long flags);
459 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
460 pgoff_t ilx, int *nid);
462 static bool strictly_unmovable(unsigned long flags)
465 * STRICT without MOVE flags lets do_mbind() fail immediately with -EIO
466 * if any misplaced page is found.
468 return (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ==
472 struct migration_mpol { /* for alloc_migration_target_by_mpol() */
473 struct mempolicy *pol;
478 struct list_head *pagelist;
483 struct vm_area_struct *first;
484 struct folio *large; /* note last large folio encountered */
485 long nr_failed; /* could not be isolated at this time */
489 * Check if the folio's nid is in qp->nmask.
491 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
492 * in the invert of qp->nmask.
494 static inline bool queue_folio_required(struct folio *folio,
495 struct queue_pages *qp)
497 int nid = folio_nid(folio);
498 unsigned long flags = qp->flags;
500 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
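/*
 * Example of the two callers: do_mbind() passes the policy nodemask
 * together with MPOL_MF_INVERT, so "required" means the folio sits on a
 * node outside the policy and is a migration candidate; migrate_to_node()
 * passes a single-source nodemask without MPOL_MF_INVERT, so only folios
 * currently on that source node are selected.
 */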
503 static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
506 struct queue_pages *qp = walk->private;
508 if (unlikely(is_pmd_migration_entry(*pmd))) {
512 folio = pmd_folio(*pmd);
513 if (is_huge_zero_folio(folio)) {
514 walk->action = ACTION_CONTINUE;
517 if (!queue_folio_required(folio, qp))
519 if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
520 !vma_migratable(walk->vma) ||
521 !migrate_folio_add(folio, qp->pagelist, qp->flags))
526 * Scan through folios, checking if they satisfy the required conditions,
527 * moving them from LRU to local pagelist for migration if they do (or not).
529 * queue_folios_pte_range() has two possible return values:
530 * 0 - continue walking to scan for more, even if an existing folio on the
531 * wrong node could not be isolated and queued for migration.
532 * -EIO - only MPOL_MF_STRICT was specified, without MPOL_MF_MOVE or ..._ALL,
533 * and an existing folio was on a node that does not follow the policy.
535 static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
536 unsigned long end, struct mm_walk *walk)
538 struct vm_area_struct *vma = walk->vma;
540 struct queue_pages *qp = walk->private;
541 unsigned long flags = qp->flags;
542 pte_t *pte, *mapped_pte;
546 ptl = pmd_trans_huge_lock(pmd, vma);
548 queue_folios_pmd(pmd, walk);
553 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
555 walk->action = ACTION_AGAIN;
558 for (; addr != end; pte++, addr += PAGE_SIZE) {
559 ptent = ptep_get(pte);
562 if (!pte_present(ptent)) {
563 if (is_migration_entry(pte_to_swp_entry(ptent)))
567 folio = vm_normal_folio(vma, addr, ptent);
568 if (!folio || folio_is_zone_device(folio))
571 * vm_normal_folio() filters out zero pages, but there might
572 * still be reserved folios to skip, perhaps in a VDSO.
574 if (folio_test_reserved(folio))
576 if (!queue_folio_required(folio, qp))
578 if (folio_test_large(folio)) {
580 * A large folio can only be isolated from LRU once,
581 * but may be mapped by many PTEs (and Copy-On-Write may
582 * intersperse PTEs of other, order 0, folios). This is
583 * a common case, so don't mistake it for failure (but
584 * there can be other cases of multi-mapped pages which
585 * this quick check does not help to filter out - and a
586 * search of the pagelist might grow to be prohibitive).
588 * migrate_pages(&pagelist) returns nr_failed folios, so
589 * check "large" now so that queue_pages_range() returns
590 * a comparable nr_failed folios. This does imply that
591 * if folio could not be isolated for some racy reason
592 * at its first PTE, later PTEs will not give it another
593 * chance of isolation; but keeps the accounting simple.
595 if (folio == qp->large)
599 if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
600 !vma_migratable(vma) ||
601 !migrate_folio_add(folio, qp->pagelist, flags)) {
603 if (strictly_unmovable(flags))
607 pte_unmap_unlock(mapped_pte, ptl);
610 if (qp->nr_failed && strictly_unmovable(flags))
615 static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
616 unsigned long addr, unsigned long end,
617 struct mm_walk *walk)
619 #ifdef CONFIG_HUGETLB_PAGE
620 struct queue_pages *qp = walk->private;
621 unsigned long flags = qp->flags;
626 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
627 entry = huge_ptep_get(pte);
628 if (!pte_present(entry)) {
629 if (unlikely(is_hugetlb_entry_migration(entry)))
633 folio = pfn_folio(pte_pfn(entry));
634 if (!queue_folio_required(folio, qp))
636 if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
637 !vma_migratable(walk->vma)) {
642 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
643 * Choosing not to migrate a shared folio is not counted as a failure.
645 * See folio_likely_mapped_shared() on possible imprecision when we
646 * cannot easily detect if a folio is shared.
648 if ((flags & MPOL_MF_MOVE_ALL) ||
649 (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte)))
650 if (!isolate_hugetlb(folio, qp->pagelist))
654 if (qp->nr_failed && strictly_unmovable(flags))
660 #ifdef CONFIG_NUMA_BALANCING
662 * This is used to mark a range of virtual addresses to be inaccessible.
663 * These are later cleared by a NUMA hinting fault. Depending on these
664 * faults, pages may be migrated for better NUMA placement.
666 * This is assuming that NUMA faults are handled using PROT_NONE. If
667 * an architecture makes a different choice, it will need further
668 * changes to the core.
670 unsigned long change_prot_numa(struct vm_area_struct *vma,
671 unsigned long addr, unsigned long end)
673 struct mmu_gather tlb;
676 tlb_gather_mmu(&tlb, vma->vm_mm);
678 nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
680 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
682 tlb_finish_mmu(&tlb);
686 #endif /* CONFIG_NUMA_BALANCING */
688 static int queue_pages_test_walk(unsigned long start, unsigned long end,
689 struct mm_walk *walk)
691 struct vm_area_struct *next, *vma = walk->vma;
692 struct queue_pages *qp = walk->private;
693 unsigned long flags = qp->flags;
695 /* range check first */
696 VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
700 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
701 (qp->start < vma->vm_start))
702 /* hole at head side of range */
705 next = find_vma(vma->vm_mm, vma->vm_end);
706 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
707 ((vma->vm_end < qp->end) &&
708 (!next || vma->vm_end < next->vm_start)))
709 /* hole at middle or tail of range */
713 * Need to check MPOL_MF_STRICT to return -EIO if possible,
714 * regardless of vma_migratable()
716 if (!vma_migratable(vma) &&
717 !(flags & MPOL_MF_STRICT))
721 * Check page nodes, and queue pages to move, in the current vma.
722 * But if no moving, and no strict checking, the scan can be skipped.
724 if (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
729 static const struct mm_walk_ops queue_pages_walk_ops = {
730 .hugetlb_entry = queue_folios_hugetlb,
731 .pmd_entry = queue_folios_pte_range,
732 .test_walk = queue_pages_test_walk,
733 .walk_lock = PGWALK_RDLOCK,
736 static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = {
737 .hugetlb_entry = queue_folios_hugetlb,
738 .pmd_entry = queue_folios_pte_range,
739 .test_walk = queue_pages_test_walk,
740 .walk_lock = PGWALK_WRLOCK,
744 * Walk through page tables and collect pages to be migrated.
746 * If pages found in a given range are not on the required set of @nodes,
747 * and migration is allowed, they are isolated and queued to @pagelist.
749 * queue_pages_range() may return:
750 * 0 - all pages already on the right node, or successfully queued for moving
751 * (or neither strict checking nor moving requested: only range checking).
752 * >0 - this number of misplaced folios could not be queued for moving
753 * (a hugetlbfs page or a transparent huge page being counted as 1).
754 * -EIO - a misplaced page found, when MPOL_MF_STRICT specified without MOVEs.
755 * -EFAULT - a hole in the memory range, when MPOL_MF_DISCONTIG_OK unspecified.
758 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
759 nodemask_t *nodes, unsigned long flags,
760 struct list_head *pagelist)
763 struct queue_pages qp = {
764 .pagelist = pagelist,
771 const struct mm_walk_ops *ops = (flags & MPOL_MF_WRLOCK) ?
772 &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
774 err = walk_page_range(mm, start, end, ops, &qp);
777 /* whole range in hole */
780 return err ? : qp.nr_failed;
784 * Apply policy to a single VMA
785 * This must be called with the mmap_lock held for writing.
787 static int vma_replace_policy(struct vm_area_struct *vma,
788 struct mempolicy *pol)
791 struct mempolicy *old;
792 struct mempolicy *new;
794 vma_assert_write_locked(vma);
800 if (vma->vm_ops && vma->vm_ops->set_policy) {
801 err = vma->vm_ops->set_policy(vma, new);
806 old = vma->vm_policy;
807 vma->vm_policy = new; /* protected by mmap_lock */
816 /* Split or merge the VMA (if required) and apply the new policy */
817 static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
818 struct vm_area_struct **prev, unsigned long start,
819 unsigned long end, struct mempolicy *new_pol)
821 unsigned long vmstart, vmend;
823 vmend = min(end, vma->vm_end);
824 if (start > vma->vm_start) {
828 vmstart = vma->vm_start;
831 if (mpol_equal(vma->vm_policy, new_pol)) {
836 vma = vma_modify_policy(vmi, *prev, vma, vmstart, vmend, new_pol);
841 return vma_replace_policy(vma, new_pol);
844 /* Set the process memory policy */
845 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
848 struct mempolicy *new, *old;
849 NODEMASK_SCRATCH(scratch);
855 new = mpol_new(mode, flags, nodes);
862 ret = mpol_set_nodemask(new, nodes, scratch);
864 task_unlock(current);
869 old = current->mempolicy;
870 current->mempolicy = new;
871 if (new && (new->mode == MPOL_INTERLEAVE ||
872 new->mode == MPOL_WEIGHTED_INTERLEAVE)) {
873 current->il_prev = MAX_NUMNODES-1;
874 current->il_weight = 0;
876 task_unlock(current);
880 NODEMASK_SCRATCH_FREE(scratch);
885 * Return nodemask for policy for get_mempolicy() query
887 * Called with task's alloc_lock held
889 static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
892 if (pol == &default_policy)
897 case MPOL_INTERLEAVE:
899 case MPOL_PREFERRED_MANY:
900 case MPOL_WEIGHTED_INTERLEAVE:
904 /* return empty node mask for local allocation */
911 static int lookup_node(struct mm_struct *mm, unsigned long addr)
913 struct page *p = NULL;
916 ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
918 ret = page_to_nid(p);
924 /* Retrieve NUMA policy */
925 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
926 unsigned long addr, unsigned long flags)
929 struct mm_struct *mm = current->mm;
930 struct vm_area_struct *vma = NULL;
931 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
934 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
937 if (flags & MPOL_F_MEMS_ALLOWED) {
938 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
940 *policy = 0; /* just so it's initialized */
942 *nmask = cpuset_current_mems_allowed;
943 task_unlock(current);
947 if (flags & MPOL_F_ADDR) {
948 pgoff_t ilx; /* ignored here */
950 * Do NOT fall back to task policy if the
951 * vma/shared policy at addr is NULL. We
952 * want to return MPOL_DEFAULT in this case.
955 vma = vma_lookup(mm, addr);
957 mmap_read_unlock(mm);
960 pol = __get_vma_policy(vma, addr, &ilx);
965 pol = &default_policy; /* indicates default behavior */
967 if (flags & MPOL_F_NODE) {
968 if (flags & MPOL_F_ADDR) {
970 * Take a refcount on the mpol, because we are about to
971 * drop the mmap_lock, after which only "pol" remains
972 * valid, "vma" is stale.
977 mmap_read_unlock(mm);
978 err = lookup_node(mm, addr);
982 } else if (pol == current->mempolicy &&
983 pol->mode == MPOL_INTERLEAVE) {
984 *policy = next_node_in(current->il_prev, pol->nodes);
985 } else if (pol == current->mempolicy &&
986 pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
987 if (current->il_weight)
988 *policy = current->il_prev;
990 *policy = next_node_in(current->il_prev,
997 *policy = pol == &default_policy ? MPOL_DEFAULT :
1000 * Internal mempolicy flags must be masked off before exposing
1001 * the policy to userspace.
1003 *policy |= (pol->flags & MPOL_MODE_FLAGS);
1008 if (mpol_store_user_nodemask(pol)) {
1009 *nmask = pol->w.user_nodemask;
1012 get_policy_nodemask(pol, nmask);
1013 task_unlock(current);
1020 mmap_read_unlock(mm);
1022 mpol_put(pol_refcount);
1026 #ifdef CONFIG_MIGRATION
1027 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1028 unsigned long flags)
1031 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
1032 * Choosing not to migrate a shared folio is not counted as a failure.
1034 * See folio_likely_mapped_shared() on possible imprecision when we
1035 * cannot easily detect if a folio is shared.
1037 if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) {
1038 if (folio_isolate_lru(folio)) {
1039 list_add_tail(&folio->lru, foliolist);
1040 node_stat_mod_folio(folio,
1041 NR_ISOLATED_ANON + folio_is_file_lru(folio),
1042 folio_nr_pages(folio));
1045 * A non-movable folio may reach here. Also, there may be
1046 * temporarily off-LRU folios or non-LRU movable folios.
1047 * Treat them as unmovable folios since they can't be
1048 * isolated, so they can't be moved at the moment.
1057 * Migrate pages from one node to a target node.
1058 * Returns error or the number of pages not migrated.
1060 static long migrate_to_node(struct mm_struct *mm, int source, int dest,
1064 struct vm_area_struct *vma;
1065 LIST_HEAD(pagelist);
1068 struct migration_target_control mtc = {
1070 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1071 .reason = MR_SYSCALL,
1075 node_set(source, nmask);
1077 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1080 vma = find_vma(mm, 0);
1083 * This does not migrate the range, but isolates all pages that
1084 * need migration. Between passing in the full user address
1085 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail,
1086 * but passes back the count of pages which could not be isolated.
1088 nr_failed = queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1089 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1090 mmap_read_unlock(mm);
1092 if (!list_empty(&pagelist)) {
1093 err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1094 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1096 putback_movable_pages(&pagelist);
1105 * Move pages between the two nodesets so as to preserve the physical
1106 * layout as much as possible.
1108 * Returns the number of pages that could not be moved.
1110 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1111 const nodemask_t *to, int flags)
1117 lru_cache_disable();
1120 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1121 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1122 * bit in 'tmp', and return that <source, dest> pair for migration.
1123 * The pair of nodemasks 'to' and 'from' define the map.
1125 * If no pair of bits is found that way, fallback to picking some
1126 * pair of 'source' and 'dest' bits that are not the same. If the
1127 * 'source' and 'dest' bits are the same, this represents a node
1128 * that will be migrating to itself, so no pages need move.
1130 * If no bits are left in 'tmp', or if all remaining bits left
1131 * in 'tmp' correspond to the same bit in 'to', return false
1132 * (nothing left to migrate).
1134 * This lets us pick a pair of nodes to migrate between, such that
1135 * if possible the dest node is not already occupied by some other
1136 * source node, minimizing the risk of overloading the memory on a
1137 * node that would happen if we migrated incoming memory to a node
1138 * before migrating outgoing memory sourced from that same node.
1140 * A single scan of tmp is sufficient. As we go, we remember the
1141 * most recent <s, d> pair that moved (s != d). If we find a pair
1142 * that not only moved, but what's better, moved to an empty slot
1143 * (d is not set in tmp), then we break out then, with that pair.
1144 * Otherwise when we finish scanning tmp, we at least have the
1145 * most recent <s, d> pair that moved. If we get all the way through
1146 * the scan of tmp without finding any node that moved, much less
1147 * moved to an empty node, then there is nothing left worth migrating.
1151 while (!nodes_empty(tmp)) {
1153 int source = NUMA_NO_NODE;
1156 for_each_node_mask(s, tmp) {
1159 * do_migrate_pages() tries to maintain the relative
1160 * node relationship of the pages established between
1161 * threads and memory areas.
1163 * However if the number of source nodes is not equal to
1164 * the number of destination nodes we cannot preserve
1165 * this node-relative relationship. In that case, skip
1166 * copying memory from a node that is in the destination mask.
1169 * Example: [2,3,4] -> [3,4,5] moves everything.
1170 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1173 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1174 (node_isset(s, *to)))
1177 d = node_remap(s, *from, *to);
1181 source = s; /* Node moved. Memorize */
1184 /* dest not in remaining from nodes? */
1185 if (!node_isset(dest, tmp))
1188 if (source == NUMA_NO_NODE)
1191 node_clear(source, tmp);
1192 err = migrate_to_node(mm, source, dest, flags);
1202 return (nr_failed < INT_MAX) ? nr_failed : INT_MAX;
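/*
 * Illustrative userspace sketch (not kernel code): moving another task's
 * pages from node 0 to node 1 via migrate_pages(2), assuming libnuma's
 * <numaif.h> wrapper; "pid" is a placeholder. A negative return is an
 * error, a positive return is the number of pages that could not be moved.
 *
 *	#include <numaif.h>
 *
 *	unsigned long old_nodes = 1UL << 0, new_nodes = 1UL << 1;
 *	long ret = migrate_pages(pid, 8 * sizeof(unsigned long),
 *				 &old_nodes, &new_nodes);
 */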
1206 * Allocate a new folio for page migration, according to NUMA mempolicy.
1208 static struct folio *alloc_migration_target_by_mpol(struct folio *src,
1209 unsigned long private)
1211 struct migration_mpol *mmpol = (struct migration_mpol *)private;
1212 struct mempolicy *pol = mmpol->pol;
1213 pgoff_t ilx = mmpol->ilx;
1216 int nid = numa_node_id();
1219 order = folio_order(src);
1220 ilx += src->index >> order;
1222 if (folio_test_hugetlb(src)) {
1223 nodemask_t *nodemask;
1226 h = folio_hstate(src);
1227 gfp = htlb_alloc_mask(h);
1228 nodemask = policy_nodemask(gfp, pol, ilx, &nid);
1229 return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp,
1230 htlb_allow_alloc_fallback(MR_MEMPOLICY_MBIND));
1233 if (folio_test_large(src))
1234 gfp = GFP_TRANSHUGE;
1236 gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;
1238 page = alloc_pages_mpol(gfp, order, pol, ilx, nid);
1239 return page_rmappable_folio(page);
1243 static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1244 unsigned long flags)
1249 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1250 const nodemask_t *to, int flags)
1255 static struct folio *alloc_migration_target_by_mpol(struct folio *src,
1256 unsigned long private)
1262 static long do_mbind(unsigned long start, unsigned long len,
1263 unsigned short mode, unsigned short mode_flags,
1264 nodemask_t *nmask, unsigned long flags)
1266 struct mm_struct *mm = current->mm;
1267 struct vm_area_struct *vma, *prev;
1268 struct vma_iterator vmi;
1269 struct migration_mpol mmpol;
1270 struct mempolicy *new;
1274 LIST_HEAD(pagelist);
1276 if (flags & ~(unsigned long)MPOL_MF_VALID)
1278 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1281 if (start & ~PAGE_MASK)
1284 if (mode == MPOL_DEFAULT)
1285 flags &= ~MPOL_MF_STRICT;
1287 len = PAGE_ALIGN(len);
1295 new = mpol_new(mode, mode_flags, nmask);
1297 return PTR_ERR(new);
1300 * If we are using the default policy then operation
1301 * on discontinuous address spaces is okay after all
1304 flags |= MPOL_MF_DISCONTIG_OK;
1306 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1307 lru_cache_disable();
1309 NODEMASK_SCRATCH(scratch);
1311 mmap_write_lock(mm);
1312 err = mpol_set_nodemask(new, nmask, scratch);
1314 mmap_write_unlock(mm);
1317 NODEMASK_SCRATCH_FREE(scratch);
1323 * Lock the VMAs before scanning for pages to migrate,
1324 * to ensure we don't miss a concurrently inserted page.
1326 nr_failed = queue_pages_range(mm, start, end, nmask,
1327 flags | MPOL_MF_INVERT | MPOL_MF_WRLOCK, &pagelist);
1329 if (nr_failed < 0) {
1333 vma_iter_init(&vmi, mm, start);
1334 prev = vma_prev(&vmi);
1335 for_each_vma_range(vmi, vma, end) {
1336 err = mbind_range(&vmi, vma, &prev, start, end, new);
1342 if (!err && !list_empty(&pagelist)) {
1343 /* Convert MPOL_DEFAULT's NULL to task or default policy */
1345 new = get_task_policy(current);
1352 * In the interleaved case, attempt to allocate on exactly the
1353 * targeted nodes, for the first VMA to be migrated; for later
1354 * VMAs, the nodes will still be interleaved from the targeted
1355 * nodemask, but one by one may be selected differently.
1357 if (new->mode == MPOL_INTERLEAVE ||
1358 new->mode == MPOL_WEIGHTED_INTERLEAVE) {
1359 struct folio *folio;
1361 unsigned long addr = -EFAULT;
1363 list_for_each_entry(folio, &pagelist, lru) {
1364 if (!folio_test_ksm(folio))
1367 if (!list_entry_is_head(folio, &pagelist, lru)) {
1368 vma_iter_init(&vmi, mm, start);
1369 for_each_vma_range(vmi, vma, end) {
1370 addr = page_address_in_vma(
1371 folio_page(folio, 0), vma);
1372 if (addr != -EFAULT)
1376 if (addr != -EFAULT) {
1377 order = folio_order(folio);
1378 /* We already know the pol, but not the ilx */
1379 mpol_cond_put(get_vma_policy(vma, addr, order,
1381 /* Set base from which to increment by index */
1382 mmpol.ilx -= folio->index >> order;
1387 mmap_write_unlock(mm);
1389 if (!err && !list_empty(&pagelist)) {
1390 nr_failed |= migrate_pages(&pagelist,
1391 alloc_migration_target_by_mpol, NULL,
1392 (unsigned long)&mmpol, MIGRATE_SYNC,
1393 MR_MEMPOLICY_MBIND, NULL);
1396 if (nr_failed && (flags & MPOL_MF_STRICT))
1398 if (!list_empty(&pagelist))
1399 putback_movable_pages(&pagelist);
1402 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1408 * User space interface with variable sized bitmaps for nodelists.
1410 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1411 unsigned long maxnode)
1413 unsigned long nlongs = BITS_TO_LONGS(maxnode);
1416 if (in_compat_syscall())
1417 ret = compat_get_bitmap(mask,
1418 (const compat_ulong_t __user *)nmask,
1421 ret = copy_from_user(mask, nmask,
1422 nlongs * sizeof(unsigned long));
1427 if (maxnode % BITS_PER_LONG)
1428 mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1433 /* Copy a node mask from user space. */
1434 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1435 unsigned long maxnode)
1438 nodes_clear(*nodes);
1439 if (maxnode == 0 || !nmask)
1441 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1445 * When the user specifies more nodes than supported, just check
1446 * if the unsupported part is all zero, one word at a time,
1447 * starting at the end.
1449 while (maxnode > MAX_NUMNODES) {
1450 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1453 if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
1456 if (maxnode - bits >= MAX_NUMNODES) {
1459 maxnode = MAX_NUMNODES;
1460 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1466 return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1469 /* Copy a kernel node mask to user space */
1470 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1473 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1474 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1475 bool compat = in_compat_syscall();
1478 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1480 if (copy > nbytes) {
1481 if (copy > PAGE_SIZE)
1483 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1486 maxnode = nr_node_ids;
1490 return compat_put_bitmap((compat_ulong_t __user *)mask,
1491 nodes_addr(*nodes), maxnode);
1493 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1496 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1497 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1499 *flags = *mode & MPOL_MODE_FLAGS;
1500 *mode &= ~MPOL_MODE_FLAGS;
1502 if ((unsigned int)(*mode) >= MPOL_MAX)
1504 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1506 if (*flags & MPOL_F_NUMA_BALANCING) {
1507 if (*mode == MPOL_BIND || *mode == MPOL_PREFERRED_MANY)
1508 *flags |= (MPOL_F_MOF | MPOL_F_MORON);
1515 static long kernel_mbind(unsigned long start, unsigned long len,
1516 unsigned long mode, const unsigned long __user *nmask,
1517 unsigned long maxnode, unsigned int flags)
1519 unsigned short mode_flags;
1524 start = untagged_addr(start);
1525 err = sanitize_mpol_flags(&lmode, &mode_flags);
1529 err = get_nodes(&nodes, nmask, maxnode);
1533 return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1536 SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1537 unsigned long, home_node, unsigned long, flags)
1539 struct mm_struct *mm = current->mm;
1540 struct vm_area_struct *vma, *prev;
1541 struct mempolicy *new, *old;
1544 VMA_ITERATOR(vmi, mm, start);
1546 start = untagged_addr(start);
1547 if (start & ~PAGE_MASK)
1550 * flags is reserved for future extension, if any.
1556 * Check home_node is online to avoid accessing uninitialized NODE_DATA.
1559 if (home_node >= MAX_NUMNODES || !node_online(home_node))
1562 len = PAGE_ALIGN(len);
1569 mmap_write_lock(mm);
1570 prev = vma_prev(&vmi);
1571 for_each_vma_range(vmi, vma, end) {
1573 * If any vma in the range has a policy other than MPOL_BIND
1574 * or MPOL_PREFERRED_MANY we return an error. We don't reset
1575 * the home node for vmas we already updated before.
1577 old = vma_policy(vma);
1582 if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1586 new = mpol_dup(old);
1592 vma_start_write(vma);
1593 new->home_node = home_node;
1594 err = mbind_range(&vmi, vma, &prev, start, end, new);
1599 mmap_write_unlock(mm);
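/*
 * Illustrative userspace sketch (not kernel code): there is no glibc
 * wrapper for set_mempolicy_home_node(), so it is normally reached via
 * syscall(2). Here an MPOL_BIND mapping over nodes 0-1 is given node 1 as
 * its home node; __NR_set_mempolicy_home_node must be provided by the
 * kernel headers, "addr"/"len" are placeholders and errors are ignored.
 *
 *	#include <numaif.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	mbind(addr, len, MPOL_BIND, &nodes, 8 * sizeof(nodes), 0);
 *	syscall(__NR_set_mempolicy_home_node, addr, len, 1, 0);
 */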
1603 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1604 unsigned long, mode, const unsigned long __user *, nmask,
1605 unsigned long, maxnode, unsigned int, flags)
1607 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
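/*
 * Illustrative userspace sketch (not kernel code): binding a fresh
 * anonymous mapping to node 0 and migrating any already-populated pages
 * there, assuming libnuma's <numaif.h>; error handling is elided.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 4UL << 20;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 1UL << 0;
 *
 *	mbind(p, len, MPOL_BIND, &node0, 8 * sizeof(node0), MPOL_MF_MOVE);
 */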
1610 /* Set the process memory policy */
1611 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1612 unsigned long maxnode)
1614 unsigned short mode_flags;
1619 err = sanitize_mpol_flags(&lmode, &mode_flags);
1623 err = get_nodes(&nodes, nmask, maxnode);
1627 return do_set_mempolicy(lmode, mode_flags, &nodes);
1630 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1631 unsigned long, maxnode)
1633 return kernel_set_mempolicy(mode, nmask, maxnode);
1636 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1637 const unsigned long __user *old_nodes,
1638 const unsigned long __user *new_nodes)
1640 struct mm_struct *mm = NULL;
1641 struct task_struct *task;
1642 nodemask_t task_nodes;
1646 NODEMASK_SCRATCH(scratch);
1651 old = &scratch->mask1;
1652 new = &scratch->mask2;
1654 err = get_nodes(old, old_nodes, maxnode);
1658 err = get_nodes(new, new_nodes, maxnode);
1662 /* Find the mm_struct */
1664 task = pid ? find_task_by_vpid(pid) : current;
1670 get_task_struct(task);
1675 * Check if this process has the right to modify the specified process.
1676 * Use the regular "ptrace_may_access()" checks.
1678 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1685 task_nodes = cpuset_mems_allowed(task);
1686 /* Is the user allowed to access the target nodes? */
1687 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1692 task_nodes = cpuset_mems_allowed(current);
1693 nodes_and(*new, *new, task_nodes);
1694 if (nodes_empty(*new))
1697 err = security_task_movememory(task);
1701 mm = get_task_mm(task);
1702 put_task_struct(task);
1709 err = do_migrate_pages(mm, old, new,
1710 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1714 NODEMASK_SCRATCH_FREE(scratch);
1719 put_task_struct(task);
1723 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1724 const unsigned long __user *, old_nodes,
1725 const unsigned long __user *, new_nodes)
1727 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1730 /* Retrieve NUMA policy */
1731 static int kernel_get_mempolicy(int __user *policy,
1732 unsigned long __user *nmask,
1733 unsigned long maxnode,
1735 unsigned long flags)
1741 if (nmask != NULL && maxnode < nr_node_ids)
1744 addr = untagged_addr(addr);
1746 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1751 if (policy && put_user(pval, policy))
1755 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1760 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1761 unsigned long __user *, nmask, unsigned long, maxnode,
1762 unsigned long, addr, unsigned long, flags)
1764 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
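/*
 * Illustrative userspace sketch (not kernel code): asking which node the
 * page backing "addr" currently resides on, using MPOL_F_NODE |
 * MPOL_F_ADDR as handled above. Assumes libnuma's <numaif.h>; "addr" is a
 * placeholder and the page must already be present.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int node = -1;
 *
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */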
1767 bool vma_migratable(struct vm_area_struct *vma)
1769 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1773 * DAX device mappings require predictable access latency, so avoid
1774 * incurring periodic faults.
1776 if (vma_is_dax(vma))
1779 if (is_vm_hugetlb_page(vma) &&
1780 !hugepage_migration_supported(hstate_vma(vma)))
1784 * Migration allocates pages in the highest zone. If we cannot
1785 * do so then migration (at least from node to node) is not possible.
1789 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1795 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1796 unsigned long addr, pgoff_t *ilx)
1799 return (vma->vm_ops && vma->vm_ops->get_policy) ?
1800 vma->vm_ops->get_policy(vma, addr, ilx) : vma->vm_policy;
1804 * get_vma_policy(@vma, @addr, @order, @ilx)
1805 * @vma: virtual memory area whose policy is sought
1806 * @addr: address in @vma for shared policy lookup
1807 * @order: 0, or appropriate huge_page_order for interleaving
1808 * @ilx: interleave index (output), for use only when MPOL_INTERLEAVE or
1809 * MPOL_WEIGHTED_INTERLEAVE
1811 * Returns effective policy for a VMA at specified address.
1812 * Falls back to current->mempolicy or system default policy, as necessary.
1813 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1814 * count--added by the get_policy() vm_op, as appropriate--to protect against
1815 * freeing by another task. It is the caller's responsibility to free the
1816 * extra reference for shared policies.
1818 struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1819 unsigned long addr, int order, pgoff_t *ilx)
1821 struct mempolicy *pol;
1823 pol = __get_vma_policy(vma, addr, ilx);
1825 pol = get_task_policy(current);
1826 if (pol->mode == MPOL_INTERLEAVE ||
1827 pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
1828 *ilx += vma->vm_pgoff >> order;
1829 *ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
1834 bool vma_policy_mof(struct vm_area_struct *vma)
1836 struct mempolicy *pol;
1838 if (vma->vm_ops && vma->vm_ops->get_policy) {
1840 pgoff_t ilx; /* ignored here */
1842 pol = vma->vm_ops->get_policy(vma, vma->vm_start, &ilx);
1843 if (pol && (pol->flags & MPOL_F_MOF))
1850 pol = vma->vm_policy;
1852 pol = get_task_policy(current);
1854 return pol->flags & MPOL_F_MOF;
1857 bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1859 enum zone_type dynamic_policy_zone = policy_zone;
1861 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1864 * if policy->nodes has movable memory only,
1865 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1867 * policy->nodes is intersected with node_states[N_MEMORY],
1868 * so if the following test fails, it implies that
1869 * policy->nodes has movable memory only.
1871 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1872 dynamic_policy_zone = ZONE_MOVABLE;
1874 return zone >= dynamic_policy_zone;
1877 static unsigned int weighted_interleave_nodes(struct mempolicy *policy)
1880 unsigned int cpuset_mems_cookie;
1883 /* to prevent miscount use tsk->mems_allowed_seq to detect rebind */
1884 cpuset_mems_cookie = read_mems_allowed_begin();
1885 node = current->il_prev;
1886 if (!current->il_weight || !node_isset(node, policy->nodes)) {
1887 node = next_node_in(node, policy->nodes);
1888 if (read_mems_allowed_retry(cpuset_mems_cookie))
1890 if (node == MAX_NUMNODES)
1892 current->il_prev = node;
1893 current->il_weight = get_il_weight(node);
1895 current->il_weight--;
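/*
 * Illustrative example: with pol->nodes = {0,1} and weights of 2 for
 * node 0 and 1 for node 1, successive allocations by the task walk the
 * nodes as 0, 0, 1, 0, 0, 1, ..., with il_prev/il_weight tracking how
 * much of the current node's weight budget remains.
 */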
1899 /* Do dynamic interleaving for a process */
1900 static unsigned int interleave_nodes(struct mempolicy *policy)
1903 unsigned int cpuset_mems_cookie;
1905 /* to prevent miscount, use tsk->mems_allowed_seq to detect rebind */
1907 cpuset_mems_cookie = read_mems_allowed_begin();
1908 nid = next_node_in(current->il_prev, policy->nodes);
1909 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1911 if (nid < MAX_NUMNODES)
1912 current->il_prev = nid;
1917 * Depending on the memory policy, provide a node from which to allocate the next slab entry.
1920 unsigned int mempolicy_slab_node(void)
1922 struct mempolicy *policy;
1923 int node = numa_mem_id();
1928 policy = current->mempolicy;
1932 switch (policy->mode) {
1933 case MPOL_PREFERRED:
1934 return first_node(policy->nodes);
1936 case MPOL_INTERLEAVE:
1937 return interleave_nodes(policy);
1939 case MPOL_WEIGHTED_INTERLEAVE:
1940 return weighted_interleave_nodes(policy);
1943 case MPOL_PREFERRED_MANY:
1948 * Follow bind policy behavior and start allocation at the
1951 struct zonelist *zonelist;
1952 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1953 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1954 z = first_zones_zonelist(zonelist, highest_zoneidx,
1956 return z->zone ? zone_to_nid(z->zone) : node;
1966 static unsigned int read_once_policy_nodemask(struct mempolicy *pol,
1970 * barrier stabilizes the nodemask locally so that it can be iterated
1971 * over safely without concern for changes. Allocators validate node
1972 * selection does not violate mems_allowed, so this is safe.
1975 memcpy(mask, &pol->nodes, sizeof(nodemask_t));
1977 return nodes_weight(*mask);
1980 static unsigned int weighted_interleave_nid(struct mempolicy *pol, pgoff_t ilx)
1982 nodemask_t nodemask;
1983 unsigned int target, nr_nodes;
1985 unsigned int weight_total = 0;
1989 nr_nodes = read_once_policy_nodemask(pol, &nodemask);
1991 return numa_node_id();
1994 table = rcu_dereference(iw_table);
1995 /* calculate the total weight */
1996 for_each_node_mask(nid, nodemask) {
1997 /* detect system default usage */
1998 weight = table ? table[nid] : 1;
1999 weight = weight ? weight : 1;
2000 weight_total += weight;
2003 /* Calculate the node offset based on totals */
2004 target = ilx % weight_total;
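/*
 * Illustrative example: for a nodemask of {0,1} with weights {2,1},
 * weight_total is 3, so ilx values congruent to 0 or 1 (mod 3) select
 * node 0 and values congruent to 2 select node 1, repeating every 3 pages.
 */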
2005 nid = first_node(nodemask);
2007 /* detect system default usage */
2008 weight = table ? table[nid] : 1;
2009 weight = weight ? weight : 1;
2010 if (target < weight)
2013 nid = next_node_in(nid, nodemask);
2020 * Do static interleaving for interleave index @ilx. Returns the ilx'th
2021 * node in pol->nodes (starting from ilx=0), wrapping around if ilx
2022 * exceeds the number of present nodes.
2024 static unsigned int interleave_nid(struct mempolicy *pol, pgoff_t ilx)
2026 nodemask_t nodemask;
2027 unsigned int target, nnodes;
2031 nnodes = read_once_policy_nodemask(pol, &nodemask);
2033 return numa_node_id();
2034 target = ilx % nnodes;
2035 nid = first_node(nodemask);
2036 for (i = 0; i < target; i++)
2037 nid = next_node(nid, nodemask);
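/*
 * Illustrative example: with nodemask {0,2,5} (nnodes == 3) and ilx == 7,
 * target is 7 % 3 == 1, so one step past the first node selects node 2.
 */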
2042 * Return a nodemask representing a mempolicy for filtering nodes for
2043 * page allocation, together with preferred node id (or the input node id).
2045 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
2046 pgoff_t ilx, int *nid)
2048 nodemask_t *nodemask = NULL;
2050 switch (pol->mode) {
2051 case MPOL_PREFERRED:
2052 /* Override input node id */
2053 *nid = first_node(pol->nodes);
2055 case MPOL_PREFERRED_MANY:
2056 nodemask = &pol->nodes;
2057 if (pol->home_node != NUMA_NO_NODE)
2058 *nid = pol->home_node;
2061 /* Restrict to nodemask (but not on lower zones) */
2062 if (apply_policy_zone(pol, gfp_zone(gfp)) &&
2063 cpuset_nodemask_valid_mems_allowed(&pol->nodes))
2064 nodemask = &pol->nodes;
2065 if (pol->home_node != NUMA_NO_NODE)
2066 *nid = pol->home_node;
2068 * __GFP_THISNODE shouldn't even be used with the bind policy
2069 * because we might easily break the expectation to stay on the
2070 * requested node and not break the policy.
2072 WARN_ON_ONCE(gfp & __GFP_THISNODE);
2074 case MPOL_INTERLEAVE:
2075 /* Override input node id */
2076 *nid = (ilx == NO_INTERLEAVE_INDEX) ?
2077 interleave_nodes(pol) : interleave_nid(pol, ilx);
2079 case MPOL_WEIGHTED_INTERLEAVE:
2080 *nid = (ilx == NO_INTERLEAVE_INDEX) ?
2081 weighted_interleave_nodes(pol) :
2082 weighted_interleave_nid(pol, ilx);
2089 #ifdef CONFIG_HUGETLBFS
2091 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2092 * @vma: virtual memory area whose policy is sought
2093 * @addr: address in @vma for shared policy lookup and interleave policy
2094 * @gfp_flags: for requested zone
2095 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2096 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2098 * Returns a nid suitable for a huge page allocation and a pointer
2099 * to the struct mempolicy for conditional unref after allocation.
2100 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2101 * to the mempolicy's @nodemask for filtering the zonelist.
2103 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2104 struct mempolicy **mpol, nodemask_t **nodemask)
2109 nid = numa_node_id();
2110 *mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);
2111 *nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid);
2116 * init_nodemask_of_mempolicy
2118 * If the current task's mempolicy is "default" [NULL], return 'false'
2119 * to indicate default policy. Otherwise, extract the policy nodemask
2120 * for 'bind' or 'interleave' policy into the argument nodemask, or
2121 * initialize the argument nodemask to contain the single node for
2122 * 'preferred' or 'local' policy and return 'true' to indicate presence
2123 * of non-default mempolicy.
2125 * We don't bother with reference counting the mempolicy [mpol_get/put]
2126 * because the current task is examining its own mempolicy and a task's
2127 * mempolicy is only ever changed by the task itself.
2129 * N.B., it is the caller's responsibility to free a returned nodemask.
2131 bool init_nodemask_of_mempolicy(nodemask_t *mask)
2133 struct mempolicy *mempolicy;
2135 if (!(mask && current->mempolicy))
2139 mempolicy = current->mempolicy;
2140 switch (mempolicy->mode) {
2141 case MPOL_PREFERRED:
2142 case MPOL_PREFERRED_MANY:
2144 case MPOL_INTERLEAVE:
2145 case MPOL_WEIGHTED_INTERLEAVE:
2146 *mask = mempolicy->nodes;
2150 init_nodemask_of_node(mask, numa_node_id());
2156 task_unlock(current);
2163 * mempolicy_in_oom_domain
2165 * If tsk's mempolicy is "bind", check for intersection between mask and
2166 * the policy nodemask. Otherwise, return true for all other policies
2167 * including "interleave", as a tsk with "interleave" policy may have
2168 * memory allocated from all nodes in system.
2170 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2172 bool mempolicy_in_oom_domain(struct task_struct *tsk,
2173 const nodemask_t *mask)
2175 struct mempolicy *mempolicy;
2182 mempolicy = tsk->mempolicy;
2183 if (mempolicy && mempolicy->mode == MPOL_BIND)
2184 ret = nodes_intersects(mempolicy->nodes, *mask);
2190 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2191 int nid, nodemask_t *nodemask)
2194 gfp_t preferred_gfp;
2197 * This is a two pass approach. The first pass will only try the
2198 * preferred nodes but skip the direct reclaim and allow the
2199 * allocation to fail, while the second pass will try all the nodes in the system.
2202 preferred_gfp = gfp | __GFP_NOWARN;
2203 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2204 page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);
2206 page = __alloc_pages_noprof(gfp, order, nid, NULL);
2212 * alloc_pages_mpol - Allocate pages according to NUMA mempolicy.
2214 * @order: Order of the page allocation.
2215 * @pol: Pointer to the NUMA mempolicy.
2216 * @ilx: Index for interleave mempolicy (also distinguishes alloc_pages()).
2217 * @nid: Preferred node (usually numa_node_id() but @mpol may override it).
2219 * Return: The page on success or NULL if allocation fails.
2221 struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
2222 struct mempolicy *pol, pgoff_t ilx, int nid)
2224 nodemask_t *nodemask;
2227 nodemask = policy_nodemask(gfp, pol, ilx, &nid);
2229 if (pol->mode == MPOL_PREFERRED_MANY)
2230 return alloc_pages_preferred_many(gfp, order, nid, nodemask);
2232 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2233 /* filter "hugepage" allocation, unless from alloc_pages() */
2234 order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) {
2236 * For hugepage allocation and non-interleave policy which
2237 * allows the current node (or other explicitly preferred
2238 * node) we only try to allocate from the current/preferred
2239 * node and don't fall back to other nodes, as the cost of
2240 * remote accesses would likely offset THP benefits.
2242 * If the policy is interleave or does not allow the current
2243 * node in its nodemask, we allocate the standard way.
2245 if (pol->mode != MPOL_INTERLEAVE &&
2246 pol->mode != MPOL_WEIGHTED_INTERLEAVE &&
2247 (!nodemask || node_isset(nid, *nodemask))) {
2249 * First, try to allocate THP only on local node, but
2250 * don't reclaim unnecessarily, just compact.
2252 page = __alloc_pages_node_noprof(nid,
2253 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2254 if (page || !(gfp & __GFP_DIRECT_RECLAIM))
2257 * If hugepage allocations are configured to always use
2258 * synchronous compaction or the vma has been madvised
2259 * to prefer hugepage backing, retry allowing remote
2260 * memory with both reclaim and compact as well.
2265 page = __alloc_pages_noprof(gfp, order, nid, nodemask);
2267 if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) {
2268 /* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */
2269 if (static_branch_likely(&vm_numa_stat_key) &&
2270 page_to_nid(page) == nid) {
2272 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2281 * vma_alloc_folio - Allocate a folio for a VMA.
2283 * @order: Order of the folio.
2284 * @vma: Pointer to VMA.
2285 * @addr: Virtual address of the allocation. Must be inside @vma.
2286 * @hugepage: Unused (was: For hugepages try only preferred node if possible).
2288 * Allocate a folio for a specific address in @vma, using the appropriate
2289 * NUMA policy. The caller must hold the mmap_lock of the mm_struct of the
2290 * VMA to prevent it from going away. Should be used for all allocations
2291 * for folios that will be mapped into user space, excepting hugetlbfs, and
2292 * excepting where direct use of alloc_pages_mpol() is more appropriate.
2294 * Return: The folio on success or NULL if allocation fails.
2296 struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
2297 unsigned long addr, bool hugepage)
2299 struct mempolicy *pol;
2303 pol = get_vma_policy(vma, addr, order, &ilx);
2304 page = alloc_pages_mpol_noprof(gfp | __GFP_COMP, order,
2305 pol, ilx, numa_node_id());
2307 return page_rmappable_folio(page);
2309 EXPORT_SYMBOL(vma_alloc_folio_noprof);
2312 * alloc_pages - Allocate pages.
2314 * @order: Power of two of number of pages to allocate.
2316 * Allocate 1 << @order contiguous pages. The physical address of the
2317 * first page is naturally aligned (e.g. an order-3 allocation will be aligned
2318 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2319 * process is honoured when in process context.
2321 * Context: Can be called from any context, providing the appropriate GFP flags are used.
2323 * Return: The page on success or NULL if allocation fails.
2325 struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order)
2327 struct mempolicy *pol = &default_policy;
2330 * No reference counting needed for current->mempolicy
2331 * nor system default_policy
2333 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2334 pol = get_task_policy(current);
2336 return alloc_pages_mpol_noprof(gfp, order, pol, NO_INTERLEAVE_INDEX,
2339 EXPORT_SYMBOL(alloc_pages_noprof);
2341 struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
2343 return page_rmappable_folio(alloc_pages_noprof(gfp | __GFP_COMP, order));
2345 EXPORT_SYMBOL(folio_alloc_noprof);
2347 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2348 struct mempolicy *pol, unsigned long nr_pages,
2349 struct page **page_array)
2352 unsigned long nr_pages_per_node;
2355 unsigned long nr_allocated;
2356 unsigned long total_allocated = 0;
2358 nodes = nodes_weight(pol->nodes);
2359 nr_pages_per_node = nr_pages / nodes;
2360 delta = nr_pages - nodes * nr_pages_per_node;
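/*
 * Illustrative example: nr_pages == 10 spread over 3 nodes gives
 * nr_pages_per_node == 3 and delta == 1, so the first node in the
 * interleave sequence receives 4 pages and the other two receive 3 each.
 */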
2362 for (i = 0; i < nodes; i++) {
2364 nr_allocated = alloc_pages_bulk_noprof(gfp,
2365 interleave_nodes(pol), NULL,
2366 nr_pages_per_node + 1, NULL,
2370 nr_allocated = alloc_pages_bulk_noprof(gfp,
2371 interleave_nodes(pol), NULL,
2372 nr_pages_per_node, NULL, page_array);
2375 page_array += nr_allocated;
2376 total_allocated += nr_allocated;
2379 return total_allocated;
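/*
 * Worked example (illustrative, not from the original source): with a
 * 3-node interleave mask and nr_pages == 10, nr_pages_per_node == 3 and
 * delta == 1, so the first node visited gets 3 + 1 == 4 pages and the
 * other two nodes get 3 each, preserving the round-robin distribution.
 */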
2382 static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
2383 struct mempolicy *pol, unsigned long nr_pages,
2384 struct page **page_array)
2386 struct task_struct *me = current;
2387 unsigned int cpuset_mems_cookie;
2388 unsigned long total_allocated = 0;
2389 unsigned long nr_allocated = 0;
2390 unsigned long rounds;
2391 unsigned long node_pages, delta;
2392 u8 *table, *weights, weight;
2393 unsigned int weight_total = 0;
2394 unsigned long rem_pages = nr_pages;
2397 int resume_node = MAX_NUMNODES - 1;
2398 u8 resume_weight = 0;
2405 /* read the nodes onto the stack, retry if done during rebind */
2407 cpuset_mems_cookie = read_mems_allowed_begin();
2408 nnodes = read_once_policy_nodemask(pol, &nodes);
2409 } while (read_mems_allowed_retry(cpuset_mems_cookie));
2411 /* if the nodemask has become invalid, we cannot do anything */
2415 /* Continue allocating from most recent node and adjust the nr_pages */
2417 weight = me->il_weight;
2418 if (weight && node_isset(node, nodes)) {
2419 node_pages = min(rem_pages, weight);
2420 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
2422 page_array += nr_allocated;
2423 total_allocated += nr_allocated;
2424 /* if that's all the pages, no need to interleave */
2425 if (rem_pages <= weight) {
2426 me->il_weight -= rem_pages;
2427 return total_allocated;
2429 /* Otherwise we adjust remaining pages, continue from there */
2430 rem_pages -= weight;
2432 /* clear active weight in case of an allocation failure */
2436 /* create a local copy of node weights to operate on outside rcu */
2437 weights = kzalloc(nr_node_ids, GFP_KERNEL);
2439 return total_allocated;
2442 table = rcu_dereference(iw_table);
2444 memcpy(weights, table, nr_node_ids);
2447 /* calculate total, detect system default usage */
2448 for_each_node_mask(node, nodes) {
2451 weight_total += weights[node];
2455 * Calculate rounds/partial rounds to minimize __alloc_pages_bulk calls.
2456 * Track which node weighted interleave should resume from.
2458 * If (rounds > 0) and (delta == 0), resume_node will always be
2459 * the node following prev_node, with resume_weight set to that node's weight.
2461 rounds = rem_pages / weight_total;
2462 delta = rem_pages % weight_total;
2463 resume_node = next_node_in(prev_node, nodes);
2464 resume_weight = weights[resume_node];
2465 for (i = 0; i < nnodes; i++) {
2466 node = next_node_in(prev_node, nodes);
2467 weight = weights[node];
2468 node_pages = weight * rounds;
2469 /* If a delta exists, add this node's portion of the delta */
2470 if (delta > weight) {
2471 node_pages += weight;
2474 /* when delta is depleted, resume from that node */
2475 node_pages += delta;
2477 resume_weight = weight - delta;
2480 /* node_pages can be 0 if an allocation fails and rounds == 0 */
2483 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
2485 page_array += nr_allocated;
2486 total_allocated += nr_allocated;
2487 if (total_allocated == nr_pages)
2491 me->il_prev = resume_node;
2492 me->il_weight = resume_weight;
2494 return total_allocated;
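/*
 * Worked example (illustrative, not from the original source): nodes {0,1}
 * with weights {3,1} and rem_pages == 10 give weight_total == 4, so
 * rounds == 2 and delta == 2. Node 0 receives 3 * 2 + 2 == 8 pages, node 1
 * receives 1 * 2 == 2 pages, and the task resumes at node 0 with one unit
 * of weight (3 - 2) left for its next weighted-interleave allocation.
 */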
2497 static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2498 struct mempolicy *pol, unsigned long nr_pages,
2499 struct page **page_array)
2501 gfp_t preferred_gfp;
2502 unsigned long nr_allocated = 0;
2504 preferred_gfp = gfp | __GFP_NOWARN;
2505 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2507 nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
2508 nr_pages, NULL, page_array);
2510 if (nr_allocated < nr_pages)
2511 nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
2512 nr_pages - nr_allocated, NULL,
2513 page_array + nr_allocated);
2514 return nr_allocated;
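/*
 * Illustrative note (not from the original source): with pol->nodes == {2,3}
 * and nr_pages == 8, the first bulk call tries only nodes 2-3 without direct
 * reclaim; if it returns 5 pages, the second call allocates the remaining 3
 * with the caller's full gfp, starting from the local node with normal
 * fallback.
 */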
2517 /* Bulk page allocation and mempolicy need to be considered at the
2518 * same time in some situations, such as vmalloc.
2520 * It can accelerate memory allocation, especially for interleaving
2523 unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
2524 unsigned long nr_pages, struct page **page_array)
2526 struct mempolicy *pol = &default_policy;
2527 nodemask_t *nodemask;
2530 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2531 pol = get_task_policy(current);
2533 if (pol->mode == MPOL_INTERLEAVE)
2534 return alloc_pages_bulk_array_interleave(gfp, pol,
2535 nr_pages, page_array);
2537 if (pol->mode == MPOL_WEIGHTED_INTERLEAVE)
2538 return alloc_pages_bulk_array_weighted_interleave(
2539 gfp, pol, nr_pages, page_array);
2541 if (pol->mode == MPOL_PREFERRED_MANY)
2542 return alloc_pages_bulk_array_preferred_many(gfp,
2543 numa_node_id(), pol, nr_pages, page_array);
2545 nid = numa_node_id();
2546 nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
2547 return alloc_pages_bulk_noprof(gfp, nid, nodemask,
2548 nr_pages, NULL, page_array);
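/*
 * Hedged usage sketch (illustrative only, not part of the original file):
 * a vmalloc-style caller filling a page array while honouring the current
 * task's mempolicy; the variable names are assumptions. On a short return
 * the caller is expected to allocate the remainder one page at a time:
 *
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_array_mempolicy_noprof(GFP_KERNEL, nr, pages);
 *	while (filled < nr) {
 *		pages[filled] = alloc_page(GFP_KERNEL);
 *		if (!pages[filled])
 *			break;
 *		filled++;
 *	}
 */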
2551 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2553 struct mempolicy *pol = mpol_dup(src->vm_policy);
2556 return PTR_ERR(pol);
2557 dst->vm_policy = pol;
2562 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2563 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2564 * with the mems_allowed returned by cpuset_mems_allowed(). This
2565 * keeps mempolicies cpuset-relative after its cpuset moves. See
2566 * further kernel/cpuset.c update_nodemask().
2568 * current's mempolicy may be rebound by another task (the task that changes
2569 * the cpuset's mems), so we needn't do rebind work for the current task.
2572 /* Slow path of a mempolicy duplicate */
2573 struct mempolicy *__mpol_dup(struct mempolicy *old)
2575 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2578 return ERR_PTR(-ENOMEM);
2580 /* task's mempolicy is protected by alloc_lock */
2581 if (old == current->mempolicy) {
2584 task_unlock(current);
2588 if (current_cpuset_is_being_rebound()) {
2589 nodemask_t mems = cpuset_mems_allowed(current);
2590 mpol_rebind_policy(new, &mems);
2592 atomic_set(&new->refcnt, 1);
2596 /* Slow path of a mempolicy comparison */
2597 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2601 if (a->mode != b->mode)
2603 if (a->flags != b->flags)
2605 if (a->home_node != b->home_node)
2607 if (mpol_store_user_nodemask(a))
2608 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2613 case MPOL_INTERLEAVE:
2614 case MPOL_PREFERRED:
2615 case MPOL_PREFERRED_MANY:
2616 case MPOL_WEIGHTED_INTERLEAVE:
2617 return !!nodes_equal(a->nodes, b->nodes);
2627 * Shared memory backing store policy support.
2629 * Remember policies even when nobody has shared memory mapped.
2630 * The policies are kept in Red-Black tree linked from the inode.
2631 * They are protected by the sp->lock rwlock, which should be held
2632 * for any accesses to the tree.
2636 * Look up the first element intersecting start-end. Caller holds sp->lock for
2637 * reading or for writing
2639 static struct sp_node *sp_lookup(struct shared_policy *sp,
2640 pgoff_t start, pgoff_t end)
2642 struct rb_node *n = sp->root.rb_node;
2645 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2647 if (start >= p->end)
2649 else if (end <= p->start)
2657 struct sp_node *w = NULL;
2658 struct rb_node *prev = rb_prev(n);
2661 w = rb_entry(prev, struct sp_node, nd);
2662 if (w->end <= start)
2666 return rb_entry(n, struct sp_node, nd);
2670 * Insert a new shared policy into the tree. Caller holds sp->lock for
2673 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2675 struct rb_node **p = &sp->root.rb_node;
2676 struct rb_node *parent = NULL;
2681 nd = rb_entry(parent, struct sp_node, nd);
2682 if (new->start < nd->start)
2684 else if (new->end > nd->end)
2685 p = &(*p)->rb_right;
2689 rb_link_node(&new->nd, parent, p);
2690 rb_insert_color(&new->nd, &sp->root);
2693 /* Find shared policy intersecting idx */
2694 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
2697 struct mempolicy *pol = NULL;
2700 if (!sp->root.rb_node)
2702 read_lock(&sp->lock);
2703 sn = sp_lookup(sp, idx, idx+1);
2705 mpol_get(sn->policy);
2708 read_unlock(&sp->lock);
2712 static void sp_free(struct sp_node *n)
2714 mpol_put(n->policy);
2715 kmem_cache_free(sn_cache, n);
2719 * mpol_misplaced - check whether current folio node is valid in policy
2721 * @folio: folio to be checked
2722 * @vmf: structure describing the fault
2723 * @addr: virtual address in @vma for shared policy lookup and interleave policy
2725 * Look up the current policy node id for vma,addr and compare it to the
2726 * folio's node id. Policy determination "mimics" alloc_page_vma().
2727 * Called from fault path where we know the vma and faulting address.
2729 * Return: NUMA_NO_NODE if the folio is on a node that is valid for this
2730 * policy, or a suitable node ID to allocate a replacement folio from.
2732 int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
2735 struct mempolicy *pol;
2738 int curnid = folio_nid(folio);
2739 struct vm_area_struct *vma = vmf->vma;
2740 int thiscpu = raw_smp_processor_id();
2741 int thisnid = numa_node_id();
2742 int polnid = NUMA_NO_NODE;
2743 int ret = NUMA_NO_NODE;
2746 * Make sure the ptl is held so that we can't be preempted and the
2747 * smp processor id stays stable.
2749 lockdep_assert_held(vmf->ptl);
2750 pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);
2751 if (!(pol->flags & MPOL_F_MOF))
2754 switch (pol->mode) {
2755 case MPOL_INTERLEAVE:
2756 polnid = interleave_nid(pol, ilx);
2759 case MPOL_WEIGHTED_INTERLEAVE:
2760 polnid = weighted_interleave_nid(pol, ilx);
2763 case MPOL_PREFERRED:
2764 if (node_isset(curnid, pol->nodes))
2766 polnid = first_node(pol->nodes);
2770 polnid = numa_node_id();
2774 case MPOL_PREFERRED_MANY:
2776 * Even though MPOL_PREFERRED_MANY can allocate pages outside
2777 * policy nodemask we don't allow numa migration to nodes
2778 * outside policy nodemask for now. This is done so that if we
2779 * want demotion to slow memory to happen, before allocating
2780 * from some DRAM node say 'x', we will end up using a
2781 * MPOL_PREFERRED_MANY mask excluding node 'x'. In such scenario
2782 * we should not promote to node 'x' from slow memory node.
2784 if (pol->flags & MPOL_F_MORON) {
2786 * Optimize placement among multiple nodes
2787 * via NUMA balancing
2789 if (node_isset(thisnid, pol->nodes))
2795 * use the current folio if it is in the policy nodemask,
2796 * else select the nearest allowed node, if any.
2797 * If there are no allowed nodes, use the current one [!misplaced].
2799 if (node_isset(curnid, pol->nodes))
2801 z = first_zones_zonelist(
2802 node_zonelist(thisnid, GFP_HIGHUSER),
2803 gfp_zone(GFP_HIGHUSER),
2805 polnid = zone_to_nid(z->zone);
2812 /* Migrate the folio towards the node whose CPU is referencing it */
2813 if (pol->flags & MPOL_F_MORON) {
2816 if (!should_numa_migrate_memory(current, folio, curnid,
2821 if (curnid != polnid)
2830 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2831 * dropped after task->mempolicy is set to NULL so that any allocation done as
2832 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2835 void mpol_put_task_policy(struct task_struct *task)
2837 struct mempolicy *pol;
2840 pol = task->mempolicy;
2841 task->mempolicy = NULL;
2846 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2848 rb_erase(&n->nd, &sp->root);
2852 static void sp_node_init(struct sp_node *node, unsigned long start,
2853 unsigned long end, struct mempolicy *pol)
2855 node->start = start;
2860 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2861 struct mempolicy *pol)
2864 struct mempolicy *newpol;
2866 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2870 newpol = mpol_dup(pol);
2871 if (IS_ERR(newpol)) {
2872 kmem_cache_free(sn_cache, n);
2875 newpol->flags |= MPOL_F_SHARED;
2876 sp_node_init(n, start, end, newpol);
2881 /* Replace a policy range. */
2882 static int shared_policy_replace(struct shared_policy *sp, pgoff_t start,
2883 pgoff_t end, struct sp_node *new)
2886 struct sp_node *n_new = NULL;
2887 struct mempolicy *mpol_new = NULL;
2891 write_lock(&sp->lock);
2892 n = sp_lookup(sp, start, end);
2893 /* Take care of old policies in the same range. */
2894 while (n && n->start < end) {
2895 struct rb_node *next = rb_next(&n->nd);
2896 if (n->start >= start) {
2902 /* Old policy spanning whole new range. */
2907 *mpol_new = *n->policy;
2908 atomic_set(&mpol_new->refcnt, 1);
2909 sp_node_init(n_new, end, n->end, mpol_new);
2911 sp_insert(sp, n_new);
2920 n = rb_entry(next, struct sp_node, nd);
2924 write_unlock(&sp->lock);
2931 kmem_cache_free(sn_cache, n_new);
2936 write_unlock(&sp->lock);
2938 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2941 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2944 atomic_set(&mpol_new->refcnt, 1);
2949 * mpol_shared_policy_init - initialize shared policy for inode
2950 * @sp: pointer to inode shared policy
2951 * @mpol: struct mempolicy to install
2953 * Install non-NULL @mpol in inode's shared policy rb-tree.
2954 * On entry, the current task has a reference on a non-NULL @mpol.
2955 * This must be released on exit.
2956 * This is called at get_inode() time, so we can use GFP_KERNEL.
2958 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2962 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2963 rwlock_init(&sp->lock);
2967 struct mempolicy *npol;
2968 NODEMASK_SCRATCH(scratch);
2973 /* contextualize the tmpfs mount point mempolicy to this file */
2974 npol = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2976 goto free_scratch; /* no valid nodemask intersection */
2979 ret = mpol_set_nodemask(npol, &mpol->w.user_nodemask, scratch);
2980 task_unlock(current);
2984 /* alloc node covering entire file; adds ref to file's npol */
2985 sn = sp_alloc(0, MAX_LFS_FILESIZE >> PAGE_SHIFT, npol);
2989 mpol_put(npol); /* drop initial ref on file's npol */
2991 NODEMASK_SCRATCH_FREE(scratch);
2993 mpol_put(mpol); /* drop our incoming ref on sb mpol */
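/*
 * Illustrative note (not from the original source): tmpfs is the expected
 * caller; at inode creation time it passes the superblock's mount-option
 * mempolicy (which may be NULL) so that all mappings of the file share one
 * policy tree. A hedged sketch of such a call site, where the shmem-side
 * names are an assumption:
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 */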
2997 int mpol_set_shared_policy(struct shared_policy *sp,
2998 struct vm_area_struct *vma, struct mempolicy *pol)
3001 struct sp_node *new = NULL;
3002 unsigned long sz = vma_pages(vma);
3005 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
3009 err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
3015 /* Free a backing policy store on inode delete. */
3016 void mpol_free_shared_policy(struct shared_policy *sp)
3019 struct rb_node *next;
3021 if (!sp->root.rb_node)
3023 write_lock(&sp->lock);
3024 next = rb_first(&sp->root);
3026 n = rb_entry(next, struct sp_node, nd);
3027 next = rb_next(&n->nd);
3030 write_unlock(&sp->lock);
3033 #ifdef CONFIG_NUMA_BALANCING
3034 static int __initdata numabalancing_override;
3036 static void __init check_numabalancing_enable(void)
3038 bool numabalancing_default = false;
3040 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
3041 numabalancing_default = true;
3043 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
3044 if (numabalancing_override)
3045 set_numabalancing_state(numabalancing_override == 1);
3047 if (num_online_nodes() > 1 && !numabalancing_override) {
3048 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
3049 numabalancing_default ? "Enabling" : "Disabling");
3050 set_numabalancing_state(numabalancing_default);
3054 static int __init setup_numabalancing(char *str)
3060 if (!strcmp(str, "enable")) {
3061 numabalancing_override = 1;
3063 } else if (!strcmp(str, "disable")) {
3064 numabalancing_override = -1;
3069 pr_warn("Unable to parse numa_balancing=\n");
3073 __setup("numa_balancing=", setup_numabalancing);
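/*
 * Illustrative note (not from the original source): the override can be
 * set on the kernel command line, e.g. "numa_balancing=disable", and the
 * mode can also be changed at runtime through the kernel.numa_balancing
 * sysctl mentioned in the message above.
 */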
3075 static inline void __init check_numabalancing_enable(void)
3078 #endif /* CONFIG_NUMA_BALANCING */
3080 void __init numa_policy_init(void)
3082 nodemask_t interleave_nodes;
3083 unsigned long largest = 0;
3084 int nid, prefer = 0;
3086 policy_cache = kmem_cache_create("numa_policy",
3087 sizeof(struct mempolicy),
3088 0, SLAB_PANIC, NULL);
3090 sn_cache = kmem_cache_create("shared_policy_node",
3091 sizeof(struct sp_node),
3092 0, SLAB_PANIC, NULL);
3094 for_each_node(nid) {
3095 preferred_node_policy[nid] = (struct mempolicy) {
3096 .refcnt = ATOMIC_INIT(1),
3097 .mode = MPOL_PREFERRED,
3098 .flags = MPOL_F_MOF | MPOL_F_MORON,
3099 .nodes = nodemask_of_node(nid),
3104 * Set interleaving policy for system init. Interleaving is only
3105 * enabled across suitably sized nodes (default is >= 16MB); if they are
3106 * all smaller, fall back to the largest node.
3108 nodes_clear(interleave_nodes);
3109 for_each_node_state(nid, N_MEMORY) {
3110 unsigned long total_pages = node_present_pages(nid);
3112 /* Preserve the largest node */
3113 if (largest < total_pages) {
3114 largest = total_pages;
3118 /* Interleave this node? */
3119 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
3120 node_set(nid, interleave_nodes);
3123 /* All too small, use the largest */
3124 if (unlikely(nodes_empty(interleave_nodes)))
3125 node_set(prefer, interleave_nodes);
3127 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
3128 pr_err("%s: interleaving failed\n", __func__);
3130 check_numabalancing_enable();
3133 /* Reset policy of current process to default */
3134 void numa_default_policy(void)
3136 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
3140 * Parse and format mempolicy from/to strings
3142 static const char * const policy_modes[] =
3144 [MPOL_DEFAULT] = "default",
3145 [MPOL_PREFERRED] = "prefer",
3146 [MPOL_BIND] = "bind",
3147 [MPOL_INTERLEAVE] = "interleave",
3148 [MPOL_WEIGHTED_INTERLEAVE] = "weighted interleave",
3149 [MPOL_LOCAL] = "local",
3150 [MPOL_PREFERRED_MANY] = "prefer (many)",
3155 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
3156 * @str: string containing mempolicy to parse
3157 * @mpol: pointer to struct mempolicy pointer, returned on success.
3160 * <mode>[=<flags>][:<nodelist>]
3162 * Return: %0 on success, else %1
3164 int mpol_parse_str(char *str, struct mempolicy **mpol)
3166 struct mempolicy *new = NULL;
3167 unsigned short mode_flags;
3169 char *nodelist = strchr(str, ':');
3170 char *flags = strchr(str, '=');
3174 *flags++ = '\0'; /* terminate mode string */
3177 /* NUL-terminate mode or flags string */
3179 if (nodelist_parse(nodelist, nodes))
3181 if (!nodes_subset(nodes, node_states[N_MEMORY]))
3186 mode = match_string(policy_modes, MPOL_MAX, str);
3191 case MPOL_PREFERRED:
3193 * Insist on a nodelist of one node only, although later
3194 * we use first_node(nodes) to grab a single node, so here
3195 * nodelist (or nodes) cannot be empty.
3198 char *rest = nodelist;
3199 while (isdigit(*rest))
3203 if (nodes_empty(nodes))
3207 case MPOL_INTERLEAVE:
3208 case MPOL_WEIGHTED_INTERLEAVE:
3210 * Default to online nodes with memory if no nodelist
3213 nodes = node_states[N_MEMORY];
3217 * Don't allow a nodelist; mpol_new() checks flags
3224 * Insist on an empty nodelist
3229 case MPOL_PREFERRED_MANY:
3232 * Insist on a nodelist
3241 * Currently, we only support two mutually exclusive
3244 if (!strcmp(flags, "static"))
3245 mode_flags |= MPOL_F_STATIC_NODES;
3246 else if (!strcmp(flags, "relative"))
3247 mode_flags |= MPOL_F_RELATIVE_NODES;
3252 new = mpol_new(mode, mode_flags, &nodes);
3257 * Save nodes for mpol_to_str() to show the tmpfs mount options
3258 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3260 if (mode != MPOL_PREFERRED) {
3262 } else if (nodelist) {
3263 nodes_clear(new->nodes);
3264 node_set(first_node(nodes), new->nodes);
3266 new->mode = MPOL_LOCAL;
3270 * Save nodes for contextualization: this will be used to "clone"
3271 * the mempolicy in a specific context [cpuset] at a later time.
3273 new->w.user_nodemask = nodes;
3278 /* Restore string for error message */
3287 #endif /* CONFIG_TMPFS */
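/*
 * Illustrative examples (not from the original source) of tmpfs "mpol="
 * mount-option strings accepted by mpol_parse_str() above, following the
 * <mode>[=<flags>][:<nodelist>] format:
 *
 *	mpol=interleave:0-3
 *	mpol=bind=static:0,2
 *	mpol=prefer:1
 *	mpol=local
 */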
3290 * mpol_to_str - format a mempolicy structure for printing
3291 * @buffer: to contain formatted mempolicy string
3292 * @maxlen: length of @buffer
3293 * @pol: pointer to mempolicy to be formatted
3295 * Convert @pol into a string. If @buffer is too short, truncate the string.
3296 * Recommend a @maxlen of at least 64: enough for the longest mode,
3297 * "weighted interleave", the longest flag, "relative", and a few node ids.
3299 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3302 nodemask_t nodes = NODE_MASK_NONE;
3303 unsigned short mode = MPOL_DEFAULT;
3304 unsigned short flags = 0;
3306 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3315 case MPOL_PREFERRED:
3316 case MPOL_PREFERRED_MANY:
3318 case MPOL_INTERLEAVE:
3319 case MPOL_WEIGHTED_INTERLEAVE:
3324 snprintf(p, maxlen, "unknown");
3328 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3330 if (flags & MPOL_MODE_FLAGS) {
3331 p += snprintf(p, buffer + maxlen - p, "=");
3334 * Currently, the only defined flags are mutually exclusive
3336 if (flags & MPOL_F_STATIC_NODES)
3337 p += snprintf(p, buffer + maxlen - p, "static");
3338 else if (flags & MPOL_F_RELATIVE_NODES)
3339 p += snprintf(p, buffer + maxlen - p, "relative");
3342 if (!nodes_empty(nodes))
3343 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3344 nodemask_pr_args(&nodes));
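/*
 * Hedged usage sketch (illustrative only, not part of the original file):
 *
 *	char buf[64];
 *
 *	mpol_to_str(buf, sizeof(buf), pol);
 *	pr_debug("mempolicy: %s\n", buf);
 *
 * A 64-byte buffer comfortably holds "weighted interleave", a flag and a
 * short nodelist; a too-small buffer is silently truncated, per the
 * description above.
 */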
3348 struct iw_node_attr {
3349 struct kobj_attribute kobj_attr;
3353 static ssize_t node_show(struct kobject *kobj, struct kobj_attribute *attr,
3356 struct iw_node_attr *node_attr;
3359 node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
3360 weight = get_il_weight(node_attr->nid);
3361 return sysfs_emit(buf, "%d\n", weight);
3364 static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr,
3365 const char *buf, size_t count)
3367 struct iw_node_attr *node_attr;
3372 node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
3373 if (count == 0 || sysfs_streq(buf, ""))
3375 else if (kstrtou8(buf, 0, &weight))
3378 new = kzalloc(nr_node_ids, GFP_KERNEL);
3382 mutex_lock(&iw_table_lock);
3383 old = rcu_dereference_protected(iw_table,
3384 lockdep_is_held(&iw_table_lock));
3386 memcpy(new, old, nr_node_ids);
3387 new[node_attr->nid] = weight;
3388 rcu_assign_pointer(iw_table, new);
3389 mutex_unlock(&iw_table_lock);
3395 static struct iw_node_attr **node_attrs;
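/*
 * Hedged sketch (illustrative only, not part of the original file) of the
 * reader side of the iw_table RCU scheme used by node_store() above. The
 * helper name is hypothetical and mirrors what the weighted-interleave
 * allocation paths do when they look up a node's weight.
 */
static __maybe_unused u8 example_read_il_weight(int nid)
{
	u8 *table, weight;

	rcu_read_lock();
	table = rcu_dereference(iw_table);
	weight = table ? table[nid] : 0;
	rcu_read_unlock();
	/* a weight of 0 means "unset"; the system default is 1 */
	return weight ? weight : 1;
}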
3397 static void sysfs_wi_node_release(struct iw_node_attr *node_attr,
3398 struct kobject *parent)
3402 sysfs_remove_file(parent, &node_attr->kobj_attr.attr);
3403 kfree(node_attr->kobj_attr.attr.name);
3407 static void sysfs_wi_release(struct kobject *wi_kobj)
3411 for (i = 0; i < nr_node_ids; i++)
3412 sysfs_wi_node_release(node_attrs[i], wi_kobj);
3413 kobject_put(wi_kobj);
3416 static const struct kobj_type wi_ktype = {
3417 .sysfs_ops = &kobj_sysfs_ops,
3418 .release = sysfs_wi_release,
3421 static int add_weight_node(int nid, struct kobject *wi_kobj)
3423 struct iw_node_attr *node_attr;
3426 node_attr = kzalloc(sizeof(*node_attr), GFP_KERNEL);
3430 name = kasprintf(GFP_KERNEL, "node%d", nid);
3436 sysfs_attr_init(&node_attr->kobj_attr.attr);
3437 node_attr->kobj_attr.attr.name = name;
3438 node_attr->kobj_attr.attr.mode = 0644;
3439 node_attr->kobj_attr.show = node_show;
3440 node_attr->kobj_attr.store = node_store;
3441 node_attr->nid = nid;
3443 if (sysfs_create_file(wi_kobj, &node_attr->kobj_attr.attr)) {
3444 kfree(node_attr->kobj_attr.attr.name);
3446 pr_err("failed to add attribute to weighted_interleave\n");
3450 node_attrs[nid] = node_attr;
3454 static int add_weighted_interleave_group(struct kobject *root_kobj)
3456 struct kobject *wi_kobj;
3459 wi_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
3463 err = kobject_init_and_add(wi_kobj, &wi_ktype, root_kobj,
3464 "weighted_interleave");
3470 for_each_node_state(nid, N_POSSIBLE) {
3471 err = add_weight_node(nid, wi_kobj);
3473 pr_err("failed to add sysfs [node%d]\n", nid);
3478 kobject_put(wi_kobj);
3482 static void mempolicy_kobj_release(struct kobject *kobj)
3486 mutex_lock(&iw_table_lock);
3487 old = rcu_dereference_protected(iw_table,
3488 lockdep_is_held(&iw_table_lock));
3489 rcu_assign_pointer(iw_table, NULL);
3490 mutex_unlock(&iw_table_lock);
3497 static const struct kobj_type mempolicy_ktype = {
3498 .release = mempolicy_kobj_release
3501 static int __init mempolicy_sysfs_init(void)
3504 static struct kobject *mempolicy_kobj;
3506 mempolicy_kobj = kzalloc(sizeof(*mempolicy_kobj), GFP_KERNEL);
3507 if (!mempolicy_kobj) {
3512 node_attrs = kcalloc(nr_node_ids, sizeof(struct iw_node_attr *),
3519 err = kobject_init_and_add(mempolicy_kobj, &mempolicy_ktype, mm_kobj,
3524 err = add_weighted_interleave_group(mempolicy_kobj);
3526 pr_err("mempolicy sysfs structure failed to initialize\n");
3527 kobject_put(mempolicy_kobj);
3535 kfree(mempolicy_kobj);
3537 pr_err("failed to add mempolicy kobject to the system\n");
3541 late_initcall(mempolicy_sysfs_init);
3542 #endif /* CONFIG_SYSFS */