/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>
#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
/*
 * Device private memory: see HMM (Documentation/vm/hmm.txt) or hmm.h.
 */
DEFINE_STATIC_KEY_FALSE(device_private_key);
EXPORT_SYMBOL(device_private_key);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
        struct mm_struct        *mm;
        spinlock_t              lock;
        atomic_t                sequence;
        struct list_head        ranges;
        struct list_head        mirrors;
        struct mmu_notifier     mmu_notifier;
        struct rw_semaphore     mirrors_sem;
};
/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
        struct hmm *hmm = READ_ONCE(mm->hmm);
        bool cleanup = false;

        /*
         * The hmm struct can only be freed once the mm_struct goes away,
         * hence we should always have pre-allocated a new hmm struct
         * above.
         */
        if (hmm)
                return hmm;

        hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
        if (!hmm)
                return NULL;
        INIT_LIST_HEAD(&hmm->mirrors);
        init_rwsem(&hmm->mirrors_sem);
        atomic_set(&hmm->sequence, 0);
        hmm->mmu_notifier.ops = NULL;
        INIT_LIST_HEAD(&hmm->ranges);
        spin_lock_init(&hmm->lock);
        hmm->mm = mm;

        /*
         * We should only get here if we hold the mmap_sem in write mode, i.e.
         * on registration of the first mirror through hmm_mirror_register().
         */
        hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
        if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
                kfree(hmm);
                return NULL;
        }

        spin_lock(&mm->page_table_lock);
        if (!mm->hmm)
                mm->hmm = hmm;
        else
                cleanup = true;
        spin_unlock(&mm->page_table_lock);

        if (cleanup) {
                mmu_notifier_unregister(&hmm->mmu_notifier, mm);
                kfree(hmm);
        }

        return mm->hmm;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
        kfree(mm->hmm);
}
static void hmm_invalidate_range(struct hmm *hmm,
                                 enum hmm_update_type action,
                                 unsigned long start,
                                 unsigned long end)
{
        struct hmm_mirror *mirror;
        struct hmm_range *range;

        spin_lock(&hmm->lock);
        list_for_each_entry(range, &hmm->ranges, list) {
                unsigned long addr, idx, npages;

                if (end < range->start || start >= range->end)
                        continue;

                range->valid = false;
                addr = max(start, range->start);
                idx = (addr - range->start) >> PAGE_SHIFT;
                npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
                memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
        }
        spin_unlock(&hmm->lock);

        down_read(&hmm->mirrors_sem);
        list_for_each_entry(mirror, &hmm->mirrors, list)
                mirror->ops->sync_cpu_device_pagetables(mirror, action,
                                                        start, end);
        up_read(&hmm->mirrors_sem);
}
static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct hmm_mirror *mirror;
        struct hmm *hmm = mm->hmm;

        down_write(&hmm->mirrors_sem);
        mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
                                          list);
        while (mirror) {
                list_del_init(&mirror->list);
                if (mirror->ops->release) {
                        /*
                         * Drop mirrors_sem so the callback can wait on any
                         * pending work that might itself trigger an
                         * mmu_notifier callback and thus would deadlock with
                         * us.
                         */
                        up_write(&hmm->mirrors_sem);
                        mirror->ops->release(mirror);
                        down_write(&hmm->mirrors_sem);
                }
                mirror = list_first_entry_or_null(&hmm->mirrors,
                                                  struct hmm_mirror, list);
        }
        up_write(&hmm->mirrors_sem);
}
static void hmm_invalidate_range_start(struct mmu_notifier *mn,
                                       struct mm_struct *mm,
                                       unsigned long start,
                                       unsigned long end)
{
        struct hmm *hmm = mm->hmm;

        VM_BUG_ON(!hmm);

        atomic_inc(&hmm->sequence);
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        struct hmm *hmm = mm->hmm;

        VM_BUG_ON(!hmm);

        hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
        .release                = hmm_release,
        .invalidate_range_start = hmm_invalidate_range_start,
        .invalidate_range_end   = hmm_invalidate_range_end,
};
/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 *
 * An illustrative registration sketch follows below.
 */
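/*
 * Illustrative sketch (not from the original kernel documentation): a driver
 * typically embeds struct hmm_mirror in its own per-process state, supplies a
 * sync_cpu_device_pagetables() callback through struct hmm_mirror_ops, and
 * registers while holding mmap_sem in write mode. The names my_mirror_ops,
 * my_proc and my_invalidate_device_ptes() below are hypothetical.
 *
 *   static void my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                             enum hmm_update_type update,
 *                                             unsigned long start,
 *                                             unsigned long end)
 *   {
 *       // Tear down or invalidate the device PTEs covering [start, end).
 *       my_invalidate_device_ptes(mirror, start, end);
 *   }
 *
 *   static const struct hmm_mirror_ops my_mirror_ops = {
 *       .sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *   };
 *
 *   int my_proc_bind(struct my_proc *p, struct mm_struct *mm)
 *   {
 *       int ret;
 *
 *       p->mirror.ops = &my_mirror_ops;
 *       down_write(&mm->mmap_sem);
 *       ret = hmm_mirror_register(&p->mirror, mm);
 *       up_write(&mm->mmap_sem);
 *       return ret;
 *   }
 */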
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
        if (!mm || !mirror || !mirror->ops)
                return -EINVAL;

again:
        mirror->hmm = hmm_register(mm);
        if (!mirror->hmm)
                return -ENOMEM;

        down_write(&mirror->hmm->mirrors_sem);
        if (mirror->hmm->mm == NULL) {
                /*
                 * A racing hmm_mirror_unregister() is about to destroy the hmm
                 * struct. Try again to allocate a new one.
                 */
                up_write(&mirror->hmm->mirrors_sem);
                mirror->hmm = NULL;
                goto again;
        }
        list_add(&mirror->list, &mirror->hmm->mirrors);
        up_write(&mirror->hmm->mirrors_sem);

        return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and clean up.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
        bool should_unregister = false;
        struct mm_struct *mm;
        struct hmm *hmm;

        if (mirror->hmm == NULL)
                return;

        hmm = mirror->hmm;
        down_write(&hmm->mirrors_sem);
        list_del_init(&mirror->list);
        should_unregister = list_empty(&hmm->mirrors);
        mirror->hmm = NULL;
        mm = hmm->mm;
        hmm->mm = NULL;
        up_write(&hmm->mirrors_sem);

        if (!should_unregister || mm == NULL)
                return;

        spin_lock(&mm->page_table_lock);
        if (mm->hmm == hmm)
                mm->hmm = NULL;
        spin_unlock(&mm->page_table_lock);

        mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
        kfree(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
struct hmm_vma_walk {
        struct hmm_range        *range;
        unsigned long           last;
        bool                    fault;
        bool                    block;
        bool                    write;
};

static int hmm_vma_do_fault(struct mm_walk *walk,
                            unsigned long addr,
                            uint64_t *pfn)
{
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct vm_area_struct *vma = walk->vma;
        int r;

        flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
        flags |= hmm_vma_walk->write ? FAULT_FLAG_WRITE : 0;
        r = handle_mm_fault(vma, addr, flags);
        if (r & VM_FAULT_RETRY)
                return -EBUSY;
        if (r & VM_FAULT_ERROR) {
                *pfn = HMM_PFN_ERROR;
                return -EFAULT;
        }

        return -EAGAIN;
}
static int hmm_pfns_bad(unsigned long addr,
                        unsigned long end,
                        struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i;

        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++)
                pfns[i] = HMM_PFN_ERROR;

        return 0;
}
/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @walk: mm_walk structure
 * Returns: 0 on success, -EAGAIN after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole(unsigned long addr,
                             unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i;

        hmm_vma_walk->last = addr;
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++) {
                pfns[i] = 0;
                if (hmm_vma_walk->fault) {
                        int ret;

                        ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
                        if (ret != -EAGAIN)
                                return ret;
                }
        }

        return hmm_vma_walk->fault ? -EAGAIN : 0;
}
static int hmm_vma_handle_pmd(struct mm_walk *walk,
                              unsigned long addr,
                              unsigned long end,
                              uint64_t *pfns,
                              pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        unsigned long pfn, i;
        uint64_t flag = 0;

        if (pmd_protnone(pmd))
                return hmm_vma_walk_hole(addr, end, walk);

        if (hmm_vma_walk->fault && hmm_vma_walk->write && !pmd_write(pmd))
                return hmm_vma_walk_hole(addr, end, walk);

        pfn = pmd_pfn(pmd) + pte_index(addr);
        flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                pfns[i] = hmm_pfn_from_pfn(pfn) | flag;
        hmm_vma_walk->last = end;
        return 0;
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              uint64_t *pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct vm_area_struct *vma = walk->vma;
        pte_t pte = *ptep;

        *pfn = 0;

        if (pte_none(pte)) {
                if (hmm_vma_walk->fault)
                        goto fault;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                if (!non_swap_entry(entry)) {
                        if (hmm_vma_walk->fault)
                                goto fault;
                        return 0;
                }

                /*
                 * This is a special swap entry: ignore migration entries
                 * unless we are faulting, use device private entries and
                 * report anything else as an error.
                 */
                if (is_device_private_entry(entry)) {
                        *pfn = hmm_pfn_from_pfn(swp_offset(entry));
                        if (is_write_device_private_entry(entry)) {
                                *pfn |= HMM_PFN_WRITE;
                        } else if (hmm_vma_walk->fault && hmm_vma_walk->write)
                                goto fault;
                        *pfn |= HMM_PFN_DEVICE_PRIVATE;
                        return 0;
                }

                if (is_migration_entry(entry)) {
                        if (hmm_vma_walk->fault) {
                                pte_unmap(ptep);
                                hmm_vma_walk->last = addr;
                                migration_entry_wait(vma->vm_mm,
                                                     pmdp, addr);
                                return -EAGAIN;
                        }
                        return 0;
                }

                /* Report error for everything else */
                *pfn = HMM_PFN_ERROR;
                return -EFAULT;
        }

        if (hmm_vma_walk->fault && hmm_vma_walk->write && !pte_write(pte))
                goto fault;

        *pfn = hmm_pfn_from_pfn(pte_pfn(pte));
        *pfn |= pte_write(pte) ? HMM_PFN_WRITE : 0;
        return 0;

fault:
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_walk_hole(addr, end, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long addr = start, i;
        pte_t *ptep;

        i = (addr - range->start) >> PAGE_SHIFT;

again:
        if (pmd_none(*pmdp))
                return hmm_vma_walk_hole(start, end, walk);

        if (pmd_huge(*pmdp) && (range->vma->vm_flags & VM_HUGETLB))
                return hmm_pfns_bad(start, end, walk);

        if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
                pmd_t pmd;

                /*
                 * No need to take the pmd_lock here: even if some other
                 * thread is splitting the huge pmd, we will get that event
                 * through the mmu_notifier callback.
                 *
                 * So just read the pmd value and check again that it is a
                 * transparent huge or device mapping one, and compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;

                return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
        }

        if (pmd_bad(*pmdp))
                return hmm_pfns_bad(start, end, walk);

        ptep = pte_offset_map(pmdp, addr);
        for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
                if (r) {
                        /* hmm_vma_handle_pte() did unmap the pte directory */
                        hmm_vma_walk->last = addr;
                        return r;
                }
        }
        pte_unmap(ptep - 1);

        hmm_vma_walk->last = addr;
        return 0;
}
static void hmm_pfns_clear(uint64_t *pfns,
                           unsigned long addr,
                           unsigned long end)
{
        for (; addr < end; addr += PAGE_SIZE, pfns++)
                *pfns = 0;
}

static void hmm_pfns_special(struct hmm_range *range)
{
        unsigned long addr = range->start, i = 0;

        for (; addr < range->end; addr += PAGE_SIZE, i++)
                range->pfns[i] = HMM_PFN_SPECIAL;
}
/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @range: range being snapshotted
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          vma permission, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See hmm_vma_range_done() for
 * further information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 *
 * An illustrative snapshot sketch follows below.
 */
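/*
 * Illustrative sketch (not from the original kernel documentation): taking a
 * snapshot of one vma, assuming the caller holds mmap_sem for read and that
 * pfns points to one uint64_t per page in [start, end). my_dev_pt_lock(),
 * my_dev_pt_unlock() and my_dev_pt_update() are hypothetical driver helpers.
 *
 *   int my_snapshot(struct vm_area_struct *vma, unsigned long start,
 *                   unsigned long end, uint64_t *pfns)
 *   {
 *       struct hmm_range range;
 *       int ret;
 *
 *       range.vma = vma;
 *       range.start = start;
 *       range.end = end;
 *       range.pfns = pfns;
 *
 *   again:
 *       ret = hmm_vma_get_pfns(&range);
 *       if (ret)
 *           return ret;
 *
 *       my_dev_pt_lock();
 *       if (!hmm_vma_range_done(&range)) {
 *           // The CPU page table changed under us; the snapshot is stale.
 *           my_dev_pt_unlock();
 *           goto again;
 *       }
 *       my_dev_pt_update(pfns, start, end);
 *       my_dev_pt_unlock();
 *       return 0;
 *   }
 */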
int hmm_vma_get_pfns(struct hmm_range *range)
{
        struct vm_area_struct *vma = range->vma;
        struct hmm_vma_walk hmm_vma_walk;
        struct mm_walk mm_walk;
        struct hmm *hmm;

        /* Sanity check, this really should not happen ! */
        if (range->start < vma->vm_start || range->start >= vma->vm_end)
                return -EINVAL;
        if (range->end < vma->vm_start || range->end > vma->vm_end)
                return -EINVAL;

        hmm = hmm_register(vma->vm_mm);
        if (!hmm)
                return -ENOMEM;
        /* Caller must have registered a mirror, via hmm_mirror_register() ! */
        if (!hmm->mmu_notifier.ops)
                return -EINVAL;

        /* FIXME support hugetlb fs */
        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
                hmm_pfns_special(range);
                return -EINVAL;
        }

        if (!(vma->vm_flags & VM_READ)) {
                /*
                 * If the vma does not allow read access, then assume that it
                 * does not allow write access either. Architectures that
                 * allow write without read access are not supported by HMM,
                 * because operations such as atomic access would not work.
                 */
                hmm_pfns_clear(range->pfns, range->start, range->end);
                return -EPERM;
        }

        /* Initialize range to track CPU page table update */
        spin_lock(&hmm->lock);
        range->valid = true;
        list_add_rcu(&range->list, &hmm->ranges);
        spin_unlock(&hmm->lock);

        hmm_vma_walk.fault = false;
        hmm_vma_walk.range = range;
        mm_walk.private = &hmm_vma_walk;

        mm_walk.mm = vma->vm_mm;
        mm_walk.pte_entry = NULL;
        mm_walk.test_walk = NULL;
        mm_walk.hugetlb_entry = NULL;
        mm_walk.pmd_entry = hmm_vma_walk_pmd;
        mm_walk.pte_hole = hmm_vma_walk_hole;

        walk_page_range(range->start, range->end, &mm_walk);
        return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);
/*
 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is done
 * using the data, or wants to lock updates to the data it got from those
 * functions, it must call the hmm_vma_range_done() function, which will then
 * stop tracking CPU page table updates.
 *
 * Note that the device driver must still implement general CPU page table
 * update tracking either by using hmm_mirror (see hmm_mirror_register()) or by
 * using the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this :
 * again:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(range);
 *   device_update_page_table(range->pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct hmm_range *range)
{
        unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
        struct hmm *hmm;

        if (range->end <= range->start) {
                BUG();
                return false;
        }

        hmm = hmm_register(range->vma->vm_mm);
        if (!hmm) {
                memset(range->pfns, 0, sizeof(*range->pfns) * npages);
                return false;
        }

        spin_lock(&hmm->lock);
        list_del_rcu(&range->list);
        spin_unlock(&hmm->lock);

        return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);
/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @write: is it a write fault
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 on success, error otherwise (-EAGAIN means mmap_sem has been dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(range, write, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_monitor_end()
 *     goto retry;
 *   case 0:
 *     break;
 *   case -ENOMEM:
 *   case -EINVAL:
 *   case -EPERM:
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem)
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem)
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 *
 * An illustrative sketch of consuming the pfns array follows below.
 */
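/*
 * Illustrative sketch (not from the original kernel documentation): one way a
 * driver might consume the pfns array after hmm_vma_fault() succeeded, while
 * following the hmm_vma_range_done() protocol above. It assumes a decode
 * helper such as hmm_pfn_to_page() from include/linux/hmm.h;
 * my_dev_map_page() is a hypothetical driver helper.
 *
 *   unsigned long addr, i;
 *
 *   for (addr = range->start, i = 0; addr < range->end;
 *        addr += PAGE_SIZE, i++) {
 *       if (range->pfns[i] & HMM_PFN_ERROR)
 *           return -EFAULT;
 *       my_dev_map_page(addr, hmm_pfn_to_page(range->pfns[i]),
 *                       range->pfns[i] & HMM_PFN_WRITE);
 *   }
 */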
int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
{
        struct vm_area_struct *vma = range->vma;
        unsigned long start = range->start;
        struct hmm_vma_walk hmm_vma_walk;
        struct mm_walk mm_walk;
        struct hmm *hmm;
        int ret;

        /* Sanity check, this really should not happen ! */
        if (range->start < vma->vm_start || range->start >= vma->vm_end)
                return -EINVAL;
        if (range->end < vma->vm_start || range->end > vma->vm_end)
                return -EINVAL;

        hmm = hmm_register(vma->vm_mm);
        if (!hmm) {
                hmm_pfns_clear(range->pfns, range->start, range->end);
                return -ENOMEM;
        }
        /* Caller must have registered a mirror using hmm_mirror_register() */
        if (!hmm->mmu_notifier.ops)
                return -EINVAL;

        /* FIXME support hugetlb fs */
        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
                hmm_pfns_special(range);
                return -EINVAL;
        }

        if (!(vma->vm_flags & VM_READ)) {
                /*
                 * If the vma does not allow read access, then assume that it
                 * does not allow write access either. Architectures that
                 * allow write without read access are not supported by HMM,
                 * because operations such as atomic access would not work.
                 */
                hmm_pfns_clear(range->pfns, range->start, range->end);
                return -EPERM;
        }

        /* Initialize range to track CPU page table update */
        spin_lock(&hmm->lock);
        range->valid = true;
        list_add_rcu(&range->list, &hmm->ranges);
        spin_unlock(&hmm->lock);

        hmm_vma_walk.fault = true;
        hmm_vma_walk.write = write;
        hmm_vma_walk.block = block;
        hmm_vma_walk.range = range;
        mm_walk.private = &hmm_vma_walk;
        hmm_vma_walk.last = range->start;

        mm_walk.mm = vma->vm_mm;
        mm_walk.pte_entry = NULL;
        mm_walk.test_walk = NULL;
        mm_walk.hugetlb_entry = NULL;
        mm_walk.pmd_entry = hmm_vma_walk_pmd;
        mm_walk.pte_hole = hmm_vma_walk_hole;

        do {
                ret = walk_page_range(start, range->end, &mm_walk);
                start = hmm_vma_walk.last;
        } while (ret == -EAGAIN);

        if (ret) {
                unsigned long i;

                i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
                hmm_pfns_clear(&range->pfns[i], hmm_vma_walk.last, range->end);
                hmm_vma_range_done(range);
        }
        return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        struct page *page;

        page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
        if (!page)
                return NULL;
        lock_page(page);
        return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
        struct percpu_ref *ref = data;
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        percpu_ref_exit(ref);
        devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
}

static void hmm_devmem_ref_kill(void *data)
{
        struct percpu_ref *ref = data;
        struct hmm_devmem *devmem;

        devmem = container_of(ref, struct hmm_devmem, ref);
        percpu_ref_kill(ref);
        wait_for_completion(&devmem->completion);
        devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
}
static int hmm_devmem_fault(struct vm_area_struct *vma,
                            unsigned long addr,
                            const struct page *page,
                            unsigned int flags,
                            pmd_t *pmdp)
{
        struct hmm_devmem *devmem = page->pgmap->data;

        return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
        struct hmm_devmem *devmem = data;

        devmem->ops->free(devmem, page);
}
static DEFINE_MUTEX(hmm_devmem_lock);
static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);

static void hmm_devmem_radix_release(struct resource *resource)
{
        resource_size_t key, align_start, align_size;

        align_start = resource->start & ~(PA_SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(resource), PA_SECTION_SIZE);

        mutex_lock(&hmm_devmem_lock);
        for (key = resource->start;
             key <= resource->end;
             key += PA_SECTION_SIZE)
                radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
        mutex_unlock(&hmm_devmem_lock);
}
static void hmm_devmem_release(struct device *dev, void *data)
{
        struct hmm_devmem *devmem = data;
        struct resource *resource = devmem->resource;
        unsigned long start_pfn, npages;
        struct zone *zone;
        struct page *page;

        if (percpu_ref_tryget_live(&devmem->ref)) {
                dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
                percpu_ref_put(&devmem->ref);
        }

        /* pages are dead and unused, undo the arch mapping */
        start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
        npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;

        page = pfn_to_page(start_pfn);
        zone = page_zone(page);

        mem_hotplug_begin();
        if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
                __remove_pages(zone, start_pfn, npages, NULL);
        else
                arch_remove_memory(start_pfn << PAGE_SHIFT,
                                   npages << PAGE_SHIFT, NULL);
        mem_hotplug_done();

        hmm_devmem_radix_release(resource);
}
static struct hmm_devmem *hmm_devmem_find(resource_size_t phys)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        return radix_tree_lookup(&hmm_devmem_radix, phys >> PA_SECTION_SHIFT);
}
static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
{
        resource_size_t key, align_start, align_size, align_end;
        struct device *device = devmem->device;
        int ret, nid, is_ram;
        unsigned long pfn;

        align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
        align_size = ALIGN(devmem->resource->start +
                           resource_size(devmem->resource),
                           PA_SECTION_SIZE) - align_start;

        is_ram = region_intersects(align_start, align_size,
                                   IORESOURCE_SYSTEM_RAM,
                                   IORES_DESC_NONE);
        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "%s attempted on mixed region %pr\n",
                          __func__, devmem->resource);
                return -ENXIO;
        }
        if (is_ram == REGION_INTERSECTS)
                return -ENXIO;

        if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
                devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
        else
                devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;

        devmem->pagemap.res = *devmem->resource;
        devmem->pagemap.page_fault = hmm_devmem_fault;
        devmem->pagemap.page_free = hmm_devmem_free;
        devmem->pagemap.dev = devmem->device;
        devmem->pagemap.ref = &devmem->ref;
        devmem->pagemap.data = devmem;

        mutex_lock(&hmm_devmem_lock);
        align_end = align_start + align_size - 1;
        for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
                struct hmm_devmem *dup;

                rcu_read_lock();
                dup = hmm_devmem_find(key);
                rcu_read_unlock();
                if (dup) {
                        dev_err(device, "%s: collides with mapping for %s\n",
                                __func__, dev_name(dup->device));
                        mutex_unlock(&hmm_devmem_lock);
                        ret = -EBUSY;
                        goto error;
                }
                ret = radix_tree_insert(&hmm_devmem_radix,
                                        key >> PA_SECTION_SHIFT,
                                        devmem);
                if (ret) {
                        dev_err(device, "%s: failed: %d\n", __func__, ret);
                        mutex_unlock(&hmm_devmem_lock);
                        goto error;
                }
        }
        mutex_unlock(&hmm_devmem_lock);

        nid = dev_to_node(device);
        if (nid < 0)
                nid = numa_mem_id();

        mem_hotplug_begin();
        /*
         * For device private memory we call add_pages() as we only need to
         * allocate and initialize struct page for the device memory.
         * Moreover, the device memory is inaccessible, thus we do not want to
         * create a linear mapping for the memory like arch_add_memory()
         * would do.
         *
         * For device public memory, which is accessible by the CPU, we do
         * want the linear mapping and thus use arch_add_memory().
         */
        if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
                ret = arch_add_memory(nid, align_start, align_size, NULL,
                                      false);
        else
                ret = add_pages(nid, align_start >> PAGE_SHIFT,
                                align_size >> PAGE_SHIFT, NULL, false);
        if (ret) {
                mem_hotplug_done();
                goto error_add_memory;
        }
        move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                               align_start >> PAGE_SHIFT,
                               align_size >> PAGE_SHIFT, NULL);
        mem_hotplug_done();

        for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
                struct page *page = pfn_to_page(pfn);

                page->pgmap = &devmem->pagemap;
        }
        return 0;

error_add_memory:
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
error:
        hmm_devmem_radix_release(devmem->resource);
        return ret;
}
static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
{
        struct hmm_devmem *devmem = data;

        return devmem->resource == match_data;
}

static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
{
        devres_release(devmem->device, &hmm_devmem_release,
                       &hmm_devmem_match, devmem->resource);
}
/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 *
 * An illustrative registration sketch follows below.
 */
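/*
 * Illustrative sketch (not from the original kernel documentation):
 * registering device memory at probe time. The callbacks are placeholders;
 * see struct hmm_devmem_ops in include/linux/hmm.h for the exact prototypes.
 * my_devmem_fault(), my_devmem_free() and the SZ_64M size are hypothetical.
 *
 *   static void my_devmem_free(struct hmm_devmem *devmem, struct page *page)
 *   {
 *       // Return the backing device page to the driver's allocator.
 *   }
 *
 *   static int my_devmem_fault(struct hmm_devmem *devmem,
 *                              struct vm_area_struct *vma,
 *                              unsigned long addr,
 *                              const struct page *page,
 *                              unsigned int flags,
 *                              pmd_t *pmdp)
 *   {
 *       // Migrate the page back to system memory so the CPU can access it.
 *       return VM_FAULT_SIGBUS; // placeholder
 *   }
 *
 *   static const struct hmm_devmem_ops my_devmem_ops = {
 *       .free  = my_devmem_free,
 *       .fault = my_devmem_fault,
 *   };
 *
 *   devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, SZ_64M);
 *   if (IS_ERR(devmem))
 *       return PTR_ERR(devmem);
 */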
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
                                  struct device *device,
                                  unsigned long size)
{
        struct hmm_devmem *devmem;
        resource_size_t addr;
        int ret;

        static_branch_enable(&device_private_key);

        devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
                                   GFP_KERNEL, dev_to_node(device));
        if (!devmem)
                return ERR_PTR(-ENOMEM);

        init_completion(&devmem->completion);
        devmem->pfn_first = -1UL;
        devmem->pfn_last = -1UL;
        devmem->resource = NULL;
        devmem->device = device;
        devmem->ops = ops;

        ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
                              0, GFP_KERNEL);
        if (ret)
                goto error_percpu_ref;

        ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
        if (ret)
                goto error_devm_add_action;

        size = ALIGN(size, PA_SECTION_SIZE);
        addr = min((unsigned long)iomem_resource.end,
                   (1UL << MAX_PHYSMEM_BITS) - 1);
        addr = addr - size + 1UL;

        /*
         * FIXME add a new helper to quickly walk resource tree and find free
         * range
         *
         * FIXME what about ioport_resource resource ?
         */
        for (; addr > size && addr >= iomem_resource.start; addr -= size) {
                ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
                if (ret != REGION_DISJOINT)
                        continue;

                devmem->resource = devm_request_mem_region(device, addr, size,
                                                           dev_name(device));
                if (!devmem->resource) {
                        ret = -ENOMEM;
                        goto error_no_resource;
                }
                break;
        }
        if (!devmem->resource) {
                ret = -ERANGE;
                goto error_no_resource;
        }

        devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
        devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
        devmem->pfn_last = devmem->pfn_first +
                           (resource_size(devmem->resource) >> PAGE_SHIFT);

        ret = hmm_devmem_pages_create(devmem);
        if (ret)
                goto error_pages;

        devres_add(device, devmem);

        ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
        if (ret) {
                hmm_devmem_remove(devmem);
                return ERR_PTR(ret);
        }

        return devmem;

error_pages:
        devm_release_mem_region(device, devmem->resource->start,
                                resource_size(devmem->resource));
error_no_resource:
error_devm_add_action:
        hmm_devmem_ref_kill(&devmem->ref);
        hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
        devres_free(devmem);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
                                           struct device *device,
                                           struct resource *res)
{
        struct hmm_devmem *devmem;
        int ret;

        if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
                return ERR_PTR(-EINVAL);

        static_branch_enable(&device_private_key);

        devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
                                   GFP_KERNEL, dev_to_node(device));
        if (!devmem)
                return ERR_PTR(-ENOMEM);

        init_completion(&devmem->completion);
        devmem->pfn_first = -1UL;
        devmem->pfn_last = -1UL;
        devmem->resource = res;
        devmem->device = device;
        devmem->ops = ops;

        ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
                              0, GFP_KERNEL);
        if (ret)
                goto error_percpu_ref;

        ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
        if (ret)
                goto error_devm_add_action;

        devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
        devmem->pfn_last = devmem->pfn_first +
                           (resource_size(devmem->resource) >> PAGE_SHIFT);

        ret = hmm_devmem_pages_create(devmem);
        if (ret)
                goto error_devm_add_action;

        devres_add(device, devmem);

        ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
        if (ret) {
                hmm_devmem_remove(devmem);
                return ERR_PTR(ret);
        }

        return devmem;

error_devm_add_action:
        hmm_devmem_ref_kill(&devmem->ref);
        hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
        devres_free(devmem);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add_resource);
/*
 * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
 *
 * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
 *
 * This will hot-unplug memory that was hotplugged by hmm_devmem_add on behalf
 * of the device driver. It will free struct page and remove the resource that
 * reserved the physical address range for this device memory.
 */
void hmm_devmem_remove(struct hmm_devmem *devmem)
{
        resource_size_t start, size;
        struct device *device;
        bool cdm = false;

        if (!devmem)
                return;

        device = devmem->device;
        start = devmem->resource->start;
        size = resource_size(devmem->resource);

        cdm = devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY;
        hmm_devmem_ref_kill(&devmem->ref);
        hmm_devmem_ref_exit(&devmem->ref);
        hmm_devmem_pages_remove(devmem);

        if (!cdm)
                devm_release_mem_region(device, start, size);
}
EXPORT_SYMBOL(hmm_devmem_remove);
/*
 * A device driver that wants to handle memory for multiple devices through a
 * single fake device can use hmm_device to do so. This is purely a helper and
 * it is not needed to make use of any HMM functionality.
 *
 * An illustrative sketch follows below.
 */
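/*
 * Illustrative sketch (not from the original kernel documentation): a driver
 * can create one fake device per chunk of device memory, hand it to
 * hmm_devmem_add(), and drop the reference on teardown. my_devmem_ops and
 * my_drvdata are hypothetical.
 *
 *   struct hmm_device *hdev;
 *   struct hmm_devmem *devmem;
 *
 *   hdev = hmm_device_new(my_drvdata);
 *   if (IS_ERR(hdev))
 *       return PTR_ERR(hdev);
 *   devmem = hmm_devmem_add(&my_devmem_ops, &hdev->device, SZ_64M);
 *   ...
 *   hmm_device_put(hdev);
 */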
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;
static void hmm_device_release(struct device *device)
{
        struct hmm_device *hmm_device;

        hmm_device = container_of(device, struct hmm_device, device);
        spin_lock(&hmm_device_lock);
        clear_bit(hmm_device->minor, hmm_device_mask);
        spin_unlock(&hmm_device_lock);

        kfree(hmm_device);
}
struct hmm_device *hmm_device_new(void *drvdata)
{
        struct hmm_device *hmm_device;

        hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
        if (!hmm_device)
                return ERR_PTR(-ENOMEM);

        spin_lock(&hmm_device_lock);
        hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
        if (hmm_device->minor >= HMM_DEVICE_MAX) {
                spin_unlock(&hmm_device_lock);
                kfree(hmm_device);
                return ERR_PTR(-EBUSY);
        }
        set_bit(hmm_device->minor, hmm_device_mask);
        spin_unlock(&hmm_device_lock);

        dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
        hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
                                        hmm_device->minor);
        hmm_device->device.release = hmm_device_release;
        dev_set_drvdata(&hmm_device->device, drvdata);
        hmm_device->device.class = hmm_device_class;
        device_initialize(&hmm_device->device);

        return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);
void hmm_device_put(struct hmm_device *hmm_device)
{
        put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
static int __init hmm_init(void)
{
        int ret;

        ret = alloc_chrdev_region(&hmm_device_devt, 0,
                                  HMM_DEVICE_MAX,
                                  "hmm_device");
        if (ret)
                return ret;

        hmm_device_class = class_create(THIS_MODULE, "hmm_device");
        if (IS_ERR(hmm_device_class)) {
                unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
                return PTR_ERR(hmm_device_class);
        }

        return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */