// SPDX-License-Identifier: GPL-2.0
/*
 * This is a module to test the HMM (Heterogeneous Memory Management)
 * mirror and zone device private memory migration APIs of the kernel.
 * Userspace programs can register with the driver to mirror their own address
 * space and can use the device to read/write any valid virtual address.
 */

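/*
 * Rough usage sketch (illustration only, not part of this module): a
 * userspace test can mirror one of its own pages and read it back through
 * the device. The device node name below follows the hmm_dmirror%d
 * convention used by the HMM selftests and is an assumption here; the
 * command structure and HMM_DMIRROR_READ come from test_hmm_uapi.h.
 *
 *	struct hmm_dmirror_cmd cmd = { 0 };
 *	char buf[4096] __attribute__((aligned(4096))) = "hello";
 *	char out[4096];
 *	int fd = open("/dev/hmm_dmirror0", O_RDWR);
 *
 *	cmd.addr = (uintptr_t)buf;	// page-aligned address to mirror
 *	cmd.ptr = (uintptr_t)out;	// where the driver copies the data
 *	cmd.npages = 1;
 *	if (ioctl(fd, HMM_DMIRROR_READ, &cmd) == 0)
 *		printf("mirrored %llu pages\n", cmd.cpages);
 */
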
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/hmm.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>

#include "test_hmm_uapi.h"

#define DMIRROR_NDEVICES 2
#define DMIRROR_RANGE_FAULT_TIMEOUT 1000
#define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U)
#define DEVMEM_CHUNKS_RESERVE 16

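/* With 4 KiB pages, each 256 MB chunk provides 65536 simulated device pages. */
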
static const struct dev_pagemap_ops dmirror_devmem_ops;
static const struct mmu_interval_notifier_ops dmirror_min_ops;
static dev_t dmirror_dev;

struct dmirror_device;

struct dmirror_bounce {
	void			*ptr;
	unsigned long		size;
	unsigned long		addr;
	unsigned long		cpages;
};

#define DPT_XA_TAG_ATOMIC 1UL
#define DPT_XA_TAG_WRITE 3UL

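/*
 * The mirror's page table entries are tagged pointers: the low bits carry
 * the access mode alongside the mirrored page. For example (illustrative
 * sketch only):
 *
 *	void *entry = xa_tag_pointer(page, DPT_XA_TAG_WRITE);
 *	if (xa_pointer_tag(entry) == DPT_XA_TAG_WRITE)
 *		page = xa_untag_pointer(entry);	/\* writable mirror entry *\/
 */
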
/*
 * Data structure to track address ranges and register for mmu interval
 * notifier updates.
 */
struct dmirror_interval {
	struct mmu_interval_notifier	notifier;
	struct dmirror			*dmirror;
};

/*
 * Data attached to the open device file.
 * Note that it might be shared after a fork().
 */
struct dmirror {
	struct dmirror_device		*mdevice;
	struct xarray			pt;
	struct mmu_interval_notifier	notifier;
	struct mutex			mutex;
};

/*
 * ZONE_DEVICE pages for migration and simulating device memory.
 */
struct dmirror_chunk {
	struct dev_pagemap	pagemap;
	struct dmirror_device	*mdevice;
};

struct dmirror_device {
	struct cdev		cdevice;
	struct hmm_devmem	*devmem;
	unsigned int		zone_device_type;

	unsigned int		devmem_capacity;
	unsigned int		devmem_count;
	struct dmirror_chunk	**devmem_chunks;
	struct mutex		devmem_lock;	/* protects the above */

	struct page		*free_pages;
	spinlock_t		lock;		/* protects the above */
};

static struct dmirror_device dmirror_devices[DMIRROR_NDEVICES];

static int dmirror_bounce_init(struct dmirror_bounce *bounce,
	bounce->ptr = vmalloc(size);

static void dmirror_bounce_fini(struct dmirror_bounce *bounce)

static int dmirror_fops_open(struct inode *inode, struct file *filp)
	struct cdev *cdev = inode->i_cdev;
	struct dmirror *dmirror;

	/* Mirror this process address space */
	dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL);
	dmirror->mdevice = container_of(cdev, struct dmirror_device, cdevice);
	mutex_init(&dmirror->mutex);
	xa_init(&dmirror->pt);

	ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm,
				0, ULONG_MAX & PAGE_MASK, &dmirror_min_ops);

	filp->private_data = dmirror;

static int dmirror_fops_release(struct inode *inode, struct file *filp)
	struct dmirror *dmirror = filp->private_data;

	mmu_interval_notifier_remove(&dmirror->notifier);
	xa_destroy(&dmirror->pt);

static struct dmirror_device *dmirror_page_to_device(struct page *page)
	return container_of(page->pgmap, struct dmirror_chunk,

static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
	unsigned long *pfns = range->hmm_pfns;

	for (pfn = (range->start >> PAGE_SHIFT);
	     pfn < (range->end >> PAGE_SHIFT);
		/*
		 * Since we asked for hmm_range_fault() to populate pages,
		 * it shouldn't return an error entry on success.
		 */
		WARN_ON(*pfns & HMM_PFN_ERROR);
		WARN_ON(!(*pfns & HMM_PFN_VALID));

		page = hmm_pfn_to_page(*pfns);
		if (*pfns & HMM_PFN_WRITE)
			entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
		else if (WARN_ON(range->default_flags & HMM_PFN_WRITE))
		entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
		if (xa_is_err(entry))
			return xa_err(entry);

static void dmirror_do_update(struct dmirror *dmirror, unsigned long start,
	/*
	 * The XArray doesn't hold references to pages since it relies on
	 * the mmu notifier to clear page pointers when they become stale.
	 * Therefore, it is OK to just clear the entry.
	 */
	xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT,
		xa_erase(&dmirror->pt, pfn);

static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
	struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
	/*
	 * Ignore invalidation callbacks for device private pages since
	 * the invalidation is handled as part of the migration process.
	 */
	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->owner == dmirror->mdevice)

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&dmirror->mutex);
	else if (!mutex_trylock(&dmirror->mutex))

	mmu_interval_set_seq(mni, cur_seq);
	dmirror_do_update(dmirror, range->start, range->end);

	mutex_unlock(&dmirror->mutex);

static const struct mmu_interval_notifier_ops dmirror_min_ops = {
	.invalidate = dmirror_interval_invalidate,

static int dmirror_range_fault(struct dmirror *dmirror,
			       struct hmm_range *range)
	struct mm_struct *mm = dmirror->notifier.mm;
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

	if (time_after(jiffies, timeout)) {

	range->notifier_seq = mmu_interval_read_begin(range->notifier);
	ret = hmm_range_fault(range);
	mmap_read_unlock(mm);

	mutex_lock(&dmirror->mutex);
	if (mmu_interval_read_retry(range->notifier,
				    range->notifier_seq)) {
		mutex_unlock(&dmirror->mutex);

	ret = dmirror_do_fault(dmirror, range);

	mutex_unlock(&dmirror->mutex);

static int dmirror_fault(struct dmirror *dmirror, unsigned long start,
			 unsigned long end, bool write)
	struct mm_struct *mm = dmirror->notifier.mm;
	unsigned long pfns[64];
	struct hmm_range range = {
		.notifier = &dmirror->notifier,
			HMM_PFN_REQ_FAULT | (write ? HMM_PFN_REQ_WRITE : 0),
		.dev_private_owner = dmirror->mdevice,

	/* Since the mm is for the mirrored process, get a reference first. */
	if (!mmget_not_zero(mm))

	for (addr = start; addr < end; addr = range.end) {
		range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
		ret = dmirror_range_fault(dmirror, &range);

static int dmirror_do_read(struct dmirror *dmirror, unsigned long start,
			   unsigned long end, struct dmirror_bounce *bounce)
	ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);

	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
		entry = xa_load(&dmirror->pt, pfn);
		page = xa_untag_pointer(entry);
		memcpy(ptr, tmp, PAGE_SIZE);

static int dmirror_read(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
	struct dmirror_bounce bounce;
	unsigned long start, end;
	unsigned long size = cmd->npages << PAGE_SHIFT;

	ret = dmirror_bounce_init(&bounce, start, size);

	mutex_lock(&dmirror->mutex);
	ret = dmirror_do_read(dmirror, start, end, &bounce);
	mutex_unlock(&dmirror->mutex);

	start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
	ret = dmirror_fault(dmirror, start, end, false);

	if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
	cmd->cpages = bounce.cpages;
	dmirror_bounce_fini(&bounce);

static int dmirror_do_write(struct dmirror *dmirror, unsigned long start,
			    unsigned long end, struct dmirror_bounce *bounce)
	ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);

	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
		entry = xa_load(&dmirror->pt, pfn);
		page = xa_untag_pointer(entry);
		if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE)
		memcpy(tmp, ptr, PAGE_SIZE);

static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
	struct dmirror_bounce bounce;
	unsigned long start, end;
	unsigned long size = cmd->npages << PAGE_SHIFT;

	ret = dmirror_bounce_init(&bounce, start, size);
	if (copy_from_user(bounce.ptr, u64_to_user_ptr(cmd->ptr),

	mutex_lock(&dmirror->mutex);
	ret = dmirror_do_write(dmirror, start, end, &bounce);
	mutex_unlock(&dmirror->mutex);

	start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
	ret = dmirror_fault(dmirror, start, end, true);

	cmd->cpages = bounce.cpages;
	dmirror_bounce_fini(&bounce);

static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
	struct dmirror_chunk *devmem;
	struct resource *res;
	unsigned long pfn_first;
	unsigned long pfn_last;

	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);

	res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.range.start = res->start;
	devmem->pagemap.range.end = res->end;
	devmem->pagemap.nr_range = 1;
	devmem->pagemap.ops = &dmirror_devmem_ops;
	devmem->pagemap.owner = mdevice;

	mutex_lock(&mdevice->devmem_lock);

	if (mdevice->devmem_count == mdevice->devmem_capacity) {
		struct dmirror_chunk **new_chunks;
		unsigned int new_capacity;

		new_capacity = mdevice->devmem_capacity +
				DEVMEM_CHUNKS_RESERVE;
		new_chunks = krealloc(mdevice->devmem_chunks,
				sizeof(new_chunks[0]) * new_capacity,
		mdevice->devmem_capacity = new_capacity;
		mdevice->devmem_chunks = new_chunks;

	ptr = memremap_pages(&devmem->pagemap, numa_node_id());

	devmem->mdevice = mdevice;
	pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
	pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
	mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;

	mutex_unlock(&mdevice->devmem_lock);

	pr_info("added new %u MB chunk (total %u chunks, %u MB) PFNs [0x%lx 0x%lx)\n",
		DEVMEM_CHUNK_SIZE / (1024 * 1024),
		mdevice->devmem_count,
		mdevice->devmem_count * (DEVMEM_CHUNK_SIZE / (1024 * 1024)),
		pfn_first, pfn_last);

	spin_lock(&mdevice->lock);
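	/*
	 * The per-device free list is threaded through zone_device_data:
	 * each free ZONE_DEVICE page points at the next one, with
	 * mdevice->free_pages as the list head.
	 */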
	for (pfn = pfn_first; pfn < pfn_last; pfn++) {
		struct page *page = pfn_to_page(pfn);

		page->zone_device_data = mdevice->free_pages;
		mdevice->free_pages = page;
	*ppage = mdevice->free_pages;
	mdevice->free_pages = (*ppage)->zone_device_data;
	spin_unlock(&mdevice->lock);

	mutex_unlock(&mdevice->devmem_lock);
	release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));

static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
	struct page *dpage = NULL;

	/*
	 * This is a fake device so we alloc real system memory to store
	 * our device memory.
	 */
	rpage = alloc_page(GFP_HIGHUSER);

	spin_lock(&mdevice->lock);
	if (mdevice->free_pages) {
		dpage = mdevice->free_pages;
		mdevice->free_pages = dpage->zone_device_data;
		spin_unlock(&mdevice->lock);
		spin_unlock(&mdevice->lock);
		if (!dmirror_allocate_chunk(mdevice, &dpage))

	dpage->zone_device_data = rpage;

static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
					   struct dmirror *dmirror)
	struct dmirror_device *mdevice = dmirror->mdevice;
	const unsigned long *src = args->src;
	unsigned long *dst = args->dst;

	for (addr = args->start; addr < args->end; addr += PAGE_SIZE,
		if (!(*src & MIGRATE_PFN_MIGRATE))
			continue;

		/*
		 * Note that spage might be NULL which is OK since it is an
		 * unallocated pte_none() or read-only zero page.
		 */
		spage = migrate_pfn_to_page(*src);

		dpage = dmirror_devmem_alloc_page(mdevice);

		rpage = dpage->zone_device_data;
		if (spage)
			copy_highpage(rpage, spage);
		else
			clear_highpage(rpage);

		/*
		 * Normally, a device would use the page->zone_device_data to
		 * point to the mirror but here we use it to hold the page for
		 * the simulated device memory and that page holds the pointer
		 * to the mirror.
		 */
		rpage->zone_device_data = dmirror;
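		/*
		 * The resulting chain is:
		 *   dpage (ZONE_DEVICE private) ->zone_device_data-> rpage
		 *   (system page backing the data) ->zone_device_data-> dmirror.
		 * dmirror_devmem_fault() walks this chain back the other way.
		 */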
		*dst = migrate_pfn(page_to_pfn(dpage));
		if ((*src & MIGRATE_PFN_WRITE) ||
		    (!spage && args->vma->vm_flags & VM_WRITE))
			*dst |= MIGRATE_PFN_WRITE;

static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
		entry = xa_load(&dmirror->pt, pfn);
		if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)

static int dmirror_atomic_map(unsigned long start, unsigned long end,
			      struct page **pages, struct dmirror *dmirror)
	unsigned long pfn, mapped = 0;

	/* Map the migrated pages into the device's page tables. */
	mutex_lock(&dmirror->mutex);

	for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
		entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
		entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
		if (xa_is_err(entry)) {
			mutex_unlock(&dmirror->mutex);
			return xa_err(entry);

	mutex_unlock(&dmirror->mutex);

static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
					    struct dmirror *dmirror)
	unsigned long start = args->start;
	unsigned long end = args->end;
	const unsigned long *src = args->src;
	const unsigned long *dst = args->dst;

	/* Map the migrated pages into the device's page tables. */
	mutex_lock(&dmirror->mutex);

	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++,
		if (!(*src & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = migrate_pfn_to_page(*dst);

		/*
		 * Store the page that holds the data so the page table
		 * doesn't have to deal with ZONE_DEVICE private pages.
		 */
		entry = dpage->zone_device_data;
		if (*dst & MIGRATE_PFN_WRITE)
			entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
		entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
		if (xa_is_err(entry)) {
			mutex_unlock(&dmirror->mutex);
			return xa_err(entry);

	mutex_unlock(&dmirror->mutex);

static int dmirror_exclusive(struct dmirror *dmirror,
			     struct hmm_dmirror_cmd *cmd)
	unsigned long start, end, addr;
	unsigned long size = cmd->npages << PAGE_SHIFT;
	struct mm_struct *mm = dmirror->notifier.mm;
	struct page *pages[64];
	struct dmirror_bounce bounce;

	/* Since the mm is for the mirrored process, get a reference first. */
	if (!mmget_not_zero(mm))

	for (addr = start; addr < end; addr = next) {
		unsigned long mapped = 0;

		if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
			next = end;
		else
			next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);

		ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
		/*
		 * Do dmirror_atomic_map() iff all pages are marked for
		 * exclusive access to avoid accessing uninitialized
		 * fields of pages.
		 */
		if (ret == (next - addr) >> PAGE_SHIFT)
			mapped = dmirror_atomic_map(addr, next, pages, dmirror);
		for (i = 0; i < ret; i++) {
			unlock_page(pages[i]);

		if (addr + (mapped << PAGE_SHIFT) < next) {
			mmap_read_unlock(mm);

	mmap_read_unlock(mm);

	/* Return the migrated data for verification. */
	ret = dmirror_bounce_init(&bounce, start, size);

	mutex_lock(&dmirror->mutex);
	ret = dmirror_do_read(dmirror, start, end, &bounce);
	mutex_unlock(&dmirror->mutex);

	if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
	cmd->cpages = bounce.cpages;
	dmirror_bounce_fini(&bounce);

static int dmirror_migrate(struct dmirror *dmirror,
			   struct hmm_dmirror_cmd *cmd)
	unsigned long start, end, addr;
	unsigned long size = cmd->npages << PAGE_SHIFT;
	struct mm_struct *mm = dmirror->notifier.mm;
	struct vm_area_struct *vma;
	unsigned long src_pfns[64];
	unsigned long dst_pfns[64];
	struct dmirror_bounce bounce;
	struct migrate_vma args;

	/* Since the mm is for the mirrored process, get a reference first. */
	if (!mmget_not_zero(mm))

	for (addr = start; addr < end; addr = next) {
		vma = vma_lookup(mm, addr);
		if (!vma || !(vma->vm_flags & VM_READ)) {
		next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
		if (next > vma->vm_end)
			next = vma->vm_end;

		args.pgmap_owner = dmirror->mdevice;
		args.flags = MIGRATE_VMA_SELECT_SYSTEM;
		ret = migrate_vma_setup(&args);

		dmirror_migrate_alloc_and_copy(&args, dmirror);
		migrate_vma_pages(&args);
		dmirror_migrate_finalize_and_map(&args, dmirror);
		migrate_vma_finalize(&args);

	mmap_read_unlock(mm);

	/* Return the migrated data for verification. */
	ret = dmirror_bounce_init(&bounce, start, size);

	mutex_lock(&dmirror->mutex);
	ret = dmirror_do_read(dmirror, start, end, &bounce);
	mutex_unlock(&dmirror->mutex);

	if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
	cmd->cpages = bounce.cpages;
	dmirror_bounce_fini(&bounce);

	mmap_read_unlock(mm);

static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
			    unsigned char *perm, unsigned long entry)
	if (entry & HMM_PFN_ERROR) {
		*perm = HMM_DMIRROR_PROT_ERROR;
	if (!(entry & HMM_PFN_VALID)) {
		*perm = HMM_DMIRROR_PROT_NONE;

	page = hmm_pfn_to_page(entry);
	if (is_device_private_page(page)) {
		/* Is the page migrated to this device or some other? */
		if (dmirror->mdevice == dmirror_page_to_device(page))
			*perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL;
		else
			*perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE;
	} else if (is_zero_pfn(page_to_pfn(page)))
		*perm = HMM_DMIRROR_PROT_ZERO;
	else
		*perm = HMM_DMIRROR_PROT_NONE;
	if (entry & HMM_PFN_WRITE)
		*perm |= HMM_DMIRROR_PROT_WRITE;
	else
		*perm |= HMM_DMIRROR_PROT_READ;
	if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PMD_SHIFT)
		*perm |= HMM_DMIRROR_PROT_PMD;
	else if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PUD_SHIFT)
		*perm |= HMM_DMIRROR_PROT_PUD;

static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
	struct dmirror_interval *dmi =
		container_of(mni, struct dmirror_interval, notifier);
	struct dmirror *dmirror = dmi->dmirror;

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&dmirror->mutex);
	else if (!mutex_trylock(&dmirror->mutex))
	/*
	 * Snapshots only need to set the sequence number since any
	 * invalidation in the interval invalidates the whole snapshot.
	 */
	mmu_interval_set_seq(mni, cur_seq);

	mutex_unlock(&dmirror->mutex);

static const struct mmu_interval_notifier_ops dmirror_mrn_ops = {
	.invalidate = dmirror_snapshot_invalidate,

static int dmirror_range_snapshot(struct dmirror *dmirror,
				  struct hmm_range *range,
	struct mm_struct *mm = dmirror->notifier.mm;
	struct dmirror_interval notifier;
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

	notifier.dmirror = dmirror;
	range->notifier = &notifier.notifier;

	ret = mmu_interval_notifier_insert(range->notifier, mm,
			range->start, range->end - range->start,

	if (time_after(jiffies, timeout)) {

	range->notifier_seq = mmu_interval_read_begin(range->notifier);

	ret = hmm_range_fault(range);
	mmap_read_unlock(mm);

	mutex_lock(&dmirror->mutex);
	if (mmu_interval_read_retry(range->notifier,
				    range->notifier_seq)) {
		mutex_unlock(&dmirror->mutex);

	n = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < n; i++)
		dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]);

	mutex_unlock(&dmirror->mutex);

	mmu_interval_notifier_remove(range->notifier);

static int dmirror_snapshot(struct dmirror *dmirror,
			    struct hmm_dmirror_cmd *cmd)
	struct mm_struct *mm = dmirror->notifier.mm;
	unsigned long start, end;
	unsigned long size = cmd->npages << PAGE_SHIFT;
	unsigned long pfns[64];
	unsigned char perm[64];
	struct hmm_range range = {
		.dev_private_owner = dmirror->mdevice,

	/* Since the mm is for the mirrored process, get a reference first. */
	if (!mmget_not_zero(mm))
	/*
	 * Register a temporary notifier to detect invalidations even if it
	 * overlaps with other mmu_interval_notifiers.
	 */
	uptr = u64_to_user_ptr(cmd->ptr);
	for (addr = start; addr < end; addr = next) {
		next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);

		ret = dmirror_range_snapshot(dmirror, &range, perm);

		n = (range.end - range.start) >> PAGE_SHIFT;
		if (copy_to_user(uptr, perm, n)) {

static long dmirror_fops_unlocked_ioctl(struct file *filp,
					unsigned int command,
	void __user *uarg = (void __user *)arg;
	struct hmm_dmirror_cmd cmd;
	struct dmirror *dmirror;

	dmirror = filp->private_data;

	if (copy_from_user(&cmd, uarg, sizeof(cmd)))
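	/* Reject unaligned addresses and ranges whose size is zero or would overflow. */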
	if (cmd.addr & ~PAGE_MASK)
	if (cmd.addr >= (cmd.addr + (cmd.npages << PAGE_SHIFT)))

	case HMM_DMIRROR_READ:
		ret = dmirror_read(dmirror, &cmd);

	case HMM_DMIRROR_WRITE:
		ret = dmirror_write(dmirror, &cmd);

	case HMM_DMIRROR_MIGRATE:
		ret = dmirror_migrate(dmirror, &cmd);

	case HMM_DMIRROR_EXCLUSIVE:
		ret = dmirror_exclusive(dmirror, &cmd);

	case HMM_DMIRROR_CHECK_EXCLUSIVE:
		ret = dmirror_check_atomic(dmirror, cmd.addr,
					   cmd.addr + (cmd.npages << PAGE_SHIFT));

	case HMM_DMIRROR_SNAPSHOT:
		ret = dmirror_snapshot(dmirror, &cmd);

	if (copy_to_user(uarg, &cmd, sizeof(cmd)))

static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma)
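	/* Back every page of the new mapping with a freshly allocated, zeroed page. */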
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		ret = vm_insert_page(vma, addr, page);

static const struct file_operations dmirror_fops = {
	.open = dmirror_fops_open,
	.release = dmirror_fops_release,
	.mmap = dmirror_fops_mmap,
	.unlocked_ioctl = dmirror_fops_unlocked_ioctl,
	.llseek = default_llseek,
	.owner = THIS_MODULE,

static void dmirror_devmem_free(struct page *page)
	struct page *rpage = page->zone_device_data;
	struct dmirror_device *mdevice;

	mdevice = dmirror_page_to_device(page);

	spin_lock(&mdevice->lock);
	page->zone_device_data = mdevice->free_pages;
	mdevice->free_pages = page;
	spin_unlock(&mdevice->lock);

static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
						      struct dmirror *dmirror)
	const unsigned long *src = args->src;
	unsigned long *dst = args->dst;
	unsigned long start = args->start;
	unsigned long end = args->end;

	for (addr = start; addr < end; addr += PAGE_SIZE,
		struct page *dpage, *spage;

		spage = migrate_pfn_to_page(*src);
		if (!spage || !(*src & MIGRATE_PFN_MIGRATE))
			continue;
		spage = spage->zone_device_data;

		dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);

		xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
		copy_highpage(dpage, spage);
		*dst = migrate_pfn(page_to_pfn(dpage));
		if (*src & MIGRATE_PFN_WRITE)
			*dst |= MIGRATE_PFN_WRITE;

static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
	struct migrate_vma args;
	unsigned long src_pfns;
	unsigned long dst_pfns;
	struct dmirror *dmirror;

	/*
	 * Normally, a device would use the page->zone_device_data to point to
	 * the mirror but here we use it to hold the page for the simulated
	 * device memory and that page holds the pointer to the mirror.
	 */
	rpage = vmf->page->zone_device_data;
	dmirror = rpage->zone_device_data;

	/* FIXME demonstrate how we can adjust migrate range */
	args.vma = vmf->vma;
	args.start = vmf->address;
	args.end = args.start + PAGE_SIZE;
	args.src = &src_pfns;
	args.dst = &dst_pfns;
	args.pgmap_owner = dmirror->mdevice;
	args.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;

	ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);

	migrate_vma_pages(&args);
	/*
	 * No device finalize step is needed since
	 * dmirror_devmem_fault_alloc_and_copy() will have already
	 * invalidated the device page table.
	 */
	migrate_vma_finalize(&args);

static const struct dev_pagemap_ops dmirror_devmem_ops = {
	.page_free = dmirror_devmem_free,
	.migrate_to_ram = dmirror_devmem_fault,

static int dmirror_device_init(struct dmirror_device *mdevice, int id)
	dev = MKDEV(MAJOR(dmirror_dev), id);
	mutex_init(&mdevice->devmem_lock);
	spin_lock_init(&mdevice->lock);

	cdev_init(&mdevice->cdevice, &dmirror_fops);
	mdevice->cdevice.owner = THIS_MODULE;
	ret = cdev_add(&mdevice->cdevice, dev, 1);

	/* Build a list of free ZONE_DEVICE private struct pages */
	dmirror_allocate_chunk(mdevice, NULL);

static void dmirror_device_remove(struct dmirror_device *mdevice)
	if (mdevice->devmem_chunks) {
		for (i = 0; i < mdevice->devmem_count; i++) {
			struct dmirror_chunk *devmem =
				mdevice->devmem_chunks[i];

			memunmap_pages(&devmem->pagemap);
			release_mem_region(devmem->pagemap.range.start,
					   range_len(&devmem->pagemap.range));
		kfree(mdevice->devmem_chunks);

	cdev_del(&mdevice->cdevice);

static int __init hmm_dmirror_init(void)
	ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES,

	memset(dmirror_devices, 0, DMIRROR_NDEVICES * sizeof(dmirror_devices[0]));
	dmirror_devices[ndevices++].zone_device_type =
		HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
	dmirror_devices[ndevices++].zone_device_type =
		HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
	for (id = 0; id < ndevices; id++) {
		ret = dmirror_device_init(dmirror_devices + id, id);

	pr_info("HMM test module loaded. This is only for testing HMM.\n");

	dmirror_device_remove(dmirror_devices + id);
	unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);

static void __exit hmm_dmirror_exit(void)
	for (id = 0; id < DMIRROR_NDEVICES; id++)
		dmirror_device_remove(dmirror_devices + id);
	unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);

module_init(hmm_dmirror_init);
module_exit(hmm_dmirror_exit);
MODULE_LICENSE("GPL");