/* $NetBSD: nvmm.c,v 1.22.2.7 2020/08/29 17:00:28 martin Exp $ */

/*
 * Copyright (c) 2018-2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>

#include <sys/devfs.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/thread.h>

#include <dev/virtual/nvmm/nvmm_compat.h>
#include <dev/virtual/nvmm/nvmm.h>
#include <dev/virtual/nvmm/nvmm_internal.h>
#include <dev/virtual/nvmm/nvmm_ioctl.h>
MALLOC_DEFINE(M_NVMM, "nvmm", "NVMM data");

static struct nvmm_machine machines[NVMM_MAX_MACHINES];
static volatile unsigned int nmachines __cacheline_aligned;

static const struct nvmm_impl *nvmm_impl_list[] = {
#if defined(__x86_64__)
	&nvmm_x86_svm,	/* x86 AMD SVM */
	&nvmm_x86_vmx	/* x86 Intel VMX */
#endif
};

static const struct nvmm_impl *nvmm_impl = NULL;

static struct nvmm_owner root_owner;
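
/*
 * Ownership model: each open of /dev/nvmm is bound to an nvmm_owner, and
 * every machine created through that descriptor is tagged with it.
 * nvmm_machine_get() refuses callers whose owner does not match, with one
 * exception: root_owner, installed for privileged opens in nvmm_open(),
 * which may reach any machine.
 */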
/* -------------------------------------------------------------------------- */

static int
nvmm_machine_alloc(struct nvmm_machine **ret)
{
	struct nvmm_machine *mach;
	size_t i;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (mach->present) {
			rw_exit(&mach->lock);
			continue;
		}

		mach->present = true;
		mach->time = time_second;
		*ret = mach;
		atomic_inc_uint(&nmachines);
		return 0;
	}

	return ENOBUFS;
}

static void
nvmm_machine_free(struct nvmm_machine *mach)
{
	KASSERT(rw_write_held(&mach->lock));
	KASSERT(mach->present);
	mach->present = false;
	atomic_dec_uint(&nmachines);
}

static int
nvmm_machine_get(struct nvmm_owner *owner, nvmm_machid_t machid,
    struct nvmm_machine **ret, bool writer)
{
	struct nvmm_machine *mach;
	krw_t op = writer ? RW_WRITER : RW_READER;

	if (__predict_false(machid >= NVMM_MAX_MACHINES)) {
		return EINVAL;
	}
	mach = &machines[machid];

	rw_enter(&mach->lock, op);
	if (__predict_false(!mach->present)) {
		rw_exit(&mach->lock);
		return ENOENT;
	}
	if (__predict_false(mach->owner != owner && owner != &root_owner)) {
		rw_exit(&mach->lock);
		return EPERM;
	}
	*ret = mach;

	return 0;
}

static void
nvmm_machine_put(struct nvmm_machine *mach)
{
	rw_exit(&mach->lock);
}
/* -------------------------------------------------------------------------- */
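
/*
 * Locking discipline: mach->lock (an RW lock) is taken on every machine
 * operation; writer for operations that reshape the machine itself
 * (create, destroy, configure, host mappings), reader for per-VCPU
 * operations and guest-physical mappings, which then serialize on the
 * per-VCPU mutex below. The VCPU mutex is only ever taken with the
 * machine lock already held.
 */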
static int
nvmm_vcpu_alloc(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_cpu **ret)
{
	struct nvmm_cpu *vcpu;

	if (cpuid >= NVMM_MAX_VCPUS) {
		return EINVAL;
	}
	vcpu = &mach->cpus[cpuid];

	mutex_enter(&vcpu->lock);
	if (vcpu->present) {
		mutex_exit(&vcpu->lock);
		return EBUSY;
	}

	vcpu->present = true;
	vcpu->comm = NULL;
	vcpu->hcpu_last = -1;
	*ret = vcpu;
	return 0;
}

static void
nvmm_vcpu_free(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	KASSERT(mutex_owned(&vcpu->lock));
	vcpu->present = false;
	if (vcpu->comm != NULL) {
		uvm_deallocate(kernel_map, (vaddr_t)vcpu->comm, PAGE_SIZE);
	}
}

static int
nvmm_vcpu_get(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
    struct nvmm_cpu **ret)
{
	struct nvmm_cpu *vcpu;

	if (__predict_false(cpuid >= NVMM_MAX_VCPUS)) {
		return EINVAL;
	}
	vcpu = &mach->cpus[cpuid];

	mutex_enter(&vcpu->lock);
	if (__predict_false(!vcpu->present)) {
		mutex_exit(&vcpu->lock);
		return ENOENT;
	}
	*ret = vcpu;

	return 0;
}

static void
nvmm_vcpu_put(struct nvmm_cpu *vcpu)
{
	mutex_exit(&vcpu->lock);
}
/* -------------------------------------------------------------------------- */

static void
nvmm_kill_machines(struct nvmm_owner *owner)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	size_t i, j;
	int error;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		mach = &machines[i];

		rw_enter(&mach->lock, RW_WRITER);
		if (!mach->present || mach->owner != owner) {
			rw_exit(&mach->lock);
			continue;
		}

		/* Kill it. */
		for (j = 0; j < NVMM_MAX_VCPUS; j++) {
			error = nvmm_vcpu_get(mach, j, &vcpu);
			if (error)
				continue;
			(*nvmm_impl->vcpu_destroy)(mach, vcpu);
			nvmm_vcpu_free(mach, vcpu);
			nvmm_vcpu_put(vcpu);
			atomic_dec_uint(&mach->ncpus);
		}
		(*nvmm_impl->machine_destroy)(mach);
		uvmspace_free(mach->vm);

		/* Drop the kernel UOBJ refs. */
		for (j = 0; j < NVMM_MAX_HMAPPINGS; j++) {
			if (!mach->hmap[j].present)
				continue;
			uao_detach(mach->hmap[j].uobj);
		}

		nvmm_machine_free(mach);

		rw_exit(&mach->lock);
	}
}

/* -------------------------------------------------------------------------- */
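
/*
 * nvmm_kill_machines() above is the emergency teardown path: it runs from
 * nvmm_dtor() when an owner's device file goes away, and forcibly destroys
 * whatever machines that owner left behind, VCPUs, vmspace and host-mapping
 * uobj references included.
 */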
static int
nvmm_capability(struct nvmm_owner *owner, struct nvmm_ioc_capability *args)
{
	args->cap.version = NVMM_KERN_VERSION;
	args->cap.state_size = nvmm_impl->state_size;
	args->cap.max_machines = NVMM_MAX_MACHINES;
	args->cap.max_vcpus = NVMM_MAX_VCPUS;
	args->cap.max_ram = NVMM_MAX_RAM;

	(*nvmm_impl->capability)(&args->cap);

	return 0;
}
static int
nvmm_machine_create(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_create *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_alloc(&mach);
	if (error)
		return error;

	/* Curproc owns the machine. */
	mach->owner = owner;

	/* Zero out the host mappings. */
	memset(&mach->hmap, 0, sizeof(mach->hmap));

	/* Create the machine vmspace. */
	mach->gpa_begin = 0;
	mach->gpa_end = NVMM_MAX_RAM;
	mach->vm = uvmspace_alloc(0, mach->gpa_end - mach->gpa_begin, false);

	/* Create the comm uobj. */
	mach->commuobj = uao_create(NVMM_MAX_VCPUS * PAGE_SIZE, 0);

	(*nvmm_impl->machine_create)(mach);

	args->machid = mach->machid;
	nvmm_machine_put(mach);

	return 0;
}
static int
nvmm_machine_destroy(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;
	size_t i;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	for (i = 0; i < NVMM_MAX_VCPUS; i++) {
		error = nvmm_vcpu_get(mach, i, &vcpu);
		if (error)
			continue;

		(*nvmm_impl->vcpu_destroy)(mach, vcpu);
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		atomic_dec_uint(&mach->ncpus);
	}

	(*nvmm_impl->machine_destroy)(mach);

	/* Free the machine vmspace. */
	uvmspace_free(mach->vm);

	/* Drop the kernel UOBJ refs. */
	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		if (!mach->hmap[i].present)
			continue;
		uao_detach(mach->hmap[i].uobj);
	}

	nvmm_machine_free(mach);
	nvmm_machine_put(mach);

	return 0;
}
static int
nvmm_machine_configure(struct nvmm_owner *owner,
    struct nvmm_ioc_machine_configure *args)
{
	struct nvmm_machine *mach;
	size_t allocsz;
	uint64_t op;
	void *data;
	int error;

	op = NVMM_MACH_CONF_MD(args->op);
	if (__predict_false(op >= nvmm_impl->mach_conf_max)) {
		return EINVAL;
	}

	allocsz = nvmm_impl->mach_conf_sizes[op];
	data = kmem_alloc(allocsz, KM_SLEEP);

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error) {
		kmem_free(data, allocsz);
		return error;
	}

	error = copyin(args->conf, data, allocsz);
	if (error) {
		goto out;
	}

	error = (*nvmm_impl->machine_configure)(mach, op, data);

out:
	nvmm_machine_put(mach);
	kmem_free(data, allocsz);
	return error;
}
static int
nvmm_vcpu_create(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_create *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_alloc(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	/* Allocate the comm page. */
	uao_reference(mach->commuobj);
	error = uvm_map(kernel_map, (vaddr_t *)&vcpu->comm, PAGE_SIZE,
	    mach->commuobj, args->cpuid * PAGE_SIZE, 0, UVM_MAPFLAG(UVM_PROT_RW,
	    UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(mach->commuobj);
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}
	error = uvm_map_pageable(kernel_map, (vaddr_t)vcpu->comm,
	    (vaddr_t)vcpu->comm + PAGE_SIZE, false, 0);
	if (error) {
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}
	memset(vcpu->comm, 0, PAGE_SIZE);

	error = (*nvmm_impl->vcpu_create)(mach, vcpu);
	if (error) {
		nvmm_vcpu_free(mach, vcpu);
		nvmm_vcpu_put(vcpu);
		goto out;
	}

	nvmm_vcpu_put(vcpu);
	atomic_inc_uint(&mach->ncpus);

out:
	nvmm_machine_put(mach);
	return error;
}
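
/*
 * The comm page just created is the per-VCPU kernel<->emulator channel:
 * one wired page taken from the machine's commuobj at offset
 * cpuid * PAGE_SIZE. Userland can map the very same page through
 * nvmm_mmap_single() below, so VCPU state can be exchanged without a
 * copyin()/copyout() on each run.
 */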
static int
nvmm_vcpu_destroy(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_destroy *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_destroy)(mach, vcpu);
	nvmm_vcpu_free(mach, vcpu);
	nvmm_vcpu_put(vcpu);
	atomic_dec_uint(&mach->ncpus);

out:
	nvmm_machine_put(mach);
	return error;
}
static int
nvmm_vcpu_configure(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_configure *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	size_t allocsz;
	uint64_t op;
	void *data;
	int error;

	op = NVMM_VCPU_CONF_MD(args->op);
	if (__predict_false(op >= nvmm_impl->vcpu_conf_max))
		return EINVAL;

	allocsz = nvmm_impl->vcpu_conf_sizes[op];
	data = kmem_alloc(allocsz, KM_SLEEP);

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error) {
		kmem_free(data, allocsz);
		return error;
	}

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error) {
		nvmm_machine_put(mach);
		kmem_free(data, allocsz);
		return error;
	}

	error = copyin(args->conf, data, allocsz);
	if (error) {
		goto out;
	}

	error = (*nvmm_impl->vcpu_configure)(vcpu, op, data);

out:
	nvmm_vcpu_put(vcpu);
	nvmm_machine_put(mach);
	kmem_free(data, allocsz);
	return error;
}
static int
nvmm_vcpu_setstate(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_setstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_setstate)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

static int
nvmm_vcpu_getstate(struct nvmm_owner *owner,
    struct nvmm_ioc_vcpu_getstate *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	(*nvmm_impl->vcpu_getstate)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}
static int
nvmm_vcpu_inject(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_inject *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = (*nvmm_impl->vcpu_inject)(vcpu);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}
static int
nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_vcpu_exit *exit)
{
	struct vmspace *vm = mach->vm;
	int ret;

	while (1) {
		/* Got a signal? Or pending resched? Leave. */
		if (__predict_false(nvmm_return_needed())) {
			exit->reason = NVMM_VCPU_EXIT_NONE;
			return 0;
		}

		/* Run the VCPU. */
		ret = (*nvmm_impl->vcpu_run)(mach, vcpu, exit);
		if (__predict_false(ret != 0)) {
			return ret;
		}

		/* Process nested page faults. */
		if (__predict_true(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
			break;
		}
		if (exit->u.mem.gpa >= mach->gpa_end) {
			break;
		}
		if (uvm_fault(&vm->vm_map, exit->u.mem.gpa, exit->u.mem.prot)) {
			break;
		}
	}

	return 0;
}
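
/*
 * Note the split above: page faults on GPAs backed by the machine vmspace
 * are resolved in-kernel through uvm_fault(), so ordinary guest RAM
 * accesses never bounce out to the emulator. Only exits the kernel cannot
 * satisfy (memory beyond gpa_end or not faultable, I/O, pending signals
 * or resched) are handed back to userland.
 */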
static int
nvmm_vcpu_run(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_run *args)
{
	struct nvmm_machine *mach;
	struct nvmm_cpu *vcpu;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
	if (error)
		goto out;

	error = nvmm_do_vcpu_run(mach, vcpu, &args->exit);
	nvmm_vcpu_put(vcpu);

out:
	nvmm_machine_put(mach);
	return error;
}

/* -------------------------------------------------------------------------- */
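
/*
 * Host mappings (hmappings): guest RAM lives in anonymous UVM objects.
 * NVMM_IOC_HVA_MAP creates the uobj and maps it into the emulator's
 * address space at the given HVA; NVMM_IOC_GPA_MAP later inserts the same
 * uobj into the machine's vmspace at a GPA, so emulator and guest share
 * the pages directly.
 *
 * A minimal userland sketch (hypothetical values, error handling omitted),
 * assuming fd is open on /dev/nvmm and machid names a created machine:
 *
 *	struct nvmm_ioc_hva_map hmap = {
 *		.machid = machid,
 *		.hva = (uintptr_t)buf,	// must be page-aligned
 *		.size = size,		// multiple of PAGE_SIZE
 *	};
 *	ioctl(fd, NVMM_IOC_HVA_MAP, &hmap);
 *
 *	struct nvmm_ioc_gpa_map gmap = {
 *		.machid = machid,
 *		.hva = (uintptr_t)buf,
 *		.gpa = 0,
 *		.size = size,
 *		.prot = PROT_READ|PROT_WRITE|PROT_EXEC,
 *	};
 *	ioctl(fd, NVMM_IOC_GPA_MAP, &gmap);
 */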
static struct uvm_object *
nvmm_hmapping_getuobj(struct nvmm_machine *mach, uintptr_t hva, size_t size,
    size_t *off)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}
		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			*off = hva - hmapping->hva;
			return hmapping->uobj;
		}
	}

	return NULL;
}
static int
nvmm_hmapping_validate(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	if ((hva % PAGE_SIZE) != 0 || (size % PAGE_SIZE) != 0) {
		return EINVAL;
	}
	if (hva == 0) {
		return EINVAL;
	}

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			continue;
		}

		/* Collision with an existing hmapping, in any overlap form. */
		if (hva >= hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva >= hmapping->hva &&
		    hva < hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva + size > hmapping->hva &&
		    hva + size <= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
		if (hva <= hmapping->hva &&
		    hva + size >= hmapping->hva + hmapping->size) {
			return EEXIST;
		}
	}

	return 0;
}
static struct nvmm_hmapping *
nvmm_hmapping_alloc(struct nvmm_machine *mach)
{
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present) {
			hmapping->present = true;
			return hmapping;
		}
	}

	return NULL;
}
static int
nvmm_hmapping_free(struct nvmm_machine *mach, uintptr_t hva, size_t size)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_hmapping *hmapping;
	size_t i;

	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		hmapping = &mach->hmap[i];
		if (!hmapping->present || hmapping->hva != hva ||
		    hmapping->size != size) {
			continue;
		}

		uvm_unmap(&vmspace->vm_map, hmapping->hva,
		    hmapping->hva + hmapping->size);
		uao_detach(hmapping->uobj);

		hmapping->uobj = NULL;
		hmapping->present = false;

		return 0;
	}

	return ENOENT;
}
static int
nvmm_hva_map(struct nvmm_owner *owner, struct nvmm_ioc_hva_map *args)
{
	struct vmspace *vmspace = curproc->p_vmspace;
	struct nvmm_machine *mach;
	struct nvmm_hmapping *hmapping;
	vaddr_t uva;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_validate(mach, args->hva, args->size);
	if (error)
		goto out;

	hmapping = nvmm_hmapping_alloc(mach);
	if (hmapping == NULL) {
		error = ENOBUFS;
		goto out;
	}

	hmapping->hva = args->hva;
	hmapping->size = args->size;
	hmapping->uobj = uao_create(hmapping->size, 0);
	uva = hmapping->hva;

	/* Take a reference for the user. */
	uao_reference(hmapping->uobj);

	/* Map the uobj into the user address space, as pageable. */
	error = uvm_map(&vmspace->vm_map, &uva, hmapping->size, hmapping->uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(hmapping->uobj);
	}

out:
	nvmm_machine_put(mach);
	return error;
}
static int
nvmm_hva_unmap(struct nvmm_owner *owner, struct nvmm_ioc_hva_unmap *args)
{
	struct nvmm_machine *mach;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, true);
	if (error)
		return error;

	error = nvmm_hmapping_free(mach, args->hva, args->size);

	nvmm_machine_put(mach);
	return error;
}
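
/*
 * Reference accounting for an hmapping uobj: uao_create() in nvmm_hva_map()
 * yields the reference held by the machine, and uao_reference() there adds
 * one for the user mapping. The user side is dropped by the uvm_unmap()/
 * uao_detach() pair in nvmm_hmapping_free(); the machine's own reference
 * goes away in the destroy paths ("Drop the kernel UOBJ refs" above).
 */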
/* -------------------------------------------------------------------------- */
static int
nvmm_gpa_map(struct nvmm_owner *owner, struct nvmm_ioc_gpa_map *args)
{
	struct nvmm_machine *mach;
	struct uvm_object *uobj;
	gpaddr_t gpa;
	size_t off;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	if ((args->prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) != 0) {
		error = EINVAL;
		goto out;
	}

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 ||
	    (args->hva % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->hva == 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size > mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	uobj = nvmm_hmapping_getuobj(mach, args->hva, args->size, &off);
	if (uobj == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Take a reference for the machine. */
	uao_reference(uobj);

	/* Map the uobj into the machine address space, as pageable. */
	error = uvm_map(&mach->vm->vm_map, &gpa, args->size, uobj, off, 0,
	    UVM_MAPFLAG(args->prot, UVM_PROT_RWX, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED|UVM_FLAG_UNMAP));
	if (error) {
		uao_detach(uobj);
		goto out;
	}
	if (gpa != args->gpa) {
		uao_detach(uobj);
		printf("[!] uvm_map problem\n");
		error = EINVAL;
		goto out;
	}

out:
	nvmm_machine_put(mach);
	return error;
}
static int
nvmm_gpa_unmap(struct nvmm_owner *owner, struct nvmm_ioc_gpa_unmap *args)
{
	struct nvmm_machine *mach;
	gpaddr_t gpa;
	int error;

	error = nvmm_machine_get(owner, args->machid, &mach, false);
	if (error)
		return error;

	if ((args->gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa < mach->gpa_begin || args->gpa >= mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size <= args->gpa) {
		error = EINVAL;
		goto out;
	}
	if (args->gpa + args->size > mach->gpa_end) {
		error = EINVAL;
		goto out;
	}
	gpa = args->gpa;

	/* Unmap the memory from the machine. */
	uvm_unmap(&mach->vm->vm_map, gpa, gpa + args->size);

out:
	nvmm_machine_put(mach);
	return error;
}
/* -------------------------------------------------------------------------- */
static int
nvmm_ctl_mach_info(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
{
	struct nvmm_ctl_mach_info ctl;
	struct nvmm_machine *mach;
	int error;
	size_t i;

	if (args->size != sizeof(ctl))
		return EINVAL;
	error = copyin(args->data, &ctl, sizeof(ctl));
	if (error)
		return error;

	error = nvmm_machine_get(owner, ctl.machid, &mach, true);
	if (error)
		return error;

	ctl.nvcpus = mach->ncpus;

	ctl.nram = 0;
	for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
		if (!mach->hmap[i].present)
			continue;
		ctl.nram += mach->hmap[i].size;
	}

	ctl.pid = mach->owner->pid;
	ctl.time = mach->time;

	nvmm_machine_put(mach);

	error = copyout(&ctl, args->data, sizeof(ctl));
	if (error)
		return error;

	return 0;
}
static int
nvmm_ctl(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
{
	switch (args->op) {
	case NVMM_CTL_MACH_INFO:
		return nvmm_ctl_mach_info(owner, args);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */
static const struct nvmm_impl *
nvmm_ident(void)
{
	size_t i;

	for (i = 0; i < __arraycount(nvmm_impl_list); i++) {
		if ((*nvmm_impl_list[i]->ident)())
			return nvmm_impl_list[i];
	}

	return NULL;
}
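
/*
 * Backend selection: the first implementation whose ident() callback
 * accepts the host CPU wins, in the order of nvmm_impl_list[]; on x86_64
 * that means AMD SVM is probed before Intel VMX.
 */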
static int
nvmm_init(void)
{
	size_t i, n;

	nvmm_impl = nvmm_ident();
	if (nvmm_impl == NULL)
		return ENOTSUP;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		machines[i].machid = i;
		rw_init(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			machines[i].cpus[n].present = false;
			machines[i].cpus[n].cpuid = n;
			mutex_init(&machines[i].cpus[n].lock, MUTEX_DEFAULT,
			    IPL_NONE);
		}
	}

	(*nvmm_impl->init)();

	return 0;
}
static void
nvmm_fini(void)
{
	size_t i, n;

	for (i = 0; i < NVMM_MAX_MACHINES; i++) {
		rw_destroy(&machines[i].lock);
		for (n = 0; n < NVMM_MAX_VCPUS; n++) {
			mutex_destroy(&machines[i].cpus[n].lock);
		}
	}

	(*nvmm_impl->fini)();
	nvmm_impl = NULL;
}

/* -------------------------------------------------------------------------- */
static d_open_t nvmm_open;
static d_ioctl_t nvmm_ioctl;
static d_mmap_single_t nvmm_mmap_single;
static d_priv_dtor_t nvmm_dtor;

static struct dev_ops nvmm_ops = {
	{ "nvmm", 0, D_MPSAFE },
	.d_open = nvmm_open,
	.d_ioctl = nvmm_ioctl,
	.d_mmap_single = nvmm_mmap_single,
};
static int
nvmm_open(struct dev_open_args *ap)
{
	int flags = ap->a_oflags;
	struct nvmm_owner *owner;
	struct file *fp;
	int error;

	if (__predict_false(nvmm_impl == NULL))
		return ENXIO;
	if (!(flags & O_CLOEXEC))
		return EINVAL;

	if (priv_check_cred(ap->a_cred, PRIV_ROOT, 0) == 0) {
		owner = &root_owner;
	} else {
		owner = kmem_alloc(sizeof(*owner), KM_SLEEP);
		owner->pid = curthread->td_proc->p_pid;
	}

	fp = ap->a_fpp ? *ap->a_fpp : NULL;
	error = devfs_set_cdevpriv(fp, owner, nvmm_dtor);
	if (error) {
		nvmm_dtor(owner);
		return error;
	}

	return 0;
}
static void
nvmm_dtor(void *arg)
{
	struct nvmm_owner *owner = arg;

	KASSERT(owner != NULL);
	nvmm_kill_machines(owner);
	if (owner != &root_owner) {
		kmem_free(owner, sizeof(*owner));
	}
}
static int
nvmm_mmap_single(struct dev_mmap_single_args *ap)
{
	vm_ooffset_t *offp = ap->a_offset;
	size_t size = ap->a_size;
	int prot = ap->a_nprot;
	struct vm_object **uobjp = ap->a_object;
	struct file *fp = ap->a_fp;
	struct nvmm_owner *owner = NULL;
	struct nvmm_machine *mach;
	nvmm_machid_t machid;
	nvmm_cpuid_t cpuid;
	int error;

	devfs_get_cdevpriv(fp, (void **)&owner);
	KASSERT(owner != NULL);

	if (prot & PROT_EXEC)
		return EACCES;
	if (size != PAGE_SIZE)
		return EINVAL;

	cpuid = NVMM_COMM_CPUID(*offp);
	if (__predict_false(cpuid >= NVMM_MAX_VCPUS))
		return EINVAL;

	machid = NVMM_COMM_MACHID(*offp);
	error = nvmm_machine_get(owner, machid, &mach, false);
	if (error)
		return error;

	uao_reference(mach->commuobj);
	*uobjp = mach->commuobj;
	*offp = cpuid * PAGE_SIZE;

	nvmm_machine_put(mach);
	return 0;
}
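
/*
 * The mmap() offset is not a file offset here: userland encodes the target
 * machine and VCPU in it, and NVMM_COMM_MACHID()/NVMM_COMM_CPUID() decode
 * them back. A sketch of the expected call, assuming an
 * NVMM_COMM_OFF(machid, cpuid) encode macro as the inverse of the two
 * decoders (the actual encoding lives in the nvmm headers):
 *
 *	comm = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED,
 *	    fd, NVMM_COMM_OFF(machid, cpuid));
 */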
static int
nvmm_ioctl(struct dev_ioctl_args *ap)
{
	unsigned long cmd = ap->a_cmd;
	void *data = ap->a_data;
	struct file *fp = ap->a_fp;
	struct nvmm_owner *owner = NULL;

	devfs_get_cdevpriv(fp, (void **)&owner);
	KASSERT(owner != NULL);

	switch (cmd) {
	case NVMM_IOC_CAPABILITY:
		return nvmm_capability(owner, data);
	case NVMM_IOC_MACHINE_CREATE:
		return nvmm_machine_create(owner, data);
	case NVMM_IOC_MACHINE_DESTROY:
		return nvmm_machine_destroy(owner, data);
	case NVMM_IOC_MACHINE_CONFIGURE:
		return nvmm_machine_configure(owner, data);
	case NVMM_IOC_VCPU_CREATE:
		return nvmm_vcpu_create(owner, data);
	case NVMM_IOC_VCPU_DESTROY:
		return nvmm_vcpu_destroy(owner, data);
	case NVMM_IOC_VCPU_CONFIGURE:
		return nvmm_vcpu_configure(owner, data);
	case NVMM_IOC_VCPU_SETSTATE:
		return nvmm_vcpu_setstate(owner, data);
	case NVMM_IOC_VCPU_GETSTATE:
		return nvmm_vcpu_getstate(owner, data);
	case NVMM_IOC_VCPU_INJECT:
		return nvmm_vcpu_inject(owner, data);
	case NVMM_IOC_VCPU_RUN:
		return nvmm_vcpu_run(owner, data);
	case NVMM_IOC_GPA_MAP:
		return nvmm_gpa_map(owner, data);
	case NVMM_IOC_GPA_UNMAP:
		return nvmm_gpa_unmap(owner, data);
	case NVMM_IOC_HVA_MAP:
		return nvmm_hva_map(owner, data);
	case NVMM_IOC_HVA_UNMAP:
		return nvmm_hva_unmap(owner, data);
	case NVMM_IOC_CTL:
		return nvmm_ctl(owner, data);
	default:
		return EINVAL;
	}
}

/* -------------------------------------------------------------------------- */
static int
nvmm_attach(void)
{
	int error;

	error = nvmm_init();
	if (error)
		panic("%s: impossible", __func__);
	printf("nvmm: attached, using backend %s\n", nvmm_impl->name);

	return 0;
}

static int
nvmm_detach(void)
{
	if (atomic_load_acq_int(&nmachines) > 0)
		return EBUSY;

	nvmm_fini();
	return 0;
}
static int
nvmm_modevent(module_t mod __unused, int type, void *data __unused)
{
	static cdev_t dev = NULL;
	int error;

	switch (type) {
	case MOD_LOAD:
		if (nvmm_ident() == NULL) {
			printf("nvmm: cpu not supported\n");
			return ENOTSUP;
		}
		error = nvmm_attach();
		if (error)
			return error;

		dev = make_dev(&nvmm_ops, 0, UID_ROOT, GID_NVMM, 0660, "nvmm");
		if (dev == NULL) {
			printf("nvmm: unable to create device\n");
			error = ENOMEM;
		}
		break;

	case MOD_UNLOAD:
		if (dev == NULL)
			return 0;
		error = nvmm_detach();
		if (error == 0)
			destroy_dev(dev);
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return error;
}
static moduledata_t nvmm_moddata = {
	.name = "nvmm",
	.evhand = nvmm_modevent,
	.priv = NULL,
};

DECLARE_MODULE(nvmm, nvmm_moddata, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(nvmm, NVMM_KERN_VERSION);