2 * Copyright (c) 2018-2021 Maxime Villard, m00nbsd.net
5 * This code is part of the NVMM hypervisor.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/param.h>
30 #include <sys/systm.h>
32 #include <sys/kernel.h>
36 #include "nvmm_internal.h"
37 #include "nvmm_ioctl.h"
/* Static table of all machine slots; nmachines counts the present ones. */
39 static struct nvmm_machine machines[NVMM_MAX_MACHINES];
40 volatile unsigned int nmachines __cacheline_aligned;
/*
 * Candidate backends probed at init time; on x86-64: AMD SVM and Intel VMX.
 * NOTE(review): the array terminator/#endif is not visible in this excerpt.
 */
42 static const struct nvmm_impl *nvmm_impl_list[] = {
43 #if defined(__x86_64__)
44 &nvmm_x86_svm, /* x86 AMD SVM */
45 &nvmm_x86_vmx /* x86 Intel VMX */
/* Backend selected by nvmm_ident(); NULL until initialization. */
49 const struct nvmm_impl *nvmm_impl __read_mostly = NULL;
/* Distinguished owner granted access to every machine (see nvmm_machine_get). */
51 struct nvmm_owner nvmm_root_owner;
53 /* -------------------------------------------------------------------------- */
/*
 * Find a free machine slot, mark it present, and return it write-locked in
 * *ret. Also stamps the creation time and bumps the global machine count.
 * NOTE(review): several lines (slot selection, error return) are not visible
 * in this excerpt.
 */
56 nvmm_machine_alloc(struct nvmm_machine **ret)
58 struct nvmm_machine *mach;
61 for (i = 0; i < NVMM_MAX_MACHINES; i++) {
64 os_rwl_wlock(&mach->lock);
66 os_rwl_unlock(&mach->lock);
/* Creation time, later reported via nvmm_ctl_mach_info(). */
71 mach->time = time_second;
73 os_atomic_inc_uint(&nmachines);
/*
 * Release a machine slot. The caller must hold the slot's write lock and
 * the slot must be present; decrements the global machine count.
 */
81 nvmm_machine_free(struct nvmm_machine *mach)
83 OS_ASSERT(os_rwl_wheld(&mach->lock));
84 OS_ASSERT(mach->present);
85 mach->present = false;
86 os_atomic_dec_uint(&nmachines);
/*
 * Look up machine 'machid' and return it in *ret, locked for writing if
 * 'writer' is true, otherwise for reading. Fails when the id is out of
 * range, the slot is not present, or 'owner' neither owns the machine nor
 * is nvmm_root_owner (which may access any machine).
 */
90 nvmm_machine_get(struct nvmm_owner *owner, nvmm_machid_t machid,
91 struct nvmm_machine **ret, bool writer)
93 struct nvmm_machine *mach;
95 if (__predict_false(machid >= NVMM_MAX_MACHINES)) {
98 mach = &machines[machid];
100 if (__predict_false(writer)) {
101 os_rwl_wlock(&mach->lock);
103 os_rwl_rlock(&mach->lock);
105 if (__predict_false(!mach->present)) {
106 os_rwl_unlock(&mach->lock);
/* Ownership check: only the owner or the root owner may proceed. */
109 if (__predict_false(mach->owner != owner &&
110 owner != &nvmm_root_owner)) {
111 os_rwl_unlock(&mach->lock);
/* Drop the machine lock taken by nvmm_machine_get()/nvmm_machine_alloc(). */
120 nvmm_machine_put(struct nvmm_machine *mach)
122 os_rwl_unlock(&mach->lock);
125 /* -------------------------------------------------------------------------- */
/*
 * Claim vCPU slot 'cpuid' on 'mach', mark it present, and return it in *ret
 * with its mutex held. hcpu_last = -1 means "never ran on any host CPU yet".
 * NOTE(review): the already-present error path is only partially visible.
 */
128 nvmm_vcpu_alloc(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
129 struct nvmm_cpu **ret)
131 struct nvmm_cpu *vcpu;
133 if (cpuid >= NVMM_MAX_VCPUS) {
136 vcpu = &mach->cpus[cpuid];
138 os_mtx_lock(&vcpu->lock);
140 os_mtx_unlock(&vcpu->lock);
144 vcpu->present = true;
146 vcpu->hcpu_last = -1;
/*
 * Release a vCPU slot (mutex must be held) and tear down the kernel-side
 * mapping of its comm page, if one was established.
 */
152 nvmm_vcpu_free(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
154 OS_ASSERT(os_mtx_owned(&vcpu->lock));
155 vcpu->present = false;
156 if (vcpu->comm != NULL) {
157 os_vmobj_unmap(os_kernel_map, (vaddr_t)vcpu->comm,
158 (vaddr_t)vcpu->comm + NVMM_COMM_PAGE_SIZE, true);
160 * Require userland to unmap the comm page from its address
161 * space, because os_curproc_map at this point (fd close)
162 * is not guaranteed to be the correct address space.
/*
 * Look up vCPU 'cpuid' on 'mach' and return it in *ret with its mutex held.
 * Fails when the id is out of range or the slot is not present.
 */
168 nvmm_vcpu_get(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
169 struct nvmm_cpu **ret)
171 struct nvmm_cpu *vcpu;
173 if (__predict_false(cpuid >= NVMM_MAX_VCPUS)) {
176 vcpu = &mach->cpus[cpuid];
178 os_mtx_lock(&vcpu->lock);
179 if (__predict_false(!vcpu->present)) {
180 os_mtx_unlock(&vcpu->lock);
/* Drop the vCPU mutex taken by nvmm_vcpu_get()/nvmm_vcpu_alloc(). */
189 nvmm_vcpu_put(struct nvmm_cpu *vcpu)
191 os_mtx_unlock(&vcpu->lock);
194 /* -------------------------------------------------------------------------- */
/*
 * Destroy every machine belonging to 'owner': tear down all of its vCPUs,
 * the backend machine state, the guest vmspace, and the host-mapping vmobj
 * references, then free the slot. Used when an owner goes away (fd close)
 * to reclaim everything it left behind.
 */
197 nvmm_kill_machines(struct nvmm_owner *owner)
199 struct nvmm_machine *mach;
200 struct nvmm_cpu *vcpu;
204 for (i = 0; i < NVMM_MAX_MACHINES; i++) {
207 os_rwl_wlock(&mach->lock);
/* Skip machines that are absent or owned by someone else. */
208 if (!mach->present || mach->owner != owner) {
209 os_rwl_unlock(&mach->lock);
214 for (j = 0; j < NVMM_MAX_VCPUS; j++) {
215 error = nvmm_vcpu_get(mach, j, &vcpu);
218 (*nvmm_impl->vcpu_destroy)(mach, vcpu);
219 nvmm_vcpu_free(mach, vcpu);
221 os_atomic_dec_uint(&mach->ncpus);
223 (*nvmm_impl->machine_destroy)(mach);
224 os_vmspace_destroy(mach->vm);
226 /* Drop the kernel vmobj refs. */
227 for (j = 0; j < NVMM_MAX_HMAPPINGS; j++) {
228 if (!mach->hmap[j].present)
230 os_vmobj_rel(mach->hmap[j].vmobj);
233 nvmm_machine_free(mach);
235 os_rwl_unlock(&mach->lock);
239 /* -------------------------------------------------------------------------- */
/*
 * NVMM_IOC_CAPABILITY: fill in the static capability limits, then let the
 * backend add its machine-dependent capabilities.
 */
242 nvmm_capability(struct nvmm_owner *owner, struct nvmm_ioc_capability *args)
244 args->cap.version = NVMM_KERN_VERSION;
245 args->cap.state_size = nvmm_impl->state_size;
246 args->cap.comm_size = NVMM_COMM_PAGE_SIZE;
247 args->cap.max_machines = NVMM_MAX_MACHINES;
248 args->cap.max_vcpus = NVMM_MAX_VCPUS;
249 args->cap.max_ram = NVMM_MAX_RAM;
251 (*nvmm_impl->capability)(&args->cap);
/*
 * NVMM_IOC_MACHINE_CREATE: allocate a machine slot, create the guest
 * vmspace covering [gpa_begin, NVMM_MAX_RAM), create the comm-page vmobj
 * (one page per possible vCPU), invoke the backend, and hand the new
 * machid back to userland.
 */
257 nvmm_machine_create(struct nvmm_owner *owner,
258 struct nvmm_ioc_machine_create *args)
260 struct nvmm_machine *mach;
263 error = nvmm_machine_alloc(&mach);
267 /* Curproc owns the machine. */
270 /* Zero out the host mappings. */
271 memset(&mach->hmap, 0, sizeof(mach->hmap));
273 /* Create the machine vmspace. */
275 mach->gpa_end = NVMM_MAX_RAM;
276 mach->vm = os_vmspace_create(mach->gpa_begin, mach->gpa_end);
280 * Set PMAP_MULTI on the backing pmap for the machine. Only
281 * pmap changes to the backing pmap for the machine affect the
282 * guest. Changes to the host's pmap do not affect the guest's
285 pmap_maybethreaded(&mach->vm->vm_pmap);
288 /* Create the comm vmobj. */
289 mach->commvmobj = os_vmobj_create(
290 NVMM_MAX_VCPUS * NVMM_COMM_PAGE_SIZE);
292 (*nvmm_impl->machine_create)(mach);
294 args->machid = mach->machid;
295 nvmm_machine_put(mach);
/*
 * NVMM_IOC_MACHINE_DESTROY: take the machine write-locked, destroy every
 * present vCPU, the backend state, the guest vmspace and the host-mapping
 * vmobj refs, then free the slot. Mirrors the per-machine teardown in
 * nvmm_kill_machines().
 */
301 nvmm_machine_destroy(struct nvmm_owner *owner,
302 struct nvmm_ioc_machine_destroy *args)
304 struct nvmm_machine *mach;
305 struct nvmm_cpu *vcpu;
309 error = nvmm_machine_get(owner, args->machid, &mach, true);
313 for (i = 0; i < NVMM_MAX_VCPUS; i++) {
314 error = nvmm_vcpu_get(mach, i, &vcpu);
318 (*nvmm_impl->vcpu_destroy)(mach, vcpu);
319 nvmm_vcpu_free(mach, vcpu);
321 os_atomic_dec_uint(&mach->ncpus);
324 (*nvmm_impl->machine_destroy)(mach);
326 /* Free the machine vmspace. */
327 os_vmspace_destroy(mach->vm);
329 /* Drop the kernel vmobj refs. */
330 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
331 if (!mach->hmap[i].present)
333 os_vmobj_rel(mach->hmap[i].vmobj);
336 nvmm_machine_free(mach);
337 nvmm_machine_put(mach);
/*
 * NVMM_IOC_MACHINE_CONFIGURE: validate the machine-dependent op number,
 * allocate a kernel buffer of the op's declared size, copy the user's
 * configuration in, and pass it to the backend with the machine held
 * write-locked. The buffer is freed on every exit path shown.
 */
343 nvmm_machine_configure(struct nvmm_owner *owner,
344 struct nvmm_ioc_machine_configure *args)
346 struct nvmm_machine *mach;
352 op = NVMM_MACH_CONF_MD(args->op);
353 if (__predict_false(op >= nvmm_impl->mach_conf_max)) {
357 allocsz = nvmm_impl->mach_conf_sizes[op];
358 data = os_mem_alloc(allocsz);
360 error = nvmm_machine_get(owner, args->machid, &mach, true);
362 os_mem_free(data, allocsz);
366 error = copyin(args->conf, data, allocsz);
371 error = (*nvmm_impl->machine_configure)(mach, op, data);
374 nvmm_machine_put(mach);
375 os_mem_free(data, allocsz);
/*
 * NVMM_IOC_VCPU_CREATE: allocate the vCPU slot, map its comm page into the
 * kernel (wired) and into the calling process (pageable) from the shared
 * comm vmobj, zero it, invoke the backend vcpu_create, and bump the
 * machine's vCPU count. Each visible failure path frees the vCPU slot.
 */
380 nvmm_vcpu_create(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_create *args)
382 struct nvmm_machine *mach;
383 struct nvmm_cpu *vcpu;
386 error = nvmm_machine_get(owner, args->machid, &mach, false);
390 error = nvmm_vcpu_alloc(mach, args->cpuid, &vcpu);
394 /* Map the comm page on the kernel side, as wired. */
395 error = os_vmobj_map(os_kernel_map, (vaddr_t *)&vcpu->comm,
396 NVMM_COMM_PAGE_SIZE, mach->commvmobj,
397 args->cpuid * NVMM_COMM_PAGE_SIZE, true /* wired */,
398 false /* !fixed */, true /* shared */, PROT_READ | PROT_WRITE,
399 PROT_READ | PROT_WRITE);
401 nvmm_vcpu_free(mach, vcpu);
406 memset(vcpu->comm, 0, NVMM_COMM_PAGE_SIZE);
408 /* Map the comm page on the user side, as pageable. */
409 error = os_vmobj_map(os_curproc_map, (vaddr_t *)&args->comm,
410 NVMM_COMM_PAGE_SIZE, mach->commvmobj,
411 args->cpuid * NVMM_COMM_PAGE_SIZE, false /* !wired */,
412 false /* !fixed */, true /* shared */, PROT_READ | PROT_WRITE,
413 PROT_READ | PROT_WRITE);
415 nvmm_vcpu_free(mach, vcpu);
420 error = (*nvmm_impl->vcpu_create)(mach, vcpu);
422 nvmm_vcpu_free(mach, vcpu);
428 os_atomic_inc_uint(&mach->ncpus);
431 nvmm_machine_put(mach);
/*
 * NVMM_IOC_VCPU_DESTROY: look up the machine (read lock) and the vCPU,
 * destroy the backend vCPU state, free the slot, and decrement the
 * machine's vCPU count.
 */
436 nvmm_vcpu_destroy(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_destroy *args)
438 struct nvmm_machine *mach;
439 struct nvmm_cpu *vcpu;
442 error = nvmm_machine_get(owner, args->machid, &mach, false);
446 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
450 (*nvmm_impl->vcpu_destroy)(mach, vcpu);
451 nvmm_vcpu_free(mach, vcpu);
453 os_atomic_dec_uint(&mach->ncpus);
456 nvmm_machine_put(mach);
/*
 * NVMM_IOC_VCPU_CONFIGURE: validate the machine-dependent op, allocate a
 * buffer of the op's declared size, take the machine (read lock) and the
 * vCPU, copy the user's configuration in, and pass it to the backend.
 * The buffer is freed on every exit path shown.
 */
461 nvmm_vcpu_configure(struct nvmm_owner *owner,
462 struct nvmm_ioc_vcpu_configure *args)
464 struct nvmm_machine *mach;
465 struct nvmm_cpu *vcpu;
471 op = NVMM_VCPU_CONF_MD(args->op);
472 if (__predict_false(op >= nvmm_impl->vcpu_conf_max))
475 allocsz = nvmm_impl->vcpu_conf_sizes[op];
476 data = os_mem_alloc(allocsz);
478 error = nvmm_machine_get(owner, args->machid, &mach, false);
480 os_mem_free(data, allocsz);
484 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
486 nvmm_machine_put(mach);
487 os_mem_free(data, allocsz);
491 error = copyin(args->conf, data, allocsz);
496 error = (*nvmm_impl->vcpu_configure)(vcpu, op, data);
500 nvmm_machine_put(mach);
501 os_mem_free(data, allocsz);
/*
 * NVMM_IOC_VCPU_SETSTATE: locate the vCPU and let the backend load the
 * state userland staged in the vCPU's comm page.
 */
506 nvmm_vcpu_setstate(struct nvmm_owner *owner,
507 struct nvmm_ioc_vcpu_setstate *args)
509 struct nvmm_machine *mach;
510 struct nvmm_cpu *vcpu;
513 error = nvmm_machine_get(owner, args->machid, &mach, false);
517 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
521 (*nvmm_impl->vcpu_setstate)(vcpu);
525 nvmm_machine_put(mach);
/*
 * NVMM_IOC_VCPU_GETSTATE: locate the vCPU and let the backend publish its
 * current state for userland (the comm page is the shared channel).
 */
530 nvmm_vcpu_getstate(struct nvmm_owner *owner,
531 struct nvmm_ioc_vcpu_getstate *args)
533 struct nvmm_machine *mach;
534 struct nvmm_cpu *vcpu;
537 error = nvmm_machine_get(owner, args->machid, &mach, false);
541 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
545 (*nvmm_impl->vcpu_getstate)(vcpu);
549 nvmm_machine_put(mach);
/*
 * NVMM_IOC_VCPU_INJECT: locate the vCPU and ask the backend to inject the
 * event staged by userland; unlike set/getstate, the backend call here can
 * itself fail and its error is propagated.
 */
554 nvmm_vcpu_inject(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_inject *args)
556 struct nvmm_machine *mach;
557 struct nvmm_cpu *vcpu;
560 error = nvmm_machine_get(owner, args->machid, &mach, false);
564 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
568 error = (*nvmm_impl->vcpu_inject)(vcpu);
572 nvmm_machine_put(mach);
/*
 * Inner vCPU run loop: repeatedly enter the guest via the backend. Exits
 * back to userland when a signal/resched is pending (reported as
 * NVMM_VCPU_EXIT_NONE), on backend error, or on any exit reason other than
 * a nested page fault. Faults inside the guest address range are resolved
 * in-kernel through os_vmspace_fault() and the guest is re-entered.
 */
577 nvmm_do_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
578 struct nvmm_vcpu_exit *exit)
580 struct vmspace *vm = mach->vm;
584 /* Got a signal? Or pending resched? Leave. */
585 if (__predict_false(os_return_needed())) {
586 exit->reason = NVMM_VCPU_EXIT_NONE;
591 ret = (*nvmm_impl->vcpu_run)(mach, vcpu, exit);
592 if (__predict_false(ret != 0)) {
596 /* Process nested page faults. */
597 if (__predict_true(exit->reason != NVMM_VCPU_EXIT_MEMORY)) {
/* Faults beyond the guest RAM limit are not resolvable here. */
600 if (exit->u.mem.gpa >= mach->gpa_end) {
603 if (os_vmspace_fault(vm, exit->u.mem.gpa, exit->u.mem.prot)) {
/*
 * NVMM_IOC_VCPU_RUN: locate the vCPU and run it via nvmm_do_vcpu_run(),
 * returning the exit description to userland in args->exit.
 */
612 nvmm_vcpu_run(struct nvmm_owner *owner, struct nvmm_ioc_vcpu_run *args)
614 struct nvmm_machine *mach;
615 struct nvmm_cpu *vcpu;
618 error = nvmm_machine_get(owner, args->machid, &mach, false);
622 error = nvmm_vcpu_get(mach, args->cpuid, &vcpu);
626 error = nvmm_do_vcpu_run(mach, vcpu, &args->exit);
630 nvmm_machine_put(mach);
634 /* -------------------------------------------------------------------------- */
/*
 * Find the host mapping that fully contains [hva, hva+size) and return its
 * backing vmobj, with *off set to the offset of 'hva' within the mapping.
 * NOTE(review): the not-found return path is outside this excerpt.
 */
637 nvmm_hmapping_getvmobj(struct nvmm_machine *mach, uintptr_t hva, size_t size,
640 struct nvmm_hmapping *hmapping;
643 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
644 hmapping = &mach->hmap[i];
645 if (!hmapping->present) {
648 if (hva >= hmapping->hva &&
649 hva + size <= hmapping->hva + hmapping->size) {
650 *off = hva - hmapping->hva;
651 return hmapping->vmobj;
/*
 * Validate a prospective host mapping [hva, hva+size): both ends must be
 * page-aligned, the range must not overflow, and it must not intersect any
 * existing mapping in any of the four possible overlap configurations
 * tested below (contained, left-overlap, right-overlap, enclosing).
 */
659 nvmm_hmapping_validate(struct nvmm_machine *mach, uintptr_t hva, size_t size)
661 struct nvmm_hmapping *hmapping;
666 if ((hva % PAGE_SIZE) != 0 || (size % PAGE_SIZE) != 0) {
674 * Overflow tests MUST be done very carefully to avoid compiler
675 * optimizations from effectively deleting the test.
677 hva_end = hva + size;
684 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
685 hmapping = &mach->hmap[i];
687 if (!hmapping->present) {
690 hmap_end = hmapping->hva + hmapping->size;
692 if (hva >= hmapping->hva && hva_end <= hmap_end)
694 if (hva >= hmapping->hva && hva < hmap_end)
696 if (hva_end > hmapping->hva && hva_end <= hmap_end)
698 if (hva <= hmapping->hva && hva_end >= hmap_end)
/*
 * Claim the first free host-mapping slot of 'mach' and return it marked
 * present; NOTE(review): the all-slots-used return path is outside this
 * excerpt.
 */
705 static struct nvmm_hmapping *
706 nvmm_hmapping_alloc(struct nvmm_machine *mach)
708 struct nvmm_hmapping *hmapping;
711 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
712 hmapping = &mach->hmap[i];
713 if (!hmapping->present) {
714 hmapping->present = true;
/*
 * Free the host mapping that exactly matches (hva, size): unmap it from
 * the calling process, drop the vmobj reference, and clear the slot. Only
 * an exact (hva, size) match is accepted.
 */
723 nvmm_hmapping_free(struct nvmm_machine *mach, uintptr_t hva, size_t size)
725 struct nvmm_hmapping *hmapping;
728 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
729 hmapping = &mach->hmap[i];
730 if (!hmapping->present || hmapping->hva != hva ||
731 hmapping->size != size) {
735 os_vmobj_unmap(os_curproc_map, hmapping->hva,
736 hmapping->hva + hmapping->size, false);
737 os_vmobj_rel(hmapping->vmobj);
739 hmapping->vmobj = NULL;
740 hmapping->present = false;
/*
 * NVMM_IOC_HVA_MAP: with the machine write-locked, validate the requested
 * host range, claim a mapping slot, create a vmobj of the given size, and
 * map it into the calling process at args->hva (fixed, pageable, RW).
 * The vmobj later backs guest-physical mappings via nvmm_gpa_map().
 */
749 nvmm_hva_map(struct nvmm_owner *owner, struct nvmm_ioc_hva_map *args)
751 struct nvmm_machine *mach;
752 struct nvmm_hmapping *hmapping;
756 error = nvmm_machine_get(owner, args->machid, &mach, true);
760 error = nvmm_hmapping_validate(mach, args->hva, args->size);
764 hmapping = nvmm_hmapping_alloc(mach);
765 if (hmapping == NULL) {
770 hmapping->hva = args->hva;
771 hmapping->size = args->size;
772 hmapping->vmobj = os_vmobj_create(hmapping->size);
775 /* Map the vmobj into the user address space, as pageable. */
776 error = os_vmobj_map(os_curproc_map, &uva, hmapping->size,
777 hmapping->vmobj, 0, false /* !wired */, true /* fixed */,
778 true /* shared */, PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE);
781 nvmm_machine_put(mach);
/*
 * NVMM_IOC_HVA_UNMAP: with the machine write-locked, free the host mapping
 * that exactly matches (hva, size) via nvmm_hmapping_free().
 */
786 nvmm_hva_unmap(struct nvmm_owner *owner, struct nvmm_ioc_hva_unmap *args)
788 struct nvmm_machine *mach;
791 error = nvmm_machine_get(owner, args->machid, &mach, true);
795 error = nvmm_hmapping_free(mach, args->hva, args->size);
797 nvmm_machine_put(mach);
801 /* -------------------------------------------------------------------------- */
/*
 * NVMM_IOC_GPA_MAP: map a previously hva-mapped host range into the guest
 * physical address space. Validates protection bits, overflow, page
 * alignment, a non-NULL hva, and that [gpa, gpa_end) lies inside
 * [gpa_begin, gpa_end) of the machine, then maps the backing vmobj into
 * the guest vm_map at 'gpa' (fixed, pageable, private).
 */
804 nvmm_gpa_map(struct nvmm_owner *owner, struct nvmm_ioc_gpa_map *args)
806 struct nvmm_machine *mach;
813 error = nvmm_machine_get(owner, args->machid, &mach, false);
817 if ((args->prot & ~(PROT_READ|PROT_WRITE|PROT_EXEC)) != 0) {
823 * Overflow tests MUST be done very carefully to avoid compiler
824 * optimizations from effectively deleting the test.
827 gpa_end = gpa + args->size;
828 if (gpa_end <= gpa) {
833 if ((gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0 ||
834 (args->hva % PAGE_SIZE) != 0) {
838 if (args->hva == 0) {
843 if (gpa < mach->gpa_begin || gpa >= mach->gpa_end) {
847 if (gpa_end > mach->gpa_end) {
/* The hva range must be wholly covered by one existing host mapping. */
852 vmobj = nvmm_hmapping_getvmobj(mach, args->hva, args->size, &off);
858 /* Map the vmobj into the machine address space, as pageable. */
859 error = os_vmobj_map(&mach->vm->vm_map, &gpa, args->size, vmobj, off,
860 false /* !wired */, true /* fixed */, false /* !shared */,
861 args->prot, PROT_READ | PROT_WRITE | PROT_EXEC);
864 nvmm_machine_put(mach);
/*
 * NVMM_IOC_GPA_UNMAP: remove a guest-physical mapping. Performs the same
 * overflow, alignment, and range validation as nvmm_gpa_map(), then unmaps
 * [gpa, gpa+size) from the guest vm_map.
 */
869 nvmm_gpa_unmap(struct nvmm_owner *owner, struct nvmm_ioc_gpa_unmap *args)
871 struct nvmm_machine *mach;
876 error = nvmm_machine_get(owner, args->machid, &mach, false);
881 * Overflow tests MUST be done very carefully to avoid compiler
882 * optimizations from effectively deleting the test.
885 gpa_end = gpa + args->size;
886 if (gpa_end <= gpa) {
891 if ((gpa % PAGE_SIZE) != 0 || (args->size % PAGE_SIZE) != 0) {
895 if (gpa < mach->gpa_begin || gpa >= mach->gpa_end) {
/*
 * NOTE(review): this uses '>=' while nvmm_gpa_map() uses '>' for the same
 * end bound, so a range ending exactly at gpa_end can be mapped but not
 * unmapped here — confirm against upstream whether this is intentional.
 */
899 if (gpa_end >= mach->gpa_end) {
904 /* Unmap the memory from the machine. */
905 os_vmobj_unmap(&mach->vm->vm_map, gpa, gpa + args->size, false);
908 nvmm_machine_put(mach);
912 /* -------------------------------------------------------------------------- */
/*
 * NVMM_CTL_MACH_INFO: copy the control structure in from userland, look up
 * the machine write-locked, fill in vCPU count, total host-mapped RAM,
 * owner pid and creation time, then copy the structure back out.
 */
915 nvmm_ctl_mach_info(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
917 struct nvmm_ctl_mach_info ctl;
918 struct nvmm_machine *mach;
922 if (args->size != sizeof(ctl))
924 error = copyin(args->data, &ctl, sizeof(ctl));
928 error = nvmm_machine_get(owner, ctl.machid, &mach, true);
932 ctl.nvcpus = mach->ncpus;
/* Sum the sizes of all present host mappings to report guest RAM. */
935 for (i = 0; i < NVMM_MAX_HMAPPINGS; i++) {
936 if (!mach->hmap[i].present)
938 ctl.nram += mach->hmap[i].size;
941 ctl.pid = mach->owner->pid;
942 ctl.time = mach->time;
944 nvmm_machine_put(mach);
946 error = copyout(&ctl, args->data, sizeof(ctl));
/* NVMM_IOC_CTL dispatcher: route to the requested control sub-operation. */
954 nvmm_ctl(struct nvmm_owner *owner, struct nvmm_ioc_ctl *args)
957 case NVMM_CTL_MACH_INFO:
958 return nvmm_ctl_mach_info(owner, args);
964 /* -------------------------------------------------------------------------- */
/*
 * Probe the backend list and return the first implementation whose ident()
 * callback reports the host supports it (SVM before VMX on x86-64).
 */
966 const struct nvmm_impl *
971 for (i = 0; i < __arraycount(nvmm_impl_list); i++) {
972 if ((*nvmm_impl_list[i]->ident)())
973 return nvmm_impl_list[i];
/*
 * Module initialization body (function header not visible in this excerpt):
 * select a backend via nvmm_ident(), initialize every machine slot's lock,
 * id, and per-vCPU state, then run the backend's init hook.
 */
984 nvmm_impl = nvmm_ident();
985 if (nvmm_impl == NULL)
988 for (i = 0; i < NVMM_MAX_MACHINES; i++) {
989 machines[i].machid = i;
990 os_rwl_init(&machines[i].lock);
991 for (n = 0; n < NVMM_MAX_VCPUS; n++) {
992 machines[i].cpus[n].present = false;
993 machines[i].cpus[n].cpuid = n;
994 os_mtx_init(&machines[i].cpus[n].lock);
998 (*nvmm_impl->init)();
/*
 * Module teardown body (function header not visible in this excerpt):
 * destroy every machine and vCPU lock, then run the backend's fini hook.
 * Mirrors the initialization performed above.
 */
1008 for (i = 0; i < NVMM_MAX_MACHINES; i++) {
1009 os_rwl_destroy(&machines[i].lock);
1010 for (n = 0; n < NVMM_MAX_VCPUS; n++) {
1011 os_mtx_destroy(&machines[i].cpus[n].lock);
1015 (*nvmm_impl->fini)();
1019 /* -------------------------------------------------------------------------- */
/*
 * Top-level ioctl dispatcher: route each NVMM_IOC_* command to its handler,
 * casting the generic data pointer to the command-specific args structure.
 * Falls through to nvmm_ctl() for the control command. NOTE(review): the
 * end of this function is beyond this excerpt.
 */
1022 nvmm_ioctl(struct nvmm_owner *owner, unsigned long cmd, void *data)
1025 case NVMM_IOC_CAPABILITY:
1026 return nvmm_capability(owner, data);
1027 case NVMM_IOC_MACHINE_CREATE:
1028 return nvmm_machine_create(owner, data);
1029 case NVMM_IOC_MACHINE_DESTROY:
1030 return nvmm_machine_destroy(owner, data);
1031 case NVMM_IOC_MACHINE_CONFIGURE:
1032 return nvmm_machine_configure(owner, data);
1033 case NVMM_IOC_VCPU_CREATE:
1034 return nvmm_vcpu_create(owner, data);
1035 case NVMM_IOC_VCPU_DESTROY:
1036 return nvmm_vcpu_destroy(owner, data);
1037 case NVMM_IOC_VCPU_CONFIGURE:
1038 return nvmm_vcpu_configure(owner, data);
1039 case NVMM_IOC_VCPU_SETSTATE:
1040 return nvmm_vcpu_setstate(owner, data);
1041 case NVMM_IOC_VCPU_GETSTATE:
1042 return nvmm_vcpu_getstate(owner, data);
1043 case NVMM_IOC_VCPU_INJECT:
1044 return nvmm_vcpu_inject(owner, data);
1045 case NVMM_IOC_VCPU_RUN:
1046 return nvmm_vcpu_run(owner, data);
1047 case NVMM_IOC_GPA_MAP:
1048 return nvmm_gpa_map(owner, data);
1049 case NVMM_IOC_GPA_UNMAP:
1050 return nvmm_gpa_unmap(owner, data);
1051 case NVMM_IOC_HVA_MAP:
1052 return nvmm_hva_map(owner, data);
1053 case NVMM_IOC_HVA_UNMAP:
1054 return nvmm_hva_unmap(owner, data);
1056 return nvmm_ctl(owner, data);