dd81d68f9a964131d2e7a1813f885aa05e53f9c1
[dragonfly.git] / sys / kern / sys_vmm.c
1 /*
2  * Copyright (c) 2003-2013 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Mihai Carabas <mihai.carabas@gmail.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/sysproto.h>
38 #include <sys/proc.h>
39 #include <sys/user.h>
40 #include <sys/wait.h>
41 #include <sys/vmm.h>
42
43 #include <sys/thread2.h>
44 #include <sys/spinlock2.h>
45
46 #include <machine/cpu.h>
47 #include <machine/vmm.h>
48
49 /*
50  * vmm guest system call:
51  * - init the calling thread structure
52  * - prepare for running in non-root mode
53  */
int
sys_vmm_guest_ctl(struct vmm_guest_ctl_args *uap)
{
	int error = 0;
	struct guest_options options;
	struct trapframe *tf = uap->sysmsg_frame;
	unsigned long stack_limit = USRSTACK;
	/* One-page bounce buffer used to copy the user stack page by page. */
	unsigned char stack_page[PAGE_SIZE];

	clear_quickret();

	switch (uap->op) {
		case VMM_GUEST_RUN:
			error = copyin(uap->options, &options, sizeof(struct guest_options));
			if (error) {
				kprintf("sys_vmm_guest: error copyin guest_options\n");
				goto out;
			}

			/*
			 * Replicate the caller's current user stack (from
			 * tf_sp up to USRSTACK) into the guest's new stack
			 * region, one page at a time, walking downward.
			 * options.new_stack is decremented in lock-step so
			 * it tracks the corresponding guest page.
			 */
			while(stack_limit > tf->tf_sp) {
				stack_limit -= PAGE_SIZE;
				options.new_stack -= PAGE_SIZE;

				error = copyin((const void *)stack_limit, (void *)stack_page, PAGE_SIZE);
				if (error) {
					kprintf("sys_vmm_guest: error copyin stack\n");
					goto out;
				}

				error = copyout((const void *)stack_page, (void *)options.new_stack, PAGE_SIZE);
				if (error) {
					kprintf("sys_vmm_guest: error copyout stack\n");
					goto out;
				}
			}

			/*
			 * Snapshot the syscall entry trapframe into the
			 * options so the guest starts from the caller's
			 * register context.
			 */
			bcopy(tf, &options.tf, sizeof(struct trapframe));

			/*
			 * ENODEV means no VMM hardware support is available;
			 * return the error to the caller.  Any other init
			 * failure is treated as fatal and terminates the
			 * process via out_exit.
			 */
			error = vmm_vminit(&options);
			if (error) {
				if (error == ENODEV) {
					kprintf("sys_vmm_guest: vmm_vminit failed -"
					    "no VMM available \n");
					goto out;
				} else {
					kprintf("sys_vmm_guest: vmm_vminit failed\n");
					goto out_exit;
				}
			}

			/* Finalize lwp return state before entering the guest. */
			generic_lwp_return(curthread->td_lwp, tf);

			/* Enter non-root (guest) mode; returns when the guest stops. */
			error = vmm_vmrun();

			/* fallthrough to out_exit: guest termination exits the process */
			break;
		default:
			kprintf("sys_vmm_guest: INVALID op\n");
			error = EINVAL;
			goto out;
	}
out_exit:
	/* Intentional: a finished/failed guest run terminates the process. */
	exit1(W_EXITCODE(error, 0));
out:
	return (error);
}
119
120 static
121 void
122 vmm_exit_vmm(void *dummy __unused)
123 {
124 }
125
126 int
127 sys_vmm_guest_sync_addr(struct vmm_guest_sync_addr_args *uap)
128 {
129         int error = 0;
130         cpumask_t oactive;
131         cpumask_t nactive;
132         long val;
133         struct proc *p = curproc;
134
135         if (p->p_vmm == NULL)
136                 return ENOSYS;
137
138         crit_enter_id("vmm_inval");
139
140         /*
141          * Set CPUMASK_LOCK, spin if anyone else is trying to set CPUMASK_LOCK.
142          */
143         for (;;) {
144                 oactive = p->p_vmm_cpumask & ~CPUMASK_LOCK;
145                 cpu_ccfence();
146                 nactive = oactive | CPUMASK_LOCK;
147                 if (atomic_cmpset_cpumask(&p->p_vmm_cpumask, oactive, nactive))
148                         break;
149                 lwkt_process_ipiq();
150                 cpu_pause();
151         }
152
153         /*
154          * Wait for other cpu's to exit VMM mode (for this vkernel).  No
155          * new cpus will enter VMM mode while we hold the lock.  New waiters
156          * may turn-up though so the wakeup() later on has to be
157          * unconditional.
158          */
159         if (oactive & mycpu->gd_other_cpus) {
160                 lwkt_send_ipiq_mask(oactive & mycpu->gd_other_cpus,
161                                     vmm_exit_vmm, NULL);
162                 while (p->p_vmm_cpumask & ~CPUMASK_LOCK) {
163                         lwkt_process_ipiq();
164                         cpu_pause();
165                 }
166         }
167
168         /*
169          * Make the requested modification, wakeup any waiters.
170          */
171         copyin(uap->srcaddr, &val, sizeof(long));
172         copyout(&val, uap->dstaddr, sizeof(long));
173
174         atomic_clear_cpumask(&p->p_vmm_cpumask, CPUMASK_LOCK);
175         wakeup(&p->p_vmm_cpumask);
176
177         crit_exit_id("vmm_inval");
178
179         return error;
180 }