sys/kern/sys_vmm.c
/*
 * Copyright (c) 2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Mihai Carabas <mihai.carabas@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <sys/vmm.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/vmm.h>

/*
 * vmm guest system call:
 * - init the calling thread structure
 * - prepare for running in non-root mode
 */
int
sys_vmm_guest_ctl(struct vmm_guest_ctl_args *uap)
{
	int error = 0;
	struct vmm_guest_options options;
	struct trapframe *tf = uap->sysmsg_frame;
	unsigned long stack_limit = USRSTACK;
	unsigned char stack_page[PAGE_SIZE];

	clear_quickret();

	switch (uap->op) {
		case VMM_GUEST_RUN:
			error = copyin(uap->options, &options,
			    sizeof(struct vmm_guest_options));
			if (error) {
				kprintf("%s: error copyin vmm_guest_options\n",
				    __func__);
				goto out;
			}

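			/*
			 * The loop below copies the in-use portion of the
			 * current user stack (from USRSTACK down to the
			 * trapframe's stack pointer), one page at a time,
			 * into the guest's new stack region so the guest
			 * starts on an identical stack image.
			 */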
			while (stack_limit > tf->tf_rsp) {
				stack_limit -= PAGE_SIZE;
				options.new_stack -= PAGE_SIZE;

				error = copyin((const void *)stack_limit,
				    (void *)stack_page, PAGE_SIZE);
				if (error) {
					kprintf("%s: error copyin stack\n",
					    __func__);
					goto out;
				}

				error = copyout((const void *)stack_page,
				    (void *)options.new_stack, PAGE_SIZE);
				if (error) {
					kprintf("%s: error copyout stack\n",
					    __func__);
					goto out;
				}
			}

			bcopy(tf, &options.tf, sizeof(struct trapframe));

			error = vmm_vminit(&options);
			if (error) {
				if (error == ENODEV) {
					kprintf("%s: vmm_vminit failed - "
					    "no VMM available\n", __func__);
					goto out;
				}
				kprintf("%s: vmm_vminit failed\n", __func__);
				goto out_exit;
			}

			generic_lwp_return(curthread->td_lwp, tf);

			error = vmm_vmrun();

			break;
		default:
			kprintf("%s: INVALID op\n", __func__);
			error = EINVAL;
			goto out;
	}
out_exit:
	exit1(W_EXITCODE(error, 0));
out:
	return (error);
}
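
/*
 * Usage sketch (not part of the original file): a vkernel would enter guest
 * mode through the userland stub for this syscall, roughly as follows.  The
 * variable names are placeholders and only new_stack is shown; the remaining
 * vmm_guest_options fields come from <sys/vmm.h>.
 *
 *	struct vmm_guest_options opts;
 *
 *	bzero(&opts, sizeof(opts));
 *	opts.new_stack = (unsigned long)guest_stack_top;
 *	if (vmm_guest_ctl(VMM_GUEST_RUN, &opts) < 0)
 *		err(1, "vmm_guest_ctl");
 *
 * On success the calling thread is switched into VMM non-root mode by
 * vmm_vminit()/vmm_vmrun() above.
 */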

/*
 * The IPI handler is intentionally empty: delivering the interrupt is by
 * itself enough to force the target cpu out of VMM non-root mode, and the
 * VMM entry/exit code maintains the p_vmm_cpulock counter tested below.
 */
static void
vmm_exit_vmm(void *dummy __unused)
{
}

int
sys_vmm_guest_sync_addr(struct vmm_guest_sync_addr_args *uap)
{
	int error = 0;
	cpulock_t olock;
	cpulock_t nlock;
	cpumask_t mask;
	long val;
	struct proc *p = curproc;

	if (p->p_vmm == NULL)
		return ENOSYS;

	crit_enter_id("vmm_inval");

	/*
	 * Acquire CPULOCK_EXCL, spin while we wait.
	 */
	KKASSERT(CPUMASK_TESTMASK(p->p_vmm_cpumask, mycpu->gd_cpumask) == 0);
	for (;;) {
		olock = p->p_vmm_cpulock & ~CPULOCK_EXCL;
		cpu_ccfence();
		nlock = olock | CPULOCK_EXCL;
		if (atomic_cmpset_int(&p->p_vmm_cpulock, olock, nlock))
			break;
		lwkt_process_ipiq();
		cpu_pause();
	}

	/*
	 * Wait for other cpus to exit VMM mode (for this vkernel).  No
	 * new cpus will enter VMM mode while we hold the lock.  New waiters
	 * may turn up though, so the wakeup() later on has to be
	 * unconditional.
	 *
	 * We must test p_vmm_cpulock's counter, not the mask, because
	 * VMM entries will set the mask bit unconditionally first
	 * (interlocking our IPI below) and then conditionally bump the
	 * counter.
	 */
	if (olock & CPULOCK_CNTMASK) {
		mask = p->p_vmm_cpumask;
		CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
		lwkt_send_ipiq_mask(mask, vmm_exit_vmm, NULL);
		while (p->p_vmm_cpulock & CPULOCK_CNTMASK) {
			lwkt_process_ipiq();
			cpu_pause();
		}
	}
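
	/*
	 * For reference, a sketch (not the platform code) of the VMM entry
	 * side that the comment above describes; the entry path is expected
	 * to set its cpumask bit before touching the counter so that the
	 * IPI above cannot miss it:
	 *
	 *	set our bit in p->p_vmm_cpumask;
	 *	if ((p->p_vmm_cpulock & CPULOCK_EXCL) == 0)
	 *		bump the CPULOCK_CNTMASK counter and enter guest mode;
	 *	else
	 *		sleep on &p->p_vmm_cpulock until woken, then retry;
	 */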

	/*
	 * Make the requested modification, wakeup any waiters.
	 */
	if (uap->srcaddr) {
		error = copyin(uap->srcaddr, &val, sizeof(long));
		if (error == 0)
			error = copyout(&val, uap->dstaddr, sizeof(long));
	}

	atomic_clear_int(&p->p_vmm_cpulock, CPULOCK_EXCL);
	wakeup(&p->p_vmm_cpulock);

	crit_exit_id("vmm_inval");

	return error;
}
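
/*
 * Usage sketch (not part of the original file): the vkernel is expected to
 * use this syscall to update a single long in memory while every guest cpu
 * is held outside VMM mode, e.g. for invalidation-style page table updates
 * (the "vmm_inval" critical section id above hints at this).  A hypothetical
 * call through the userland stub:
 *
 *	long newval = ...;
 *	long *target = ...;	// location every vcpu must observe
 *
 *	vmm_guest_sync_addr(target, &newval);
 *
 * The argument order follows the dstaddr/srcaddr arguments used above; see
 * the generated prototype in sys/sysproto.h.
 */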