kernel - Refactor cpumask_t to extend cpus past 64, part 1/2
[dragonfly.git] / sys / kern / sys_vmm.c
/*
 * Copyright (c) 2003-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Mihai Carabas <mihai.carabas@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <sys/vmm.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/vmm.h>
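
/*
 * Illustrative sketch, not part of the original file: per the commit
 * this file belongs to ("Refactor cpumask_t to extend cpus past 64"),
 * cpumask_t is no longer guaranteed to be a plain integer, so direct
 * mask arithmetic is replaced by accessor macros throughout.  A rough
 * before/after comparison, under that assumption:
 */
#if 0
        cpumask_t mask;

        /* old (pre-refactor): cpumask_t was a plain 64-bit integer */
        mask = p->p_vmm_cpumask & mycpu->gd_other_cpus;

        /* new: cpumask_t may span multiple words, use the accessors */
        mask = p->p_vmm_cpumask;
        CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
        if (CPUMASK_TESTMASK(mask, mycpu->gd_cpumask) == 0)
                ;       /* this cpu's bit is not set */
#endif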
/*
 * vmm guest system call:
 * - init the calling thread structure
 * - prepare for running in non-root mode
 *
 * (An illustrative usage sketch follows this function.)
 */
int
sys_vmm_guest_ctl(struct vmm_guest_ctl_args *uap)
{
        int error = 0;
        struct vmm_guest_options options;
        struct trapframe *tf = uap->sysmsg_frame;
        unsigned long stack_limit = USRSTACK;
        unsigned char stack_page[PAGE_SIZE];

        clear_quickret();

        switch (uap->op) {
                case VMM_GUEST_RUN:
                        error = copyin(uap->options, &options,
                            sizeof(struct vmm_guest_options));
                        if (error) {
                                kprintf("sys_vmm_guest: error copyin "
                                    "vmm_guest_options\n");
                                goto out;
                        }

                        /*
                         * Copy the user stack, page by page, down to the
                         * current stack pointer into the new stack area,
                         * so the guest resumes with identical contents.
                         */
                        while (stack_limit > tf->tf_rsp) {
                                stack_limit -= PAGE_SIZE;
                                options.new_stack -= PAGE_SIZE;

                                error = copyin((const void *)stack_limit,
                                    (void *)stack_page, PAGE_SIZE);
                                if (error) {
                                        kprintf("sys_vmm_guest: error copyin stack\n");
                                        goto out;
                                }

                                error = copyout((const void *)stack_page,
                                    (void *)options.new_stack, PAGE_SIZE);
                                if (error) {
                                        kprintf("sys_vmm_guest: error copyout stack\n");
                                        goto out;
                                }
                        }

                        bcopy(tf, &options.tf, sizeof(struct trapframe));

                        error = vmm_vminit(&options);
                        if (error) {
                                if (error == ENODEV) {
                                        kprintf("sys_vmm_guest: vmm_vminit "
                                            "failed - no VMM available\n");
                                        goto out;
                                } else {
                                        kprintf("sys_vmm_guest: vmm_vminit failed\n");
                                        goto out_exit;
                                }
                        }

                        generic_lwp_return(curthread->td_lwp, tf);

                        error = vmm_vmrun();

                        /*
                         * vmm_vmrun() returns only when the guest is
                         * done; fall through to out_exit to terminate.
                         */
                        break;
                default:
                        kprintf("sys_vmm_guest: INVALID op\n");
                        error = EINVAL;
                        goto out;
        }
out_exit:
        exit1(W_EXITCODE(error, 0));
out:
        return (error);
}
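
/*
 * Illustrative sketch, not part of the original file: how a vkernel
 * might invoke this syscall from userland.  The userland stub name
 * vmm_guest_ctl() and the initialization shown are assumptions; only
 * op, options, and options.new_stack appear in the handler above.
 */
#if 0
#include <sys/vmm.h>
#include <string.h>

static int
enter_guest_mode(void *new_stack_top)
{
        struct vmm_guest_options opts;

        memset(&opts, 0, sizeof(opts));
        /* top of a pre-allocated guest stack; pages are copied downward */
        opts.new_stack = (unsigned long)new_stack_top;
        /* ... remaining guest state fields (hypothetical) ... */

        /* on success the calling thread continues in VMM non-root mode */
        return (vmm_guest_ctl(VMM_GUEST_RUN, &opts));
}
#endif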

/*
 * Empty IPI handler.  The IPI itself forces the target cpu out of VMM
 * mode; there is nothing further for the handler to do.
 */
static void
vmm_exit_vmm(void *dummy __unused)
{
}

int
sys_vmm_guest_sync_addr(struct vmm_guest_sync_addr_args *uap)
{
        int error = 0;
        cpulock_t olock;
        cpulock_t nlock;
        cpumask_t mask;
        long val;
        struct proc *p = curproc;

        if (p->p_vmm == NULL)
                return (ENOSYS);

        crit_enter_id("vmm_inval");

        /*
         * Acquire CPULOCK_EXCL, spin while we wait.
         */
        KKASSERT(CPUMASK_TESTMASK(p->p_vmm_cpumask, mycpu->gd_cpumask) == 0);
        for (;;) {
                olock = p->p_vmm_cpulock & ~CPULOCK_EXCL;
                cpu_ccfence();
                nlock = olock | CPULOCK_EXCL;
                if (atomic_cmpset_int(&p->p_vmm_cpulock, olock, nlock))
                        break;
                lwkt_process_ipiq();
                cpu_pause();
        }

        /*
         * Wait for other cpus to exit VMM mode (for this vkernel).  No
         * new cpus will enter VMM mode while we hold the lock.  New
         * waiters may turn up, though, so the wakeup() later on has to
         * be unconditional.
         *
         * We must test on p_vmm_cpulock's counter, not the mask, because
         * VMM entries will set the mask bit unconditionally first
         * (interlocking our IPI below) and then conditionally bump the
         * counter.  (An illustrative sketch of the entry-side interlock
         * follows this function.)
         */
        if (olock & CPULOCK_CNTMASK) {
                mask = p->p_vmm_cpumask;
                CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
                lwkt_send_ipiq_mask(mask, vmm_exit_vmm, NULL);
                while (p->p_vmm_cpulock & CPULOCK_CNTMASK) {
                        lwkt_process_ipiq();
                        cpu_pause();
                }
        }

        /*
         * Make the requested modification, wakeup any waiters.
         */
        error = copyin(uap->srcaddr, &val, sizeof(long));
        if (error == 0)
                error = copyout(&val, uap->dstaddr, sizeof(long));

        atomic_clear_int(&p->p_vmm_cpulock, CPULOCK_EXCL);
        wakeup(&p->p_vmm_cpulock);

        crit_exit_id("vmm_inval");

        return (error);
}
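
/*
 * Illustrative sketch, not part of the original file: the entry-side
 * interlock described in the comment inside sys_vmm_guest_sync_addr()
 * above.  A cpu entering VMM mode sets its mask bit unconditionally
 * first, then bumps the CPULOCK_CNTMASK counter only when no exclusive
 * holder is present; otherwise it backs off and waits for the
 * unconditional wakeup().  The function name, the tsleep() wait, and
 * the exact back-off are assumptions; the real entry code lives in the
 * machine-dependent VMM layer.
 */
#if 0
static void
vmm_entry_interlock(struct proc *p)
{
        cpulock_t olock;
        cpulock_t nlock;

        /* set our mask bit unconditionally first (interlocks the IPI) */
        ATOMIC_CPUMASK_ORMASK(p->p_vmm_cpumask, mycpu->gd_cpumask);

        for (;;) {
                olock = p->p_vmm_cpulock;
                cpu_ccfence();
                if ((olock & CPULOCK_EXCL) == 0) {
                        nlock = olock + CPULOCK_INCR;
                        if (atomic_cmpset_int(&p->p_vmm_cpulock,
                                              olock, nlock)) {
                                break;  /* counter bumped, enter VMM mode */
                        }
                } else {
                        /* exclusive holder present; wait for wakeup() */
                        tsleep(&p->p_vmm_cpulock, 0, "vmmint", 1);
                }
                cpu_pause();
        }
}
#endif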