kernel - remove debugging print_backtrace() when a vkernel is killed
[dragonfly.git] / sys / vm / vm_vmspace.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>

#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>

#include <sys/sysref2.h>
#include <sys/mplock2.h>

static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_proc *vkp,
						  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
				 struct vkernel_proc *vkp);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");

/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used.  The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty.  Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 *
 * No requirements.
 */
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
	struct vmspace_entry *ve;
	struct vkernel_proc *vkp;
	struct proc *p = curproc;
	int error;

	if (vkernel_enable == 0)
		return (EOPNOTSUPP);

	/*
	 * Create a virtual kernel side-structure for the process if one
	 * does not exist.
	 *
	 * Implement a simple resolution for SMP races.
	 */
	if ((vkp = p->p_vkernel) == NULL) {
		vkp = kmalloc(sizeof(*vkp), M_VKERNEL, M_WAITOK|M_ZERO);
		lwkt_gettoken(&proc_token);
		if (p->p_vkernel == NULL) {
			vkp->refs = 1;
			lwkt_token_init(&vkp->token, "vkernel");
			RB_INIT(&vkp->root);
			p->p_vkernel = vkp;
		} else {
			kfree(vkp, M_VKERNEL);
			vkp = p->p_vkernel;
		}
		lwkt_reltoken(&proc_token);
	}

	get_mplock();

	/*
	 * Create a new VMSPACE, disallow conflicting ids
	 */
	ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL, M_WAITOK|M_ZERO);
	ve->vmspace = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	ve->id = uap->id;
	pmap_pinit2(vmspace_pmap(ve->vmspace));

	lwkt_gettoken(&vkp->token);
	if (RB_INSERT(vmspace_rb_tree, &vkp->root, ve)) {
		sysref_put(&ve->vmspace->vm_sysref);
		kfree(ve, M_VKERNEL);
		error = EEXIST;
	} else {
		error = 0;
	}
	lwkt_reltoken(&vkp->token);
	rel_mplock();
	return (error);
}

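/*
 * Usage sketch (editor's illustration, not part of the original source):
 * from a vkernel process, a guest address space is created and torn down
 * roughly as below, using the syscall signatures documented in this file.
 * The choice of id (any unique non-NULL pointer) and the err() handling
 * are assumptions.
 *
 *	void *id = &guest;			// any unique non-NULL id
 *	if (vmspace_create(id, 0, NULL) < 0)	// type and data must be 0
 *		err(1, "vmspace_create");
 *	...
 *	if (vmspace_destroy(id) < 0)		// EBUSY while still in use
 *		err(1, "vmspace_destroy");
 */
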
/*
 * Destroy a VMSPACE given its identifier.
 *
 * No requirements.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}
	if (ve->refs) {
		error = EBUSY;
		goto done2;
	}
	vmspace_entry_delete(ve, vkp);
	error = 0;
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return(error);
}

/*
 * vmspace_ctl (void *id, int cmd, struct trapframe *tframe,
 *		struct vextframe *vframe);
 *
 * Transfer control to a VMSPACE.  Control is returned after the specified
 * number of microseconds or if a page fault, signal, trap, or system call
 * occurs.  The context is updated as appropriate.
 *
 * No requirements.
 */
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
	struct vkernel_proc *vkp;
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;
	struct lwp *lp;
	struct proc *p;
	int framesz;
	int error;

	lp = curthread->td_lwp;
	p = lp->lwp_proc;

	if ((vkp = p->p_vkernel) == NULL)
		return (EINVAL);

	get_mplock();
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done;
	}

	/*
	 * Signal mailbox interlock
	 */
	if (p->p_flag & P_MAILBOX) {
		p->p_flag &= ~P_MAILBOX;
		error = EINTR;
		goto done;
	}

	switch(uap->cmd) {
	case VMSPACE_CTL_RUN:
		/*
		 * Save the caller's register context, swap VM spaces, and
		 * install the passed register context.  Return with
		 * EJUSTRETURN so the syscall code doesn't adjust the context.
		 */
		atomic_add_int(&ve->refs, 1);
		framesz = sizeof(struct trapframe);
		if ((vklp = lp->lwp_vkernel) == NULL) {
			vklp = kmalloc(sizeof(*vklp), M_VKERNEL,
				       M_WAITOK|M_ZERO);
			lp->lwp_vkernel = vklp;
		}
		vklp->user_trapframe = uap->tframe;
		vklp->user_vextframe = uap->vframe;
		bcopy(uap->sysmsg_frame, &vklp->save_trapframe, framesz);
		bcopy(&curthread->td_tls, &vklp->save_vextframe.vx_tls,
		      sizeof(vklp->save_vextframe.vx_tls));
		error = copyin(uap->tframe, uap->sysmsg_frame, framesz);
		if (error == 0) {
			error = copyin(&uap->vframe->vx_tls,
				       &curthread->td_tls,
				       sizeof(struct savetls));
		}
		if (error == 0)
			error = cpu_sanitize_frame(uap->sysmsg_frame);
		if (error == 0)
			error = cpu_sanitize_tls(&curthread->td_tls);
		if (error) {
			bcopy(&vklp->save_trapframe, uap->sysmsg_frame,
			      framesz);
			bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
			      sizeof(vklp->save_vextframe.vx_tls));
			set_user_TLS();
			atomic_subtract_int(&ve->refs, 1);
		} else {
			vklp->ve = ve;
			pmap_setlwpvm(lp, ve->vmspace);
			set_user_TLS();
			set_vkernel_fp(uap->sysmsg_frame);
			error = EJUSTRETURN;
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
done:
	lwkt_reltoken(&vkp->token);
	rel_mplock();
	return(error);
}

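/*
 * Usage sketch (editor's illustration): a vkernel's run loop resumes the
 * guest with VMSPACE_CTL_RUN and regains control when the guest faults,
 * traps, or makes a system call, at which point tframe/vframe hold the
 * guest's context (copied out by vkernel_trap() below).  The helper
 * functions are hypothetical.
 *
 *	struct trapframe tf;		// guest register context
 *	struct vextframe vf;		// guest TLS context
 *
 *	setup_guest_context(&tf, &vf);	// hypothetical setup helper
 *	for (;;) {
 *		vmspace_ctl(id, VMSPACE_CTL_RUN, &tf, &vf);
 *		handle_guest_exit(&tf);	// hypothetical fault/syscall dispatch
 *	}
 */
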
/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * map memory within a VMSPACE.  This function is just like a normal mmap()
 * but operates on the vmspace's memory map.  Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 *
 * No requirements.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	/*
	 * We hold the vmspace token to serialize calls to vkernel_find_vmspace
	 * and the vm token to serialize calls to kern_mmap.
	 */
	lwkt_gettoken(&vm_token);
	lwkt_gettoken(&vmspace_token);
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}

	/*
	 * NOTE: kern_mmap() can block so we need to temporarily ref ve->refs.
	 */
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) != NULL) {
		atomic_add_int(&ve->refs, 1);
		error = kern_mmap(ve->vmspace, uap->addr, uap->len,
				  uap->prot, uap->flags,
				  uap->fd, uap->offset, &uap->sysmsg_resultp);
		atomic_subtract_int(&ve->refs, 1);
	} else {
		error = ENOENT;
	}
	lwkt_reltoken(&vkp->token);
done3:
	lwkt_reltoken(&vmspace_token);
	lwkt_reltoken(&vm_token);
	return (error);
}

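/*
 * Usage sketch (editor's illustration): backing a guest's "physical"
 * memory with a virtual-page-table mapping inside the VMSPACE.  The
 * memory-image fd, the size constant, and the assumption that the
 * userland wrapper returns MAP_FAILED on error like mmap() are all
 * illustrative.
 *
 *	void *base;
 *
 *	base = vmspace_mmap(id, NULL, GUEST_RAM_SIZE,
 *			    PROT_READ|PROT_WRITE,
 *			    MAP_VPAGETABLE|MAP_SHARED,
 *			    mem_fd, 0);
 *	if (base == MAP_FAILED)
 *		err(1, "vmspace_mmap");
 */
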
/*
 * vmspace_munmap(id, addr, len)
 *
 * unmap memory within a VMSPACE.
 *
 * No requirements.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_map_t map;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}

	/*
	 * NOTE: kern_munmap() can block so we need to temporarily
	 *	 ref ve->refs.
	 */
	atomic_add_int(&ve->refs, 1);

	/*
	 * Copied from sys_munmap()
	 */
	addr = (vm_offset_t)uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t)round_page(size);
	if (size < uap->len) {		/* wrap */
		error = EINVAL;
		goto done1;
	}
	tmpaddr = addr + size;		/* workaround gcc4 opt */
	if (tmpaddr < addr) {		/* wrap */
		error = EINVAL;
		goto done1;
	}
	if (size == 0) {
		error = 0;
		goto done1;
	}

	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}
	map = &ve->vmspace->vm_map;
	if (!vm_map_check_protection(map, addr, tmpaddr, VM_PROT_NONE, FALSE)) {
		error = EINVAL;
		goto done1;
	}
	vm_map_remove(map, addr, addr + size);
	error = 0;
done1:
	atomic_subtract_int(&ve->refs, 1);
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return (error);
}

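/*
 * Worked example of the page-rounding logic in sys_vmspace_munmap() above
 * (editor's illustration, assuming a 4KB PAGE_SIZE): for uap->addr = 0x1234
 * and uap->len = 0x100,
 *
 *	pageoff = 0x1234 & 0xfff	  = 0x234
 *	addr    = 0x1234 - 0x234	  = 0x1000
 *	size    = round_page(0x100+0x234) = 0x1000
 *
 * so the whole page containing the range, [0x1000, 0x2000), is removed.
 */
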
/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace.  The number of bytes read is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes read is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (not implemented yet)
 * No requirements.
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}
	error = EINVAL;
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return (error);
}

/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace.  The number of bytes written is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes written is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (not implemented yet)
 * No requirements.
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}
	error = EINVAL;
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return (error);
}

/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 *
 * No requirements.
 */
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	get_mplock();
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}

	/*
	 * NOTE: kern_madvise() can block so we need to temporarily
	 *	 ref ve->refs.
	 */
	atomic_add_int(&ve->refs, 1);

	/*
	 * This code is basically copied from sys_mcontrol()
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END) {
		error = EINVAL;
		goto done1;
	}

	if (tmpaddr < (vm_offset_t)uap->addr) {
		error = EINVAL;
		goto done1;
	}
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}

	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&ve->vmspace->vm_map, start, end,
			       uap->behav, uap->value);
done1:
	atomic_subtract_int(&ve->refs, 1);
done2:
	lwkt_reltoken(&vkp->token);
done3:
	rel_mplock();
	return (error);
}

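/*
 * Usage sketch (editor's illustration): a vkernel typically points a
 * MAP_VPAGETABLE mapping at the guest's top-level page table with
 * MADV_SETMAP and flushes stale translations with MADV_INVAL after
 * editing guest page tables.  MADV_SETMAP and MADV_INVAL are DragonFly
 * madvise/mcontrol behaviors; the exact value semantics, addresses, and
 * sizes below are assumptions.
 *
 *	// point the mapping at the guest's top-level page table
 *	vmspace_mcontrol(id, guest_base, GUEST_RAM_SIZE,
 *			 MADV_SETMAP, guest_pdir_offset);
 *	// after modifying guest page tables, invalidate cached mappings
 *	vmspace_mcontrol(id, guest_base, GUEST_RAM_SIZE, MADV_INVAL, 0);
 */
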
/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);

/*
 * a->id is the key, and is the only field that has to be initialized.
 *
 * The caller must hold vkp->token.
 */
static int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
	if ((char *)a->id < (char *)b->id)
		return(-1);
	else if ((char *)a->id > (char *)b->id)
		return(1);
	return(0);
}

/*
 * The caller must hold vkp->token.
 */
static
int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
	struct vkernel_proc *vkp = data;

	KKASSERT(ve->refs == 0);
	vmspace_entry_delete(ve, vkp);
	return(0);
}

/*
 * Remove a vmspace_entry from the RB tree and destroy it.  We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 *
 * This function must remove the ve immediately before it might potentially
 * block.
 *
 * The caller must hold vkp->token.
 */
static
void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_proc *vkp)
{
	RB_REMOVE(vmspace_rb_tree, &vkp->root, ve);

	pmap_remove_pages(vmspace_pmap(ve->vmspace),
			  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	vm_map_remove(&ve->vmspace->vm_map,
		      VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	sysref_put(&ve->vmspace->vm_sysref);
	kfree(ve, M_VKERNEL);
}

/*
 * Locate the ve for (id), return the ve or NULL.  This function does not
 * ref the ve; callers that may block must bump ve->refs themselves, which
 * prevents the ve from being immediately destroyed (but it can still be
 * removed).
 *
 * The caller must hold vkp->token.
 */
static
struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_proc *vkp, void *id)
{
	struct vmspace_entry *ve;
	struct vmspace_entry key;

	key.id = id;
	ve = RB_FIND(vmspace_rb_tree, &vkp->root, &key);
	return (ve);
}

/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 *
 * No requirements.
 */
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
	struct vkernel_proc *vkp;

	vkp = p1->p_vkernel;
	KKASSERT(vkp->refs > 0);
	atomic_add_int(&vkp->refs, 1);
	p2->p_vkernel = vkp;
}

/*
 * No requirements.
 */
void
vkernel_exit(struct proc *p)
{
	struct vkernel_proc *vkp;
	struct lwp *lp;

	vkp = p->p_vkernel;

	/*
	 * Restore the original VM context if we are killed while running
	 * a different one.
	 *
	 * This isn't supposed to happen.  What is supposed to happen is
	 * that the process should enter vkernel_trap() before handling
	 * the signal.
	 */
	RB_FOREACH(lp, lwp_rb_tree, &p->p_lwp_tree) {
		vkernel_lwp_exit(lp);
	}

	/*
	 * Dereference the common area
	 */
	p->p_vkernel = NULL;
	KKASSERT(vkp->refs > 0);

	if (atomic_fetchadd_int(&vkp->refs, -1) == 1) {
		lwkt_gettoken(&vkp->token);
		RB_SCAN(vmspace_rb_tree, &vkp->root, NULL,
			rb_vmspace_delete, vkp);
		lwkt_reltoken(&vkp->token);
		kfree(vkp, M_VKERNEL);
	}
}

/*
 * No requirements.
 */
void
vkernel_lwp_exit(struct lwp *lp)
{
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;

	if ((vklp = lp->lwp_vkernel) != NULL) {
		if ((ve = vklp->ve) != NULL) {
			kprintf("Warning, pid %d killed with "
				"active VC!\n", lp->lwp_proc->p_pid);
			pmap_setlwpvm(lp, lp->lwp_proc->p_vmspace);
			vklp->ve = NULL;
			KKASSERT(ve->refs > 0);
			atomic_subtract_int(&ve->refs, 1);
		}
		lp->lwp_vkernel = NULL;
		kfree(vklp, M_VKERNEL);
	}
}

/*
 * A VM space under virtual kernel control trapped out or made a system call
 * or otherwise needs to return control to the virtual kernel context.
 *
 * No requirements.
 */
void
vkernel_trap(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;
	struct vmspace_entry *ve;
	struct vkernel_lwp *vklp;
	int error;

	/*
	 * Which vmspace entry was running?
	 */
	vklp = lp->lwp_vkernel;
	KKASSERT(vklp);
	ve = vklp->ve;
	KKASSERT(ve != NULL);

	/*
	 * Switch the LWP vmspace back to the virtual kernel's VM space.
	 */
	vklp->ve = NULL;
	pmap_setlwpvm(lp, p->p_vmspace);
	KKASSERT(ve->refs > 0);
	atomic_subtract_int(&ve->refs, 1);
	/* ve is invalid once we kill our ref */

	/*
	 * Copy the emulated process frame to the virtual kernel process.
	 * The emulated process cannot change TLS descriptors so don't
	 * bother saving them, we already have a copy.
	 *
	 * Restore the virtual kernel's saved context so the virtual kernel
	 * process can resume.
	 */
	error = copyout(frame, vklp->user_trapframe, sizeof(*frame));
	bcopy(&vklp->save_trapframe, frame, sizeof(*frame));
	bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
	      sizeof(vklp->save_vextframe.vx_tls));
	set_user_TLS();
	cpu_vkernel_trap(frame, error);
}