/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vm/vm_vmspace.c,v 1.4 2006/10/20 17:02:09 dillon Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>
#include <sys/spinlock2.h>

#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_common *vc,
						  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
				 struct vkernel_common *vc);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");
/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used.  The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty.  Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 */
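/*
 * Illustrative userland call sequence (a sketch, not taken from this file;
 * the token, address names, and flag combinations below are hypothetical):
 *
 *	void *id = &some_token;
 *	vmspace_create(id, 0, NULL);
 *	vmspace_mmap(id, guest_addr, guest_len, PROT_READ|PROT_WRITE,
 *		     MAP_VPAGETABLE|MAP_ANON, -1, 0);
 *	vmspace_ctl(id, VMSPACE_CTL_RUN, &tframe, sizeof(tframe), 0);
 *	vmspace_destroy(id);
 */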
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
	struct vkernel_common *vc;
	struct vmspace_entry *ve;
	struct vkernel *vk;

	if (vkernel_enable == 0)
		return (EOPNOTSUPP);

	/*
	 * Create a virtual kernel side-structure for the process if one
	 * does not exist yet.
	 */
	if ((vk = curproc->p_vkernel) == NULL) {
		vk = kmalloc(sizeof(*vk), M_VKERNEL, M_WAITOK|M_ZERO);
		vc = kmalloc(sizeof(*vc), M_VKERNEL, M_WAITOK|M_ZERO);
		vc->vc_refs = 1;
		spin_init(&vc->vc_spin);
		RB_INIT(&vc->vc_root);
		vk->vk_common = vc;
		curproc->p_vkernel = vk;
	} else {
		vc = vk->vk_common;
	}

	/*
	 * Create a new VMSPACE, fail if the id is already in use.
	 */
	if (vkernel_find_vmspace(vc, uap->id))
		return (EEXIST);
	ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL, M_WAITOK|M_ZERO);
	ve->vmspace = vmspace_alloc(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	ve->id = uap->id;
	pmap_pinit2(vmspace_pmap(ve->vmspace));
	RB_INSERT(vmspace_rb_tree, &vc->vc_root, ve);
	return (0);
}
/*
 * vmspace_destroy (void *id)
 *
 * Destroy a VMSPACE.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
	struct vkernel_common *vc;
	struct vmspace_entry *ve;
	struct vkernel *vk;

	if ((vk = curproc->p_vkernel) == NULL)
		return (EINVAL);
	vc = vk->vk_common;
	if ((ve = vkernel_find_vmspace(vc, uap->id)) == NULL)
		return (ENOENT);
	if (ve->refs != 0)
		return (EBUSY);
	vmspace_entry_delete(ve, vc);
	return (0);
}
/*
 * vmspace_ctl (void *id, int cmd, void *ctx, int ctx_bytes, int timeout_us)
 *
 * Transfer control to a VMSPACE.  Control is returned after the specified
 * number of microseconds or if a page fault, signal, trap, or system call
 * occurs.  The context is updated as appropriate.
 */
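/*
 * How control comes back: VMSPACE_CTL_RUN saves the caller's trapframe in
 * vk_save_frame, installs the frame passed in uap->ctx, and returns
 * EJUSTRETURN.  When the target VMSPACE faults, traps, or issues a system
 * call, vkernel_trap() (below) copies the current frame out to
 * vk_user_frame and restores vk_save_frame, making it appear to the
 * virtual kernel that its vmspace_ctl() call simply returned.
 */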
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
	struct vkernel_common *vc;
	struct vmspace_entry *ve;
	struct vkernel *vk;
	struct proc *p;
	int framesz;
	int error;

	if ((vk = curproc->p_vkernel) == NULL)
		return (EINVAL);
	vc = vk->vk_common;
	if ((ve = vkernel_find_vmspace(vc, uap->id)) == NULL)
		return (ENOENT);

	switch(uap->cmd) {
	case VMSPACE_CTL_RUN:
		/*
		 * Save the caller's register context, swap VM spaces, and
		 * install the passed register context.  Return with
		 * EJUSTRETURN so the syscall code doesn't adjust the context.
		 */
		p = curproc;
		++ve->refs;
		framesz = sizeof(struct trapframe);
		vk->vk_current = ve;
		vk->vk_save_vmspace = p->p_vmspace;
		vk->vk_user_frame = uap->ctx;
		bcopy(uap->sysmsg_frame, &vk->vk_save_frame, framesz);
		error = copyin(uap->ctx, uap->sysmsg_frame, framesz);
		if (error == 0)
			error = cpu_sanitize_frame(uap->sysmsg_frame);
		if (error) {
			bcopy(&vk->vk_save_frame, uap->sysmsg_frame, framesz);
			vk->vk_current = NULL;
			vk->vk_save_vmspace = NULL;
			--ve->refs;
		} else {
			pmap_deactivate(p);
			p->p_vmspace = ve->vmspace;
			pmap_activate(p);
			error = EJUSTRETURN;
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * map memory within a VMSPACE.  This function is just like a normal mmap()
 * but operates on the vmspace's memory map.  Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
	struct vkernel_common *vc;
	struct vmspace_entry *ve;
	struct vkernel *vk;
	int error;

	if ((vk = curproc->p_vkernel) == NULL)
		return (EINVAL);
	vc = vk->vk_common;
	if ((ve = vkernel_find_vmspace(vc, uap->id)) == NULL)
		return (ENOENT);
	error = kern_mmap(ve->vmspace, uap->addr, uap->len,
			  uap->prot, uap->flags,
			  uap->fd, uap->offset, &uap->sysmsg_resultp);
	return (error);
}
/*
 * vmspace_munmap(id, addr, len)
 *
 * unmap memory within a VMSPACE.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
	struct vkernel_common *vc;
	struct vmspace_entry *ve;
	struct vkernel *vk;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	if ((vk = curproc->p_vkernel) == NULL)
		return (EINVAL);
	vc = vk->vk_common;
	if ((ve = vkernel_find_vmspace(vc, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * Copied from sys_munmap()
	 */
	addr = (vm_offset_t)uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t)round_page(size);
	if (addr + size < addr)
		return (EINVAL);
	if (size == 0)
		return (0);

	if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
		return (EINVAL);
	map = &ve->vmspace->vm_map;
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);
	vm_map_remove(map, addr, addr + size);
	return (0);
}
/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace.  The number of bytes read is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes read is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
	struct vkernel_common *vc;
	struct vmspace_entry *ve;
	struct vkernel *vk;

	if ((vk = curproc->p_vkernel) == NULL)
		return (EINVAL);
	vc = vk->vk_common;
	if ((ve = vkernel_find_vmspace(vc, uap->id)) == NULL)
		return (ENOENT);
	return (EINVAL);
}
/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace.  The number of bytes written is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes written is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
	struct vkernel_common *vc;
	struct vmspace_entry *ve;
	struct vkernel *vk;

	if ((vk = curproc->p_vkernel) == NULL)
		return (EINVAL);
	vc = vk->vk_common;
	if ((ve = vkernel_find_vmspace(vc, uap->id)) == NULL)
		return (ENOENT);
	return (EINVAL);
}
/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 */
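/*
 * Sketch of a typical call (hypothetical values; assumes the virtual kernel
 * uses MADV_SETMAP to point a MAP_VPAGETABLE region at its top-level page
 * table, which is the usual DragonFly convention):
 *
 *	vmspace_mcontrol(id, guest_base, guest_len,
 *			 MADV_SETMAP, page_table_root);
 */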
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
	struct vkernel_common *vc;
	struct vmspace_entry *ve;
	struct vkernel *vk;
	vm_offset_t start, end;

	if ((vk = curproc->p_vkernel) == NULL)
		return (EINVAL);
	vc = vk->vk_common;
	if ((ve = vkernel_find_vmspace(vc, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * This code is basically copied from sys_mcontrol()
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
		return (EINVAL);

	if (VM_MAXUSER_ADDRESS > 0 &&
	    ((vm_offset_t)uap->addr + uap->len) > VM_MAXUSER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_ADDRESS > 0 && (vm_offset_t)uap->addr < VM_MIN_ADDRESS)
		return (EINVAL);
	if (((vm_offset_t)uap->addr + uap->len) < (vm_offset_t)uap->addr)
		return (EINVAL);

	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page((vm_offset_t)uap->addr + uap->len);

	return (vm_map_madvise(&ve->vmspace->vm_map, start, end,
			       uap->behav, uap->value));
}
/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);

/* a->id is the address key, and is the only field that has to be initialized */
static int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
	if ((char *)a->id < (char *)b->id)
		return(-1);
	else if ((char *)a->id > (char *)b->id)
		return(1);
	return(0);
}
static int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
	struct vkernel_common *vc = data;

	KKASSERT(ve->refs == 0);
	vmspace_entry_delete(ve, vc);
	return(0);
}
/*
 * Remove a vmspace_entry from the RB tree and destroy it.  We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 */
static void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_common *vc)
{
	RB_REMOVE(vmspace_rb_tree, &vc->vc_root, ve);

	pmap_remove_pages(vmspace_pmap(ve->vmspace),
			  VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm_map_remove(&ve->vmspace->vm_map,
		      VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vmspace_free(ve->vmspace);
	kfree(ve, M_VKERNEL);
}
static
struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_common *vc, void *id)
{
	struct vmspace_entry *ve;
	struct vmspace_entry key;

	key.id = id;
	ve = RB_FIND(vmspace_rb_tree, &vc->vc_root, &key);
	return (ve);
}
/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 */
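/*
 * Reference model in brief: every process using the facility owns a
 * struct vkernel which points at a shared vkernel_common; vc_refs counts
 * those processes.  vkernel_inherit() bumps the count on fork(),
 * vkernel_exit() drops it, and the last drop destroys every registered
 * vmspace_entry via rb_vmspace_delete().
 */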
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
	struct vkernel_common *vc;
	struct vkernel *vk;

	vk = p1->p_vkernel;
	vc = vk->vk_common;
	KKASSERT(vc->vc_refs > 0);
	atomic_add_int(&vc->vc_refs, 1);
	vk = kmalloc(sizeof(*vk), M_VKERNEL, M_WAITOK|M_ZERO);
	vk->vk_common = vc;
	p2->p_vkernel = vk;
}
void
vkernel_exit(struct proc *p)
{
	struct vkernel_common *vc;
	struct vmspace_entry *ve;
	struct vkernel *vk;
	int freeme = 0;

	vk = p->p_vkernel;
	p->p_vkernel = NULL;
	vc = vk->vk_common;
	vk->vk_common = NULL;

	/*
	 * Restore the original VM context if we are killed while running
	 * a different one.
	 */
	if ((ve = vk->vk_current) != NULL) {
		printf("killed with active VC\n");
		vk->vk_current = NULL;
		pmap_deactivate(p);
		p->p_vmspace = vk->vk_save_vmspace;
		pmap_activate(p);
		vk->vk_save_vmspace = NULL;
		KKASSERT(ve->refs > 0);
		--ve->refs;
	}

	/*
	 * Dereference the common area
	 */
	KKASSERT(vc->vc_refs > 0);
	spin_lock_wr(&vc->vc_spin);
	if (--vc->vc_refs == 0)
		freeme = 1;
	spin_unlock_wr(&vc->vc_spin);

	if (freeme) {
		RB_SCAN(vmspace_rb_tree, &vc->vc_root, NULL,
			rb_vmspace_delete, vc);
		kfree(vc, M_VKERNEL);
	}
	kfree(vk, M_VKERNEL);
}
/*
 * A VM space under virtual kernel control trapped out or made a system call
 * or otherwise needs to return control to the virtual kernel context.
 */
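/*
 * This is the other half of VMSPACE_CTL_RUN: restore the virtual kernel's
 * saved vmspace, copy the target's register state out to vk_user_frame,
 * and put the saved syscall frame back so the original vmspace_ctl()
 * returns to the virtual kernel.
 */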
int
vkernel_trap(struct proc *p, struct trapframe *frame)
{
	struct vmspace_entry *ve;
	struct vkernel *vk;
	int error;

	printf("trap for vkernel type %d wm=%d\n",
		frame->tf_trapno & 0x7FFFFFFF,
		((frame->tf_trapno & 0x80000000) ? 1 : 0));

	/*
	 * Which vmspace entry was running?
	 */
	vk = p->p_vkernel;
	ve = vk->vk_current;
	vk->vk_current = NULL;
	KKASSERT(ve != NULL);

	/*
	 * Switch the process context back to the virtual kernel's VM space.
	 */
	pmap_deactivate(p);
	p->p_vmspace = vk->vk_save_vmspace;
	pmap_activate(p);
	vk->vk_save_vmspace = NULL;
	KKASSERT(ve->refs > 0);
	--ve->refs;

	/*
	 * Copy the trapframe to the virtual kernel's userspace, then
	 * restore the virtual kernel's original syscall trap frame so we
	 * can 'return' from the system call that ran the custom VM space.
	 */
	error = copyout(frame, vk->vk_user_frame, sizeof(*frame));
	bcopy(&vk->vk_save_frame, frame, sizeof(*frame));
	return (error);
}