AMD64 - Refactor uio_resid and size_t assumptions.
[dragonfly.git] / sys / vm / vm_vmspace.c
/*
 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vm/vm_vmspace.c,v 1.14 2007/08/15 03:15:07 dillon Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>

#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>

#include <sys/spinlock2.h>
#include <sys/sysref2.h>

static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_proc *vkp,
                                                  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
                                 struct vkernel_proc *vkp);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");

/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used. The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty. Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 */
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
        struct vmspace_entry *ve;
        struct vkernel_proc *vkp;

        if (vkernel_enable == 0)
                return (EOPNOTSUPP);

        /*
         * Create a virtual kernel side-structure for the process if one
         * does not exist.
         */
        if ((vkp = curproc->p_vkernel) == NULL) {
                vkp = kmalloc(sizeof(*vkp), M_VKERNEL, M_WAITOK|M_ZERO);
                vkp->refs = 1;
                spin_init(&vkp->spin);
                RB_INIT(&vkp->root);
                curproc->p_vkernel = vkp;
        }

        /*
         * Create a new VMSPACE
         */
        if (vkernel_find_vmspace(vkp, uap->id))
                return (EEXIST);
        ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL, M_WAITOK|M_ZERO);
        ve->vmspace = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        ve->id = uap->id;
        pmap_pinit2(vmspace_pmap(ve->vmspace));
        RB_INSERT(vmspace_rb_tree, &vkp->root, ve);
        return (0);
}
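
/*
 * Illustrative sketch (not part of this file): how a virtual kernel
 * process might drive this API from userland. The wrapper prototypes
 * live in <sys/vmspace.h>; the id/guest_* names, sizes, and error
 * handling below are hypothetical.
 *
 *      void *id = guest_base;                  // any unique non-NULL token
 *      if (vmspace_create(id, 0, NULL) < 0)    // type and data must be 0
 *              err(1, "vmspace_create");
 *      // Map backing memory into the empty vmspace, typically with
 *      // MAP_VPAGETABLE so the guest can manage its own page tables.
 *      vmspace_mmap(id, NULL, GUEST_SIZE, PROT_READ|PROT_WRITE,
 *                   MAP_VPAGETABLE|MAP_SHARED, guest_fd, 0);
 *      // Transfer control to the guest until it faults, traps, or
 *      // makes a system call.
 *      vmspace_ctl(id, VMSPACE_CTL_RUN, &guest_tframe, &guest_vframe);
 */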

/*
 * vmspace_destroy (void *id)
 *
 * Destroy a VMSPACE.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);
        if (ve->refs)
                return (EBUSY);
        vmspace_entry_delete(ve, vkp);
        return(0);
}

/*
 * vmspace_ctl (void *id, int cmd, struct trapframe *tframe,
 *              struct vextframe *vframe);
 *
 * Transfer control to a VMSPACE. Control is returned after the specified
 * number of microseconds or if a page fault, signal, trap, or system call
 * occurs. The context is updated as appropriate.
 */
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
        struct vkernel_proc *vkp;
        struct vkernel_lwp *vklp;
        struct vmspace_entry *ve;
        struct lwp *lp;
        struct proc *p;
        int framesz;
        int error;

        lp = curthread->td_lwp;
        p = lp->lwp_proc;

        if ((vkp = p->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);

        /*
         * Signal mailbox interlock
         */
        if (p->p_flag & P_MAILBOX) {
                p->p_flag &= ~P_MAILBOX;
                return (EINTR);
        }

        switch(uap->cmd) {
        case VMSPACE_CTL_RUN:
                /*
                 * Save the caller's register context, swap VM spaces, and
                 * install the passed register context. Return with
                 * EJUSTRETURN so the syscall code doesn't adjust the context.
                 */
                atomic_add_int(&ve->refs, 1);
                framesz = sizeof(struct trapframe);
                if ((vklp = lp->lwp_vkernel) == NULL) {
                        vklp = kmalloc(sizeof(*vklp), M_VKERNEL,
                                       M_WAITOK|M_ZERO);
                        lp->lwp_vkernel = vklp;
                }
                vklp->user_trapframe = uap->tframe;
                vklp->user_vextframe = uap->vframe;
                bcopy(uap->sysmsg_frame, &vklp->save_trapframe, framesz);
                bcopy(&curthread->td_tls, &vklp->save_vextframe.vx_tls,
                      sizeof(vklp->save_vextframe.vx_tls));
                error = copyin(uap->tframe, uap->sysmsg_frame, framesz);
                if (error == 0)
                        error = copyin(&uap->vframe->vx_tls, &curthread->td_tls, sizeof(struct savetls));
                if (error == 0)
                        error = cpu_sanitize_frame(uap->sysmsg_frame);
                if (error == 0)
                        error = cpu_sanitize_tls(&curthread->td_tls);
                if (error) {
                        bcopy(&vklp->save_trapframe, uap->sysmsg_frame, framesz);
                        bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
                              sizeof(vklp->save_vextframe.vx_tls));
                        set_user_TLS();
                        atomic_subtract_int(&ve->refs, 1);
                } else {
                        vklp->ve = ve;
                        pmap_setlwpvm(lp, ve->vmspace);
                        set_user_TLS();
                        set_vkernel_fp(uap->sysmsg_frame);
                        error = EJUSTRETURN;
                }
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return(error);
}
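
/*
 * Illustrative sketch (not part of this file): a virtual kernel normally
 * wraps VMSPACE_CTL_RUN in a loop. When vmspace_ctl() returns, the guest's
 * register state has been copied back into the tframe that was passed in
 * (see vkernel_trap() below) and the virtual kernel dispatches on it before
 * running the guest again. The names below are hypothetical.
 *
 *      for (;;) {
 *              vmspace_ctl(id, VMSPACE_CTL_RUN, &guest_tframe, &guest_vframe);
 *              // guest_tframe now holds the guest state at the point of
 *              // the page fault, trap, signal, or system call.
 *              handle_guest_exit(&guest_tframe);
 *      }
 */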

/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * map memory within a VMSPACE. This function is just like a normal mmap()
 * but operates on the vmspace's memory map. Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        int error;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);
        error = kern_mmap(ve->vmspace, uap->addr, uap->len,
                          uap->prot, uap->flags,
                          uap->fd, uap->offset, &uap->sysmsg_resultp);
        return (error);
}

/*
 * vmspace_munmap(id, addr, len)
 *
 * unmap memory within a VMSPACE.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        vm_offset_t addr;
        vm_offset_t tmpaddr;
        vm_size_t size, pageoff;
        vm_map_t map;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);

        /*
         * Copied from sys_munmap()
         */
        addr = (vm_offset_t)uap->addr;
        size = uap->len;

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t)round_page(size);
        if (size < uap->len)            /* wrap */
                return (EINVAL);
        tmpaddr = addr + size;          /* workaround gcc4 opt */
        if (tmpaddr < addr)             /* wrap */
                return (EINVAL);
        if (size == 0)
                return (0);

        if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
                return (EINVAL);
        if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
                return (EINVAL);
        map = &ve->vmspace->vm_map;
        if (!vm_map_check_protection(map, addr, tmpaddr, VM_PROT_NONE))
                return (EINVAL);
        vm_map_remove(map, addr, addr + size);
        return (0);
}
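
/*
 * Worked example of the rounding logic above, assuming PAGE_SIZE is 4096:
 * with uap->addr = 0x10010 and uap->len = 0x20, pageoff is 0x10, addr is
 * rounded down to 0x10000, and size becomes round_page(0x30) = 0x1000, so
 * the entire page containing the request is unmapped. The "size < uap->len"
 * and "tmpaddr < addr" checks reject requests whose rounded size or end
 * address would wrap past the top of the address space.
 */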

/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace. The number of bytes read is returned or
 * -1 if an unrecoverable error occurred. If the number of bytes read is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (Not yet implemented; currently returns EINVAL.)
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);
        return (EINVAL);
}

/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace. The number of bytes written is returned or
 * -1 if an unrecoverable error occurred. If the number of bytes written is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (Not yet implemented; currently returns EINVAL.)
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);
        return (EINVAL);
}

/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 */
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        vm_offset_t start, end;
        vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);

        /*
         * This code is basically copied from sys_mcontrol()
         */
        if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
                return (EINVAL);

        if (tmpaddr < (vm_offset_t)uap->addr)
                return (EINVAL);
        if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS)
                return (EINVAL);
        if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
                return (EINVAL);

        start = trunc_page((vm_offset_t) uap->addr);
        end = round_page(tmpaddr);

        return (vm_map_madvise(&ve->vmspace->vm_map, start, end,
                               uap->behav, uap->value));
}

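/*
 * Illustrative sketch (not part of this file): a virtual kernel typically
 * uses this entry point to manage a MAP_VPAGETABLE mapping in the guest
 * vmspace. The exact advisory values come from <sys/mman.h>; the names
 * below (guest_base, GUEST_SIZE, pte_base) are hypothetical.
 *
 *      // Point the virtual page table at the guest's page directory.
 *      vmspace_mcontrol(id, guest_base, GUEST_SIZE, MADV_SETMAP, pte_base);
 *      // After the guest modifies its page tables, invalidate stale
 *      // entries in the real pmap.
 *      vmspace_mcontrol(id, guest_base, GUEST_SIZE, MADV_INVAL, 0);
 */
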
/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);

/*
 * a->id is the comparison key; it is the only field that has to be
 * initialized in a lookup template.
 */
static int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
        if ((char *)a->id < (char *)b->id)
                return(-1);
        else if ((char *)a->id > (char *)b->id)
                return(1);
        return(0);
}

static
int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
        struct vkernel_proc *vkp = data;

        KKASSERT(ve->refs == 0);
        vmspace_entry_delete(ve, vkp);
        return(0);
}

/*
 * Remove a vmspace_entry from the RB tree and destroy it. We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 */
static
void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_proc *vkp)
{
        RB_REMOVE(vmspace_rb_tree, &vkp->root, ve);

        pmap_remove_pages(vmspace_pmap(ve->vmspace),
                          VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        vm_map_remove(&ve->vmspace->vm_map,
                      VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        sysref_put(&ve->vmspace->vm_sysref);
        kfree(ve, M_VKERNEL);
}

/*
 * Locate the vmspace_entry with the given id in the process's red-black
 * tree, or return NULL if it does not exist.
 */
static
struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_proc *vkp, void *id)
{
        struct vmspace_entry *ve;
        struct vmspace_entry key;

        key.id = id;
        ve = RB_FIND(vmspace_rb_tree, &vkp->root, &key);
        return (ve);
}

/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 */
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
        struct vkernel_proc *vkp;

        vkp = p1->p_vkernel;
        KKASSERT(vkp->refs > 0);
        atomic_add_int(&vkp->refs, 1);
        p2->p_vkernel = vkp;
}

void
vkernel_exit(struct proc *p)
{
        struct vkernel_proc *vkp;
        struct lwp *lp;
        int freeme = 0;

        vkp = p->p_vkernel;
        /*
         * Restore the original VM context if we are killed while running
         * a different one.
         *
         * This isn't supposed to happen. What is supposed to happen is
         * that the process should enter vkernel_trap() before handling
         * the signal.
         */
        RB_FOREACH(lp, lwp_rb_tree, &p->p_lwp_tree) {
                vkernel_lwp_exit(lp);
        }

        /*
         * Dereference the common area
         */
        p->p_vkernel = NULL;
        KKASSERT(vkp->refs > 0);
        spin_lock_wr(&vkp->spin);
        if (--vkp->refs == 0)
                freeme = 1;
        spin_unlock_wr(&vkp->spin);

        if (freeme) {
                RB_SCAN(vmspace_rb_tree, &vkp->root, NULL,
                        rb_vmspace_delete, vkp);
                kfree(vkp, M_VKERNEL);
        }
}

void
vkernel_lwp_exit(struct lwp *lp)
{
        struct vkernel_lwp *vklp;
        struct vmspace_entry *ve;

        if ((vklp = lp->lwp_vkernel) != NULL) {
                if ((ve = vklp->ve) != NULL) {
                        kprintf("Warning, pid %d killed with "
                                "active VC!\n", lp->lwp_proc->p_pid);
                        print_backtrace();
                        pmap_setlwpvm(lp, lp->lwp_proc->p_vmspace);
                        vklp->ve = NULL;
                        KKASSERT(ve->refs > 0);
                        atomic_subtract_int(&ve->refs, 1);
                }
                lp->lwp_vkernel = NULL;
                kfree(vklp, M_VKERNEL);
        }
}

/*
 * A VM space under virtual kernel control trapped out or made a system call
 * or otherwise needs to return control to the virtual kernel context.
 */
int
vkernel_trap(struct lwp *lp, struct trapframe *frame)
{
        struct proc *p = lp->lwp_proc;
        struct vmspace_entry *ve;
        struct vkernel_lwp *vklp;
        int error;

        /*
         * Which vmspace entry was running?
         */
        vklp = lp->lwp_vkernel;
        KKASSERT(vklp);
        ve = vklp->ve;
        KKASSERT(ve != NULL);

        /*
         * Switch the LWP vmspace back to the virtual kernel's VM space.
         */
        vklp->ve = NULL;
        pmap_setlwpvm(lp, p->p_vmspace);
        KKASSERT(ve->refs > 0);
        atomic_subtract_int(&ve->refs, 1);

        /*
         * Copy the emulated process frame to the virtual kernel process.
         * The emulated process cannot change TLS descriptors so don't
         * bother saving them, we already have a copy.
         *
         * Restore the virtual kernel's saved context so the virtual kernel
         * process can resume.
         */
        error = copyout(frame, vklp->user_trapframe, sizeof(*frame));
        bcopy(&vklp->save_trapframe, frame, sizeof(*frame));
        bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
              sizeof(vklp->save_vextframe.vx_tls));
        set_user_TLS();
        return(error);
}