/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>
#include <machine/vmm.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <vm/vm_page2.h>
/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler (void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL);

static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug,
	CTLFLAG_RW, &swap_debug, 0, "");
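/*
 * swap_debug: when non-zero, swap-in and swap-out events are presumably
 * reported via the kprintf() calls in faultin() and swapout() below.
 */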
static int scheduler_notify;

static void swapout (struct proc *);
kernacc(c_caddr_t addr, int len, int rw)
	vm_offset_t saddr, eaddr;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)", rw));

	/*
	 * The globaldata space is not part of the kernel_map proper,
	 * check access separately.
	 */
	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))

	/*
	 * Nominal kernel memory access - check access via kernel_map.
	 */
	if ((vm_offset_t)addr + len > kernel_map.max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {

	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE);
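/*
 * Illustrative usage sketch (hypothetical caller; "buf" and "size" are
 * made-up names): 'rw' is a VM_PROT_* mask, as the KASSERT above enforces,
 * and a zero return means the range is not accessible:
 *
 *	if (!kernacc((c_caddr_t)buf, size, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 */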
useracc(c_caddr_t addr, int len, int rw)

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)", rw));

	if (curthread->td_vmm) {
		if (vmm_vm_get_gpa(curproc, (register_t *)&gpa, (register_t) addr))
			panic("%s: could not get GPA\n", __func__);
		addr = (c_caddr_t) gpa;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 */
	wrap = (vm_offset_t)addr + len;
	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {

	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);

	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page(wrap), prot, TRUE);
	vm_map_unlock_read(map);
vslock(caddr_t addr, u_int len)
	vm_map_wire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), 0);

vsunlock(caddr_t addr, u_int len)
	vm_map_wire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len),
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
vm_fork(struct proc *p1, struct proc *p2, int flags)
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 */
		if ((flags & RFMEM) == 0) {
			if (vmspace_getrefs(p1->p_vmspace) > 1) {

		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);

	vmspace_ref(p1->p_vmspace);
	p2->p_vmspace = p1->p_vmspace;

	while (vm_page_count_severe()) {

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
/*
 * Set default limits for VM system.  Call during proc0's initialization.
 *
 * Called from the low level boot code only.
 */
vm_init_limits(struct proc *p)
	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(vmstats.v_free_count, 512);
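	/*
	 * Assuming the usual 4KB PAGE_SIZE, the 512-page floor corresponds
	 * to the 2MB minimum noted above; ptoa() below converts the page
	 * count into a byte limit.
	 */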
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
/*
 * Faultin the specified process.  Note that the process can be in any
 * state.  Just clear P_SWAPPEDOUT and call wakeup in case the process is
 * waiting for the flag to clear.
 */
faultin(struct proc *p)
	if (p->p_flags & P_SWAPPEDOUT) {
		/*
		 * The process is waiting in the kernel to return to user
		 * mode but cannot until P_SWAPPEDOUT gets cleared.
		 */
		lwkt_gettoken(&p->p_token);
		p->p_flags &= ~(P_SWAPPEDOUT | P_SWAPWAIT);

		kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm);

		lwkt_reltoken(&p->p_token);
/*
 * Kernel initialization eventually falls through to this function,
 * which is process 0.
 *
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
struct scheduler_info {

static int scheduler_callback(struct proc *p, void *data);

scheduler(void *dummy)
	struct scheduler_info info;

	KKASSERT(!IN_CRITICAL_SECT(curthread));

	scheduler_notify = 0;

	/*
	 * Don't try to swap anything in if we are low on memory.
	 */
	if (vm_page_count_severe()) {

	/*
	 * Look for a good candidate to wake up
	 *
	 * XXX we should make the schedule thread pcpu and then use a
	 * segmented allproc scan.
	 */
	allproc_scan(scheduler_callback, &info, 0);

	/*
	 * Nothing to do, back to sleep for at least 1/10 of a second.  If
	 * we are woken up, immediately process the next request.  If
	 * multiple requests have built up the first is processed
	 * immediately and the rest are staggered.
	 */
	if ((p = info.pp) == NULL) {
		tsleep(&proc0, 0, "nowork", hz / 10);
		if (scheduler_notify == 0)
			tsleep(&scheduler_notify, 0, "nowork", 0);

	/*
	 * Fault the selected process in, then wait for a short period of
	 * time.
	 *
	 * XXX we need a heuristic to get a measure of system stress and
	 * then adjust our stagger wakeup delay accordingly.
	 */
	lwkt_gettoken(&p->p_token);

	lwkt_reltoken(&p->p_token);

	tsleep(&proc0, 0, "swapin", hz / 10);

/*
 * Process only has its hold count bumped, we need the token
 * to safely scan the LWPs
 */
scheduler_callback(struct proc *p, void *data)
	struct scheduler_info *info = data;

	/*
	 * We only care about processes in swap-wait.  Interlock test with
	 * token if the flag is found set.
	 */
	if ((p->p_flags & P_SWAPWAIT) == 0)

	lwkt_gettoken_shared(&p->p_token);
	if ((p->p_flags & P_SWAPWAIT) == 0) {
		lwkt_reltoken(&p->p_token);

	/*
	 * Calculate priority for swap-in
	 */
	FOREACH_LWP_IN_PROC(lp, p) {
		/* XXX lwp might need a different metric */
		pri += lp->lwp_slptime;

	pri += p->p_swtime - p->p_nice * 8;

	/*
	 * The more pages paged out while we were swapped,
	 * the more work we have to do to get up and running
	 * again and the lower our wakeup priority.
	 *
	 * Each second of sleep time is worth ~1MB
	 */
	if ((vm = p->p_vmspace) != NULL) {
		pgs = vmspace_resident_count(vm);
		if (pgs < vm->vm_swrss) {
			pri -= (vm->vm_swrss - pgs) /
			    (1024 * 1024 / PAGE_SIZE);
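			/*
			 * Worked example, assuming the usual 4KB PAGE_SIZE:
			 * 1MB is 256 pages, so a process whose resident set
			 * shrank by 2560 pages while it was swapped out loses
			 * 10 points of pri, offsetting roughly 10 seconds of
			 * accumulated sleep time.
			 */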
	lwkt_reltoken(&p->p_token);

	/*
	 * If this process is higher priority and there is
	 * enough space, then select this process instead of
	 * the previous selection.
	 */
	if (pri > info->ppri) {

	if (scheduler_notify == 0) {
		scheduler_notify = 1;
		wakeup(&scheduler_notify);
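		/*
		 * This wakes the tsleep(&scheduler_notify, ...) in scheduler()
		 * above, so a swap-in request posted while the scheduler is
		 * idle is handled immediately rather than waiting out the
		 * polling sleep.
		 */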
#define	swappable(p) \
	(((p)->p_lock == 0) && \
	 ((p)->p_flags & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0)
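/*
 * i.e. a process is considered swappable only when it has no outstanding
 * holds (p_lock == 0) and is not being traced, not a system process, not
 * already swapped out, and not in the middle of exiting.
 */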
/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process
 */
static int swap_idle_threshold1 = 15;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "Guaranteed process resident time (sec)");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.  Default is
 * one minute.
 */
static int swap_idle_threshold2 = 60;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "Time (sec) a process can idle before being swapped");
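/*
 * Both thresholds are CTLFLAG_RW and can therefore be tuned at runtime,
 * e.g. (illustrative values):
 *
 *	sysctl vm.swap_idle_threshold1=30
 *	sysctl vm.swap_idle_threshold2=120
 */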
/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and mark them as being swapped out.  This will cause the kernel
 * to prefer to pageout those proc's pages first and the procs in question
 * will not return to user mode until the swapper tells them they can.
 *
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
static int swapout_procs_callback(struct proc *p, void *data);

swapout_procs(int action)
	allproc_scan(swapout_procs_callback, &action, 0);
swapout_procs_callback(struct proc *p, void *data)
	int action = *(int *)data;

	lwkt_gettoken(&p->p_token);

	/*
	 * We only consider active processes.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP) {
		lwkt_reltoken(&p->p_token);

	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * do not swap out a realtime process
		 */
		if (RTP_PRIO_IS_REALTIME(lp->lwp_rtprio.type)) {
			lwkt_reltoken(&p->p_token);

		/*
		 * Guarantee swap_idle_threshold1 time in memory
		 */
		if (lp->lwp_slptime < swap_idle_threshold1) {
			lwkt_reltoken(&p->p_token);

		/*
		 * If the system is under memory stress, or if we are
		 * swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		if (((action & VM_SWAP_NORMAL) == 0) &&
		    (((action & VM_SWAP_IDLE) == 0) ||
		     (lp->lwp_slptime < swap_idle_threshold2))) {
			lwkt_reltoken(&p->p_token);

		if (minslp == -1 || lp->lwp_slptime < minslp)
			minslp = lp->lwp_slptime;

	/*
	 * If the process has been asleep for a while, swap it out.
	 */
	if ((action & VM_SWAP_NORMAL) ||
	    ((action & VM_SWAP_IDLE) &&
	     (minslp > swap_idle_threshold2))) {

	/*
	 * clean up our reference
	 */
	lwkt_reltoken(&p->p_token);
/*
 * The caller must hold p->p_token
 */
swapout(struct proc *p)

	kprintf("swapping out %d (%s)\n", p->p_pid, p->p_comm);

	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
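	/*
	 * scheduler_callback() above compares this snapshot against the
	 * current resident count when the process is later considered for
	 * swap-in, lowering the wakeup priority of processes that lost many
	 * pages while swapped out.
	 */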
	p->p_flags |= P_SWAPPEDOUT;

#endif /* !NO_SWAPPING */