/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>
#include <machine/vmm.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler (void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL);
static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug,
	CTLFLAG_RW, &swap_debug, 0, "");
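
/*
 * Note (not part of the original file): CTLFLAG_RW makes this knob tunable
 * at runtime, e.g. "sysctl vm.swap_debug=1" enables the swap in/out
 * kprintf() diagnostics used below.
 */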
static int scheduler_notify;

static void swapout (struct proc *);
int
kernacc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)", rw));
	prot = rw;

	/*
	 * The globaldata space is not part of the kernel_map proper,
	 * check access separately.
	 */
	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
		return (TRUE);

	/*
	 * Nominal kernel memory access - check access via kernel_map.
	 */
	if ((vm_offset_t)addr + len > vm_map_max(&kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE);

	return (rv == TRUE);
}
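
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * validating a kernel buffer before touching it might do:
 *
 *	if (!kernacc((c_caddr_t)buf, size, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 */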
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_offset_t wrap;
	vm_offset_t gpa;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)", rw));
	prot = rw;

	if (curthread->td_vmm) {
		if (vmm_vm_get_gpa(curproc, (register_t *)&gpa, (register_t) addr))
			panic("%s: could not get GPA\n", __func__);
		addr = (c_caddr_t) gpa;
	}

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 */
	wrap = (vm_offset_t)addr + len;
	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);

	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
				     round_page(wrap), prot, TRUE);
	vm_map_unlock_read(map);

	return (rv == TRUE);
}
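
/*
 * Illustrative usage sketch (not part of the original file): checking a
 * user-supplied buffer before a copyout-style operation might look like:
 *
 *	if (!useracc(uaddr, ulen, VM_PROT_WRITE))
 *		return (EFAULT);
 */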
void
vslock(caddr_t addr, u_int len)
{
	vm_map_wire(&curproc->p_vmspace->vm_map,
		    trunc_page((vm_offset_t)addr),
		    round_page((vm_offset_t)addr + len), 0);
}

void
vsunlock(caddr_t addr, u_int len)
{
	vm_map_wire(&curproc->p_vmspace->vm_map,
		    trunc_page((vm_offset_t)addr),
		    round_page((vm_offset_t)addr + len),
		    KM_PAGEABLE);
}
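
/*
 * Illustrative pairing sketch (not part of the original file; do_io() is a
 * hypothetical stand-in): a caller wires a user range for the duration of
 * an operation and must unwire it afterwards:
 *
 *	vslock(uaddr, ulen);
 *	error = do_io(uaddr, ulen);
 *	vsunlock(uaddr, ulen);
 */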
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_fork(struct proc *p1, struct proc *p2, int flags)
{
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (vmspace_getrefs(p1->p_vmspace) > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
		return;
	}

	if (flags & RFMEM) {
		vmspace_ref(p1->p_vmspace);
		p2->p_vmspace = p1->p_vmspace;
	}

	while (vm_page_count_severe()) {
		vm_wait(0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}
}
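
/*
 * Illustrative summary (not part of the original file) of the RFPROC/RFMEM
 * combinations handled above:
 *
 *	RFPROC|RFMEM	child shares p1's vmspace (vfork-like)
 *	RFPROC only	child gets a copy-on-write vmspace_fork() copy
 *	neither		no new process; a shared vmspace is unshared (COW)
 */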
/*
 * Set default limits for VM system.  Call during proc0's initialization.
 *
 * Called from the low level boot code only.
 */
void
vm_init_limits(struct proc *p)
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(vmstats.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
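
/*
 * Worked example (not part of the original file), assuming 4KB pages: the
 * 512-page floor makes the soft RSS limit at least ptoa(512) = 2MB; with
 * 262144 pages (1GB) free when the limits are set, the soft limit becomes
 * 1GB.  The hard limit stays RLIM_INFINITY.
 */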
/*
 * Faultin the specified process.  Note that the process can be in any
 * state.  Just clear P_SWAPPEDOUT and call wakeup in case the process is
 * sleeping.
 */
void
faultin(struct proc *p)
{
	if (p->p_flags & P_SWAPPEDOUT) {
		/*
		 * The process is waiting in the kernel to return to user
		 * mode but cannot until P_SWAPPEDOUT gets cleared.
		 */
		lwkt_gettoken(&p->p_token);
		p->p_flags &= ~(P_SWAPPEDOUT | P_SWAPWAIT);
		if (swap_debug)
			kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm);
		wakeup(p);
		lwkt_reltoken(&p->p_token);
	}
}
/*
 * Kernel initialization eventually falls through to this function,
 * which is process 0.
 *
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
struct scheduler_info {
	struct proc *pp;
	int ppri;
};

static int scheduler_callback(struct proc *p, void *data);

static void
scheduler(void *dummy)
{
	struct scheduler_info info;
	struct proc *p;

	KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
	scheduler_notify = 0;
	/*
	 * Don't try to swap anything in if we are low on memory.
	 */
	if (vm_page_count_severe()) {
		vm_wait(0);
		goto loop;
	}

	/*
	 * Look for a good candidate to wake up
	 *
	 * XXX we should make the schedule thread pcpu and then use a
	 * segmented allproc scan.
	 */
	info.pp = NULL;
	info.ppri = INT_MIN;
	allproc_scan(scheduler_callback, &info, 0);

	/*
	 * Nothing to do, back to sleep for at least 1/10 of a second.  If
	 * we are woken up, immediately process the next request.  If
	 * multiple requests have built up the first is processed
	 * immediately and the rest are staggered.
	 */
	if ((p = info.pp) == NULL) {
		tsleep(&proc0, 0, "nowork", hz / 10);
		if (scheduler_notify == 0)
			tsleep(&scheduler_notify, 0, "nowork", 0);
		goto loop;
	}

	/*
	 * Fault the selected process in, then wait for a short period of
	 * time and loop up.
	 *
	 * XXX we need a heuristic to get a measure of system stress and
	 * then adjust our stagger wakeup delay accordingly.
	 */
	lwkt_gettoken(&p->p_token);
	faultin(p);
	p->p_swtime = 0;
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	tsleep(&proc0, 0, "swapin", hz / 10);
	goto loop;
}
/*
 * Process only has its hold count bumped, we need the token
 * to safely scan the LWPs
 */
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	struct vmspace *vm;
	struct lwp *lp;
	segsz_t pgs;
	int pri;

	/*
	 * We only care about processes in swap-wait.  Interlock test with
	 * token if the flag is found set.
	 */
	if ((p->p_flags & P_SWAPWAIT) == 0)
		return (0);
	lwkt_gettoken_shared(&p->p_token);
	if ((p->p_flags & P_SWAPWAIT) == 0) {
		lwkt_reltoken(&p->p_token);
		return (0);
	}

	/*
	 * Calculate priority for swap-in
	 */
	pri = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		/* XXX lwp might need a different metric */
		pri += lp->lwp_slptime;
	}
	pri += p->p_swtime - p->p_nice * 8;

	/*
	 * The more pages paged out while we were swapped,
	 * the more work we have to do to get up and running
	 * again and the lower our wakeup priority.
	 *
	 * Each second of sleep time is worth ~1MB
	 */
	if ((vm = p->p_vmspace) != NULL) {
		pgs = vmspace_resident_count(vm);
		if (pgs < vm->vm_swrss) {
			pri -= (vm->vm_swrss - pgs) /
			       (1024 * 1024 / PAGE_SIZE);
		}
	}
	lwkt_reltoken(&p->p_token);

	/*
	 * If this process is higher priority and there is
	 * enough space, then select this process instead of
	 * the previous selection.
	 */
	if (pri > info->ppri) {
		if (info->pp)
			PRELE(info->pp);
		PHOLD(p);
		info->pp = p;
		info->ppri = pri;
	}
	return (0);
}
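
/*
 * Worked example (not part of the original file): a single-lwp process
 * with lwp_slptime 10, p_swtime 10, nice 0, and 20MB paged out while it
 * was swapped scores pri = 10 + 10 - 0 - 20 = 0, matching the "each
 * second of sleep time is worth ~1MB" rule above.
 */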
void
swapin_request(void)
{
	if (scheduler_notify == 0) {
		scheduler_notify = 1;
		wakeup(&scheduler_notify);
	}
}
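
/*
 * Illustrative sequence (not part of the original file), pieced together
 * from the comments in this file: a thread that finds P_SWAPPEDOUT set on
 * its return-to-usermode path marks itself P_SWAPWAIT, requests a swapin
 * to wake the scheduler, and sleeps; scheduler() then selects it via
 * scheduler_callback() and faultin() clears both flags and wakes it.
 */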
#ifndef NO_SWAPPING

#define	swappable(p) \
	(((p)->p_lock == 0) && \
	 ((p)->p_flags & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0)
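
/*
 * Note (not part of the original file): p_lock is the process hold count,
 * so a held process, like a traced, system, already swapped-out, or
 * exiting one, is never considered swappable.
 */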
/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 15;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0, "Guaranteed process resident time (sec)");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.  Default is
 * one minute.
 */
static int swap_idle_threshold2 = 60;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0, "Time (sec) a process can idle before being swapped");
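
/*
 * Worked example (not part of the original file): with the defaults above,
 * a process idle 10 seconds is never swapped out (swap_idle_threshold1);
 * one idle 30 seconds is eligible only under VM_SWAP_NORMAL memory stress;
 * one idle 90 seconds is also eligible under VM_SWAP_IDLE idle swapping.
 */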
/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and mark them as being swapped out.  This will cause the kernel
 * to prefer to pageout those procs' pages first and the procs in question
 * will not return to user mode until the swapper tells them they can.
 *
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
static int swapout_procs_callback(struct proc *p, void *data);

void
swapout_procs(int action)
{
	allproc_scan(swapout_procs_callback, &action, 0);
}

static int
swapout_procs_callback(struct proc *p, void *data)
{
	struct lwp *lp;
	int action = *(int *)data;
	int minslp = -1;

	if (!swappable(p))
		return (0);
	lwkt_gettoken(&p->p_token);

	/*
	 * We only consider active processes.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP) {
		lwkt_reltoken(&p->p_token);
		return (0);
	}

	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * do not swap out a realtime process
		 */
		if (RTP_PRIO_IS_REALTIME(lp->lwp_rtprio.type)) {
			lwkt_reltoken(&p->p_token);
			return (0);
		}

		/*
		 * Guarantee swap_idle_threshold1 time in memory
		 */
		if (lp->lwp_slptime < swap_idle_threshold1) {
			lwkt_reltoken(&p->p_token);
			return (0);
		}

		/*
		 * If the system is under memory stress, or if we
		 * are swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		if (((action & VM_SWAP_NORMAL) == 0) &&
		    (((action & VM_SWAP_IDLE) == 0) ||
		     (lp->lwp_slptime < swap_idle_threshold2))) {
			lwkt_reltoken(&p->p_token);
			return (0);
		}

		if (minslp == -1 || lp->lwp_slptime < minslp)
			minslp = lp->lwp_slptime;
	}

	/*
	 * If the process has been asleep for a while, swap
	 * it out.
	 */
	if ((action & VM_SWAP_NORMAL) ||
	    ((action & VM_SWAP_IDLE) &&
	     (minslp > swap_idle_threshold2))) {
		swapout(p);
	}

	/*
	 * cleanup our reference
	 */
	lwkt_reltoken(&p->p_token);
	return (0);
}
/*
 * The caller must hold p->p_token
 */
static void
swapout(struct proc *p)
{
	if (swap_debug)
		kprintf("swapping out %d (%s)\n", p->p_pid, p->p_comm);

	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	p->p_flags |= P_SWAPPEDOUT;
	p->p_swtime = 0;
}

#endif /* !NO_SWAPPING */