/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */
/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#define SYNCER_MAXDELAY		32

static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
	    sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
	   &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
	   &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
	   &metadelay, 0, "VFS metadata synchronization delay");

static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
	   &stat_rush_requests, 0, "");
LIST_HEAD(synclist, vnode);

#define	SC_FLAG_EXIT		(0x1)		/* request syncer exit */
#define	SC_FLAG_DONE		(0x2)		/* syncer confirm exit */
#define	SC_FLAG_BIOOPS_ALL	(0x4)		/* do bufops_sync(NULL) */

/*
 * Per-syncer-thread context.  syncer_ctx0 services all mounts which do
 * not have a dedicated per-filesystem syncer thread.
 */
struct syncer_ctx {
	struct mount		*sc_mp;
	struct lwkt_token	sc_token;
	struct thread		*sc_thread;
	int			sc_flags;

	struct synclist 	*syncer_workitem_pending;
	long			syncer_mask;
	int			syncer_delayno;
	int			syncer_forced;
};

static struct syncer_ctx syncer_ctx0;

static void syncer_thread(void *);
static void
syncer_ctx_init(struct syncer_ctx *ctx, struct mount *mp)
{
	ctx->sc_mp = mp;
	ctx->sc_flags = 0;
	ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
	lwkt_token_init(&ctx->sc_token, "syncer");
}
/*
 * Called from vfsinit()
 */
void
vfs_sync_init(void)
{
	syncer_ctx_init(&syncer_ctx0, NULL);
	syncer_ctx0.sc_flags |= SC_FLAG_BIOOPS_ALL;

	/* Support schedcpu wakeup of syncer0 */
	lbolt_syncer = &syncer_ctx0;
}
static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
	int error;
	int v = syncdelay;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || !req->newptr)
		return (error);
	if (v < 1)
		v = 1;
	if (v > SYNCER_MAXDELAY)
		v = SYNCER_MAXDELAY;
	syncdelay = v;

	return (0);
}
static struct syncer_ctx *
vn_get_syncer(struct vnode *vp)
{
	struct mount *mp;
	struct syncer_ctx *ctx;

	ctx = NULL;
	if ((mp = vp->v_mount) != NULL)
		ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		ctx = &syncer_ctx0;

	return (ctx);
}
/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata is
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
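/*
 * Worked example (illustrative, not in the original source): with
 * SYNCER_MAXDELAY == 32, the hash mask returned by hashinit() is 31.
 * If syncer_delayno is currently 28, a fifteen second delay selects
 * slot (28 + 15) & 31 == 11, i.e. the wheel wraps around rather than
 * growing.
 */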
/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held, we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST)
		LIST_REMOVE(vp, v_synclist);
	if (delay <= 0) {
		slot = -delay & ctx->syncer_mask;
	} else {
		if (delay > SYNCER_MAXDELAY - 2)
			delay = SYNCER_MAXDELAY - 2;
		slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
	}

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);

	lwkt_reltoken(&ctx->sc_token);
}
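/*
 * Illustrative note (not in the original source): a non-positive delay
 * requests an absolute slot rather than one relative to syncer_delayno.
 * For example, vsyncscan() below passes -(i + syncdelay), which pins the
 * vnode to slot (i + syncdelay) & syncer_mask so that its scan loop does
 * not keep revisiting the same entry.
 */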
/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}
/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp);
}
/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
	if ((vp->v_flag & VISDIRTY) == 0) {
		vsetflags(vp, VISDIRTY);
		vn_syncer_add(vp, syncdelay);
	}
}
/*
 * Create per-filesystem syncer process
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;
	int rc;

	if (mp->mnt_kern_flag & MNTK_THR_SYNC) {
		ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP,
			      M_WAITOK | M_ZERO);
		syncer_ctx_init(ctx, mp);
		mp->mnt_syncer_ctx = ctx;
		rc = kthread_create(syncer_thread, ctx, &ctx->sc_thread,
				    "syncer%d", ++syncalloc);
	} else {
		mp->mnt_syncer_ctx = &syncer_ctx0;
	}
}
/*
 * Stop per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL || ctx == &syncer_ctx0)
		return;
	KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer process to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait till syncer process exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0)
		tsleep(&ctx->sc_flags, 0, "syncexit", hz);

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}
struct thread *updatethread;
/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct thread *td = curthread;
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	time_t starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;

	/*
	 * syncer0 runs till system shutdown; per-filesystem syncers are
	 * terminated on filesystem unmount
	 */
	if (ctx == &syncer_ctx0)
		EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
				      SHUTDOWN_PRI_LAST);
	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list we
			 * were unable to completely flush it and move it
			 * to a later slot to give other vnodes a fair shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves it to a later slot so we will never see it
			 * here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}
		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp || (sc_flags & SC_FLAG_BIOOPS_ALL))
			bio_ops_sync(ctx->sc_mp);
		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (ctx == &syncer_ctx0 && rushjob > 0) {
			atomic_subtract_int(&rushjob, 1);
			continue;
		}
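		/*
		 * Illustrative note (not in the original source): if
		 * rushjob is, say, 3, the "continue" above skips the
		 * one-second tsleep() below for three consecutive loop
		 * iterations, so three queue slots are drained
		 * back-to-back instead of one per second.
		 */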
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_uptime == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}
	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}
/*
 * Bootstrap for syncer0, started via the SYSINIT below.
 */
static void
syncer_thread_start(void)
{
	syncer_thread(&syncer_ctx0);
}

static struct kproc_desc up_kp = {
	"syncer0",
	syncer_thread_start,
	&updatethread
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 */
int
speedup_syncer(void)
{
	/*
	 * Don't bother protecting the test.  unsleep_and_wakeup_thread()
	 * will only do something real if the thread is in the right state.
	 */
	wakeup(lbolt_syncer);
	if (rushjob < syncdelay / 2) {
		atomic_add_int(&rushjob, 1);
		stat_rush_requests += 1;
		return (1);
	}
	return (0);
}
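/*
 * Illustrative usage (not in the original source): a caller generating
 * dirty buffers faster than the syncer retires them, e.g. a buffer-cache
 * daemon under memory pressure, would simply call:
 *
 *	(void)speedup_syncer();
 *
 * Each successful call advances the syncer by one extra queue slot,
 * capped at syncdelay / 2 outstanding slots.
 */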
/*
 * Routine to create and manage a filesystem syncer vnode.
 */
static int sync_close(struct vop_close_args *);
static int sync_fsync(struct vop_fsync_args *);
static int sync_inactive(struct vop_inactive_args *);
static int sync_reclaim(struct vop_reclaim_args *);
static int sync_print(struct vop_print_args *);
static struct vop_ops sync_vnode_vops = {
	.vop_default =	vop_eopnotsupp,
	.vop_close =	sync_close,
	.vop_fsync =	sync_fsync,
	.vop_inactive =	sync_inactive,
	.vop_reclaim =	sync_reclaim,
	.vop_print =	sync_print,
};

static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops;

VNODEOP_SET(sync_vnode_vops);
/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 * sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > SYNCER_MAXDELAY) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = SYNCER_MAXDELAY / 2;
			incr = SYNCER_MAXDELAY;
		}
		next = start;
	}
	vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);
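	/*
	 * Worked example (illustrative, not in the original source): with
	 * SYNCER_MAXDELAY == 32 and start/incr/next all starting at zero,
	 * successive mounts compute next = 16, then 48 (reset to 8), 24,
	 * then 40 (reset to 4), 12, 20, 28, ... spreading the sync vnodes
	 * across the wheel instead of clustering them in one slot.
	 */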
	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
static int
sync_close(struct vop_close_args *ap)
{
	return (0);
}
/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;
	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if ((ap->a_waitfor & MNT_LAZY) == 0)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);
	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}
/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
	vgone_vxlocked(ap->a_vp);
	return (0);
}
/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected with a critical
 * section.
 *
 * sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct syncer_ctx *ctx;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);
	KKASSERT(vp->v_mount->mnt_syncer != vp);
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vclrflags(vp, VONWORKLST);
	}
	lwkt_reltoken(&ctx->sc_token);

	return (0);
}
/*
 * This is very similar to vmntvnodescan() but it only scans the
 * vnodes on the syncer list.  VFS's which support faster VFS_SYNC
 * operations use the VISDIRTY flag on the vnode to ensure that vnodes
 * with dirty inodes are added to the syncer in addition to vnodes
 * with dirty buffers, and can use this function instead of vmntvnodescan().
 *
 * This is important when a system has millions of vnodes.
 */
int
vsyncscan(struct mount *mp, int vmsc_flags,
	  int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
	  void *data)
{
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int b;
	int i;
	int lkflags;

	if (vmsc_flags & VMSC_NOWAIT)
		lkflags = LK_NOWAIT;
	else
		lkflags = 0;

	/*
	 * Syncer list context.  This API requires a dedicated syncer thread.
	 * (MNTK_THR_SYNC).
	 */
	KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);
	ctx = mp->mnt_syncer_ctx;
	KKASSERT(ctx != &syncer_ctx0);

	lwkt_gettoken(&ctx->sc_token);

	/*
	 * Setup for loop.  Allow races against the syncer thread but
	 * require that the syncer thread not be lazy if we were told
	 * not to be lazy.
	 */
	b = ctx->syncer_delayno & ctx->syncer_mask;
	i = b;
	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		++ctx->syncer_forced;

	do {
		slp = &ctx->syncer_workitem_pending[i];

		while ((vp = LIST_FIRST(slp)) != NULL) {
			KKASSERT(vp->v_mount == mp);
			if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) {
				slowfunc(mp, vp, data);
				vput(vp);
			}

			/*
			 * If the vnode is still at the head of the list
			 * move it to a later slot so the scan makes
			 * progress and does not revisit it this pass.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, -(i + syncdelay));
		}
		i = (i + 1) & ctx->syncer_mask;
	} while (i != b);

	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		--ctx->syncer_forced;
	lwkt_reltoken(&ctx->sc_token);

	return (0);
}
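/*
 * Illustrative usage (not in the original source): a VFS_SYNC
 * implementation on a mount with MNTK_THR_SYNC set might flush all
 * dirty vnodes via a hypothetical per-vnode callback:
 *
 *	static int
 *	myfs_sync_scan(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		VOP_FSYNC(vp, MNT_NOWAIT, 0);
 *		return (0);
 *	}
 *
 *	error = vsyncscan(mp, VMSC_GETVP, myfs_sync_scan, NULL);
 */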
/*
 * Print out a syncer vnode.
 *
 * sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	kprintf("syncer vnode");
	lockmgr_printinfo(&vp->v_lock);
	kprintf("\n");
	return (0);
}