/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 * $DragonFly: src/sys/kern/vfs_sync.c,v 1.18 2008/05/18 05:54:25 dillon Exp $
 */
/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_INT(_kern, OID_AUTO, syncdelay, CTLFLAG_RW,
	   &syncdelay, 0, "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
	   &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
	   &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
	   &metadelay, 0, "VFS metadata synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
	   &stat_rush_requests, 0, "");

static int syncer_delayno = 0;
static long syncer_mask;
static struct lwkt_token syncer_token;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;
/*
 * Called from vfsinit()
 */
void
vfs_sync_init(void)
{
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_DEVBUF,
					   &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	lwkt_token_init(&syncer_token, 1, "syncer");
}
/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, mounted block devices
 * are delayed only about half as long as file data is delayed.
 * Similarly, directory updates are more critical, so they are delayed
 * only about a third as long as file data.  Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
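
/*
 * Illustrative sketch, not part of the original file: the slot a vnode
 * with a given delay would land in, assuming the table size is a power
 * of two so that syncer_mask == syncer_maxdelay - 1.  This mirrors the
 * computation vn_syncer_add() performs below; the helper name is
 * hypothetical and nothing in the kernel calls it.
 */
static __inline int
syncer_slot_for_delay(int delay)
{
	if (delay > syncer_maxdelay - 2)	/* clamp, as vn_syncer_add() does */
		delay = syncer_maxdelay - 2;
	return ((int)((syncer_delayno + delay) & syncer_mask));
}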
/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held, we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	int slot;

	lwkt_gettoken(&syncer_token);

	if (vp->v_flag & VONWORKLST)
		LIST_REMOVE(vp, v_synclist);
	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);

	lwkt_reltoken(&syncer_token);
}
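
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * a filesystem that has just dirtied a vnode would typically queue it
 * with one of the delay knobs defined above.  Which knob applies to
 * which case is an assumption here; the real callers live in the
 * individual filesystems and in the buffer cache.
 */
#if 0
	if ((vp->v_flag & VONWORKLST) == 0) {
		if (vp->v_type == VDIR)
			vn_syncer_add(vp, dirdelay);
		else
			vn_syncer_add(vp, filedelay);
	}
#endif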
/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	lwkt_gettoken(&syncer_token);

	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&syncer_token);
}
struct thread *updatethread;
/*
 * System filesystem synchronizer daemon.
 *
 * NOTE: Started MPSAFE but is not yet mpsafe.
 */
static void
syncer_thread(void)
{
	struct thread *td = curthread;
	struct synclist *slp;
	struct vnode *vp;
	time_t starttime;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_LAST);

	for (;;) {
		kproc_suspend_loop();

		starttime = time_second;
		lwkt_gettoken(&syncer_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
				VOP_FSYNC(vp, MNT_LAZY, 0);
				vput(vp);
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list we
			 * were unable to completely flush it, so move it
			 * to a later slot to give other vnodes a fair shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves them to a later slot so we will never see
			 * them here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}
		lwkt_reltoken(&syncer_token);

		/*
		 * Do sync processing for each mount.
		 */

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the
		 * next N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer process when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt_syncer, 0, "syncer", 0);
	}
}
static struct kproc_desc up_kp = {
	"syncer",
	syncer_thread,
	&updatethread
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 *
 * YYY wchan field protected by the BGL.
 */
int
speedup_syncer(void)
{
	/*
	 * Don't bother protecting the test.  unsleep_and_wakeup_thread()
	 * will only do something real if the thread is in the right state.
	 */
	wakeup(&lbolt_syncer);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return (1);
	}
	return (0);
}
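
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * how the rushjob mechanism plays out.  A caller that sees dirty data
 * piling up bumps rushjob via speedup_syncer(); the syncer thread then
 * consumes one queue slot per pass without sleeping, until rushjob
 * drains or reaches syncdelay / 2.  The predicate below is hypothetical.
 */
#if 0
	while (dirty_data_piling_up())		/* hypothetical pressure test */
		if (speedup_syncer() == 0)	/* already rushing as much as allowed */
			break;
#endif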
/*
 * Routine to create and manage a filesystem syncer vnode.
 */
static int sync_close(struct vop_close_args *);
static int sync_fsync(struct vop_fsync_args *);
static int sync_inactive(struct vop_inactive_args *);
static int sync_reclaim(struct vop_reclaim_args *);
static int sync_print(struct vop_print_args *);

static struct vop_ops sync_vnode_vops = {
	.vop_default =	vop_eopnotsupp,
	.vop_close =	sync_close,
	.vop_fsync =	sync_fsync,
	.vop_inactive =	sync_inactive,
	.vop_reclaim =	sync_reclaim,
	.vop_print =	sync_print,
};

static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops;

VNODEOP_SET(sync_vnode_vops);
/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 *	 sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}

	/*
	 * Place the vnode onto the syncer worklist. We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
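
/*
 * Illustrative sketch, not part of the original file and not compiled:
 * a mount path would attach a syncer vnode to a new mount roughly like
 * this (read-only mounts included, per the NOTE above).  The error
 * handling shown is an assumption.
 */
#if 0
	if (mp->mnt_syncer == NULL && vfs_allocate_syncvnode(mp) != 0)
		kprintf("mount: cannot allocate syncer vnode\n");
#endif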
static int
sync_close(struct vop_close_args *ap)
{
	return (0);
}
/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}
/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
	vgone_vxlocked(ap->a_vp);
	return (0);
}
/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected by the syncer_token.
 *
 * sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;

	lwkt_gettoken(&syncer_token);
	KKASSERT(vp->v_mount->mnt_syncer != vp);
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vclrflags(vp, VONWORKLST);
	}
	lwkt_reltoken(&syncer_token);

	return (0);
}
/*
 * Print out a syncer vnode.
 *
 * sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	kprintf("syncer vnode");
	lockmgr_printinfo(&vp->v_lock);
	kprintf("\n");
	return (0);
}