2 * Copyright (c) 2004-2006 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/kern/vfs_jops.c,v 1.27 2006/05/08 18:45:51 dillon Exp $
37 * Each mount point may have zero or more independently configured journals
38 * attached to it. Each journal is represented by a memory FIFO and worker
39 * thread. Journal events are streamed through the FIFO to the thread,
40 * batched up (typically on one-second intervals), and written out by the
43 * Journal vnode ops are executed instead of mnt_vn_norm_ops when one or
44 * more journals have been installed on a mount point. It becomes the
45 * responsibility of the journal op to call the underlying normal op as
48 #include <sys/param.h>
49 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/queue.h>
55 #include <sys/malloc.h>
56 #include <sys/mount.h>
57 #include <sys/unistd.h>
58 #include <sys/vnode.h>
60 #include <sys/mountctl.h>
61 #include <sys/journal.h>
64 #include <sys/msfbuf.h>
65 #include <sys/socket.h>
66 #include <sys/socketvar.h>
68 #include <machine/limits.h>
71 #include <vm/vm_object.h>
72 #include <vm/vm_page.h>
73 #include <vm/vm_pager.h>
74 #include <vm/vnode_pager.h>
76 #include <sys/file2.h>
77 #include <sys/thread2.h>
79 static int journal_attach(struct mount *mp);
80 static void journal_detach(struct mount *mp);
81 static int journal_install_vfs_journal(struct mount *mp, struct file *fp,
82 const struct mountctl_install_journal *info);
83 static int journal_restart_vfs_journal(struct mount *mp, struct file *fp,
84 const struct mountctl_restart_journal *info);
85 static int journal_remove_vfs_journal(struct mount *mp,
86 const struct mountctl_remove_journal *info);
87 static int journal_restart(struct mount *mp, struct file *fp,
88 struct journal *jo, int flags);
89 static int journal_destroy(struct mount *mp, struct journal *jo, int flags);
90 static int journal_resync_vfs_journal(struct mount *mp, const void *ctl);
91 static int journal_status_vfs_journal(struct mount *mp,
92 const struct mountctl_status_journal *info,
93 struct mountctl_journal_ret_status *rstat,
94 int buflen, int *res);
96 static void jrecord_undo_file(struct jrecord *jrec, struct vnode *vp,
97 int jrflags, off_t off, off_t bytes);
99 static int journal_setattr(struct vop_setattr_args *ap);
100 static int journal_write(struct vop_write_args *ap);
101 static int journal_fsync(struct vop_fsync_args *ap);
102 static int journal_putpages(struct vop_putpages_args *ap);
103 static int journal_setacl(struct vop_setacl_args *ap);
104 static int journal_setextattr(struct vop_setextattr_args *ap);
105 static int journal_ncreate(struct vop_ncreate_args *ap);
106 static int journal_nmknod(struct vop_nmknod_args *ap);
107 static int journal_nlink(struct vop_nlink_args *ap);
108 static int journal_nsymlink(struct vop_nsymlink_args *ap);
109 static int journal_nwhiteout(struct vop_nwhiteout_args *ap);
110 static int journal_nremove(struct vop_nremove_args *ap);
111 static int journal_nmkdir(struct vop_nmkdir_args *ap);
112 static int journal_nrmdir(struct vop_nrmdir_args *ap);
113 static int journal_nrename(struct vop_nrename_args *ap);
115 #define JRUNDO_SIZE 0x00000001
116 #define JRUNDO_UID 0x00000002
117 #define JRUNDO_GID 0x00000004
118 #define JRUNDO_FSID 0x00000008
119 #define JRUNDO_MODES 0x00000010
120 #define JRUNDO_INUM 0x00000020
121 #define JRUNDO_ATIME 0x00000040
122 #define JRUNDO_MTIME 0x00000080
123 #define JRUNDO_CTIME 0x00000100
124 #define JRUNDO_GEN 0x00000200
125 #define JRUNDO_FLAGS 0x00000400
126 #define JRUNDO_UDEV 0x00000800
127 #define JRUNDO_NLINK 0x00001000
128 #define JRUNDO_FILEDATA 0x00010000
129 #define JRUNDO_GETVP 0x00020000
130 #define JRUNDO_CONDLINK 0x00040000 /* write file data if link count 1 */
131 #define JRUNDO_VATTR (JRUNDO_SIZE|JRUNDO_UID|JRUNDO_GID|JRUNDO_FSID|\
132 JRUNDO_MODES|JRUNDO_INUM|JRUNDO_ATIME|JRUNDO_MTIME|\
133 JRUNDO_CTIME|JRUNDO_GEN|JRUNDO_FLAGS|JRUNDO_UDEV|\
135 #define JRUNDO_ALL (JRUNDO_VATTR|JRUNDO_FILEDATA)
137 static struct vnodeopv_entry_desc journal_vnodeop_entries[] = {
138 { &vop_default_desc, vop_journal_operate_ap },
139 { &vop_mountctl_desc, (void *)journal_mountctl },
140 { &vop_setattr_desc, (void *)journal_setattr },
141 { &vop_write_desc, (void *)journal_write },
142 { &vop_fsync_desc, (void *)journal_fsync },
143 { &vop_putpages_desc, (void *)journal_putpages },
144 { &vop_setacl_desc, (void *)journal_setacl },
145 { &vop_setextattr_desc, (void *)journal_setextattr },
146 { &vop_ncreate_desc, (void *)journal_ncreate },
147 { &vop_nmknod_desc, (void *)journal_nmknod },
148 { &vop_nlink_desc, (void *)journal_nlink },
149 { &vop_nsymlink_desc, (void *)journal_nsymlink },
150 { &vop_nwhiteout_desc, (void *)journal_nwhiteout },
151 { &vop_nremove_desc, (void *)journal_nremove },
152 { &vop_nmkdir_desc, (void *)journal_nmkdir },
153 { &vop_nrmdir_desc, (void *)journal_nrmdir },
154 { &vop_nrename_desc, (void *)journal_nrename },
158 static MALLOC_DEFINE(M_JOURNAL, "journal", "Journaling structures");
159 static MALLOC_DEFINE(M_JFIFO, "journal-fifo", "Journal FIFO");
162 journal_mountctl(struct vop_mountctl_args *ap)
167 mp = ap->a_head.a_ops->vv_mount;
170 if (mp->mnt_vn_journal_ops == NULL) {
172 case MOUNTCTL_INSTALL_VFS_JOURNAL:
173 error = journal_attach(mp);
174 if (error == 0 && ap->a_ctllen != sizeof(struct mountctl_install_journal))
176 if (error == 0 && ap->a_fp == NULL)
179 error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
180 if (TAILQ_EMPTY(&mp->mnt_jlist))
183 case MOUNTCTL_RESTART_VFS_JOURNAL:
184 case MOUNTCTL_REMOVE_VFS_JOURNAL:
185 case MOUNTCTL_RESYNC_VFS_JOURNAL:
186 case MOUNTCTL_STATUS_VFS_JOURNAL:
195 case MOUNTCTL_INSTALL_VFS_JOURNAL:
196 if (ap->a_ctllen != sizeof(struct mountctl_install_journal))
198 if (error == 0 && ap->a_fp == NULL)
201 error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
203 case MOUNTCTL_RESTART_VFS_JOURNAL:
204 if (ap->a_ctllen != sizeof(struct mountctl_restart_journal))
206 if (error == 0 && ap->a_fp == NULL)
209 error = journal_restart_vfs_journal(mp, ap->a_fp, ap->a_ctl);
211 case MOUNTCTL_REMOVE_VFS_JOURNAL:
212 if (ap->a_ctllen != sizeof(struct mountctl_remove_journal))
215 error = journal_remove_vfs_journal(mp, ap->a_ctl);
216 if (TAILQ_EMPTY(&mp->mnt_jlist))
219 case MOUNTCTL_RESYNC_VFS_JOURNAL:
220 if (ap->a_ctllen != 0)
222 error = journal_resync_vfs_journal(mp, ap->a_ctl);
224 case MOUNTCTL_STATUS_VFS_JOURNAL:
225 if (ap->a_ctllen != sizeof(struct mountctl_status_journal))
228 error = journal_status_vfs_journal(mp, ap->a_ctl,
229 ap->a_buf, ap->a_buflen, ap->a_res);
241 * High level mount point setup. When a
244 journal_attach(struct mount *mp)
246 KKASSERT(mp->mnt_jbitmap == NULL);
247 vfs_add_vnodeops(mp, &mp->mnt_vn_journal_ops,
248 journal_vnodeop_entries, 0);
249 mp->mnt_jbitmap = malloc(JREC_STREAMID_JMAX/8, M_JOURNAL, M_WAITOK|M_ZERO);
250 mp->mnt_streamid = JREC_STREAMID_JMIN;
255 journal_detach(struct mount *mp)
257 KKASSERT(mp->mnt_jbitmap != NULL);
258 if (mp->mnt_vn_journal_ops)
259 vfs_rm_vnodeops(&mp->mnt_vn_journal_ops);
260 free(mp->mnt_jbitmap, M_JOURNAL);
261 mp->mnt_jbitmap = NULL;
265 * Install a journal on a mount point. Each journal has an associated worker
266 * thread which is responsible for buffering and spooling the data to the
267 * target. A mount point may have multiple journals attached to it. An
268 * initial start record is generated when the journal is associated.
271 journal_install_vfs_journal(struct mount *mp, struct file *fp,
272 const struct mountctl_install_journal *info)
279 jo = malloc(sizeof(struct journal), M_JOURNAL, M_WAITOK|M_ZERO);
280 bcopy(info->id, jo->id, sizeof(jo->id));
281 jo->flags = info->flags & ~(MC_JOURNAL_WACTIVE | MC_JOURNAL_RACTIVE |
282 MC_JOURNAL_STOP_REQ);
285 * Memory FIFO size, round to nearest power of 2
287 if (info->membufsize) {
288 if (info->membufsize < 65536)
290 else if (info->membufsize > 128 * 1024 * 1024)
291 size = 128 * 1024 * 1024;
293 size = (int)info->membufsize;
298 while (jo->fifo.size < size)
302 * Other parameters. If not specified the starting transaction id
303 * will be the current date.
306 jo->transid = info->transid;
310 jo->transid = ((int64_t)ts.tv_sec << 30) | ts.tv_nsec;
316 * Allocate the memory FIFO
318 jo->fifo.mask = jo->fifo.size - 1;
319 jo->fifo.membase = malloc(jo->fifo.size, M_JFIFO, M_WAITOK|M_ZERO|M_NULLOK);
320 if (jo->fifo.membase == NULL)
324 * Create the worker threads and generate the association record.
330 journal_create_threads(jo);
331 jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
332 jrecord_write(&jrec, JTYPE_ASSOCIATE, 0);
333 jrecord_done(&jrec, 0);
334 TAILQ_INSERT_TAIL(&mp->mnt_jlist, jo, jentry);
340 * Restart a journal with a new descriptor. The existing reader and writer
341 * threads are terminated and a new descriptor is associated with the
342 * journal. The FIFO rindex is reset to xindex and the threads are then
346 journal_restart_vfs_journal(struct mount *mp, struct file *fp,
347 const struct mountctl_restart_journal *info)
352 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
353 if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
357 error = journal_restart(mp, fp, jo, info->flags);
364 journal_restart(struct mount *mp, struct file *fp,
365 struct journal *jo, int flags)
373 * Record the fact that we are doing a restart in the journal.
374 * XXX it isn't safe to do this if the journal is being restarted
375 * because it was locked up and the writer thread has already exited.
377 jrecord_init(jo, &jrec, JREC_STREAMID_RESTART);
378 jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
379 jrecord_done(&jrec, 0);
383 * Stop the reader and writer threads and clean up the current
386 printf("RESTART WITH FP %p KILLING %p\n", fp, jo->fp);
387 journal_destroy_threads(jo, flags);
393 * Associate the new descriptor, reset the FIFO index, and recreate
398 jo->fifo.rindex = jo->fifo.xindex;
399 journal_create_threads(jo);
405 * Disassociate a journal from a mount point and terminate its worker thread.
406 * A final termination record is written out before the file pointer is
410 journal_remove_vfs_journal(struct mount *mp,
411 const struct mountctl_remove_journal *info)
416 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
417 if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
421 error = journal_destroy(mp, jo, info->flags);
428 * Remove all journals associated with a mount point. Usually called
429 * by the umount code.
432 journal_remove_all_journals(struct mount *mp, int flags)
436 while ((jo = TAILQ_FIRST(&mp->mnt_jlist)) != NULL) {
437 journal_destroy(mp, jo, flags);
442 journal_destroy(struct mount *mp, struct journal *jo, int flags)
446 TAILQ_REMOVE(&mp->mnt_jlist, jo, jentry);
448 jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
449 jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
450 jrecord_done(&jrec, 0);
452 journal_destroy_threads(jo, flags);
456 if (jo->fifo.membase)
457 free(jo->fifo.membase, M_JFIFO);
464 journal_resync_vfs_journal(struct mount *mp, const void *ctl)
470 journal_status_vfs_journal(struct mount *mp,
471 const struct mountctl_status_journal *info,
472 struct mountctl_journal_ret_status *rstat,
473 int buflen, int *res)
481 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
482 if (info->index == MC_JOURNAL_INDEX_ID) {
483 if (bcmp(jo->id, info->id, sizeof(jo->id)) != 0)
485 } else if (info->index >= 0) {
486 if (info->index < index)
488 } else if (info->index != MC_JOURNAL_INDEX_ALL) {
491 if (buflen < sizeof(*rstat)) {
493 rstat[-1].flags |= MC_JOURNAL_STATUS_MORETOCOME;
498 bzero(rstat, sizeof(*rstat));
499 rstat->recsize = sizeof(*rstat);
500 bcopy(jo->id, rstat->id, sizeof(jo->id));
501 rstat->index = index;
502 rstat->membufsize = jo->fifo.size;
503 rstat->membufused = jo->fifo.windex - jo->fifo.xindex;
504 rstat->membufunacked = jo->fifo.rindex - jo->fifo.xindex;
505 rstat->bytessent = jo->total_acked;
506 rstat->fifostalls = jo->fifostalls;
509 *res += sizeof(*rstat);
510 buflen -= sizeof(*rstat);
515 /************************************************************************
516 * PARALLEL TRANSACTION SUPPORT ROUTINES *
517 ************************************************************************
519 * JRECLIST_*() - routines which create and iterate over jrecord structures,
520 * because a mount point may have multiple attached journals.
524 * Initialize the passed jrecord_list and create a jrecord for each
525 * journal we need to write to. Unnecessary mallocs are avoided by
526 * using the passed jrecord structure as the first jrecord in the list.
527 * A starting transaction is pushed for each jrecord.
529 * Returns non-zero if any of the journals require undo records.
533 jreclist_init(struct mount *mp, struct jrecord_list *jreclist,
534 struct jrecord *jreccache, int16_t rectype)
537 struct jrecord *jrec;
542 TAILQ_INIT(&jreclist->list);
545 * Select the stream ID to use for the transaction. We must select
546 * a stream ID that is not currently in use by some other parallel
549 * Don't bother calculating the next streamid when reassigning
550 * mnt_streamid, since parallel transactions are fairly rare. This
551 * also allows someone observing the raw records to clearly see
552 * when parallel transactions occur.
554 streamid = mp->mnt_streamid;
556 while (mp->mnt_jbitmap[streamid >> 3] & (1 << (streamid & 7))) {
557 if (++streamid == JREC_STREAMID_JMAX)
558 streamid = JREC_STREAMID_JMIN;
559 if (++count == JREC_STREAMID_JMAX - JREC_STREAMID_JMIN) {
560 printf("jreclist_init: all streamid's in use! sleeping\n");
561 tsleep(jreclist, 0, "jsidfl", hz * 10);
565 mp->mnt_jbitmap[streamid >> 3] |= 1 << (streamid & 7);
566 mp->mnt_streamid = streamid;
567 jreclist->streamid = streamid;
570 * Now initialize a stream on each journal.
574 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
578 jrec = malloc(sizeof(*jrec), M_JOURNAL, M_WAITOK);
579 jrecord_init(jo, jrec, streamid);
580 jrec->user_save = jrecord_push(jrec, rectype);
581 TAILQ_INSERT_TAIL(&jreclist->list, jrec, user_entry);
582 if (jo->flags & MC_JOURNAL_WANT_REVERSABLE)
590 * Terminate the journaled transactions started by jreclist_init(). If
591 * an error occurred, the transaction records will be aborted.
595 jreclist_done(struct mount *mp, struct jrecord_list *jreclist, int error)
597 struct jrecord *jrec;
601 * Cleanup the jrecord state on each journal.
603 TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
604 jrecord_pop(jrec, jrec->user_save);
605 jrecord_done(jrec, error);
609 * Free allocated jrec's (the first is always supplied)
612 while ((jrec = TAILQ_FIRST(&jreclist->list)) != NULL) {
613 TAILQ_REMOVE(&jreclist->list, jrec, user_entry);
615 free(jrec, M_JOURNAL);
620 * Clear the streamid so it can be reused.
622 mp->mnt_jbitmap[jreclist->streamid >> 3] &= ~(1 << (jreclist->streamid & 7));
626 * This procedure writes out UNDO records for available reversable
629 * XXX could use improvement. There is no need to re-read the file
634 jreclist_undo_file(struct jrecord_list *jreclist, struct vnode *vp,
635 int jrflags, off_t off, off_t bytes)
637 struct jrecord *jrec;
641 if (jrflags & JRUNDO_GETVP)
642 error = vget(vp, LK_SHARED);
644 TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
645 if (jrec->jo->flags & MC_JOURNAL_WANT_REVERSABLE) {
646 jrecord_undo_file(jrec, vp, jrflags, off, bytes);
650 if (error == 0 && jrflags & JRUNDO_GETVP)
654 /************************************************************************
655 * LOW LEVEL UNDO SUPPORT ROUTINE *
656 ************************************************************************
658 * This function is used to support UNDO records. It will generate an
659 * appropriate record with the requested portion of the file data. Note
660 * that file data is only recorded if JRUNDO_FILEDATA is passed. If bytes
661 * is -1, it will be set to the size of the file.
664 jrecord_undo_file(struct jrecord *jrec, struct vnode *vp, int jrflags,
665 off_t off, off_t bytes)
668 void *save1; /* warning, save pointers do not always remain valid */
673 * Setup. Start the UNDO record, obtain a shared lock on the vnode,
674 * and retrieve attribute info.
676 save1 = jrecord_push(jrec, JTYPE_UNDO);
677 error = VOP_GETATTR(vp, &attr);
682 * Generate UNDO records as requested.
684 if (jrflags & JRUNDO_VATTR) {
685 save2 = jrecord_push(jrec, JTYPE_VATTR);
686 jrecord_leaf(jrec, JLEAF_VTYPE, &attr.va_type, sizeof(attr.va_type));
687 if ((jrflags & JRUNDO_NLINK) && attr.va_nlink != VNOVAL)
688 jrecord_leaf(jrec, JLEAF_NLINK, &attr.va_nlink, sizeof(attr.va_nlink));
689 if ((jrflags & JRUNDO_SIZE) && attr.va_size != VNOVAL)
690 jrecord_leaf(jrec, JLEAF_SIZE, &attr.va_size, sizeof(attr.va_size));
691 if ((jrflags & JRUNDO_UID) && attr.va_uid != VNOVAL)
692 jrecord_leaf(jrec, JLEAF_UID, &attr.va_uid, sizeof(attr.va_uid));
693 if ((jrflags & JRUNDO_GID) && attr.va_gid != VNOVAL)
694 jrecord_leaf(jrec, JLEAF_GID, &attr.va_gid, sizeof(attr.va_gid));
695 if ((jrflags & JRUNDO_FSID) && attr.va_fsid != VNOVAL)
696 jrecord_leaf(jrec, JLEAF_FSID, &attr.va_fsid, sizeof(attr.va_fsid));
697 if ((jrflags & JRUNDO_MODES) && attr.va_mode != (mode_t)VNOVAL)
698 jrecord_leaf(jrec, JLEAF_MODES, &attr.va_mode, sizeof(attr.va_mode));
699 if ((jrflags & JRUNDO_INUM) && attr.va_fileid != VNOVAL)
700 jrecord_leaf(jrec, JLEAF_INUM, &attr.va_fileid, sizeof(attr.va_fileid));
701 if ((jrflags & JRUNDO_ATIME) && attr.va_atime.tv_sec != VNOVAL)
702 jrecord_leaf(jrec, JLEAF_ATIME, &attr.va_atime, sizeof(attr.va_atime));
703 if ((jrflags & JRUNDO_MTIME) && attr.va_mtime.tv_sec != VNOVAL)
704 jrecord_leaf(jrec, JLEAF_MTIME, &attr.va_mtime, sizeof(attr.va_mtime));
705 if ((jrflags & JRUNDO_CTIME) && attr.va_ctime.tv_sec != VNOVAL)
706 jrecord_leaf(jrec, JLEAF_CTIME, &attr.va_ctime, sizeof(attr.va_ctime));
707 if ((jrflags & JRUNDO_GEN) && attr.va_gen != VNOVAL)
708 jrecord_leaf(jrec, JLEAF_GEN, &attr.va_gen, sizeof(attr.va_gen));
709 if ((jrflags & JRUNDO_FLAGS) && attr.va_flags != VNOVAL)
710 jrecord_leaf(jrec, JLEAF_FLAGS, &attr.va_flags, sizeof(attr.va_flags));
711 if ((jrflags & JRUNDO_UDEV) && attr.va_rdev != VNOVAL)
712 jrecord_leaf(jrec, JLEAF_UDEV, &attr.va_rdev, sizeof(attr.va_rdev));
713 jrecord_pop(jrec, save2);
717 * Output the file data being overwritten by reading the file and
718 * writing it out to the journal prior to the write operation. We
719 * do not need to write out data past the current file EOF.
721 * XXX support JRUNDO_CONDLINK - do not write out file data for files
722 * with a link count > 1. The undo code needs to locate the inode and
723 * regenerate the hardlink.
725 if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VREG) {
726 if (attr.va_size != VNOVAL) {
728 bytes = attr.va_size - off;
729 if (off + bytes > attr.va_size)
730 bytes = attr.va_size - off;
732 jrecord_file_data(jrec, vp, off, bytes);
737 if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VLNK) {
742 buf = malloc(PATH_MAX, M_JOURNAL, M_WAITOK);
744 aiov.iov_len = PATH_MAX;
745 auio.uio_iov = &aiov;
748 auio.uio_rw = UIO_READ;
749 auio.uio_segflg = UIO_SYSSPACE;
750 auio.uio_td = curthread;
751 auio.uio_resid = PATH_MAX;
752 error = VOP_READLINK(vp, &auio, proc0.p_ucred);
754 jrecord_leaf(jrec, JLEAF_SYMLINKDATA, buf,
755 PATH_MAX - auio.uio_resid);
757 free(buf, M_JOURNAL);
761 jrecord_leaf(jrec, JLEAF_ERROR, &error, sizeof(error));
762 jrecord_pop(jrec, save1);
765 /************************************************************************
767 ************************************************************************
769 * These are function shims replacing the normal filesystem ops. We become
770 * responsible for calling the underlying filesystem ops. We have the choice
771 * of executing the underlying op first and then generating the journal entry,
772 * or starting the journal entry, executing the underlying op, and then
773 * either completing or aborting it.
775 * The journal is supposed to be a high-level entity, which generally means
776 * identifying files by name rather than by inode. Supplying both allows
777 * the journal to be used both for inode-number-compatible 'mirrors' and
778 * for simple filesystem replication.
780 * Writes are particularly difficult to deal with because a single write may
781 * represent a hundred megabyte buffer or more, and both writes and truncations
782 * require the 'old' data to be written out as well as the new data if the
783 * log is reversable. Other issues:
785 * - How to deal with operations on unlinked files (no path available),
786 * but which may still be filesystem visible due to hard links.
788 * - How to deal with modifications made via a memory map.
790 * - Future cache coherency support will require cache coherency API calls
791 * both prior to and after the call to the underlying VFS.
793 * ALSO NOTE: We do not have to shim compatibility VOPs like MKDIR which have
794 * new VFS equivalents (NMKDIR).
798 * Journal vop_setattr { a_vp, a_vap, a_cred, a_td }
802 journal_setattr(struct vop_setattr_args *ap)
804 struct jrecord_list jreclist;
805 struct jrecord jreccache;
806 struct jrecord *jrec;
811 mp = ap->a_head.a_ops->vv_mount;
812 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETATTR)) {
813 jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_VATTR, 0, 0);
815 error = vop_journal_operate_ap(&ap->a_head);
817 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
818 jrecord_write_cred(jrec, curthread, ap->a_cred);
819 jrecord_write_vnode_ref(jrec, ap->a_vp);
820 save = jrecord_push(jrec, JTYPE_REDO);
821 jrecord_write_vattr(jrec, ap->a_vap);
822 jrecord_pop(jrec, save);
825 jreclist_done(mp, &jreclist, error);
830 * Journal vop_write { a_vp, a_uio, a_ioflag, a_cred }
834 journal_write(struct vop_write_args *ap)
836 struct jrecord_list jreclist;
837 struct jrecord jreccache;
838 struct jrecord *jrec;
841 struct iovec uio_one_iovec;
846 * This is really nasty. UIO's don't retain sufficient information to
847 * be reusable once they've gone through the VOP chain. The iovecs get
848 * cleared, so we have to copy the UIO.
850 * XXX fix the UIO code to not destroy iov's during a scan so we can
851 * reuse the uio over and over again.
853 * XXX UNDO code needs to journal the old data prior to the write.
855 uio_copy = *ap->a_uio;
856 if (uio_copy.uio_iovcnt == 1) {
857 uio_one_iovec = ap->a_uio->uio_iov[0];
858 uio_copy.uio_iov = &uio_one_iovec;
860 uio_copy.uio_iov = malloc(uio_copy.uio_iovcnt * sizeof(struct iovec),
861 M_JOURNAL, M_WAITOK);
862 bcopy(ap->a_uio->uio_iov, uio_copy.uio_iov,
863 uio_copy.uio_iovcnt * sizeof(struct iovec));
867 * Write out undo data. Note that uio_offset is incorrect if
868 * IO_APPEND is set, but fortunately we have no undo file data to
869 * write out in that case.
871 mp = ap->a_head.a_ops->vv_mount;
872 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_WRITE)) {
873 if (ap->a_ioflag & IO_APPEND) {
874 jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_SIZE|JRUNDO_MTIME, 0, 0);
876 jreclist_undo_file(&jreclist, ap->a_vp,
877 JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
878 uio_copy.uio_offset, uio_copy.uio_resid);
881 error = vop_journal_operate_ap(&ap->a_head);
884 * XXX bad hack to figure out the offset for O_APPEND writes (note:
885 * uio field state after the VFS operation).
887 uio_copy.uio_offset = ap->a_uio->uio_offset -
888 (uio_copy.uio_resid - ap->a_uio->uio_resid);
891 * Output the write data to the journal.
894 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
895 jrecord_write_cred(jrec, NULL, ap->a_cred);
896 jrecord_write_vnode_ref(jrec, ap->a_vp);
897 save = jrecord_push(jrec, JTYPE_REDO);
898 jrecord_write_uio(jrec, JLEAF_FILEDATA, &uio_copy);
899 jrecord_pop(jrec, save);
902 jreclist_done(mp, &jreclist, error);
904 if (uio_copy.uio_iov != &uio_one_iovec)
905 free(uio_copy.uio_iov, M_JOURNAL);
910 * Journal vop_fsync { a_vp, a_waitfor, a_td }
914 journal_fsync(struct vop_fsync_args *ap)
922 error = vop_journal_operate_ap(&ap->a_head);
924 mp = ap->a_head.a_ops->vv_mount;
926 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
927 /* XXX synchronize pending journal records */
935 * Journal vop_putpages { a_vp, a_m, a_count, a_sync, a_rtvals, a_offset }
937 * note: a_count is in bytes.
941 journal_putpages(struct vop_putpages_args *ap)
943 struct jrecord_list jreclist;
944 struct jrecord jreccache;
945 struct jrecord *jrec;
950 mp = ap->a_head.a_ops->vv_mount;
951 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_PUTPAGES) &&
954 jreclist_undo_file(&jreclist, ap->a_vp,
955 JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
956 ap->a_offset, btoc(ap->a_count));
958 error = vop_journal_operate_ap(&ap->a_head);
959 if (error == 0 && ap->a_count > 0) {
960 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
961 jrecord_write_vnode_ref(jrec, ap->a_vp);
962 save = jrecord_push(jrec, JTYPE_REDO);
963 jrecord_write_pagelist(jrec, JLEAF_FILEDATA, ap->a_m, ap->a_rtvals,
964 btoc(ap->a_count), ap->a_offset);
965 jrecord_pop(jrec, save);
968 jreclist_done(mp, &jreclist, error);
973 * Journal vop_setacl { a_vp, a_type, a_aclp, a_cred, a_td }
977 journal_setacl(struct vop_setacl_args *ap)
979 struct jrecord_list jreclist;
980 struct jrecord jreccache;
981 struct jrecord *jrec;
985 mp = ap->a_head.a_ops->vv_mount;
986 jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETACL);
987 error = vop_journal_operate_ap(&ap->a_head);
989 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
991 if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
992 jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
994 jrecord_write_cred(jrec, curthread, ap->a_cred);
995 jrecord_write_vnode_ref(jrec, ap->a_vp);
997 save = jrecord_push(jrec, JTYPE_REDO);
999 jrecord_pop(jrec, save);
1003 jreclist_done(mp, &jreclist, error);
1008 * Journal vop_setextattr { a_vp, a_name, a_uio, a_cred, a_td }
1012 journal_setextattr(struct vop_setextattr_args *ap)
1014 struct jrecord_list jreclist;
1015 struct jrecord jreccache;
1016 struct jrecord *jrec;
1021 mp = ap->a_head.a_ops->vv_mount;
1022 jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETEXTATTR);
1023 error = vop_journal_operate_ap(&ap->a_head);
1025 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1027 if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
1028 jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
1030 jrecord_write_cred(jrec, curthread, ap->a_cred);
1031 jrecord_write_vnode_ref(jrec, ap->a_vp);
1032 jrecord_leaf(jrec, JLEAF_ATTRNAME, ap->a_name, strlen(ap->a_name));
1033 save = jrecord_push(jrec, JTYPE_REDO);
1034 jrecord_write_uio(jrec, JLEAF_FILEDATA, ap->a_uio);
1035 jrecord_pop(jrec, save);
1038 jreclist_done(mp, &jreclist, error);
1043 * Journal vop_ncreate { a_ncp, a_vpp, a_cred, a_vap }
1047 journal_ncreate(struct vop_ncreate_args *ap)
1049 struct jrecord_list jreclist;
1050 struct jrecord jreccache;
1051 struct jrecord *jrec;
1056 mp = ap->a_head.a_ops->vv_mount;
1057 jreclist_init(mp, &jreclist, &jreccache, JTYPE_CREATE);
1058 error = vop_journal_operate_ap(&ap->a_head);
1060 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1061 jrecord_write_cred(jrec, NULL, ap->a_cred);
1062 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1064 jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1065 save = jrecord_push(jrec, JTYPE_REDO);
1066 jrecord_write_vattr(jrec, ap->a_vap);
1067 jrecord_pop(jrec, save);
1070 jreclist_done(mp, &jreclist, error);
1075 * Journal vop_nmknod { a_ncp, a_vpp, a_cred, a_vap }
1079 journal_nmknod(struct vop_nmknod_args *ap)
1081 struct jrecord_list jreclist;
1082 struct jrecord jreccache;
1083 struct jrecord *jrec;
1088 mp = ap->a_head.a_ops->vv_mount;
1089 jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKNOD);
1090 error = vop_journal_operate_ap(&ap->a_head);
1092 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1093 jrecord_write_cred(jrec, NULL, ap->a_cred);
1094 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1095 save = jrecord_push(jrec, JTYPE_REDO);
1096 jrecord_write_vattr(jrec, ap->a_vap);
1097 jrecord_pop(jrec, save);
1099 jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1102 jreclist_done(mp, &jreclist, error);
1107 * Journal vop_nlink { a_ncp, a_vp, a_cred }
1111 journal_nlink(struct vop_nlink_args *ap)
1113 struct jrecord_list jreclist;
1114 struct jrecord jreccache;
1115 struct jrecord *jrec;
1120 mp = ap->a_head.a_ops->vv_mount;
1121 jreclist_init(mp, &jreclist, &jreccache, JTYPE_LINK);
1122 error = vop_journal_operate_ap(&ap->a_head);
1124 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1125 jrecord_write_cred(jrec, NULL, ap->a_cred);
1126 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1127 /* XXX PATH to VP and inode number */
1128 /* XXX this call may not record the correct path when
1129 * multiple paths are available */
1130 save = jrecord_push(jrec, JTYPE_REDO);
1131 jrecord_write_vnode_link(jrec, ap->a_vp, ap->a_ncp);
1132 jrecord_pop(jrec, save);
1135 jreclist_done(mp, &jreclist, error);
1140 * Journal vop_symlink { a_ncp, a_vpp, a_cred, a_vap, a_target }
1144 journal_nsymlink(struct vop_nsymlink_args *ap)
1146 struct jrecord_list jreclist;
1147 struct jrecord jreccache;
1148 struct jrecord *jrec;
1153 mp = ap->a_head.a_ops->vv_mount;
1154 jreclist_init(mp, &jreclist, &jreccache, JTYPE_SYMLINK);
1155 error = vop_journal_operate_ap(&ap->a_head);
1157 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1158 jrecord_write_cred(jrec, NULL, ap->a_cred);
1159 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1160 save = jrecord_push(jrec, JTYPE_REDO);
1161 jrecord_leaf(jrec, JLEAF_SYMLINKDATA,
1162 ap->a_target, strlen(ap->a_target));
1163 jrecord_pop(jrec, save);
1165 jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1168 jreclist_done(mp, &jreclist, error);
1173 * Journal vop_nwhiteout { a_ncp, a_cred, a_flags }
1177 journal_nwhiteout(struct vop_nwhiteout_args *ap)
1179 struct jrecord_list jreclist;
1180 struct jrecord jreccache;
1181 struct jrecord *jrec;
1185 mp = ap->a_head.a_ops->vv_mount;
1186 jreclist_init(mp, &jreclist, &jreccache, JTYPE_WHITEOUT);
1187 error = vop_journal_operate_ap(&ap->a_head);
1189 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1190 jrecord_write_cred(jrec, NULL, ap->a_cred);
1191 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1194 jreclist_done(mp, &jreclist, error);
1199 * Journal vop_nremove { a_ncp, a_cred }
1203 journal_nremove(struct vop_nremove_args *ap)
1205 struct jrecord_list jreclist;
1206 struct jrecord jreccache;
1207 struct jrecord *jrec;
1211 mp = ap->a_head.a_ops->vv_mount;
1212 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_REMOVE) &&
1215 jreclist_undo_file(&jreclist, ap->a_ncp->nc_vp,
1216 JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1218 error = vop_journal_operate_ap(&ap->a_head);
1220 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1221 jrecord_write_cred(jrec, NULL, ap->a_cred);
1222 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1225 jreclist_done(mp, &jreclist, error);
1230 * Journal vop_nmkdir { a_ncp, a_vpp, a_cred, a_vap }
1234 journal_nmkdir(struct vop_nmkdir_args *ap)
1236 struct jrecord_list jreclist;
1237 struct jrecord jreccache;
1238 struct jrecord *jrec;
1242 mp = ap->a_head.a_ops->vv_mount;
1243 jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKDIR);
1244 error = vop_journal_operate_ap(&ap->a_head);
1246 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1248 if (jo->flags & MC_JOURNAL_WANT_AUDIT) {
1249 jrecord_write_audit(jrec);
1252 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1253 jrecord_write_cred(jrec, NULL, ap->a_cred);
1254 jrecord_write_vattr(jrec, ap->a_vap);
1255 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1257 jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1260 jreclist_done(mp, &jreclist, error);
1265 * Journal vop_nrmdir { a_ncp, a_cred }
1269 journal_nrmdir(struct vop_nrmdir_args *ap)
1271 struct jrecord_list jreclist;
1272 struct jrecord jreccache;
1273 struct jrecord *jrec;
1277 mp = ap->a_head.a_ops->vv_mount;
1278 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RMDIR)) {
1279 jreclist_undo_file(&jreclist, ap->a_ncp->nc_vp,
1280 JRUNDO_VATTR|JRUNDO_GETVP, 0, 0);
1282 error = vop_journal_operate_ap(&ap->a_head);
1284 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1285 jrecord_write_cred(jrec, NULL, ap->a_cred);
1286 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1289 jreclist_done(mp, &jreclist, error);
1294 * Journal vop_nrename { a_fncp, a_tncp, a_cred }
1298 journal_nrename(struct vop_nrename_args *ap)
1300 struct jrecord_list jreclist;
1301 struct jrecord jreccache;
1302 struct jrecord *jrec;
1306 mp = ap->a_head.a_ops->vv_mount;
1307 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RENAME) &&
1310 jreclist_undo_file(&jreclist, ap->a_tncp->nc_vp,
1311 JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1313 error = vop_journal_operate_ap(&ap->a_head);
1315 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1316 jrecord_write_cred(jrec, NULL, ap->a_cred);
1317 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_fncp);
1318 jrecord_write_path(jrec, JLEAF_PATH2, ap->a_tncp);
1321 jreclist_done(mp, &jreclist, error);