[dragonfly.git] / sys / kern / vfs_jops.c
1/*
2 * Copyright (c) 2004-2006 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/kern/vfs_jops.c,v 1.31 2006/09/30 21:10:19 swildner Exp $
35 */
36/*
37 * Each mount point may have zero or more independently configured journals
38 * attached to it. Each journal is represented by a memory FIFO and worker
39 * thread. Journal events are streamed through the FIFO to the thread,
40 * batched up (typically on one-second intervals), and written out by the
41 * thread.
42 *
43 * Journal vnode ops are executed instead of mnt_vn_norm_ops when one or
44 * more journals have been installed on a mount point. It becomes the
45 * responsibility of the journal op to call the underlying normal op as
46 * appropriate.
47 */
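
/*
 * Illustrative userland sketch (not part of this file): a journal is
 * normally installed through the mountctl(2) interface serviced by
 * journal_mountctl() below, typically via the mountctl(8) utility.
 * Assuming the mountctl(2) prototype and the mountctl_install_journal
 * layout from <sys/mountctl.h> (includes and error handling omitted):
 *
 *	struct mountctl_install_journal info;
 *	int fd;
 *
 *	fd = open("/var/log/mnt.journal", O_WRONLY | O_CREAT | O_APPEND, 0600);
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.id, "backup", sizeof(info.id));
 *	info.membufsize = 1024 * 1024;
 *	mountctl("/mnt", MOUNTCTL_INSTALL_VFS_JOURNAL, fd,
 *		 &info, sizeof(info), NULL, 0);
 */
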
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/buf.h>
51#include <sys/conf.h>
52#include <sys/kernel.h>
53#include <sys/queue.h>
54#include <sys/lock.h>
55#include <sys/malloc.h>
56#include <sys/mount.h>
57#include <sys/unistd.h>
58#include <sys/vnode.h>
59#include <sys/poll.h>
60#include <sys/mountctl.h>
61#include <sys/journal.h>
62#include <sys/file.h>
63#include <sys/proc.h>
64#include <sys/msfbuf.h>
65#include <sys/socket.h>
66#include <sys/socketvar.h>
67
68#include <machine/limits.h>
69
70#include <vm/vm.h>
71#include <vm/vm_object.h>
72#include <vm/vm_page.h>
73#include <vm/vm_pager.h>
74#include <vm/vnode_pager.h>
75
76#include <sys/file2.h>
77#include <sys/thread2.h>
78
79static int journal_attach(struct mount *mp);
80static void journal_detach(struct mount *mp);
81static int journal_install_vfs_journal(struct mount *mp, struct file *fp,
82 const struct mountctl_install_journal *info);
83static int journal_restart_vfs_journal(struct mount *mp, struct file *fp,
84 const struct mountctl_restart_journal *info);
85static int journal_remove_vfs_journal(struct mount *mp,
86 const struct mountctl_remove_journal *info);
87static int journal_restart(struct mount *mp, struct file *fp,
88 struct journal *jo, int flags);
89static int journal_destroy(struct mount *mp, struct journal *jo, int flags);
90static int journal_resync_vfs_journal(struct mount *mp, const void *ctl);
91static int journal_status_vfs_journal(struct mount *mp,
92 const struct mountctl_status_journal *info,
93 struct mountctl_journal_ret_status *rstat,
94 int buflen, int *res);
95
96static void jrecord_undo_file(struct jrecord *jrec, struct vnode *vp,
97 int jrflags, off_t off, off_t bytes);
98
99static int journal_setattr(struct vop_setattr_args *ap);
100static int journal_write(struct vop_write_args *ap);
101static int journal_fsync(struct vop_fsync_args *ap);
102static int journal_putpages(struct vop_putpages_args *ap);
103static int journal_setacl(struct vop_setacl_args *ap);
104static int journal_setextattr(struct vop_setextattr_args *ap);
105static int journal_ncreate(struct vop_ncreate_args *ap);
106static int journal_nmknod(struct vop_nmknod_args *ap);
107static int journal_nlink(struct vop_nlink_args *ap);
108static int journal_nsymlink(struct vop_nsymlink_args *ap);
109static int journal_nwhiteout(struct vop_nwhiteout_args *ap);
110static int journal_nremove(struct vop_nremove_args *ap);
111static int journal_nmkdir(struct vop_nmkdir_args *ap);
112static int journal_nrmdir(struct vop_nrmdir_args *ap);
113static int journal_nrename(struct vop_nrename_args *ap);
114
115#define JRUNDO_SIZE 0x00000001
116#define JRUNDO_UID 0x00000002
117#define JRUNDO_GID 0x00000004
118#define JRUNDO_FSID 0x00000008
119#define JRUNDO_MODES 0x00000010
120#define JRUNDO_INUM 0x00000020
121#define JRUNDO_ATIME 0x00000040
122#define JRUNDO_MTIME 0x00000080
123#define JRUNDO_CTIME 0x00000100
124#define JRUNDO_GEN 0x00000200
125#define JRUNDO_FLAGS 0x00000400
126#define JRUNDO_UDEV 0x00000800
127#define JRUNDO_NLINK 0x00001000
128#define JRUNDO_FILEDATA 0x00010000
129#define JRUNDO_GETVP 0x00020000
130#define JRUNDO_CONDLINK 0x00040000 /* write file data if link count 1 */
131#define JRUNDO_VATTR (JRUNDO_SIZE|JRUNDO_UID|JRUNDO_GID|JRUNDO_FSID|\
132 JRUNDO_MODES|JRUNDO_INUM|JRUNDO_ATIME|JRUNDO_MTIME|\
133 JRUNDO_CTIME|JRUNDO_GEN|JRUNDO_FLAGS|JRUNDO_UDEV|\
134 JRUNDO_NLINK)
135#define JRUNDO_ALL (JRUNDO_VATTR|JRUNDO_FILEDATA)
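
/*
 * Example usage: journal_nremove() below passes
 * JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK so that everything needed to
 * undo the unlink is recorded, with JRUNDO_CONDLINK indicating that the
 * file data only has to be saved when the last link is being removed.
 */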
136
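/*
 * Vnode ops table installed on journaled mount points. Ops not listed
 * here fall through to vop_default (vop_journal_operate_ap), which simply
 * passes the call on to the underlying filesystem; only the ops below
 * generate journal records.
 */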
137static struct vop_ops journal_vnode_vops = {
138 .vop_default = vop_journal_operate_ap,
139 .vop_mountctl = journal_mountctl,
140 .vop_setattr = journal_setattr,
141 .vop_write = journal_write,
142 .vop_fsync = journal_fsync,
143 .vop_putpages = journal_putpages,
144 .vop_setacl = journal_setacl,
145 .vop_setextattr = journal_setextattr,
146 .vop_ncreate = journal_ncreate,
147 .vop_nmknod = journal_nmknod,
148 .vop_nlink = journal_nlink,
149 .vop_nsymlink = journal_nsymlink,
150 .vop_nwhiteout = journal_nwhiteout,
151 .vop_nremove = journal_nremove,
152 .vop_nmkdir = journal_nmkdir,
153 .vop_nrmdir = journal_nrmdir,
154 .vop_nrename = journal_nrename
155};
156
157int
158journal_mountctl(struct vop_mountctl_args *ap)
159{
160 struct mount *mp;
161 int error = 0;
162
163 mp = ap->a_head.a_ops->head.vv_mount;
164 KKASSERT(mp);
165
166 if (mp->mnt_vn_journal_ops == NULL) {
167 switch(ap->a_op) {
168 case MOUNTCTL_INSTALL_VFS_JOURNAL:
169 error = journal_attach(mp);
170 if (error == 0 && ap->a_ctllen != sizeof(struct mountctl_install_journal))
171 error = EINVAL;
172 if (error == 0 && ap->a_fp == NULL)
173 error = EBADF;
174 if (error == 0)
175 error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
176 if (TAILQ_EMPTY(&mp->mnt_jlist))
177 journal_detach(mp);
178 break;
179 case MOUNTCTL_RESTART_VFS_JOURNAL:
180 case MOUNTCTL_REMOVE_VFS_JOURNAL:
181 case MOUNTCTL_RESYNC_VFS_JOURNAL:
182 case MOUNTCTL_STATUS_VFS_JOURNAL:
183 error = ENOENT;
184 break;
185 default:
186 error = EOPNOTSUPP;
187 break;
188 }
189 } else {
190 switch(ap->a_op) {
191 case MOUNTCTL_INSTALL_VFS_JOURNAL:
192 if (ap->a_ctllen != sizeof(struct mountctl_install_journal))
193 error = EINVAL;
194 if (error == 0 && ap->a_fp == NULL)
195 error = EBADF;
196 if (error == 0)
197 error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
198 break;
199 case MOUNTCTL_RESTART_VFS_JOURNAL:
200 if (ap->a_ctllen != sizeof(struct mountctl_restart_journal))
201 error = EINVAL;
202 if (error == 0 && ap->a_fp == NULL)
203 error = EBADF;
204 if (error == 0)
205 error = journal_restart_vfs_journal(mp, ap->a_fp, ap->a_ctl);
206 break;
207 case MOUNTCTL_REMOVE_VFS_JOURNAL:
208 if (ap->a_ctllen != sizeof(struct mountctl_remove_journal))
209 error = EINVAL;
210 if (error == 0)
211 error = journal_remove_vfs_journal(mp, ap->a_ctl);
212 if (TAILQ_EMPTY(&mp->mnt_jlist))
213 journal_detach(mp);
214 break;
215 case MOUNTCTL_RESYNC_VFS_JOURNAL:
216 if (ap->a_ctllen != 0)
217 error = EINVAL;
 218 if (error == 0)
 error = journal_resync_vfs_journal(mp, ap->a_ctl);
219 break;
220 case MOUNTCTL_STATUS_VFS_JOURNAL:
221 if (ap->a_ctllen != sizeof(struct mountctl_status_journal))
222 error = EINVAL;
223 if (error == 0) {
224 error = journal_status_vfs_journal(mp, ap->a_ctl,
225 ap->a_buf, ap->a_buflen, ap->a_res);
226 }
227 break;
228 default:
229 error = EOPNOTSUPP;
230 break;
231 }
232 }
233 return (error);
234}
235
236/*
 237 * High level mount point setup. When the first journal is installed on a
 * mount point this attaches the journal vnode ops and allocates the
 * stream-id bitmap used for parallel transactions; journal_detach()
 * reverses the process once the last journal has been removed.
 238 */
239static int
240journal_attach(struct mount *mp)
241{
242 KKASSERT(mp->mnt_jbitmap == NULL);
243 vfs_add_vnodeops(mp, &journal_vnode_vops, &mp->mnt_vn_journal_ops);
244 mp->mnt_jbitmap = kmalloc(JREC_STREAMID_JMAX/8, M_JOURNAL, M_WAITOK|M_ZERO);
245 mp->mnt_streamid = JREC_STREAMID_JMIN;
246 return(0);
247}
248
249static void
250journal_detach(struct mount *mp)
251{
252 KKASSERT(mp->mnt_jbitmap != NULL);
253 if (mp->mnt_vn_journal_ops)
254 vfs_rm_vnodeops(mp, &journal_vnode_vops, &mp->mnt_vn_journal_ops);
255 kfree(mp->mnt_jbitmap, M_JOURNAL);
256 mp->mnt_jbitmap = NULL;
257}
258
259/*
260 * Install a journal on a mount point. Each journal has an associated worker
261 * thread which is responsible for buffering and spooling the data to the
262 * target. A mount point may have multiple journals attached to it. An
263 * initial start record is generated when the journal is associated.
264 */
265static int
266journal_install_vfs_journal(struct mount *mp, struct file *fp,
267 const struct mountctl_install_journal *info)
268{
269 struct journal *jo;
270 struct jrecord jrec;
271 int error = 0;
272 int size;
273
274 jo = kmalloc(sizeof(struct journal), M_JOURNAL, M_WAITOK|M_ZERO);
275 bcopy(info->id, jo->id, sizeof(jo->id));
276 jo->flags = info->flags & ~(MC_JOURNAL_WACTIVE | MC_JOURNAL_RACTIVE |
277 MC_JOURNAL_STOP_REQ);
278
279 /*
 280 * Memory FIFO size: clamp to [64KB, 128MB] and round up to a power of 2
281 */
282 if (info->membufsize) {
283 if (info->membufsize < 65536)
284 size = 65536;
285 else if (info->membufsize > 128 * 1024 * 1024)
286 size = 128 * 1024 * 1024;
287 else
288 size = (int)info->membufsize;
289 } else {
290 size = 1024 * 1024;
291 }
292 jo->fifo.size = 1;
293 while (jo->fifo.size < size)
294 jo->fifo.size <<= 1;
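/* e.g. a requested membufsize of 100000 yields a 131072 byte FIFO */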
295
296 /*
 297 * Other parameters. If not specified the starting transaction id is
 298 * derived from the current time, with the seconds in the high bits and
 * the nanoseconds (always < 2^30) in the low 30 bits, so ids normally
 * increase monotonically.
299 */
300 if (info->transid) {
301 jo->transid = info->transid;
302 } else {
303 struct timespec ts;
304 getnanotime(&ts);
305 jo->transid = ((int64_t)ts.tv_sec << 30) | ts.tv_nsec;
306 }
307
308 jo->fp = fp;
309
310 /*
311 * Allocate the memory FIFO
312 */
313 jo->fifo.mask = jo->fifo.size - 1;
314 jo->fifo.membase = kmalloc(jo->fifo.size, M_JFIFO, M_WAITOK|M_ZERO|M_NULLOK);
315 if (jo->fifo.membase == NULL)
316 error = ENOMEM;
317
318 /*
319 * Create the worker threads and generate the association record.
320 */
321 if (error) {
322 kfree(jo, M_JOURNAL);
323 } else {
324 fhold(fp);
325 journal_create_threads(jo);
326 jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
327 jrecord_write(&jrec, JTYPE_ASSOCIATE, 0);
328 jrecord_done(&jrec, 0);
329 TAILQ_INSERT_TAIL(&mp->mnt_jlist, jo, jentry);
330 }
331 return(error);
332}
333
334/*
335 * Restart a journal with a new descriptor. The existing reader and writer
336 * threads are terminated and a new descriptor is associated with the
337 * journal. The FIFO rindex is reset to xindex and the threads are then
338 * restarted.
339 */
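/*
 * FIFO index note (as reported by journal_status_vfs_journal() below):
 * windex is the producer index, rindex the next byte to transmit, and
 * xindex the last byte acknowledged by the target, so resetting rindex
 * back to xindex causes any unacknowledged data to be retransmitted over
 * the new descriptor.
 */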
340static int
341journal_restart_vfs_journal(struct mount *mp, struct file *fp,
342 const struct mountctl_restart_journal *info)
343{
344 struct journal *jo;
345 int error;
346
347 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
348 if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
349 break;
350 }
351 if (jo)
352 error = journal_restart(mp, fp, jo, info->flags);
353 else
354 error = EINVAL;
355 return (error);
356}
357
358static int
359journal_restart(struct mount *mp, struct file *fp,
360 struct journal *jo, int flags)
361{
362 /*
363 * XXX lock the jo
364 */
365
366#if 0
367 /*
368 * Record the fact that we are doing a restart in the journal.
369 * XXX it isn't safe to do this if the journal is being restarted
370 * because it was locked up and the writer thread has already exited.
371 */
372 jrecord_init(jo, &jrec, JREC_STREAMID_RESTART);
373 jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
374 jrecord_done(&jrec, 0);
375#endif
376
377 /*
378 * Stop the reader and writer threads and clean up the current
379 * descriptor.
380 */
381 printf("RESTART WITH FP %p KILLING %p\n", fp, jo->fp);
382 journal_destroy_threads(jo, flags);
383
384 if (jo->fp)
385 fdrop(jo->fp);
386
387 /*
388 * Associate the new descriptor, reset the FIFO index, and recreate
389 * the threads.
390 */
391 fhold(fp);
392 jo->fp = fp;
393 jo->fifo.rindex = jo->fifo.xindex;
394 journal_create_threads(jo);
395
396 return(0);
397}
398
399/*
400 * Disassociate a journal from a mount point and terminate its worker thread.
401 * A final termination record is written out before the file pointer is
402 * dropped.
403 */
404static int
405journal_remove_vfs_journal(struct mount *mp,
406 const struct mountctl_remove_journal *info)
407{
408 struct journal *jo;
409 int error;
410
411 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
412 if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
413 break;
414 }
415 if (jo)
416 error = journal_destroy(mp, jo, info->flags);
417 else
418 error = EINVAL;
419 return (error);
420}
421
422/*
423 * Remove all journals associated with a mount point. Usually called
424 * by the umount code.
425 */
426void
427journal_remove_all_journals(struct mount *mp, int flags)
428{
429 struct journal *jo;
430
431 while ((jo = TAILQ_FIRST(&mp->mnt_jlist)) != NULL) {
432 journal_destroy(mp, jo, flags);
433 }
434}
435
436static int
437journal_destroy(struct mount *mp, struct journal *jo, int flags)
438{
439 struct jrecord jrec;
440
441 TAILQ_REMOVE(&mp->mnt_jlist, jo, jentry);
442
443 jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
444 jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
445 jrecord_done(&jrec, 0);
446
447 journal_destroy_threads(jo, flags);
448
449 if (jo->fp)
450 fdrop(jo->fp);
451 if (jo->fifo.membase)
452 kfree(jo->fifo.membase, M_JFIFO);
453 kfree(jo, M_JOURNAL);
454
455 return(0);
456}
457
458static int
459journal_resync_vfs_journal(struct mount *mp, const void *ctl)
460{
461 return(EINVAL);
462}
463
464static int
465journal_status_vfs_journal(struct mount *mp,
466 const struct mountctl_status_journal *info,
467 struct mountctl_journal_ret_status *rstat,
468 int buflen, int *res)
469{
470 struct journal *jo;
471 int error = 0;
472 int index;
473
474 index = 0;
475 *res = 0;
476 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
477 if (info->index == MC_JOURNAL_INDEX_ID) {
478 if (bcmp(jo->id, info->id, sizeof(jo->id)) != 0)
479 continue;
480 } else if (info->index >= 0) {
481 if (info->index < index)
482 continue;
483 } else if (info->index != MC_JOURNAL_INDEX_ALL) {
484 continue;
485 }
486 if (buflen < sizeof(*rstat)) {
487 if (*res)
488 rstat[-1].flags |= MC_JOURNAL_STATUS_MORETOCOME;
489 else
490 error = EINVAL;
491 break;
492 }
493 bzero(rstat, sizeof(*rstat));
494 rstat->recsize = sizeof(*rstat);
495 bcopy(jo->id, rstat->id, sizeof(jo->id));
496 rstat->index = index;
497 rstat->membufsize = jo->fifo.size;
498 rstat->membufused = jo->fifo.windex - jo->fifo.xindex;
499 rstat->membufunacked = jo->fifo.rindex - jo->fifo.xindex;
500 rstat->bytessent = jo->total_acked;
501 rstat->fifostalls = jo->fifostalls;
502 ++rstat;
503 ++index;
504 *res += sizeof(*rstat);
505 buflen -= sizeof(*rstat);
506 }
507 return(error);
508}
509
510/************************************************************************
511 * PARALLEL TRANSACTION SUPPORT ROUTINES *
512 ************************************************************************
513 *
 514 * JRECLIST_*() - routines which create and iterate over a list of jrecord
 515 * structures, one per attached journal, since a mount point may have
 * multiple journals attached to it.
516 */
517
518/*
519 * Initialize the passed jrecord_list and create a jrecord for each
520 * journal we need to write to. Unnecessary mallocs are avoided by
521 * using the passed jrecord structure as the first jrecord in the list.
522 * A starting transaction is pushed for each jrecord.
523 *
524 * Returns non-zero if any of the journals require undo records.
525 */
526static
527int
528jreclist_init(struct mount *mp, struct jrecord_list *jreclist,
529 struct jrecord *jreccache, int16_t rectype)
530{
531 struct journal *jo;
532 struct jrecord *jrec;
533 int wantrev;
534 int count;
535 int16_t streamid;
536
537 TAILQ_INIT(&jreclist->list);
538
539 /*
540 * Select the stream ID to use for the transaction. We must select
541 * a stream ID that is not currently in use by some other parallel
542 * transaction.
543 *
544 * Don't bother calculating the next streamid when reassigning
545 * mnt_streamid, since parallel transactions are fairly rare. This
546 * also allows someone observing the raw records to clearly see
547 * when parallel transactions occur.
548 */
549 streamid = mp->mnt_streamid;
550 count = 0;
551 while (mp->mnt_jbitmap[streamid >> 3] & (1 << (streamid & 7))) {
552 if (++streamid == JREC_STREAMID_JMAX)
553 streamid = JREC_STREAMID_JMIN;
554 if (++count == JREC_STREAMID_JMAX - JREC_STREAMID_JMIN) {
 555 printf("jreclist_init: all streamids in use! sleeping\n");
556 tsleep(jreclist, 0, "jsidfl", hz * 10);
557 count = 0;
558 }
559 }
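/* reserve the chosen streamid in the bitmap: byte streamid >> 3, bit streamid & 7 */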
560 mp->mnt_jbitmap[streamid >> 3] |= 1 << (streamid & 7);
561 mp->mnt_streamid = streamid;
562 jreclist->streamid = streamid;
563
564 /*
565 * Now initialize a stream on each journal.
566 */
567 count = 0;
568 wantrev = 0;
569 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
570 if (count == 0)
571 jrec = jreccache;
572 else
573 jrec = kmalloc(sizeof(*jrec), M_JOURNAL, M_WAITOK);
574 jrecord_init(jo, jrec, streamid);
575 jrec->user_save = jrecord_push(jrec, rectype);
576 TAILQ_INSERT_TAIL(&jreclist->list, jrec, user_entry);
577 if (jo->flags & MC_JOURNAL_WANT_REVERSABLE)
578 wantrev = 1;
579 ++count;
580 }
581 return(wantrev);
582}
583
584/*
585 * Terminate the journaled transactions started by jreclist_init(). If
 586 * an error occurred, the transaction records will be aborted.
587 */
588static
589void
590jreclist_done(struct mount *mp, struct jrecord_list *jreclist, int error)
591{
592 struct jrecord *jrec;
593 int count;
594
595 /*
596 * Cleanup the jrecord state on each journal.
597 */
598 TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
599 jrecord_pop(jrec, jrec->user_save);
600 jrecord_done(jrec, error);
601 }
602
603 /*
604 * Free allocated jrec's (the first is always supplied)
605 */
606 count = 0;
607 while ((jrec = TAILQ_FIRST(&jreclist->list)) != NULL) {
608 TAILQ_REMOVE(&jreclist->list, jrec, user_entry);
609 if (count)
610 kfree(jrec, M_JOURNAL);
611 ++count;
612 }
613
614 /*
615 * Clear the streamid so it can be reused.
616 */
617 mp->mnt_jbitmap[jreclist->streamid >> 3] &= ~(1 << (jreclist->streamid & 7));
618}
619
620/*
 621 * This procedure writes out UNDO records for available reversible
622 * journals.
623 *
624 * XXX could use improvement. There is no need to re-read the file
625 * for each journal.
626 */
627static
628void
629jreclist_undo_file(struct jrecord_list *jreclist, struct vnode *vp,
630 int jrflags, off_t off, off_t bytes)
631{
632 struct jrecord *jrec;
633 int error;
634
635 error = 0;
636 if (jrflags & JRUNDO_GETVP)
637 error = vget(vp, LK_SHARED);
638 if (error == 0) {
639 TAILQ_FOREACH(jrec, &jreclist->list, user_entry) {
640 if (jrec->jo->flags & MC_JOURNAL_WANT_REVERSABLE) {
641 jrecord_undo_file(jrec, vp, jrflags, off, bytes);
642 }
643 }
644 }
645 if (error == 0 && jrflags & JRUNDO_GETVP)
646 vput(vp);
647}
648
649/************************************************************************
650 * LOW LEVEL UNDO SUPPORT ROUTINE *
651 ************************************************************************
652 *
653 * This function is used to support UNDO records. It will generate an
654 * appropriate record with the requested portion of the file data. Note
655 * that file data is only recorded if JRUNDO_FILEDATA is passed. If bytes
656 * is -1, it will be set to the size of the file.
657 */
658static void
659jrecord_undo_file(struct jrecord *jrec, struct vnode *vp, int jrflags,
660 off_t off, off_t bytes)
661{
662 struct vattr attr;
663 void *save1; /* warning, save pointers do not always remain valid */
664 void *save2;
665 int error;
666
667 /*
668 * Setup. Start the UNDO record, obtain a shared lock on the vnode,
669 * and retrieve attribute info.
670 */
671 save1 = jrecord_push(jrec, JTYPE_UNDO);
672 error = VOP_GETATTR(vp, &attr);
673 if (error)
674 goto done;
675
676 /*
677 * Generate UNDO records as requested.
678 */
679 if (jrflags & JRUNDO_VATTR) {
680 save2 = jrecord_push(jrec, JTYPE_VATTR);
681 jrecord_leaf(jrec, JLEAF_VTYPE, &attr.va_type, sizeof(attr.va_type));
682 if ((jrflags & JRUNDO_NLINK) && attr.va_nlink != VNOVAL)
683 jrecord_leaf(jrec, JLEAF_NLINK, &attr.va_nlink, sizeof(attr.va_nlink));
684 if ((jrflags & JRUNDO_SIZE) && attr.va_size != VNOVAL)
685 jrecord_leaf(jrec, JLEAF_SIZE, &attr.va_size, sizeof(attr.va_size));
686 if ((jrflags & JRUNDO_UID) && attr.va_uid != VNOVAL)
687 jrecord_leaf(jrec, JLEAF_UID, &attr.va_uid, sizeof(attr.va_uid));
688 if ((jrflags & JRUNDO_GID) && attr.va_gid != VNOVAL)
689 jrecord_leaf(jrec, JLEAF_GID, &attr.va_gid, sizeof(attr.va_gid));
690 if ((jrflags & JRUNDO_FSID) && attr.va_fsid != VNOVAL)
691 jrecord_leaf(jrec, JLEAF_FSID, &attr.va_fsid, sizeof(attr.va_fsid));
692 if ((jrflags & JRUNDO_MODES) && attr.va_mode != (mode_t)VNOVAL)
693 jrecord_leaf(jrec, JLEAF_MODES, &attr.va_mode, sizeof(attr.va_mode));
694 if ((jrflags & JRUNDO_INUM) && attr.va_fileid != VNOVAL)
695 jrecord_leaf(jrec, JLEAF_INUM, &attr.va_fileid, sizeof(attr.va_fileid));
696 if ((jrflags & JRUNDO_ATIME) && attr.va_atime.tv_sec != VNOVAL)
697 jrecord_leaf(jrec, JLEAF_ATIME, &attr.va_atime, sizeof(attr.va_atime));
698 if ((jrflags & JRUNDO_MTIME) && attr.va_mtime.tv_sec != VNOVAL)
699 jrecord_leaf(jrec, JLEAF_MTIME, &attr.va_mtime, sizeof(attr.va_mtime));
700 if ((jrflags & JRUNDO_CTIME) && attr.va_ctime.tv_sec != VNOVAL)
701 jrecord_leaf(jrec, JLEAF_CTIME, &attr.va_ctime, sizeof(attr.va_ctime));
702 if ((jrflags & JRUNDO_GEN) && attr.va_gen != VNOVAL)
703 jrecord_leaf(jrec, JLEAF_GEN, &attr.va_gen, sizeof(attr.va_gen));
704 if ((jrflags & JRUNDO_FLAGS) && attr.va_flags != VNOVAL)
705 jrecord_leaf(jrec, JLEAF_FLAGS, &attr.va_flags, sizeof(attr.va_flags));
706 if ((jrflags & JRUNDO_UDEV) && attr.va_rdev != VNOVAL)
707 jrecord_leaf(jrec, JLEAF_UDEV, &attr.va_rdev, sizeof(attr.va_rdev));
708 jrecord_pop(jrec, save2);
709 }
710
711 /*
712 * Output the file data being overwritten by reading the file and
713 * writing it out to the journal prior to the write operation. We
714 * do not need to write out data past the current file EOF.
715 *
716 * XXX support JRUNDO_CONDLINK - do not write out file data for files
717 * with a link count > 1. The undo code needs to locate the inode and
718 * regenerate the hardlink.
719 */
720 if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VREG) {
721 if (attr.va_size != VNOVAL) {
722 if (bytes == -1)
723 bytes = attr.va_size - off;
724 if (off + bytes > attr.va_size)
725 bytes = attr.va_size - off;
726 if (bytes > 0)
727 jrecord_file_data(jrec, vp, off, bytes);
728 } else {
729 error = EINVAL;
730 }
731 }
732 if ((jrflags & JRUNDO_FILEDATA) && attr.va_type == VLNK) {
733 struct iovec aiov;
734 struct uio auio;
735 char *buf;
736
737 buf = kmalloc(PATH_MAX, M_JOURNAL, M_WAITOK);
738 aiov.iov_base = buf;
739 aiov.iov_len = PATH_MAX;
740 auio.uio_iov = &aiov;
741 auio.uio_iovcnt = 1;
742 auio.uio_offset = 0;
743 auio.uio_rw = UIO_READ;
744 auio.uio_segflg = UIO_SYSSPACE;
745 auio.uio_td = curthread;
746 auio.uio_resid = PATH_MAX;
747 error = VOP_READLINK(vp, &auio, proc0.p_ucred);
748 if (error == 0) {
749 jrecord_leaf(jrec, JLEAF_SYMLINKDATA, buf,
750 PATH_MAX - auio.uio_resid);
751 }
752 kfree(buf, M_JOURNAL);
753 }
754done:
755 if (error)
756 jrecord_leaf(jrec, JLEAF_ERROR, &error, sizeof(error));
757 jrecord_pop(jrec, save1);
758}
759
760/************************************************************************
761 * JOURNAL VNOPS *
762 ************************************************************************
763 *
764 * These are function shims replacing the normal filesystem ops. We become
765 * responsible for calling the underlying filesystem ops. We have the choice
766 * of executing the underlying op first and then generating the journal entry,
767 * or starting the journal entry, executing the underlying op, and then
768 * either completing or aborting it.
769 *
770 * The journal is supposed to be a high-level entity, which generally means
 771 * identifying files by name rather than by inode. Supplying both allows
772 * the journal to be used both for inode-number-compatible 'mirrors' and
773 * for simple filesystem replication.
774 *
775 * Writes are particularly difficult to deal with because a single write may
776 * represent a hundred megabyte buffer or more, and both writes and truncations
777 * require the 'old' data to be written out as well as the new data if the
 778 * log is reversible. Other issues:
779 *
780 * - How to deal with operations on unlinked files (no path available),
781 * but which may still be filesystem visible due to hard links.
782 *
783 * - How to deal with modifications made via a memory map.
784 *
785 * - Future cache coherency support will require cache coherency API calls
786 * both prior to and after the call to the underlying VFS.
787 *
788 * ALSO NOTE: We do not have to shim compatibility VOPs like MKDIR which have
789 * new VFS equivalents (NMKDIR).
790 */
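
/*
 * All of the shims below follow the same basic pattern, sketched here for
 * reference (journal_setattr() immediately below is a concrete instance):
 *
 *	jreclist_init(mp, &jreclist, &jreccache, JTYPE_XXX);
 *	  (optionally jreclist_undo_file() when a journal wants undo records)
 *	error = vop_journal_operate_ap(&ap->a_head);
 *	if (error == 0)
 *		write the per-journal records via TAILQ_FOREACH over jreclist
 *	jreclist_done(mp, &jreclist, error);
 */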
791
792/*
 793 * Journal vop_setattr { a_vp, a_vap, a_cred, a_td }
794 */
795static
796int
797journal_setattr(struct vop_setattr_args *ap)
798{
799 struct jrecord_list jreclist;
800 struct jrecord jreccache;
801 struct jrecord *jrec;
802 struct mount *mp;
803 void *save;
804 int error;
805
806 mp = ap->a_head.a_ops->head.vv_mount;
807 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETATTR)) {
808 jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_VATTR, 0, 0);
809 }
810 error = vop_journal_operate_ap(&ap->a_head);
811 if (error == 0) {
812 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
813 jrecord_write_cred(jrec, curthread, ap->a_cred);
814 jrecord_write_vnode_ref(jrec, ap->a_vp);
815 save = jrecord_push(jrec, JTYPE_REDO);
816 jrecord_write_vattr(jrec, ap->a_vap);
817 jrecord_pop(jrec, save);
818 }
819 }
820 jreclist_done(mp, &jreclist, error);
821 return (error);
822}
823
824/*
825 * Journal vop_write { a_vp, a_uio, a_ioflag, a_cred }
826 */
827static
828int
829journal_write(struct vop_write_args *ap)
830{
831 struct jrecord_list jreclist;
832 struct jrecord jreccache;
833 struct jrecord *jrec;
834 struct mount *mp;
835 struct uio uio_copy;
836 struct iovec uio_one_iovec;
837 void *save;
838 int error;
839
840 /*
 841 * This is really nasty. UIOs don't retain sufficient information to
842 * be reusable once they've gone through the VOP chain. The iovecs get
843 * cleared, so we have to copy the UIO.
844 *
845 * XXX fix the UIO code to not destroy iov's during a scan so we can
846 * reuse the uio over and over again.
847 *
848 * XXX UNDO code needs to journal the old data prior to the write.
849 */
850 uio_copy = *ap->a_uio;
851 if (uio_copy.uio_iovcnt == 1) {
852 uio_one_iovec = ap->a_uio->uio_iov[0];
853 uio_copy.uio_iov = &uio_one_iovec;
854 } else {
855 uio_copy.uio_iov = kmalloc(uio_copy.uio_iovcnt * sizeof(struct iovec),
856 M_JOURNAL, M_WAITOK);
857 bcopy(ap->a_uio->uio_iov, uio_copy.uio_iov,
858 uio_copy.uio_iovcnt * sizeof(struct iovec));
859 }
860
861 /*
862 * Write out undo data. Note that uio_offset is incorrect if
863 * IO_APPEND is set, but fortunately we have no undo file data to
864 * write out in that case.
865 */
866 mp = ap->a_head.a_ops->head.vv_mount;
867 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_WRITE)) {
868 if (ap->a_ioflag & IO_APPEND) {
869 jreclist_undo_file(&jreclist, ap->a_vp, JRUNDO_SIZE|JRUNDO_MTIME, 0, 0);
870 } else {
871 jreclist_undo_file(&jreclist, ap->a_vp,
872 JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
873 uio_copy.uio_offset, uio_copy.uio_resid);
874 }
875 }
876 error = vop_journal_operate_ap(&ap->a_head);
877
878 /*
879 * XXX bad hack to figure out the offset for O_APPEND writes (note:
880 * uio field state after the VFS operation).
881 */
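/*
 * After the VOP, a_uio->uio_offset points just past the data that was
 * written and uio_resid has been reduced by the byte count, so subtracting
 * the bytes written from the final offset recovers the starting offset
 * actually used by the filesystem.
 */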
882 uio_copy.uio_offset = ap->a_uio->uio_offset -
883 (uio_copy.uio_resid - ap->a_uio->uio_resid);
884
885 /*
886 * Output the write data to the journal.
887 */
888 if (error == 0) {
889 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
890 jrecord_write_cred(jrec, NULL, ap->a_cred);
891 jrecord_write_vnode_ref(jrec, ap->a_vp);
892 save = jrecord_push(jrec, JTYPE_REDO);
893 jrecord_write_uio(jrec, JLEAF_FILEDATA, &uio_copy);
894 jrecord_pop(jrec, save);
895 }
896 }
897 jreclist_done(mp, &jreclist, error);
898
899 if (uio_copy.uio_iov != &uio_one_iovec)
900 kfree(uio_copy.uio_iov, M_JOURNAL);
901 return (error);
902}
903
904/*
905 * Journal vop_fsync { a_vp, a_waitfor, a_td }
906 */
907static
908int
909journal_fsync(struct vop_fsync_args *ap)
910{
911#if 0
912 struct mount *mp;
913 struct journal *jo;
914#endif
915 int error;
916
917 error = vop_journal_operate_ap(&ap->a_head);
918#if 0
919 mp = ap->a_head.a_ops->head.vv_mount;
920 if (error == 0) {
921 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
922 /* XXX synchronize pending journal records */
923 }
924 }
925#endif
926 return (error);
927}
928
929/*
930 * Journal vop_putpages { a_vp, a_m, a_count, a_sync, a_rtvals, a_offset }
931 *
932 * note: a_count is in bytes.
933 */
934static
935int
936journal_putpages(struct vop_putpages_args *ap)
937{
938 struct jrecord_list jreclist;
939 struct jrecord jreccache;
940 struct jrecord *jrec;
941 struct mount *mp;
942 void *save;
943 int error;
944
945 mp = ap->a_head.a_ops->head.vv_mount;
946 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_PUTPAGES) &&
947 ap->a_count > 0
948 ) {
949 jreclist_undo_file(&jreclist, ap->a_vp,
950 JRUNDO_FILEDATA|JRUNDO_SIZE|JRUNDO_MTIME,
951 ap->a_offset, btoc(ap->a_count));
952 }
953 error = vop_journal_operate_ap(&ap->a_head);
954 if (error == 0 && ap->a_count > 0) {
955 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
956 jrecord_write_vnode_ref(jrec, ap->a_vp);
957 save = jrecord_push(jrec, JTYPE_REDO);
958 jrecord_write_pagelist(jrec, JLEAF_FILEDATA, ap->a_m, ap->a_rtvals,
959 btoc(ap->a_count), ap->a_offset);
960 jrecord_pop(jrec, save);
961 }
962 }
963 jreclist_done(mp, &jreclist, error);
964 return (error);
965}
966
967/*
968 * Journal vop_setacl { a_vp, a_type, a_aclp, a_cred, a_td }
969 */
970static
971int
972journal_setacl(struct vop_setacl_args *ap)
973{
974 struct jrecord_list jreclist;
975 struct jrecord jreccache;
976 struct jrecord *jrec;
977 struct mount *mp;
978 int error;
979
980 mp = ap->a_head.a_ops->head.vv_mount;
981 jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETACL);
982 error = vop_journal_operate_ap(&ap->a_head);
983 if (error == 0) {
984 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
985#if 0
986 if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
987 jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
988#endif
989 jrecord_write_cred(jrec, curthread, ap->a_cred);
990 jrecord_write_vnode_ref(jrec, ap->a_vp);
991#if 0
992 save = jrecord_push(jrec, JTYPE_REDO);
993 /* XXX type, aclp */
994 jrecord_pop(jrec, save);
995#endif
996 }
997 }
998 jreclist_done(mp, &jreclist, error);
999 return (error);
1000}
1001
1002/*
1003 * Journal vop_setextattr { a_vp, a_name, a_uio, a_cred, a_td }
1004 */
1005static
1006int
1007journal_setextattr(struct vop_setextattr_args *ap)
1008{
1009 struct jrecord_list jreclist;
1010 struct jrecord jreccache;
1011 struct jrecord *jrec;
1012 struct mount *mp;
1013 void *save;
1014 int error;
1015
1016 mp = ap->a_head.a_ops->head.vv_mount;
1017 jreclist_init(mp, &jreclist, &jreccache, JTYPE_SETEXTATTR);
1018 error = vop_journal_operate_ap(&ap->a_head);
1019 if (error == 0) {
1020 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1021#if 0
1022 if ((jo->flags & MC_JOURNAL_WANT_REVERSABLE))
1023 jrecord_undo_file(jrec, ap->a_vp, JRUNDO_XXX, 0, 0);
1024#endif
1025 jrecord_write_cred(jrec, curthread, ap->a_cred);
1026 jrecord_write_vnode_ref(jrec, ap->a_vp);
1027 jrecord_leaf(jrec, JLEAF_ATTRNAME, ap->a_name, strlen(ap->a_name));
1028 save = jrecord_push(jrec, JTYPE_REDO);
1029 jrecord_write_uio(jrec, JLEAF_FILEDATA, ap->a_uio);
1030 jrecord_pop(jrec, save);
1031 }
1032 }
1033 jreclist_done(mp, &jreclist, error);
1034 return (error);
1035}
1036
1037/*
1038 * Journal vop_ncreate { a_ncp, a_vpp, a_cred, a_vap }
1039 */
1040static
1041int
1042journal_ncreate(struct vop_ncreate_args *ap)
1043{
1044 struct jrecord_list jreclist;
1045 struct jrecord jreccache;
1046 struct jrecord *jrec;
1047 struct mount *mp;
1048 void *save;
1049 int error;
1050
1051 mp = ap->a_head.a_ops->head.vv_mount;
1052 jreclist_init(mp, &jreclist, &jreccache, JTYPE_CREATE);
1053 error = vop_journal_operate_ap(&ap->a_head);
1054 if (error == 0) {
1055 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1056 jrecord_write_cred(jrec, NULL, ap->a_cred);
1057 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1058 if (*ap->a_vpp)
1059 jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1060 save = jrecord_push(jrec, JTYPE_REDO);
1061 jrecord_write_vattr(jrec, ap->a_vap);
1062 jrecord_pop(jrec, save);
1063 }
1064 }
1065 jreclist_done(mp, &jreclist, error);
1066 return (error);
1067}
1068
1069/*
1070 * Journal vop_nmknod { a_ncp, a_vpp, a_cred, a_vap }
1071 */
1072static
1073int
1074journal_nmknod(struct vop_nmknod_args *ap)
1075{
1076 struct jrecord_list jreclist;
1077 struct jrecord jreccache;
1078 struct jrecord *jrec;
1079 struct mount *mp;
1080 void *save;
1081 int error;
1082
1083 mp = ap->a_head.a_ops->head.vv_mount;
1084 jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKNOD);
1085 error = vop_journal_operate_ap(&ap->a_head);
1086 if (error == 0) {
1087 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1088 jrecord_write_cred(jrec, NULL, ap->a_cred);
1089 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1090 save = jrecord_push(jrec, JTYPE_REDO);
1091 jrecord_write_vattr(jrec, ap->a_vap);
1092 jrecord_pop(jrec, save);
1093 if (*ap->a_vpp)
1094 jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1095 }
1096 }
1097 jreclist_done(mp, &jreclist, error);
1098 return (error);
1099}
1100
1101/*
1102 * Journal vop_nlink { a_ncp, a_vp, a_cred }
1103 */
1104static
1105int
1106journal_nlink(struct vop_nlink_args *ap)
1107{
1108 struct jrecord_list jreclist;
1109 struct jrecord jreccache;
1110 struct jrecord *jrec;
1111 struct mount *mp;
1112 void *save;
1113 int error;
1114
1115 mp = ap->a_head.a_ops->head.vv_mount;
1116 jreclist_init(mp, &jreclist, &jreccache, JTYPE_LINK);
1117 error = vop_journal_operate_ap(&ap->a_head);
1118 if (error == 0) {
1119 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1120 jrecord_write_cred(jrec, NULL, ap->a_cred);
1121 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1122 /* XXX PATH to VP and inode number */
1123 /* XXX this call may not record the correct path when
1124 * multiple paths are available */
1125 save = jrecord_push(jrec, JTYPE_REDO);
1126 jrecord_write_vnode_link(jrec, ap->a_vp, ap->a_ncp);
1127 jrecord_pop(jrec, save);
1128 }
1129 }
1130 jreclist_done(mp, &jreclist, error);
1131 return (error);
1132}
1133
1134/*
 1135 * Journal vop_nsymlink { a_ncp, a_vpp, a_cred, a_vap, a_target }
1136 */
1137static
1138int
1139journal_nsymlink(struct vop_nsymlink_args *ap)
1140{
1141 struct jrecord_list jreclist;
1142 struct jrecord jreccache;
1143 struct jrecord *jrec;
1144 struct mount *mp;
1145 void *save;
1146 int error;
1147
1148 mp = ap->a_head.a_ops->head.vv_mount;
1149 jreclist_init(mp, &jreclist, &jreccache, JTYPE_SYMLINK);
1150 error = vop_journal_operate_ap(&ap->a_head);
1151 if (error == 0) {
1152 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1153 jrecord_write_cred(jrec, NULL, ap->a_cred);
1154 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1155 save = jrecord_push(jrec, JTYPE_REDO);
1156 jrecord_leaf(jrec, JLEAF_SYMLINKDATA,
1157 ap->a_target, strlen(ap->a_target));
1158 jrecord_pop(jrec, save);
1159 if (*ap->a_vpp)
1160 jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1161 }
1162 }
1163 jreclist_done(mp, &jreclist, error);
1164 return (error);
1165}
1166
1167/*
1168 * Journal vop_nwhiteout { a_ncp, a_cred, a_flags }
1169 */
1170static
1171int
1172journal_nwhiteout(struct vop_nwhiteout_args *ap)
1173{
1174 struct jrecord_list jreclist;
1175 struct jrecord jreccache;
1176 struct jrecord *jrec;
1177 struct mount *mp;
1178 int error;
1179
1180 mp = ap->a_head.a_ops->head.vv_mount;
1181 jreclist_init(mp, &jreclist, &jreccache, JTYPE_WHITEOUT);
1182 error = vop_journal_operate_ap(&ap->a_head);
1183 if (error == 0) {
1184 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1185 jrecord_write_cred(jrec, NULL, ap->a_cred);
1186 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1187 }
1188 }
1189 jreclist_done(mp, &jreclist, error);
1190 return (error);
1191}
1192
1193/*
1194 * Journal vop_nremove { a_ncp, a_cred }
1195 */
1196static
1197int
1198journal_nremove(struct vop_nremove_args *ap)
1199{
1200 struct jrecord_list jreclist;
1201 struct jrecord jreccache;
1202 struct jrecord *jrec;
1203 struct mount *mp;
1204 int error;
1205
1206 mp = ap->a_head.a_ops->head.vv_mount;
1207 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_REMOVE) &&
1208 ap->a_ncp->nc_vp
1209 ) {
1210 jreclist_undo_file(&jreclist, ap->a_ncp->nc_vp,
1211 JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1212 }
1213 error = vop_journal_operate_ap(&ap->a_head);
1214 if (error == 0) {
1215 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1216 jrecord_write_cred(jrec, NULL, ap->a_cred);
1217 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1218 }
1219 }
1220 jreclist_done(mp, &jreclist, error);
1221 return (error);
1222}
1223
1224/*
1225 * Journal vop_nmkdir { a_ncp, a_vpp, a_cred, a_vap }
1226 */
1227static
1228int
1229journal_nmkdir(struct vop_nmkdir_args *ap)
1230{
1231 struct jrecord_list jreclist;
1232 struct jrecord jreccache;
1233 struct jrecord *jrec;
1234 struct mount *mp;
1235 int error;
1236
1237 mp = ap->a_head.a_ops->head.vv_mount;
1238 jreclist_init(mp, &jreclist, &jreccache, JTYPE_MKDIR);
1239 error = vop_journal_operate_ap(&ap->a_head);
1240 if (error == 0) {
1241 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1242#if 0
1243 if (jo->flags & MC_JOURNAL_WANT_AUDIT) {
1244 jrecord_write_audit(jrec);
1245 }
1246#endif
1247 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1248 jrecord_write_cred(jrec, NULL, ap->a_cred);
1249 jrecord_write_vattr(jrec, ap->a_vap);
1251 if (*ap->a_vpp)
1252 jrecord_write_vnode_ref(jrec, *ap->a_vpp);
1253 }
1254 }
1255 jreclist_done(mp, &jreclist, error);
1256 return (error);
1257}
1258
1259/*
1260 * Journal vop_nrmdir { a_ncp, a_cred }
1261 */
1262static
1263int
1264journal_nrmdir(struct vop_nrmdir_args *ap)
1265{
1266 struct jrecord_list jreclist;
1267 struct jrecord jreccache;
1268 struct jrecord *jrec;
1269 struct mount *mp;
1270 int error;
1271
1272 mp = ap->a_head.a_ops->head.vv_mount;
1273 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RMDIR)) {
1274 jreclist_undo_file(&jreclist, ap->a_ncp->nc_vp,
1275 JRUNDO_VATTR|JRUNDO_GETVP, 0, 0);
1276 }
1277 error = vop_journal_operate_ap(&ap->a_head);
1278 if (error == 0) {
1279 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1280 jrecord_write_cred(jrec, NULL, ap->a_cred);
1281 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_ncp);
1282 }
1283 }
1284 jreclist_done(mp, &jreclist, error);
1285 return (error);
1286}
1287
1288/*
1289 * Journal vop_nrename { a_fncp, a_tncp, a_cred }
1290 */
1291static
1292int
1293journal_nrename(struct vop_nrename_args *ap)
1294{
1295 struct jrecord_list jreclist;
1296 struct jrecord jreccache;
1297 struct jrecord *jrec;
1298 struct mount *mp;
1299 int error;
1300
1301 mp = ap->a_head.a_ops->head.vv_mount;
1302 if (jreclist_init(mp, &jreclist, &jreccache, JTYPE_RENAME) &&
1303 ap->a_tncp->nc_vp
1304 ) {
1305 jreclist_undo_file(&jreclist, ap->a_tncp->nc_vp,
1306 JRUNDO_ALL|JRUNDO_GETVP|JRUNDO_CONDLINK, 0, -1);
1307 }
1308 error = vop_journal_operate_ap(&ap->a_head);
1309 if (error == 0) {
1310 TAILQ_FOREACH(jrec, &jreclist.list, user_entry) {
1311 jrecord_write_cred(jrec, NULL, ap->a_cred);
1312 jrecord_write_path(jrec, JLEAF_PATH1, ap->a_fncp);
1313 jrecord_write_path(jrec, JLEAF_PATH2, ap->a_tncp);
1314 }
1315 }
1316 jreclist_done(mp, &jreclist, error);
1317 return (error);
1318}
1319