/*
 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_jops.c,v 1.10 2005/03/04 21:37:27 dillon Exp $
 */
/*
 * Each mount point may have zero or more independently configured journals
 * attached to it. Each journal is represented by a memory FIFO and worker
 * thread. Journal events are streamed through the FIFO to the thread,
 * batched up (typically on one-second intervals), and written out by the
 * thread.
 *
 * Journal vnode ops are executed instead of mnt_vn_norm_ops when one or
 * more journals have been installed on a mount point. It becomes the
 * responsibility of the journal op to call the underlying normal op as
 * appropriate.
 *
 * The journaling protocol is intended to evolve into a two-way stream
 * whereby transaction IDs can be acknowledged by the journaling target
 * when the data has been committed to hard storage. Both implicit and
 * explicit acknowledgement schemes will be supported, depending on the
 * sophistication of the journaling stream, plus resynchronization and
 * restart when a journaling stream is interrupted. This information will
 * also be made available to journaling-aware filesystems to allow better
 * management of their own physical storage synchronization mechanisms as
 * well as to allow such filesystems to take direct advantage of the kernel's
 * journaling layer so they don't have to roll their own.
 *
 * In addition, the worker thread will have access to much larger
 * spooling areas than the memory buffer is able to provide by e.g.
 * reserving swap space, in order to absorb potentially long interruptions
 * of off-site journaling streams, and to prevent 'slow' off-site linkages
 * from radically slowing down local filesystem operations.
 *
 * Because of the non-trivial algorithms the journaling system will be
 * required to support, use of a worker thread is mandatory. Efficiencies
 * are maintained by utilizing the memory FIFO to batch transactions when
 * possible, reducing the number of gratuitous thread switches and taking
 * advantage of cpu caches through the use of shorter batched code paths
 * rather than trying to do everything in the context of the process
 * originating the filesystem op. In the future the memory FIFO can be
 * made per-cpu to remove BGL or other locking requirements.
 */
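
/*
 * Memory FIFO layout (descriptive summary of the code below). All three
 * indices increase monotonically; masking with fifo.mask yields a buffer
 * offset, and the indices are ordered
 *
 *	xindex <= rindex <= windex
 *
 *	windex - head of reserved space, advanced by journal_reserve()
 *		 and journal_extend().
 *	rindex - next byte the worker thread will write to the target.
 *	xindex - last byte acknowledged by the target; space below it may
 *		 be reused, so windex - xindex never exceeds fifo.size.
 */
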
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>
#include <sys/mountctl.h>
#include <sys/journal.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/msfbuf.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/file2.h>
#include <sys/thread2.h>

static int journal_attach(struct mount *mp);
static void journal_detach(struct mount *mp);
static int journal_install_vfs_journal(struct mount *mp, struct file *fp,
                            const struct mountctl_install_journal *info);
static int journal_remove_vfs_journal(struct mount *mp,
                            const struct mountctl_remove_journal *info);
static int journal_resync_vfs_journal(struct mount *mp, const void *ctl);
static int journal_status_vfs_journal(struct mount *mp,
                            const struct mountctl_status_journal *info,
                            struct mountctl_journal_ret_status *rstat,
                            int buflen, int *res);
static void journal_thread(void *info);

static void *journal_reserve(struct journal *jo,
                            struct journal_rawrecbeg **rawpp,
                            int16_t streamid, int bytes);
static void *journal_extend(struct journal *jo,
                            struct journal_rawrecbeg **rawpp,
                            int truncbytes, int bytes, int *newstreamrecp);
static void journal_abort(struct journal *jo,
                            struct journal_rawrecbeg **rawpp);
static void journal_commit(struct journal *jo,
                            struct journal_rawrecbeg **rawpp,
                            int bytes, int closeout);

static void jrecord_init(struct journal *jo,
                            struct jrecord *jrec, int16_t streamid);
static struct journal_subrecord *jrecord_push(
                            struct jrecord *jrec, int16_t rectype);
static void jrecord_pop(struct jrecord *jrec, struct journal_subrecord *parent);
static struct journal_subrecord *jrecord_write(struct jrecord *jrec,
                            int16_t rectype, int bytes);
static void jrecord_data(struct jrecord *jrec, const void *buf, int bytes);
static void jrecord_done(struct jrecord *jrec, int abortit);

static int journal_setattr(struct vop_setattr_args *ap);
static int journal_write(struct vop_write_args *ap);
static int journal_fsync(struct vop_fsync_args *ap);
static int journal_putpages(struct vop_putpages_args *ap);
static int journal_setacl(struct vop_setacl_args *ap);
static int journal_setextattr(struct vop_setextattr_args *ap);
static int journal_ncreate(struct vop_ncreate_args *ap);
static int journal_nmknod(struct vop_nmknod_args *ap);
static int journal_nlink(struct vop_nlink_args *ap);
static int journal_nsymlink(struct vop_nsymlink_args *ap);
static int journal_nwhiteout(struct vop_nwhiteout_args *ap);
static int journal_nremove(struct vop_nremove_args *ap);
static int journal_nmkdir(struct vop_nmkdir_args *ap);
static int journal_nrmdir(struct vop_nrmdir_args *ap);
static int journal_nrename(struct vop_nrename_args *ap);

static struct vnodeopv_entry_desc journal_vnodeop_entries[] = {
    { &vop_default_desc,     vop_journal_operate_ap },
    { &vop_mountctl_desc,    (void *)journal_mountctl },
    { &vop_setattr_desc,     (void *)journal_setattr },
    { &vop_write_desc,       (void *)journal_write },
    { &vop_fsync_desc,       (void *)journal_fsync },
    { &vop_putpages_desc,    (void *)journal_putpages },
    { &vop_setacl_desc,      (void *)journal_setacl },
    { &vop_setextattr_desc,  (void *)journal_setextattr },
    { &vop_ncreate_desc,     (void *)journal_ncreate },
    { &vop_nmknod_desc,      (void *)journal_nmknod },
    { &vop_nlink_desc,       (void *)journal_nlink },
    { &vop_nsymlink_desc,    (void *)journal_nsymlink },
    { &vop_nwhiteout_desc,   (void *)journal_nwhiteout },
    { &vop_nremove_desc,     (void *)journal_nremove },
    { &vop_nmkdir_desc,      (void *)journal_nmkdir },
    { &vop_nrmdir_desc,      (void *)journal_nrmdir },
    { &vop_nrename_desc,     (void *)journal_nrename },
    { NULL, NULL }
};

static MALLOC_DEFINE(M_JOURNAL, "journal", "Journaling structures");
static MALLOC_DEFINE(M_JFIFO, "journal-fifo", "Journal FIFO");

int
journal_mountctl(struct vop_mountctl_args *ap)
{
    struct mount *mp;
    int error = 0;

    mp = ap->a_head.a_ops->vv_mount;
    KKASSERT(mp);

    if (mp->mnt_vn_journal_ops == NULL) {
        switch(ap->a_op) {
        case MOUNTCTL_INSTALL_VFS_JOURNAL:
            error = journal_attach(mp);
            if (error == 0 && ap->a_ctllen != sizeof(struct mountctl_install_journal))
                error = EINVAL;
            if (error == 0 && ap->a_fp == NULL)
                error = EBADF;
            if (error == 0)
                error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
            if (TAILQ_EMPTY(&mp->mnt_jlist))
                journal_detach(mp);
            break;
        case MOUNTCTL_REMOVE_VFS_JOURNAL:
        case MOUNTCTL_RESYNC_VFS_JOURNAL:
        case MOUNTCTL_STATUS_VFS_JOURNAL:
            error = ENOENT;
            break;
        default:
            error = EOPNOTSUPP;
            break;
        }
    } else {
        switch(ap->a_op) {
        case MOUNTCTL_INSTALL_VFS_JOURNAL:
            if (ap->a_ctllen != sizeof(struct mountctl_install_journal))
                error = EINVAL;
            if (error == 0 && ap->a_fp == NULL)
                error = EBADF;
            if (error == 0)
                error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
            break;
        case MOUNTCTL_REMOVE_VFS_JOURNAL:
            if (ap->a_ctllen != sizeof(struct mountctl_remove_journal))
                error = EINVAL;
            if (error == 0)
                error = journal_remove_vfs_journal(mp, ap->a_ctl);
            if (TAILQ_EMPTY(&mp->mnt_jlist))
                journal_detach(mp);
            break;
        case MOUNTCTL_RESYNC_VFS_JOURNAL:
            if (ap->a_ctllen != 0)
                error = EINVAL;
            if (error == 0)
                error = journal_resync_vfs_journal(mp, ap->a_ctl);
            break;
        case MOUNTCTL_STATUS_VFS_JOURNAL:
            if (ap->a_ctllen != sizeof(struct mountctl_status_journal))
                error = EINVAL;
            if (error == 0) {
                error = journal_status_vfs_journal(mp, ap->a_ctl,
                                ap->a_buf, ap->a_buflen, ap->a_res);
            }
            break;
        default:
            error = EOPNOTSUPP;
            break;
        }
    }
    return (error);
}
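
/*
 * Illustrative userland sketch (hypothetical, not part of this file, kept
 * under #if 0): installing a journal on a mount point through the
 * mountctl(2) system call. Only the fields this file actually consults
 * (id, flags, membufsize, transid) are set; the rest of the structure is
 * zeroed. The mountctl(2) prototype is assumed from the DragonFly manual.
 */
#if 0
#include <sys/types.h>
#include <sys/mountctl.h>
#include <string.h>

int
install_journal_example(const char *mtpt, int target_fd)
{
    struct mountctl_install_journal info;

    bzero(&info, sizeof(info));                   /* unused fields stay 0 */
    strlcpy(info.id, "journal0", sizeof(info.id)); /* id is char[JIDMAX] */
    info.membufsize = 4 * 1024 * 1024;            /* request a 4MB FIFO */
    return(mountctl(mtpt, MOUNTCTL_INSTALL_VFS_JOURNAL, target_fd,
                    &info, sizeof(info), NULL, 0));
}
#endif
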

/*
 * High level mount point setup. Attach the journaling vnode op shims to
 * the mount point when the first journal is installed; they remain in
 * place until the last journal is removed and journal_detach() is called.
 */
static int
journal_attach(struct mount *mp)
{
    vfs_add_vnodeops(mp, &mp->mnt_vn_journal_ops, journal_vnodeop_entries);
    return(0);
}

static void
journal_detach(struct mount *mp)
{
    if (mp->mnt_vn_journal_ops)
        vfs_rm_vnodeops(&mp->mnt_vn_journal_ops);
}

/*
 * Install a journal on a mount point. Each journal has an associated worker
 * thread which is responsible for buffering and spooling the data to the
 * target. A mount point may have multiple journals attached to it. An
 * initial start record is generated when the journal is associated.
 */
static int
journal_install_vfs_journal(struct mount *mp, struct file *fp,
                            const struct mountctl_install_journal *info)
{
    struct journal *jo;
    struct jrecord jrec;
    int error = 0;
    int size;

    jo = malloc(sizeof(struct journal), M_JOURNAL, M_WAITOK|M_ZERO);
    bcopy(info->id, jo->id, sizeof(jo->id));
    jo->flags = info->flags & ~(MC_JOURNAL_ACTIVE | MC_JOURNAL_STOP_REQ);

    /*
     * Memory FIFO size, rounded up to a power of 2 and clamped to the
     * range [64KB, 128MB]. The default is 1MB.
     */
    if (info->membufsize) {
        if (info->membufsize < 65536)
            size = 65536;
        else if (info->membufsize > 128 * 1024 * 1024)
            size = 128 * 1024 * 1024;
        else
            size = (int)info->membufsize;
    } else {
        size = 1024 * 1024;
    }
    jo->fifo.size = 1;
    while (jo->fifo.size < size)
        jo->fifo.size <<= 1;

    /*
     * Other parameters. If a starting transaction id is not specified
     * one is derived from the current time (seconds in the high bits,
     * nanoseconds in the low 30 bits).
     */
    if (info->transid) {
        jo->transid = info->transid;
    } else {
        struct timespec ts;
        getnanotime(&ts);
        jo->transid = ((int64_t)ts.tv_sec << 30) | ts.tv_nsec;
    }

    jo->fp = fp;

    /*
     * Allocate the memory FIFO
     */
    jo->fifo.mask = jo->fifo.size - 1;
    jo->fifo.membase = malloc(jo->fifo.size, M_JFIFO, M_WAITOK|M_ZERO|M_NULLOK);
    if (jo->fifo.membase == NULL)
        error = ENOMEM;

    /*
     * Create the worker thread and generate the association record.
     */
    if (error) {
        free(jo, M_JOURNAL);
    } else {
        fhold(fp);
        jo->flags |= MC_JOURNAL_ACTIVE;
        lwkt_create(journal_thread, jo, NULL, &jo->thread,
                    TDF_STOPREQ, -1, "journal %.*s", JIDMAX, jo->id);
        lwkt_setpri(&jo->thread, TDPRI_KERN_DAEMON);
        lwkt_schedule(&jo->thread);

        jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
        jrecord_write(&jrec, JTYPE_ASSOCIATE, 0);
        jrecord_done(&jrec, 0);
        TAILQ_INSERT_TAIL(&mp->mnt_jlist, jo, jentry);
    }
    return(error);
}

/*
 * Disassociate a journal from a mount point and terminate its worker thread.
 * A final termination record is written out before the file pointer is
 * dropped.
 */
static int
journal_remove_vfs_journal(struct mount *mp,
                           const struct mountctl_remove_journal *info)
{
    struct journal *jo;
    struct jrecord jrec;
    int error;

    TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
        if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
            break;
    }
    if (jo) {
        error = 0;
        TAILQ_REMOVE(&mp->mnt_jlist, jo, jentry);

        jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
        jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
        jrecord_done(&jrec, 0);

        jo->flags |= MC_JOURNAL_STOP_REQ | (info->flags & MC_JOURNAL_STOP_IMM);
        wakeup(&jo->fifo);
        while (jo->flags & MC_JOURNAL_ACTIVE) {
            tsleep(jo, 0, "jwait", 0);
        }
        lwkt_free_thread(&jo->thread);  /* XXX SMP */
        if (jo->fp)
            fdrop(jo->fp, curthread);
        if (jo->fifo.membase)
            free(jo->fifo.membase, M_JFIFO);
        free(jo, M_JOURNAL);
    } else {
        error = EINVAL;
    }
    return (error);
}

static int
journal_resync_vfs_journal(struct mount *mp, const void *ctl)
{
    return(EINVAL);
}

static int
journal_status_vfs_journal(struct mount *mp,
                           const struct mountctl_status_journal *info,
                           struct mountctl_journal_ret_status *rstat,
                           int buflen, int *res)
{
    struct journal *jo;
    int error = 0;
    int index;

    index = 0;
    *res = 0;
    TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
        if (info->index == MC_JOURNAL_INDEX_ID) {
            if (bcmp(jo->id, info->id, sizeof(jo->id)) != 0)
                continue;
        } else if (info->index >= 0) {
            if (info->index < index)
                continue;
        } else if (info->index != MC_JOURNAL_INDEX_ALL) {
            continue;
        }
        if (buflen < sizeof(*rstat)) {
            if (*res)
                rstat[-1].flags |= MC_JOURNAL_STATUS_MORETOCOME;
            else
                error = EINVAL;
            break;
        }
        bzero(rstat, sizeof(*rstat));
        rstat->recsize = sizeof(*rstat);
        bcopy(jo->id, rstat->id, sizeof(jo->id));
        rstat->index = index;
        rstat->membufsize = jo->fifo.size;
        rstat->membufused = jo->fifo.xindex - jo->fifo.rindex;
        rstat->membufiopend = jo->fifo.windex - jo->fifo.rindex;
        rstat->bytessent = jo->total_acked;
        ++rstat;
        ++index;
        *res += sizeof(*rstat);
        buflen -= sizeof(*rstat);
    }
    return(error);
}

/*
 * The per-journal worker thread is responsible for writing out the
 * journal's FIFO to the target stream.
 */
static void
journal_thread(void *info)
{
    struct journal *jo = info;
    struct journal_rawrecbeg *rawp;
    int bytes;
    int error;
    int avail;
    int res;

    for (;;) {
        /*
         * Calculate the number of bytes available to write. This buffer
         * area may contain reserved records so we can't just write it out
         * without further checks.
         */
        bytes = jo->fifo.windex - jo->fifo.rindex;

        /*
         * Sleep if no bytes are available or if an incomplete record is
         * encountered (it needs to be filled in before we can write it
         * out), and skip any pad records that we encounter.
         */
        if (bytes == 0) {
            if (jo->flags & MC_JOURNAL_STOP_REQ)
                break;
            tsleep(&jo->fifo, 0, "jfifo", hz);
            continue;
        }

        /*
         * Sleep if we cannot go any further due to hitting an incomplete
         * record. This case should occur rarely but may have to be better
         * optimized XXX.
         */
        rawp = (void *)(jo->fifo.membase + (jo->fifo.rindex & jo->fifo.mask));
        if (rawp->begmagic == JREC_INCOMPLETEMAGIC) {
            tsleep(&jo->fifo, 0, "jpad", hz);
            continue;
        }

        /*
         * Skip any pad records. We do not write out pad records if we can
         * help it.
         *
         * If xindex is caught up to rindex it gets incremented along with
         * rindex. XXX
         */
        if (rawp->streamid == JREC_STREAMID_PAD) {
            if (jo->fifo.rindex == jo->fifo.xindex)
                jo->fifo.xindex += (rawp->recsize + 15) & ~15;
            jo->fifo.rindex += (rawp->recsize + 15) & ~15;
            jo->total_acked += (rawp->recsize + 15) & ~15;
            KKASSERT(jo->fifo.windex - jo->fifo.rindex >= 0);
            continue;
        }

        /*
         * 'bytes' is the amount of data that can potentially be written out.
         * Calculate 'res', the amount of data that can actually be written
         * out. res is bounded either by hitting the end of the physical
         * memory buffer or by hitting an incomplete record. Incomplete
         * records often occur due to the way the space reservation model
         * works.
         */
        res = 0;
        avail = jo->fifo.size - (jo->fifo.rindex & jo->fifo.mask);
        while (res < bytes && rawp->begmagic == JREC_BEGMAGIC) {
            res += (rawp->recsize + 15) & ~15;
            if (res >= avail) {
                KKASSERT(res == avail);
                break;
            }
            rawp = (void *)((char *)rawp + ((rawp->recsize + 15) & ~15));
        }

        /*
         * Issue the write and deal with any errors or other conditions.
         * For now assume blocking I/O. Since we are record-aware the
         * code cannot yet handle partial writes.
         *
         * XXX EWOULDBLOCK/NBIO
         * XXX notification on failure
         * XXX permanent versus temporary failures
         * XXX two-way acknowledgement stream in the return direction / xindex
         */
        bytes = res;
        error = fp_write(jo->fp,
                         jo->fifo.membase + (jo->fifo.rindex & jo->fifo.mask),
                         bytes, &res);
        if (error) {
            printf("journal_thread(%s) write, error %d\n", jo->id, error);
            /* XXX */
        } else {
            KKASSERT(res == bytes);
        }

        /*
         * Advance rindex. XXX for now also advance xindex, which will
         * eventually be advanced only when the target acknowledges the
         * sequence space.
         */
        jo->fifo.rindex += bytes;
        jo->fifo.xindex += bytes;
        jo->total_acked += bytes;
        KKASSERT(jo->fifo.windex - jo->fifo.rindex >= 0);
        if (jo->flags & MC_JOURNAL_WWAIT) {
            jo->flags &= ~MC_JOURNAL_WWAIT;  /* XXX hysteresis */
            wakeup(&jo->fifo.windex);
        }
    }
    jo->flags &= ~MC_JOURNAL_ACTIVE;
    wakeup(jo);
    wakeup(&jo->fifo.windex);
}

/*
 * This builds a pad record which the journaling thread will skip over. Pad
 * records are required when we are unable to reserve sufficient stream space
 * due to insufficient space at the end of the physical memory fifo.
 */
static
void
journal_build_pad(struct journal_rawrecbeg *rawp, int recsize)
{
    struct journal_rawrecend *rendp;

    KKASSERT((recsize & 15) == 0 && recsize >= 16);

    rawp->streamid = JREC_STREAMID_PAD;
    rawp->recsize = recsize;    /* must be 16-byte aligned */
    rawp->seqno = 0;
    /*
     * WARNING, rendp may overlap rawp->seqno. This is necessary to
     * allow PAD records to fit in 16 bytes. Use cpu_mb1() to
     * hopefully cause the compiler to not make any assumptions.
     */
    rendp = (void *)((char *)rawp + rawp->recsize - sizeof(*rendp));
    rendp->endmagic = JREC_ENDMAGIC;
    rendp->check = 0;
    rendp->recsize = rawp->recsize;

    /*
     * Set the begin magic last. This is what will allow the journal
     * thread to write the record out.
     */
    cpu_mb1();
    rawp->begmagic = JREC_BEGMAGIC;
}
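
/*
 * Illustration of the overlap warned about above, assuming the 16-byte
 * journal_rawrecbeg and 8-byte journal_rawrecend that the warning implies:
 * a minimum 16-byte pad record lays out as
 *
 *	bytes 0-7 : begmagic, streamid, recsize
 *	bytes 8-15: seqno, overwritten by the trailer (endmagic, check,
 *		    recsize)
 *
 * which is why seqno is written before the trailer fields.
 */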

/*
 * Wake up the worker thread if the FIFO is more than half full or if
 * someone is waiting for space to be freed up. Otherwise let the
 * heartbeat deal with it. Being able to avoid waking up the worker
 * is the key to the journal's cpu performance.
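 * (With the default 1MB FIFO, for example, the worker is only woken once
 * more than 512KB of committed data is outstanding.)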
 */
static __inline
void
journal_commit_wakeup(struct journal *jo)
{
    int avail;

    avail = jo->fifo.size - (jo->fifo.windex - jo->fifo.xindex);
    KKASSERT(avail >= 0);
    if ((avail < (jo->fifo.size >> 1)) || (jo->flags & MC_JOURNAL_WWAIT))
        wakeup(&jo->fifo);
}

/*
 * Create a new BEGIN stream record with the specified streamid and the
 * specified amount of payload space. *rawpp will be set to point to the
 * base of the new stream record and a pointer to the base of the payload
 * space will be returned. *rawpp does not need to be pre-NULLd prior to
 * making this call.
 *
 * A stream can be extended, aborted, or committed by other API calls
 * below. This may result in a sequence of potentially disconnected
 * stream records to be output to the journaling target. The first record
 * (the one created by this function) will be marked JREC_STREAMCTL_BEGIN,
 * while the last record on commit or abort will be marked JREC_STREAMCTL_END
 * (and possibly also JREC_STREAMCTL_ABORTED). The last record could wind
 * up being the same as the first, in which case the bits are all set in
 * the first record.
 *
 * The stream record is created in an incomplete state by setting the begin
 * magic to JREC_INCOMPLETEMAGIC. This prevents the worker thread from
 * flushing the fifo past our record until we have finished populating it.
 * Other threads can reserve and operate on their own space without stalling
 * but the stream output will stall until we have completed operations. The
 * memory FIFO is intended to be large enough to absorb such situations
 * without stalling out other threads.
 */
static
void *
journal_reserve(struct journal *jo, struct journal_rawrecbeg **rawpp,
                int16_t streamid, int bytes)
{
    struct journal_rawrecbeg *rawp;
    int avail;
    int availtoend;
    int req;

    /*
     * Add header and trailer overheads to the passed payload. Note that
     * the passed payload size need not be aligned in any way.
     */
    bytes += sizeof(struct journal_rawrecbeg);
    bytes += sizeof(struct journal_rawrecend);

    for (;;) {
        /*
         * First, check boundary conditions. If the request would wrap around
         * we have to skip past the ending block and return to the beginning
         * of the FIFO's buffer. Calculate 'req' which is the actual number
         * of bytes being reserved, including wrap-around dead space.
         *
         * Neither 'bytes' nor 'req' are aligned.
         *
         * Note that availtoend is not truncated to avail and so cannot be
         * used to determine whether the reservation is possible by itself.
         * Also, since all fifo ops are 16-byte aligned, we can check
         * the size before calculating the aligned size.
         */
        availtoend = jo->fifo.size - (jo->fifo.windex & jo->fifo.mask);
        KKASSERT((availtoend & 15) == 0);
        if (bytes > availtoend)
            req = bytes + availtoend;   /* add pad to end */
        else
            req = bytes;

        /*
         * Next calculate the total available space and see if it is
         * sufficient. We cannot overwrite previously buffered data
         * past xindex because otherwise we would not be able to restart
         * a broken link at the target's last point of commit.
         */
        avail = jo->fifo.size - (jo->fifo.windex - jo->fifo.xindex);
        KKASSERT(avail >= 0 && (avail & 15) == 0);

        if (avail < req) {
            /* XXX MC_JOURNAL_STOP_IMM */
            jo->flags |= MC_JOURNAL_WWAIT;
            tsleep(&jo->fifo.windex, 0, "jwrite", 0);
            continue;
        }

        /*
         * Create a pad record for any dead space and create an incomplete
         * record for the live space, then return a pointer to the
         * contiguous buffer space that was requested.
         *
         * NOTE: The worker thread will not flush past an incomplete
         * record, so the reserved space can be filled in at-will. The
         * journaling code must also be aware that reserved sections occurring
         * after this one will also not be written out even if completed
         * until this one is completed.
         */
        rawp = (void *)(jo->fifo.membase + (jo->fifo.windex & jo->fifo.mask));
        if (req != bytes) {
            journal_build_pad(rawp, availtoend);
            rawp = (void *)jo->fifo.membase;
        }
        rawp->begmagic = JREC_INCOMPLETEMAGIC;  /* updated by abort/commit */
        rawp->recsize = bytes;                  /* (unaligned size) */
        rawp->streamid = streamid | JREC_STREAMCTL_BEGIN;
        rawp->seqno = 0;                        /* set by caller */

        /*
         * Issue a memory barrier to guarantee that the record data has been
         * properly initialized before we advance the write index and return
         * a pointer to the reserved record. Otherwise the worker thread
         * could accidentally run past us.
         *
         * Note that stream records are always 16-byte aligned.
         */
        cpu_mb1();
        jo->fifo.windex += (req + 15) & ~15;
        *rawpp = rawp;
        return(rawp + 1);
    }
    /* not reached */
    *rawpp = NULL;
    return(NULL);
}
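
/*
 * Example of the resulting stream control bits, derived from the comments
 * above: a logical stream that required three physical records carries
 *
 *	record 1: streamid | JREC_STREAMCTL_BEGIN
 *	record 2: streamid (continuation, no control bits)
 *	record 3: streamid | JREC_STREAMCTL_END
 *
 * while a single-record stream carries BEGIN and END (and, on abort,
 * ABORTED) all in the one record.
 */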

/*
 * Extend a previous reservation by the specified number of payload bytes.
 * If it is not possible to extend the existing reservation due to either
 * another thread having reserved space after us or due to a boundary
 * condition, the current reservation will be committed and possibly
 * truncated and a new reservation with the specified payload size will
 * be created. *rawpp is set to the new reservation in this case but the
 * caller cannot depend on a comparison with the old rawp to determine if
 * this case occurs because we could end up using the same memory FIFO
 * offset for the new stream record.
 *
 * In either case this function will return a pointer to the base of the
 * extended payload space.
 *
 * If a new stream block is created the caller needs to recalculate payload
 * byte counts, if the same stream block is used the caller needs to extend
 * its current notion of the payload byte count.
 */
static void *
journal_extend(struct journal *jo, struct journal_rawrecbeg **rawpp,
               int truncbytes, int bytes, int *newstreamrecp)
{
    struct journal_rawrecbeg *rawp;
    int16_t streamid;
    int availtoend;
    int avail;
    int osize;
    int nsize;
    int wbase;
    void *rptr;

    *newstreamrecp = 0;
    rawp = *rawpp;
    osize = (rawp->recsize + 15) & ~15;
    nsize = (rawp->recsize + bytes + 15) & ~15;
    wbase = (char *)rawp - jo->fifo.membase;

    /*
     * If the aligned record size does not change we can trivially extend
     * the record.
     */
    if (nsize == osize) {
        rawp->recsize += bytes;
        return((char *)rawp + rawp->recsize - bytes);
    }

    /*
     * If the fifo's write index hasn't been modified since we made the
     * reservation and we do not hit any boundary conditions, we can
     * trivially extend the record.
     */
    if ((jo->fifo.windex & jo->fifo.mask) == wbase + osize) {
        availtoend = jo->fifo.size - wbase;
        avail = jo->fifo.size - (jo->fifo.windex - jo->fifo.xindex) + osize;
        KKASSERT((availtoend & 15) == 0);
        KKASSERT((avail & 15) == 0);
        if (nsize <= avail && nsize <= availtoend) {
            jo->fifo.windex += nsize - osize;
            rawp->recsize += bytes;
            return((char *)rawp + rawp->recsize - bytes);
        }
    }

    /*
     * It was not possible to extend the buffer. Commit the current
     * buffer and create a new one. We manually clear the BEGIN mark that
     * journal_reserve() creates (because this is a continuing record, not
     * the start of a new stream).
     */
    streamid = rawp->streamid & JREC_STREAMID_MASK;
    journal_commit(jo, rawpp, truncbytes, 0);
    rptr = journal_reserve(jo, rawpp, streamid, bytes);
    rawp = *rawpp;
    rawp->streamid &= ~JREC_STREAMCTL_BEGIN;
    *newstreamrecp = 1;
    return(rptr);
}

/*
 * Abort a journal record. If the transaction record represents a stream
 * BEGIN and we can reverse the fifo's write index we can simply reverse
 * index the entire record, as if it were never reserved in the first place.
 *
 * Otherwise we set the JREC_STREAMCTL_ABORTED bit and commit the record
 * with the payload truncated to 0 bytes.
 */
static void
journal_abort(struct journal *jo, struct journal_rawrecbeg **rawpp)
{
    struct journal_rawrecbeg *rawp;
    int osize;

    rawp = *rawpp;
    osize = (rawp->recsize + 15) & ~15;

    if ((rawp->streamid & JREC_STREAMCTL_BEGIN) &&
        (jo->fifo.windex & jo->fifo.mask) ==
         (char *)rawp - jo->fifo.membase + osize)
    {
        jo->fifo.windex -= osize;
        *rawpp = NULL;
    } else {
        rawp->streamid |= JREC_STREAMCTL_ABORTED;
        journal_commit(jo, rawpp, 0, 1);
    }
}

/*
 * Commit a journal record and potentially truncate it to the specified
 * number of payload bytes. If you do not want to truncate the record,
 * simply pass -1 for the bytes parameter. Do not pass rawp->recsize, that
 * field includes header and trailer and will not be correct. Note that
 * passing 0 will truncate the entire data payload of the record.
 *
 * The logical stream is terminated by this function.
 *
 * If truncation occurs, and it is not possible to physically optimize the
 * memory FIFO due to other threads having reserved space after ours,
 * the remaining reserved space will be covered by a pad record.
 */
static void
journal_commit(struct journal *jo, struct journal_rawrecbeg **rawpp,
               int bytes, int closeout)
{
    struct journal_rawrecbeg *rawp;
    struct journal_rawrecend *rendp;
    int osize;
    int nsize;

    rawp = *rawpp;
    *rawpp = NULL;

    KKASSERT((char *)rawp >= jo->fifo.membase &&
             (char *)rawp + rawp->recsize <= jo->fifo.membase + jo->fifo.size);
    KKASSERT(((intptr_t)rawp & 15) == 0);

    /*
     * Truncate the record if necessary. If the FIFO write index is still
     * at the end of our record we can optimally backindex it. Otherwise
     * we have to insert a pad record to cover the dead space.
     *
     * We calculate osize which is the 16-byte-aligned original recsize.
     * We calculate nsize which is the 16-byte-aligned new recsize.
     *
     * Due to alignment issues or in case the passed truncation bytes is
     * the same as the original payload, nsize may be equal to osize even
     * if the committed bytes is less than the originally reserved bytes.
     */
    if (bytes >= 0) {
        KKASSERT(bytes >= 0 && bytes <= rawp->recsize - sizeof(struct journal_rawrecbeg) - sizeof(struct journal_rawrecend));
        osize = (rawp->recsize + 15) & ~15;
        rawp->recsize = bytes + sizeof(struct journal_rawrecbeg) +
                        sizeof(struct journal_rawrecend);
        nsize = (rawp->recsize + 15) & ~15;
        KKASSERT(nsize <= osize);
        if (osize == nsize) {
            /* do nothing */
        } else if ((jo->fifo.windex & jo->fifo.mask) == (char *)rawp - jo->fifo.membase + osize) {
            /* we are able to backindex the fifo */
            jo->fifo.windex -= osize - nsize;
        } else {
            /* we cannot backindex the fifo, emplace a pad in the dead space */
            journal_build_pad((void *)((char *)rawp + nsize), osize - nsize);
        }
    }

    /*
     * Fill in the trailer. Note that unlike pad records, the trailer will
     * never overlap the header.
     */
    rendp = (void *)((char *)rawp +
                     ((rawp->recsize + 15) & ~15) - sizeof(*rendp));
    rendp->endmagic = JREC_ENDMAGIC;
    rendp->recsize = rawp->recsize;
    rendp->check = 0;   /* XXX check word, disabled for now */

    /*
     * Fill in begmagic last. This will allow the worker thread to proceed.
     * Use a memory barrier to guarantee write ordering. Mark the stream
     * as terminated if closeout is set. This is the typical case.
     */
    if (closeout)
        rawp->streamid |= JREC_STREAMCTL_END;
    cpu_mb1();          /* memory barrier */
    rawp->begmagic = JREC_BEGMAGIC;

    journal_commit_wakeup(jo);
}

/************************************************************************
 *			TRANSACTION SUPPORT ROUTINES			*
 ************************************************************************
 *
 * JRECORD_*() - routines to create subrecord transactions and embed them
 *		 in the logical streams managed by the journal_*() routines.
 */
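
/*
 * Typical jrecord usage, mirroring the VNOPS shims at the end of this
 * file: a transaction is opened, nested subrecords are pushed and popped,
 * and the stream is committed (or aborted) when done.
 *
 *	struct jrecord jrec;
 *	void *save;
 *
 *	jrecord_init(jo, &jrec, -1);
 *	save = jrecord_push(&jrec, JTYPE_SETATTR);
 *	jrecord_leaf(&jrec, JLEAF_UID, &uid, sizeof(uid));
 *	jrecord_pop(&jrec, save);
 *	jrecord_done(&jrec, 0);
 */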

static int16_t sid = JREC_STREAMID_JMIN;

/*
 * Initialize the passed jrecord structure and start a new stream transaction
 * by reserving an initial build space in the journal's memory FIFO.
 */
static void
jrecord_init(struct journal *jo, struct jrecord *jrec, int16_t streamid)
{
    bzero(jrec, sizeof(*jrec));
    jrec->jo = jo;
    if (streamid < 0) {
        streamid = sid++;   /* XXX need to track stream ids! */
        if (sid == JREC_STREAMID_JMAX)
            sid = JREC_STREAMID_JMIN;
    }
    jrec->streamid = streamid;
    jrec->stream_residual = JREC_DEFAULTSIZE;
    jrec->stream_reserved = jrec->stream_residual;
    jrec->stream_ptr =
        journal_reserve(jo, &jrec->rawp, streamid, jrec->stream_reserved);
}

/*
 * Push a recursive record type. All pushes should have matching pops.
 * The old parent is returned and the newly pushed record becomes the
 * new parent. Note that the old parent's pointer may already be invalid
 * or may become invalid if jrecord_write() had to build a new stream
 * record, so the caller should not mess with the returned pointer in
 * any way other than to save it.
 */
static
struct journal_subrecord *
jrecord_push(struct jrecord *jrec, int16_t rectype)
{
    struct journal_subrecord *save;

    save = jrec->parent;
    jrec->parent = jrecord_write(jrec, rectype|JMASK_NESTED, 0);
    jrec->last = NULL;
    KKASSERT(jrec->parent != NULL);
    ++jrec->pushcount;
    ++jrec->pushptrgood;    /* cleared on flush */
    return(save);
}

/*
 * Pop a previously pushed sub-transaction. We must set JMASK_LAST
 * on the last record written within the subtransaction. If the last
 * record written is not accessible or if the subtransaction is empty,
 * we must write out a pad record with JMASK_LAST set before popping.
 *
 * When popping a subtransaction the parent record's recsize field
 * will be properly set. If the parent pointer is no longer valid
 * (which can occur if the data has already been flushed out to the
 * stream), the protocol spec allows us to leave it 0.
 *
 * The saved parent pointer which we restore may or may not be valid,
 * and if not valid may or may not be NULL, depending on the value
 * of pushptrgood.
 */
static void
jrecord_pop(struct jrecord *jrec, struct journal_subrecord *save)
{
    struct journal_subrecord *last;

    KKASSERT(jrec->pushcount > 0);
    KKASSERT(jrec->residual == 0);

    /*
     * Set JMASK_LAST on the last record we wrote at the current
     * level. If last is NULL we either no longer have access to the
     * record or the subtransaction was empty and we must write out a pad
     * record.
     */
    if ((last = jrec->last) == NULL) {
        jrecord_write(jrec, JLEAF_PAD|JMASK_LAST, 0);
        last = jrec->last;  /* reload after possible flush */
    } else {
        last->rectype |= JMASK_LAST;
    }

    /*
     * pushptrgood tells us how many levels of parent record pointers
     * are valid. The jrec only stores the current parent record pointer
     * (and it is only valid if pushptrgood != 0). The higher level parent
     * record pointers are saved by the routines calling jrecord_push() and
     * jrecord_pop(). These pointers may become stale and we determine
     * that fact by tracking the count of valid parent pointers with
     * pushptrgood. Pointers become invalid when their related stream
     * record gets pushed out.
     *
     * If no pointer is available (the data has already been pushed out),
     * then no fixup of e.g. the length field is possible for non-leaf
     * nodes. The protocol allows for this situation by placing a larger
     * burden on the program scanning the stream on the other end.
     *
     * [parentA]
     *     [node X]
     *     [parentB]
     *         [node Y]
     *         [node Z]
     *     (pop B)       see NOTE B
     * (pop A)           see NOTE A
     *
     * NOTE B: This pop sets LAST in node Z if the node is still accessible,
     *         else a PAD record is appended and LAST is set in that.
     *
     *         This pop sets the record size in parentB if parentB is still
     *         accessible, else the record size is left 0 (the scanner must
     *         deal with that).
     *
     *         This pop sets the new 'last' record to parentB, the pointer
     *         to which may or may not still be accessible.
     *
     * NOTE A: This pop sets LAST in parentB if the node is still accessible,
     *         else a PAD record is appended and LAST is set in that.
     *
     *         This pop sets the record size in parentA if parentA is still
     *         accessible, else the record size is left 0 (the scanner must
     *         deal with that).
     *
     *         This pop sets the new 'last' record to parentA, the pointer
     *         to which may or may not still be accessible.
     *
     * Also note that the last record in the stream transaction, which in
     * the above example is parentA, does not currently have the LAST bit
     * set.
     *
     * The current parent becomes the last record relative to the
     * saved parent passed into us. Its validity is based on
     * whether pushptrgood is non-zero prior to decrementing. The saved
     * parent becomes the new parent, and its validity is based on whether
     * pushptrgood is non-zero after decrementing.
     *
     * The old jrec->parent may be NULL if it is no longer accessible.
     * If pushptrgood is non-zero, however, it is guaranteed to not
     * be NULL (since no flush occurred).
     */
    jrec->last = jrec->parent;
    --jrec->pushcount;
    if (jrec->pushptrgood) {
        KKASSERT(jrec->last != NULL && last != NULL);
        if (--jrec->pushptrgood == 0) {
            jrec->parent = NULL;    /* 'save' contains garbage or NULL */
        } else {
            KKASSERT(save != NULL);
            jrec->parent = save;    /* 'save' must not be NULL */
        }

        /*
         * Set the record size in the old parent. 'last' still points to
         * the original last record in the subtransaction being popped,
         * jrec->last points to the old parent (which became the last
         * record relative to the new parent being popped into).
         */
        jrec->last->recsize = (char *)last + last->recsize - (char *)jrec->last;
    } else {
        jrec->parent = NULL;
        KKASSERT(jrec->last == NULL);
    }
}

/*
 * Write out a leaf record, including associated data.
 */
static
void
jrecord_leaf(struct jrecord *jrec, int16_t rectype, void *ptr, int bytes)
{
    jrecord_write(jrec, rectype, bytes);
    jrecord_data(jrec, ptr, bytes);
}

/*
 * Write a leaf record out and return a pointer to its base. The leaf
 * record may contain potentially megabytes of data which is supplied
 * in jrecord_data() calls. The exact amount must be specified in this
 * call.
 *
 * THE RETURNED SUBRECORD POINTER IS ONLY VALID IMMEDIATELY AFTER THE
 * CALL AND MAY BECOME INVALID AT ANY TIME. ONLY THE PUSH/POP CODE SHOULD
 * USE THE RETURN VALUE.
 */
static
struct journal_subrecord *
jrecord_write(struct jrecord *jrec, int16_t rectype, int bytes)
{
    struct journal_subrecord *last;
    int pusheditout;

    /*
     * Try to catch some obvious errors. Nesting records must specify a
     * size of 0, and there should be no left-overs from previous operations
     * (such as incomplete data writeouts).
     */
    KKASSERT(bytes == 0 || (rectype & JMASK_NESTED) == 0);
    KKASSERT(jrec->residual == 0);

    /*
     * Check to see if the current stream record has enough room for
     * the new subrecord header. If it doesn't we extend the current
     * stream record.
     *
     * This may have the side effect of pushing out the current stream record
     * and creating a new one. We must adjust our stream tracking fields
     * accordingly.
     */
    if (jrec->stream_residual < sizeof(struct journal_subrecord)) {
        jrec->stream_ptr = journal_extend(jrec->jo, &jrec->rawp,
                                jrec->stream_reserved - jrec->stream_residual,
                                JREC_DEFAULTSIZE, &pusheditout);
        if (pusheditout) {
            jrec->stream_reserved = JREC_DEFAULTSIZE;
            jrec->stream_residual = JREC_DEFAULTSIZE;
            jrec->parent = NULL;    /* no longer accessible */
            jrec->pushptrgood = 0;  /* restored parents in pops no good */
        } else {
            jrec->stream_reserved += JREC_DEFAULTSIZE;
            jrec->stream_residual += JREC_DEFAULTSIZE;
        }
    }
    last = (void *)jrec->stream_ptr;
    last->rectype = rectype;
    last->reserved = 0;
    last->recsize = sizeof(struct journal_subrecord) + bytes;
    jrec->last = last;
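    /*
     * Track the remaining payload and the 8-byte tail alignment it
     * implies; e.g. a 13-byte leaf is followed by -13 & 7 = 3 alignment
     * bytes so the next subrecord starts on an 8-byte boundary.
     */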
    jrec->residual = bytes;             /* remaining data to be posted */
    jrec->residual_align = -bytes & 7;  /* post-data alignment required */
    return(last);
}

/*
 * Write out the data associated with a leaf record. Any number of calls
 * to this routine may be made as long as the byte count adds up to the
 * amount originally specified in jrecord_write().
 *
 * The act of writing out the leaf data may result in numerous stream records
 * being pushed out. Callers should be aware that even the associated
 * subrecord header may become inaccessible due to stream record pushouts.
 */
static void
jrecord_data(struct jrecord *jrec, const void *buf, int bytes)
{
    int pusheditout;
    int extsize;

    KKASSERT(bytes >= 0 && bytes <= jrec->residual);

    /*
     * Push out stream records as long as there is insufficient room to hold
     * the remaining data.
     */
    while (jrec->stream_residual < bytes) {
        /*
         * Fill in any remaining space in the current stream record.
         */
        bcopy(buf, jrec->stream_ptr, jrec->stream_residual);
        buf = (const char *)buf + jrec->stream_residual;
        bytes -= jrec->stream_residual;
        /*jrec->stream_ptr += jrec->stream_residual;*/
        jrec->residual -= jrec->stream_residual;
        jrec->stream_residual = 0;

        /*
         * Try to extend the current stream record, but no more than 1/4
         * the size of the FIFO.
         */
        extsize = jrec->jo->fifo.size >> 2;
        if (extsize > bytes)
            extsize = (bytes + 15) & ~15;

        jrec->stream_ptr = journal_extend(jrec->jo, &jrec->rawp,
                                jrec->stream_reserved - jrec->stream_residual,
                                extsize, &pusheditout);
        if (pusheditout) {
            jrec->stream_reserved = extsize;
            jrec->stream_residual = extsize;
            jrec->parent = NULL;    /* no longer accessible */
            jrec->last = NULL;      /* no longer accessible */
            jrec->pushptrgood = 0;  /* restored parents in pops no good */
        } else {
            jrec->stream_reserved += extsize;
            jrec->stream_residual += extsize;
        }
    }

    /*
     * Push out any remaining bytes into the current stream record.
     */
    if (bytes) {
        bcopy(buf, jrec->stream_ptr, bytes);
        jrec->stream_ptr += bytes;
        jrec->stream_residual -= bytes;
        jrec->residual -= bytes;
    }

    /*
     * Handle data alignment requirements for the subrecord. Because the
     * stream record's data space is more strictly aligned, it must already
     * have sufficient space to hold any subrecord alignment slop.
     */
    if (jrec->residual == 0 && jrec->residual_align) {
        KKASSERT(jrec->residual_align <= jrec->stream_residual);
        bzero(jrec->stream_ptr, jrec->residual_align);
        jrec->stream_ptr += jrec->residual_align;
        jrec->stream_residual -= jrec->residual_align;
        jrec->residual_align = 0;
    }
}

/*
 * We are finished with the transaction. This closes the transaction created
 * by jrecord_init().
 *
 * NOTE: If abortit is not set then we must be at the top level with no
 *       residual subrecord data left to output.
 *
 *       If abortit is set then we can be in any state, all pushes will be
 *       popped and it is ok for there to be residual data. This works
 *       because the virtual stream itself is truncated. Scanners must deal
 *       with this situation.
 *
 * The stream record will be committed or aborted as specified and jrecord
 * resources will be cleaned up.
 */
static void
jrecord_done(struct jrecord *jrec, int abortit)
{
    KKASSERT(jrec->rawp != NULL);

    if (abortit) {
        journal_abort(jrec->jo, &jrec->rawp);
    } else {
        KKASSERT(jrec->pushcount == 0 && jrec->residual == 0);
        journal_commit(jrec->jo, &jrec->rawp,
                       jrec->stream_reserved - jrec->stream_residual, 1);
    }

    /*
     * jrec should not be used beyond this point without another init,
     * but clean up some fields to ensure that we panic if it is.
     *
     * Note that jrec->rawp is NULLd out by journal_abort/journal_commit.
     */
    jrec->jo = NULL;
    jrec->stream_ptr = NULL;
}

/************************************************************************
 *		LOW LEVEL RECORD SUPPORT ROUTINES			*
 ************************************************************************
 *
 * These routines create low level recursive and leaf subrecords representing
 * common filesystem structures.
 */

/*
 * Write out a filename path relative to the base of the mount point.
 * rectype is typically JLEAF_PATH{1,2,3,4}.
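 *
 * For example, a file "c" under directories "a/b" relative to the mount
 * point emits "a/b/c" followed by a terminating NUL (six bytes total).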
 */
static void
jrecord_write_path(struct jrecord *jrec, int16_t rectype, struct namecache *ncp)
{
    char buf[64];   /* local buffer if it fits, else malloced */
    char *base;
    int pathlen;
    int index;
    struct namecache *scan;

    /*
     * Pass 1 - figure out the number of bytes required. Include terminating
     * \0 on last element and '/' separator on other elements.
     */
again:
    pathlen = 0;
    for (scan = ncp;
         scan && (scan->nc_flag & NCF_MOUNTPT) == 0;
         scan = scan->nc_parent
    ) {
        pathlen += scan->nc_nlen + 1;
    }

    if (pathlen <= sizeof(buf))
        base = buf;
    else
        base = malloc(pathlen, M_TEMP, M_INTWAIT);

    /*
     * Pass 2 - generate the path buffer
     */
    index = pathlen;
    for (scan = ncp;
         scan && (scan->nc_flag & NCF_MOUNTPT) == 0;
         scan = scan->nc_parent
    ) {
        if (scan->nc_nlen >= index) {
            if (base != buf)
                free(base, M_TEMP);
            goto again;
        }
        if (index == pathlen)
            base[--index] = 0;
        else
            base[--index] = '/';
        index -= scan->nc_nlen;
        bcopy(scan->nc_name, base + index, scan->nc_nlen);
    }
    jrecord_leaf(jrec, rectype, base + index, pathlen - index);
    if (base != buf)
        free(base, M_TEMP);
}

/*
 * Write out a file attribute structure. While somewhat inefficient, using
 * a recursive data structure is the most portable and extensible way.
 */
static void
jrecord_write_vattr(struct jrecord *jrec, struct vattr *vat)
{
    void *save;

    save = jrecord_push(jrec, JTYPE_VATTR);
    if (vat->va_type != VNON)
        jrecord_leaf(jrec, JLEAF_VTYPE, &vat->va_type, sizeof(vat->va_type));
    if (vat->va_mode != (mode_t)VNOVAL)
        jrecord_leaf(jrec, JLEAF_MODES, &vat->va_mode, sizeof(vat->va_mode));
    if (vat->va_nlink != VNOVAL)
        jrecord_leaf(jrec, JLEAF_NLINK, &vat->va_nlink, sizeof(vat->va_nlink));
    if (vat->va_uid != VNOVAL)
        jrecord_leaf(jrec, JLEAF_UID, &vat->va_uid, sizeof(vat->va_uid));
    if (vat->va_gid != VNOVAL)
        jrecord_leaf(jrec, JLEAF_GID, &vat->va_gid, sizeof(vat->va_gid));
    if (vat->va_fsid != VNOVAL)
        jrecord_leaf(jrec, JLEAF_FSID, &vat->va_fsid, sizeof(vat->va_fsid));
    if (vat->va_fileid != VNOVAL)
        jrecord_leaf(jrec, JLEAF_INUM, &vat->va_fileid, sizeof(vat->va_fileid));
    if (vat->va_size != VNOVAL)
        jrecord_leaf(jrec, JLEAF_SIZE, &vat->va_size, sizeof(vat->va_size));
    if (vat->va_atime.tv_sec != VNOVAL)
        jrecord_leaf(jrec, JLEAF_ATIME, &vat->va_atime, sizeof(vat->va_atime));
    if (vat->va_mtime.tv_sec != VNOVAL)
        jrecord_leaf(jrec, JLEAF_MTIME, &vat->va_mtime, sizeof(vat->va_mtime));
    if (vat->va_ctime.tv_sec != VNOVAL)
        jrecord_leaf(jrec, JLEAF_CTIME, &vat->va_ctime, sizeof(vat->va_ctime));
    if (vat->va_gen != VNOVAL)
        jrecord_leaf(jrec, JLEAF_GEN, &vat->va_gen, sizeof(vat->va_gen));
    if (vat->va_flags != VNOVAL)
        jrecord_leaf(jrec, JLEAF_FLAGS, &vat->va_flags, sizeof(vat->va_flags));
    if (vat->va_rdev != VNOVAL)
        jrecord_leaf(jrec, JLEAF_UDEV, &vat->va_rdev, sizeof(vat->va_rdev));
#if 0
    if (vat->va_filerev != VNOVAL)
        jrecord_leaf(jrec, JLEAF_FILEREV, &vat->va_filerev, sizeof(vat->va_filerev));
#endif
    jrecord_pop(jrec, save);
}

/*
 * Write out the creds used to issue a file operation. If a process is
 * available write out additional tracking information related to the
 * process.
 *
 * XXX additional tracking info
 * XXX tty line info
 */
static void
jrecord_write_cred(struct jrecord *jrec, struct thread *td, struct ucred *cred)
{
    void *save;
    struct proc *p;

    save = jrecord_push(jrec, JTYPE_CRED);
    jrecord_leaf(jrec, JLEAF_UID, &cred->cr_uid, sizeof(cred->cr_uid));
    jrecord_leaf(jrec, JLEAF_GID, &cred->cr_gid, sizeof(cred->cr_gid));
    if (td && (p = td->td_proc) != NULL) {
        jrecord_leaf(jrec, JLEAF_PID, &p->p_pid, sizeof(p->p_pid));
        jrecord_leaf(jrec, JLEAF_COMM, p->p_comm, sizeof(p->p_comm));
    }
    jrecord_pop(jrec, save);
}

/*
 * Write out information required to identify a vnode
 */
static void
jrecord_write_vnode_ref(struct jrecord *jrec, struct vnode *vp)
{
    /* XXX */
}

/*
 * Write out the data represented by a UIO.
 */
struct jwuio_info {
    struct jrecord *jrec;
    int16_t rectype;
};

static int jrecord_write_uio_callback(void *info, char *buf, int bytes);

static void
jrecord_write_uio(struct jrecord *jrec, int16_t rectype, struct uio *uio)
{
    struct jwuio_info info = { jrec, rectype };
    int error;

    jrecord_leaf(jrec, JLEAF_SEEKPOS, &uio->uio_offset,
                 sizeof(uio->uio_offset));
    error = msf_uio_iterate(uio, jrecord_write_uio_callback, &info);
    if (error)
        printf("XXX warning uio iterate failed %d\n", error);
}

static int
jrecord_write_uio_callback(void *info_arg, char *buf, int bytes)
{
    struct jwuio_info *info = info_arg;

    jrecord_leaf(info->jrec, info->rectype, buf, bytes);
    return(0);
}

/************************************************************************
 *			JOURNAL VNOPS					*
 ************************************************************************
 *
 * These are function shims replacing the normal filesystem ops. We become
 * responsible for calling the underlying filesystem ops. We have the choice
 * of executing the underlying op first and then generating the journal entry,
 * or starting the journal entry, executing the underlying op, and then
 * either completing or aborting it.
 *
 * The journal is supposed to be a high-level entity, which generally means
 * identifying files by name rather than by inode. Supplying both allows
 * the journal to be used both for inode-number-compatible 'mirrors' and
 * for simple filesystem replication.
 *
 * Writes are particularly difficult to deal with because a single write may
 * represent a hundred megabyte buffer or more, and both writes and truncations
 * require the 'old' data to be written out as well as the new data if the
 * log is reversible. Other issues:
 *
 * - How to deal with operations on unlinked files (no path available),
 *   but which may still be filesystem visible due to hard links.
 *
 * - How to deal with modifications made via a memory map.
 *
 * - Future cache coherency support will require cache coherency API calls
 *   both prior to and after the call to the underlying VFS.
 *
 * ALSO NOTE: We do not have to shim compatibility VOPs like MKDIR which have
 * new VFS equivalents (NMKDIR).
 */

/*
 * Journal vop_setattr { a_vp, a_vap, a_cred, a_td }
 */
static
int
journal_setattr(struct vop_setattr_args *ap)
{
    struct mount *mp;
    struct journal *jo;
    struct jrecord jrec;
    void *save;     /* warning, save pointers do not always remain valid */
    int error;

    error = vop_journal_operate_ap(&ap->a_head);
    mp = ap->a_head.a_ops->vv_mount;
    if (error == 0) {
        TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
            jrecord_init(jo, &jrec, -1);
            save = jrecord_push(&jrec, JTYPE_SETATTR);
            jrecord_write_cred(&jrec, ap->a_td, ap->a_cred);
            jrecord_write_vnode_ref(&jrec, ap->a_vp);
            jrecord_write_vattr(&jrec, ap->a_vap);
            jrecord_pop(&jrec, save);
            jrecord_done(&jrec, 0);
        }
    }
    return (error);
}

/*
 * Journal vop_write { a_vp, a_uio, a_ioflag, a_cred }
 */
static
int
journal_write(struct vop_write_args *ap)
{
    struct mount *mp;
    struct journal *jo;
    struct jrecord jrec;
    struct uio uio_copy;
    struct iovec uio_one_iovec;
    void *save;     /* warning, save pointers do not always remain valid */
    int error;

    /*
     * This is really nasty. UIO's don't retain sufficient information to
     * be reusable once they've gone through the VOP chain. The iovecs get
     * cleared, so we have to copy the UIO.
     *
     * XXX fix the UIO code to not destroy iov's during a scan so we can
     * reuse the uio over and over again.
     */
    uio_copy = *ap->a_uio;
    if (uio_copy.uio_iovcnt == 1) {
        uio_one_iovec = ap->a_uio->uio_iov[0];
        uio_copy.uio_iov = &uio_one_iovec;
    } else {
        uio_copy.uio_iov = malloc(uio_copy.uio_iovcnt * sizeof(struct iovec),
                                  M_JOURNAL, M_WAITOK);
        bcopy(ap->a_uio->uio_iov, uio_copy.uio_iov,
              uio_copy.uio_iovcnt * sizeof(struct iovec));
    }

    error = vop_journal_operate_ap(&ap->a_head);
    mp = ap->a_head.a_ops->vv_mount;
    if (error == 0) {
        TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
            jrecord_init(jo, &jrec, -1);
            save = jrecord_push(&jrec, JTYPE_WRITE);
            jrecord_write_cred(&jrec, NULL, ap->a_cred);
            jrecord_write_vnode_ref(&jrec, ap->a_vp);
            jrecord_write_uio(&jrec, JLEAF_FILEDATA, &uio_copy);
            jrecord_pop(&jrec, save);
            jrecord_done(&jrec, 0);
        }
    }

    if (uio_copy.uio_iov != &uio_one_iovec)
        free(uio_copy.uio_iov, M_JOURNAL);

    return (error);
}

/*
 * Journal vop_fsync { a_vp, a_waitfor, a_td }
 */
static
int
journal_fsync(struct vop_fsync_args *ap)
{
    struct mount *mp;
    struct journal *jo;
    int error;

    error = vop_journal_operate_ap(&ap->a_head);
    mp = ap->a_head.a_ops->vv_mount;
    if (error == 0) {
        TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
            /* XXX synchronize pending journal records */
        }
    }
    return (error);
}

/*
 * Journal vop_putpages { a_vp, a_m, a_count, a_sync, a_rtvals, a_offset }
 */
static
int
journal_putpages(struct vop_putpages_args *ap)
{
    struct mount *mp;
    struct journal *jo;
    struct jrecord jrec;
    void *save;     /* warning, save pointers do not always remain valid */
    int error;

    error = vop_journal_operate_ap(&ap->a_head);
    mp = ap->a_head.a_ops->vv_mount;
    if (error == 0) {
        TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
            jrecord_init(jo, &jrec, -1);
            save = jrecord_push(&jrec, JTYPE_PUTPAGES);
            jrecord_write_vnode_ref(&jrec, ap->a_vp);
            /* XXX pagelist */
            jrecord_pop(&jrec, save);
            jrecord_done(&jrec, 0);
        }
    }
    return (error);
}
1609
1610/*
1611 * Journal vop_setacl { a_vp, a_type, a_aclp, a_cred, a_td }
1612 */
1613static
1614int
1615journal_setacl(struct vop_setacl_args *ap)
1616{
1617 struct mount *mp;
1618 struct journal *jo;
1619 struct jrecord jrec;
1620 void *save; /* warning, save pointers do not always remain valid */
1621 int error;
1622
1623 error = vop_journal_operate_ap(&ap->a_head);
1624 mp = ap->a_head.a_ops->vv_mount;
1625 if (error == 0) {
1626 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1627 jrecord_init(jo, &jrec, -1);
1628 save = jrecord_push(&jrec, JTYPE_SETACL);
1629 jrecord_write_cred(&jrec, ap->a_td, ap->a_cred);
1630 jrecord_write_vnode_ref(&jrec, ap->a_vp);
1631 /* XXX type, aclp */
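#if 0
			/*
			 * A sketch only: JLEAF_ACLTYPE and JLEAF_ACLDATA are
			 * assumed leaf types that do not exist yet; the ACL
			 * would be recorded as opaque data.
			 */
			jrecord_leaf(&jrec, JLEAF_ACLTYPE, &ap->a_type,
				     sizeof(ap->a_type));
			jrecord_leaf(&jrec, JLEAF_ACLDATA, ap->a_aclp,
				     sizeof(*ap->a_aclp));
#endif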
1632 jrecord_pop(&jrec, save);
1633 jrecord_done(&jrec, 0);
1634 }
1635 }
1636 return (error);
1637}
1638
1639/*
1640 * Journal vop_setextattr { a_vp, a_name, a_uio, a_cred, a_td }
1641 */
1642static
1643int
1644journal_setextattr(struct vop_setextattr_args *ap)
1645{
1646 struct mount *mp;
1647 struct journal *jo;
1648 struct jrecord jrec;
1649 void *save; /* warning, save pointers do not always remain valid */
1650 int error;
1651
1652 error = vop_journal_operate_ap(&ap->a_head);
1653 mp = ap->a_head.a_ops->vv_mount;
1654 if (error == 0) {
1655 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1656 jrecord_init(jo, &jrec, -1);
1657 save = jrecord_push(&jrec, JTYPE_SETEXTATTR);
1658 jrecord_write_cred(&jrec, ap->a_td, ap->a_cred);
1659 jrecord_write_vnode_ref(&jrec, ap->a_vp);
1660 jrecord_leaf(&jrec, JLEAF_ATTRNAME, ap->a_name, strlen(ap->a_name));
1661 jrecord_write_uio(&jrec, JLEAF_FILEDATA, ap->a_uio);
1662 jrecord_pop(&jrec, save);
1663 jrecord_done(&jrec, 0);
1664 }
1665 }
1666 return (error);
1667}
1668
1669/*
1670 * Journal vop_ncreate { a_ncp, a_vpp, a_cred, a_vap }
1671 */
1672static
1673int
1674journal_ncreate(struct vop_ncreate_args *ap)
1675{
1676 struct mount *mp;
1677 struct journal *jo;
1678 struct jrecord jrec;
1679 void *save; /* warning, save pointers do not always remain valid */
1680 int error;
1681
1682 error = vop_journal_operate_ap(&ap->a_head);
1683 mp = ap->a_head.a_ops->vv_mount;
1684 if (error == 0) {
1685 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1686 jrecord_init(jo, &jrec, -1);
1687 save = jrecord_push(&jrec, JTYPE_CREATE);
1688 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1689 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
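			/*
			 * XXX a_vap is available but not yet recorded, so a
			 * replaying target cannot recover the creation mode.
			 */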
1690 if (*ap->a_vpp)
1691 jrecord_write_vnode_ref(&jrec, *ap->a_vpp);
1692 jrecord_pop(&jrec, save);
1693 jrecord_done(&jrec, 0);
1694 }
1695 }
1696 return (error);
1697}
1698
1699/*
1700 * Journal vop_nmknod { a_ncp, a_vpp, a_cred, a_vap }
1701 */
1702static
1703int
1704journal_nmknod(struct vop_nmknod_args *ap)
1705{
1706 struct mount *mp;
1707 struct journal *jo;
1708 struct jrecord jrec;
1709 void *save; /* warning, save pointers do not always remain valid */
1710 int error;
1711
1712 error = vop_journal_operate_ap(&ap->a_head);
1713 mp = ap->a_head.a_ops->vv_mount;
1714 if (error == 0) {
1715 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1716 jrecord_init(jo, &jrec, -1);
1717 save = jrecord_push(&jrec, JTYPE_MKNOD);
1718 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1719 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
1720 jrecord_write_vattr(&jrec, ap->a_vap);
1721 if (*ap->a_vpp)
1722 jrecord_write_vnode_ref(&jrec, *ap->a_vpp);
1723 jrecord_pop(&jrec, save);
1724 jrecord_done(&jrec, 0);
1725 }
1726 }
1727 return (error);
1728}
1729
1730/*
1731 * Journal vop_nlink { a_ncp, a_vp, a_cred }
1732 */
1733static
1734int
1735journal_nlink(struct vop_nlink_args *ap)
1736{
1737 struct mount *mp;
1738 struct journal *jo;
1739 struct jrecord jrec;
1740 void *save; /* warning, save pointers do not always remain valid */
1741 int error;
1742
1743 error = vop_journal_operate_ap(&ap->a_head);
1744 mp = ap->a_head.a_ops->vv_mount;
1745 if (error == 0) {
1746 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1747 jrecord_init(jo, &jrec, -1);
1748 save = jrecord_push(&jrec, JTYPE_LINK);
1749 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1750 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
1751 jrecord_write_vnode_ref(&jrec, ap->a_vp);
1752 /* XXX PATH to VP and inode number */
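			/*
			 * Replaying a hardlink requires identifying the
			 * existing file on the target; a second path leaf
			 * reverse-resolved from a_vp, or the inode number,
			 * would serve.  Neither is recorded yet.
			 */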
1753 jrecord_pop(&jrec, save);
1754 jrecord_done(&jrec, 0);
1755 }
1756 }
1757 return (error);
1758}
1759
1760/*
1761 * Journal vop_nsymlink { a_ncp, a_vpp, a_cred, a_vap, a_target }
1762 */
1763static
1764int
1765journal_nsymlink(struct vop_nsymlink_args *ap)
1766{
1767 struct mount *mp;
1768 struct journal *jo;
1769 struct jrecord jrec;
1770 void *save; /* warning, save pointers do not always remain valid */
1771 int error;
1772
1773 error = vop_journal_operate_ap(&ap->a_head);
1774 mp = ap->a_head.a_ops->vv_mount;
1775 if (error == 0) {
1776 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1777 jrecord_init(jo, &jrec, -1);
1778 save = jrecord_push(&jrec, JTYPE_SYMLINK);
1779 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1780 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
1781 jrecord_leaf(&jrec, JLEAF_SYMLINKDATA,
1782 ap->a_target, strlen(ap->a_target));
1783 if (*ap->a_vpp)
1784 jrecord_write_vnode_ref(&jrec, *ap->a_vpp);
1785 jrecord_pop(&jrec, save);
1786 jrecord_done(&jrec, 0);
1787 }
1788 }
1789 return (error);
1790}
1791
1792/*
1793 * Journal vop_nwhiteout { a_ncp, a_cred, a_flags }
1794 */
1795static
1796int
1797journal_nwhiteout(struct vop_nwhiteout_args *ap)
1798{
1799 struct mount *mp;
1800 struct journal *jo;
1801 struct jrecord jrec;
1802 void *save; /* warning, save pointers do not always remain valid */
1803 int error;
1804
1805 error = vop_journal_operate_ap(&ap->a_head);
1806 mp = ap->a_head.a_ops->vv_mount;
1807 if (error == 0) {
1808 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1809 jrecord_init(jo, &jrec, -1);
1810 save = jrecord_push(&jrec, JTYPE_WHITEOUT);
1811 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1812 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
1813 jrecord_pop(&jrec, save);
1814 jrecord_done(&jrec, 0);
1815 }
1816 }
1817 return (error);
1818}
1819
1820/*
1821 * Journal vop_nremove { a_ncp, a_cred }
1822 */
1823static
1824int
1825journal_nremove(struct vop_nremove_args *ap)
1826{
1827 struct mount *mp;
1828 struct journal *jo;
1829 struct jrecord jrec;
1830 void *save; /* warning, save pointers do not always remain valid */
1831 int error;
1832
1833 error = vop_journal_operate_ap(&ap->a_head);
1834 mp = ap->a_head.a_ops->vv_mount;
1835 if (error == 0) {
1836 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1837 jrecord_init(jo, &jrec, -1);
1838 save = jrecord_push(&jrec, JTYPE_REMOVE);
1839 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1840 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
1841 jrecord_pop(&jrec, save);
1842 jrecord_done(&jrec, 0);
1843 }
1844 }
1845 return (error);
1846}
1847
1848/*
1849 * Journal vop_nmkdir { a_ncp, a_vpp, a_cred, a_vap }
1850 */
1851static
1852int
1853journal_nmkdir(struct vop_nmkdir_args *ap)
1854{
1855 struct mount *mp;
1856 struct journal *jo;
1857 struct jrecord jrec;
1858 void *save; /* warning, save pointers do not always remain valid */
1859 int error;
1860
1861 error = vop_journal_operate_ap(&ap->a_head);
1862 mp = ap->a_head.a_ops->vv_mount;
1863 if (error == 0) {
1864 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1865 jrecord_init(jo, &jrec, -1);
1866 if (jo->flags & MC_JOURNAL_WANT_REVERSABLE) {
1867 save = jrecord_push(&jrec, JTYPE_UNDO);
1868 /* XXX undo operations */
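			/*
			 * For mkdir the undo data would be small: recording
			 * the directory path should be enough for the target
			 * to rmdir it when rolling the stream back.
			 */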
1869 jrecord_pop(&jrec, save);
1870 }
1871#if 0
1872 if (jo->flags & MC_JOURNAL_WANT_AUDIT) {
1873 jrecord_write_audit(&jrec);
1874 }
1875#endif
1876 save = jrecord_push(&jrec, JTYPE_MKDIR);
1877 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
1878 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1879 jrecord_write_vattr(&jrec, ap->a_vap);
1881 if (*ap->a_vpp)
1882 jrecord_write_vnode_ref(&jrec, *ap->a_vpp);
1883 jrecord_pop(&jrec, save);
1884 jrecord_done(&jrec, 0);
1885 }
1886 }
1887 return (error);
1888}
1889
1890/*
1891 * Journal vop_nrmdir { a_ncp, a_cred }
1892 */
1893static
1894int
1895journal_nrmdir(struct vop_nrmdir_args *ap)
1896{
1897 struct mount *mp;
1898 struct journal *jo;
1899 struct jrecord jrec;
1900 void *save; /* warning, save pointers do not always remain valid */
1901 int error;
1902
1903 error = vop_journal_operate_ap(&ap->a_head);
1904 mp = ap->a_head.a_ops->vv_mount;
1905 if (error == 0) {
1906 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1907 jrecord_init(jo, &jrec, -1);
1908 save = jrecord_push(&jrec, JTYPE_RMDIR);
1909 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1910 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
1911 jrecord_pop(&jrec, save);
1912 jrecord_done(&jrec, 0);
1913 }
1914 }
1915 return (error);
1916}
1917
1918/*
1919 * Journal vop_nrename { a_fncp, a_tncp, a_cred }
1920 */
1921static
1922int
1923journal_nrename(struct vop_nrename_args *ap)
1924{
1925 struct mount *mp;
1926 struct journal *jo;
1927 struct jrecord jrec;
1928 void *save; /* warning, save pointers do not always remain valid */
1929 int error;
1930
1931 error = vop_journal_operate_ap(&ap->a_head);
1932 mp = ap->a_head.a_ops->vv_mount;
1933 if (error == 0) {
1934 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1935 jrecord_init(jo, &jrec, -1);
1936 save = jrecord_push(&jrec, JTYPE_RENAME);
1937 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1938 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_fncp);
1939 jrecord_write_path(&jrec, JLEAF_PATH2, ap->a_tncp);
1940 jrecord_pop(&jrec, save);
1941 jrecord_done(&jrec, 0);
1942 }
1943 }
1944 return (error);
1945}
1946