1/*
2 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/kern/vfs_jops.c,v 1.18 2005/07/13 01:58:20 dillon Exp $
35 */
36/*
37 * Each mount point may have zero or more independently configured journals
38 * attached to it. Each journal is represented by a memory FIFO and worker
39 * thread. Journal events are streamed through the FIFO to the thread,
40 * batched up (typically on one-second intervals), and written out by the
41 * thread.
42 *
43 * Journal vnode ops are executed instead of mnt_vn_norm_ops when one or
44 * more journals have been installed on a mount point. It becomes the
45 * responsibility of the journal op to call the underlying normal op as
46 * appropriate.
47 *
48 * The journaling protocol is intended to evolve into a two-way stream
49 * whereby transaction IDs can be acknowledged by the journaling target
50 * when the data has been committed to hard storage. Both implicit and
51 * explicit acknowledgement schemes will be supported, depending on the
52 * sophistication of the journaling stream, plus resynchronization and
53 * restart when a journaling stream is interrupted. This information will
54 * also be made available to journaling-aware filesystems to allow better
55 * management of their own physical storage synchronization mechanisms as
56 * well as to allow such filesystems to take direct advantage of the kernel's
57 * journaling layer so they don't have to roll their own.
58 *
59 * In addition, the worker thread will have access to much larger
60 * spooling areas than the memory buffer is able to provide by e.g.
61 * reserving swap space, in order to absorb potentially long interruptions
62 * of off-site journaling streams, and to prevent 'slow' off-site linkages
63 * from radically slowing down local filesystem operations.
64 *
65 * Because of the non-trivial algorithms the journaling system will be
66 * required to support, use of a worker thread is mandatory. Efficiencies
67 * are maintained by utilizing the memory FIFO to batch transactions when
68 * possible, reducing the number of gratuitous thread switches and taking
69 * advantage of cpu caches through the use of shorter batched code paths
70 * rather than trying to do everything in the context of the process
71 * originating the filesystem op. In the future the memory FIFO can be
72 * made per-cpu to remove BGL or other locking requirements.
73 */
6ddb7618
MD
74#include <sys/param.h>
75#include <sys/systm.h>
76#include <sys/buf.h>
77#include <sys/conf.h>
78#include <sys/kernel.h>
82eaef15 79#include <sys/queue.h>
6ddb7618
MD
80#include <sys/lock.h>
81#include <sys/malloc.h>
82#include <sys/mount.h>
83#include <sys/unistd.h>
84#include <sys/vnode.h>
85#include <sys/poll.h>
2281065e 86#include <sys/mountctl.h>
b2f7ec6c 87#include <sys/journal.h>
2281065e 88#include <sys/file.h>
b2f7ec6c 89#include <sys/proc.h>
9578bde0 90#include <sys/msfbuf.h>
500b6a22
MD
91#include <sys/socket.h>
92#include <sys/socketvar.h>
6ddb7618
MD
93
94#include <machine/limits.h>
95
96#include <vm/vm.h>
97#include <vm/vm_object.h>
98#include <vm/vm_page.h>
99#include <vm/vm_pager.h>
100#include <vm/vnode_pager.h>
101
2281065e
MD
102#include <sys/file2.h>
103#include <sys/thread2.h>
104
105static int journal_attach(struct mount *mp);
106static void journal_detach(struct mount *mp);
107static int journal_install_vfs_journal(struct mount *mp, struct file *fp,
108 const struct mountctl_install_journal *info);
500b6a22
MD
109static int journal_restart_vfs_journal(struct mount *mp, struct file *fp,
110 const struct mountctl_restart_journal *info);
2281065e
MD
111static int journal_remove_vfs_journal(struct mount *mp,
112 const struct mountctl_remove_journal *info);
500b6a22
MD
113static int journal_restart(struct mount *mp, struct file *fp,
114 struct journal *jo, int flags);
432b8263 115static int journal_destroy(struct mount *mp, struct journal *jo, int flags);
2281065e 116static int journal_resync_vfs_journal(struct mount *mp, const void *ctl);
39b13188
MD
117static int journal_status_vfs_journal(struct mount *mp,
118 const struct mountctl_status_journal *info,
119 struct mountctl_journal_ret_status *rstat,
120 int buflen, int *res);
500b6a22
MD
121static void journal_create_threads(struct journal *jo);
122static void journal_destroy_threads(struct journal *jo, int flags);
432b8263
MD
123static void journal_wthread(void *info);
124static void journal_rthread(void *info);
82eaef15
MD
125
126static void *journal_reserve(struct journal *jo,
127 struct journal_rawrecbeg **rawpp,
128 int16_t streamid, int bytes);
129static void *journal_extend(struct journal *jo,
130 struct journal_rawrecbeg **rawpp,
131 int truncbytes, int bytes, int *newstreamrecp);
132static void journal_abort(struct journal *jo,
133 struct journal_rawrecbeg **rawpp);
134static void journal_commit(struct journal *jo,
135 struct journal_rawrecbeg **rawpp,
136 int bytes, int closeout);
137
138static void jrecord_init(struct journal *jo,
139 struct jrecord *jrec, int16_t streamid);
140static struct journal_subrecord *jrecord_push(
141 struct jrecord *jrec, int16_t rectype);
142static void jrecord_pop(struct jrecord *jrec, struct journal_subrecord *parent);
143static struct journal_subrecord *jrecord_write(struct jrecord *jrec,
144 int16_t rectype, int bytes);
145static void jrecord_data(struct jrecord *jrec, const void *buf, int bytes);
146static void jrecord_done(struct jrecord *jrec, int abortit);
147
558b8e00
MD
148static int journal_setattr(struct vop_setattr_args *ap);
149static int journal_write(struct vop_write_args *ap);
150static int journal_fsync(struct vop_fsync_args *ap);
151static int journal_putpages(struct vop_putpages_args *ap);
152static int journal_setacl(struct vop_setacl_args *ap);
153static int journal_setextattr(struct vop_setextattr_args *ap);
154static int journal_ncreate(struct vop_ncreate_args *ap);
155static int journal_nmknod(struct vop_nmknod_args *ap);
156static int journal_nlink(struct vop_nlink_args *ap);
157static int journal_nsymlink(struct vop_nsymlink_args *ap);
158static int journal_nwhiteout(struct vop_nwhiteout_args *ap);
159static int journal_nremove(struct vop_nremove_args *ap);
2281065e 160static int journal_nmkdir(struct vop_nmkdir_args *ap);
558b8e00
MD
161static int journal_nrmdir(struct vop_nrmdir_args *ap);
162static int journal_nrename(struct vop_nrename_args *ap);
2281065e 163
6ddb7618 164static struct vnodeopv_entry_desc journal_vnodeop_entries[] = {
2281065e
MD
165 { &vop_default_desc, vop_journal_operate_ap },
166 { &vop_mountctl_desc, (void *)journal_mountctl },
558b8e00
MD
167 { &vop_setattr_desc, (void *)journal_setattr },
168 { &vop_write_desc, (void *)journal_write },
169 { &vop_fsync_desc, (void *)journal_fsync },
170 { &vop_putpages_desc, (void *)journal_putpages },
171 { &vop_setacl_desc, (void *)journal_setacl },
172 { &vop_setextattr_desc, (void *)journal_setextattr },
173 { &vop_ncreate_desc, (void *)journal_ncreate },
174 { &vop_nmknod_desc, (void *)journal_nmknod },
175 { &vop_nlink_desc, (void *)journal_nlink },
176 { &vop_nsymlink_desc, (void *)journal_nsymlink },
177 { &vop_nwhiteout_desc, (void *)journal_nwhiteout },
178 { &vop_nremove_desc, (void *)journal_nremove },
2281065e 179 { &vop_nmkdir_desc, (void *)journal_nmkdir },
558b8e00
MD
180 { &vop_nrmdir_desc, (void *)journal_nrmdir },
181 { &vop_nrename_desc, (void *)journal_nrename },
2281065e 182 { NULL, NULL }
6ddb7618
MD
183};
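
/*
 * Illustrative sketch only (not compiled): the general shape of a journaling
 * vop wrapper routed through the table above.  The wrapper forwards the
 * operation to the underlying (normal) op and, on success, describes it to
 * every journal attached to the mount.  The chaining call, the a_ncp field
 * name and the record types used below are assumptions for illustration and
 * are not the exact encoding used by the wrappers defined later in this file.
 */
#if 0
static int
journal_example_nremove(struct vop_nremove_args *ap)
{
    struct mount *mp;
    struct journal *jo;
    struct jrecord jrec;
    int error;

    /* run the underlying filesystem operation first (assumed chaining call) */
    error = vop_journal_operate_ap(&ap->a_head);
    mp = ap->a_head.a_ops->vv_mount;
    if (error == 0) {
        /* then emit a record to every journal attached to the mount */
        TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
            jrecord_init(jo, &jrec, -1);        /* -1 allocates a stream id */
            jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
            jrecord_done(&jrec, 0);
        }
    }
    return (error);
}
#endif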
184
82eaef15 185static MALLOC_DEFINE(M_JOURNAL, "journal", "Journaling structures");
2281065e
MD
186static MALLOC_DEFINE(M_JFIFO, "journal-fifo", "Journal FIFO");
187
6ddb7618 188int
2281065e
MD
189journal_mountctl(struct vop_mountctl_args *ap)
190{
191 struct mount *mp;
192 int error = 0;
193
194 mp = ap->a_head.a_ops->vv_mount;
195 KKASSERT(mp);
196
197 if (mp->mnt_vn_journal_ops == NULL) {
198 switch(ap->a_op) {
199 case MOUNTCTL_INSTALL_VFS_JOURNAL:
200 error = journal_attach(mp);
201 if (error == 0 && ap->a_ctllen != sizeof(struct mountctl_install_journal))
202 error = EINVAL;
203 if (error == 0 && ap->a_fp == NULL)
204 error = EBADF;
205 if (error == 0)
206 error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
207 if (TAILQ_EMPTY(&mp->mnt_jlist))
208 journal_detach(mp);
209 break;
500b6a22 210 case MOUNTCTL_RESTART_VFS_JOURNAL:
2281065e
MD
211 case MOUNTCTL_REMOVE_VFS_JOURNAL:
212 case MOUNTCTL_RESYNC_VFS_JOURNAL:
39b13188
MD
213 case MOUNTCTL_STATUS_VFS_JOURNAL:
214 error = ENOENT;
2281065e
MD
215 break;
216 default:
217 error = EOPNOTSUPP;
218 break;
219 }
220 } else {
221 switch(ap->a_op) {
222 case MOUNTCTL_INSTALL_VFS_JOURNAL:
223 if (ap->a_ctllen != sizeof(struct mountctl_install_journal))
224 error = EINVAL;
225 if (error == 0 && ap->a_fp == NULL)
226 error = EBADF;
227 if (error == 0)
228 error = journal_install_vfs_journal(mp, ap->a_fp, ap->a_ctl);
229 break;
500b6a22
MD
230 case MOUNTCTL_RESTART_VFS_JOURNAL:
231 if (ap->a_ctllen != sizeof(struct mountctl_restart_journal))
232 error = EINVAL;
233 if (error == 0 && ap->a_fp == NULL)
234 error = EBADF;
235 if (error == 0)
236 error = journal_restart_vfs_journal(mp, ap->a_fp, ap->a_ctl);
237 break;
2281065e
MD
238 case MOUNTCTL_REMOVE_VFS_JOURNAL:
239 if (ap->a_ctllen != sizeof(struct mountctl_remove_journal))
240 error = EINVAL;
241 if (error == 0)
242 error = journal_remove_vfs_journal(mp, ap->a_ctl);
243 if (TAILQ_EMPTY(&mp->mnt_jlist))
244 journal_detach(mp);
245 break;
246 case MOUNTCTL_RESYNC_VFS_JOURNAL:
247 if (ap->a_ctllen != 0)
248 error = EINVAL;
249 error = journal_resync_vfs_journal(mp, ap->a_ctl);
250 break;
39b13188
MD
251 case MOUNTCTL_STATUS_VFS_JOURNAL:
252 if (ap->a_ctllen != sizeof(struct mountctl_status_journal))
253 error = EINVAL;
254 if (error == 0) {
255 error = journal_status_vfs_journal(mp, ap->a_ctl,
256 ap->a_buf, ap->a_buflen, ap->a_res);
257 }
258 break;
2281065e
MD
259 default:
260 error = EOPNOTSUPP;
261 break;
262 }
263 }
264 return (error);
265}
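
/*
 * Illustrative sketch only (not compiled): how userland would reach the
 * MOUNTCTL_INSTALL_VFS_JOURNAL case handled above.  The mountctl(2)
 * prototype shown here is an assumption; only the id, flags and membufsize
 * members of mountctl_install_journal are relied upon, matching their use
 * in journal_install_vfs_journal() below, with everything else zeroed.
 */
#if 0
#include <sys/param.h>
#include <sys/mountctl.h>
#include <string.h>

int
install_example_journal(const char *mtpt, int target_fd)
{
    struct mountctl_install_journal info;

    memset(&info, 0, sizeof(info));
    strlcpy(info.id, "example", sizeof(info.id));  /* journal id, <= JIDMAX */
    info.flags = 0;                        /* e.g. MC_JOURNAL_WANT_FULLDUPLEX */
    info.membufsize = 1024 * 1024;         /* rounded up to a power of 2 */

    /* assumed prototype: mountctl(path, op, fd, ctl, ctllen, buf, buflen) */
    return (mountctl(mtpt, MOUNTCTL_INSTALL_VFS_JOURNAL, target_fd,
                     &info, sizeof(info), NULL, 0));
}
#endif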
266
267/*
268 * High level mount point setup.
269 */
270static int
6ddb7618
MD
271journal_attach(struct mount *mp)
272{
2281065e
MD
273 vfs_add_vnodeops(mp, &mp->mnt_vn_journal_ops, journal_vnodeop_entries);
274 return(0);
6ddb7618
MD
275}
276
2281065e 277static void
6ddb7618
MD
278journal_detach(struct mount *mp)
279{
2281065e
MD
280 if (mp->mnt_vn_journal_ops)
281 vfs_rm_vnodeops(&mp->mnt_vn_journal_ops);
282}
283
284/*
82eaef15
MD
285 * Install a journal on a mount point. Each journal has an associated worker
286 * thread which is responsible for buffering and spooling the data to the
287 * target. A mount point may have multiple journals attached to it. An
288 * initial start record is generated when the journal is associated.
2281065e
MD
289 */
290static int
291journal_install_vfs_journal(struct mount *mp, struct file *fp,
292 const struct mountctl_install_journal *info)
293{
294 struct journal *jo;
82eaef15 295 struct jrecord jrec;
2281065e
MD
296 int error = 0;
297 int size;
298
299 jo = malloc(sizeof(struct journal), M_JOURNAL, M_WAITOK|M_ZERO);
300 bcopy(info->id, jo->id, sizeof(jo->id));
432b8263
MD
301 jo->flags = info->flags & ~(MC_JOURNAL_WACTIVE | MC_JOURNAL_RACTIVE |
302 MC_JOURNAL_STOP_REQ);
2281065e
MD
303
304 /*
305 * Memory FIFO size, round to nearest power of 2
306 */
82eaef15 307 if (info->membufsize) {
2281065e
MD
308 if (info->membufsize < 65536)
309 size = 65536;
310 else if (info->membufsize > 128 * 1024 * 1024)
311 size = 128 * 1024 * 1024;
312 else
313 size = (int)info->membufsize;
314 } else {
315 size = 1024 * 1024;
316 }
317 jo->fifo.size = 1;
318 while (jo->fifo.size < size)
319 jo->fifo.size <<= 1;
320
321 /*
322 * Other parameters. If not specified the starting transaction id
323 * will be the current date.
324 */
82eaef15 325 if (info->transid) {
2281065e
MD
326 jo->transid = info->transid;
327 } else {
328 struct timespec ts;
329 getnanotime(&ts);
330 jo->transid = ((int64_t)ts.tv_sec << 30) | ts.tv_nsec;
331 }
332
333 jo->fp = fp;
334
335 /*
336 * Allocate the memory FIFO
337 */
338 jo->fifo.mask = jo->fifo.size - 1;
339 jo->fifo.membase = malloc(jo->fifo.size, M_JFIFO, M_WAITOK|M_ZERO|M_NULLOK);
340 if (jo->fifo.membase == NULL)
341 error = ENOMEM;
342
82eaef15 343 /*
3119bac5 344 * Create the worker threads and generate the association record.
82eaef15 345 */
2281065e
MD
346 if (error) {
347 free(jo, M_JOURNAL);
348 } else {
349 fhold(fp);
500b6a22 350 journal_create_threads(jo);
82eaef15
MD
351 jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
352 jrecord_write(&jrec, JTYPE_ASSOCIATE, 0);
353 jrecord_done(&jrec, 0);
2281065e
MD
354 TAILQ_INSERT_TAIL(&mp->mnt_jlist, jo, jentry);
355 }
356 return(error);
357}
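
/*
 * Standalone sketch (not compiled) of the FIFO arithmetic used above and in
 * the worker threads below: the buffer size is rounded up to a power of two
 * so that indices can simply be masked into it, windex/rindex/xindex only
 * ever increase, and the reservable space is the distance between windex and
 * the last acknowledged offset (xindex).
 */
#if 0
static void
fifo_index_example(int64_t membufsize)
{
    int size = 65536;                   /* minimum accepted above */
    int mask, windex, rindex, xindex, offset, avail;

    while (size < membufsize)           /* round up to a power of 2 */
        size <<= 1;
    mask = size - 1;

    windex = rindex = xindex = 0;       /* monotonically increasing counters */
    windex += 16;                       /* reserve one minimal 16-byte record */
    offset = rindex & mask;             /* byte offset of rindex in membase[] */
    avail = size - (windex - xindex);   /* space still reservable */
    (void)offset;
    (void)avail;
}
#endif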
358
500b6a22
MD
359/*
360 * Restart a journal with a new descriptor. The existing reader and writer
361 * threads are terminated and a new descriptor is associated with the
362 * journal. The FIFO rindex is reset to xindex and the threads are then
363 * restarted.
364 */
365static int
366journal_restart_vfs_journal(struct mount *mp, struct file *fp,
367 const struct mountctl_restart_journal *info)
368{
369 struct journal *jo;
370 int error;
371
372 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
373 if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
374 break;
375 }
376 if (jo)
377 error = journal_restart(mp, fp, jo, info->flags);
378 else
379 error = EINVAL;
380 return (error);
381}
382
383static int
384journal_restart(struct mount *mp, struct file *fp,
385 struct journal *jo, int flags)
386{
387 /*
388 * XXX lock the jo
389 */
390
391#if 0
392 /*
393 * Record the fact that we are doing a restart in the journal.
394 * XXX it isn't safe to do this if the journal is being restarted
395 * because it was locked up and the writer thread has already exited.
396 */
397 jrecord_init(jo, &jrec, JREC_STREAMID_RESTART);
398 jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
399 jrecord_done(&jrec, 0);
400#endif
401
402 /*
403 * Stop the reader and writer threads and clean up the current
404 * descriptor.
405 */
406 printf("RESTART WITH FP %p KILLING %p\n", fp, jo->fp);
407 journal_destroy_threads(jo, flags);
408
409 if (jo->fp)
410 fdrop(jo->fp, curthread);
411
412 /*
413 * Associate the new descriptor, reset the FIFO index, and recreate
414 * the threads.
415 */
416 fhold(fp);
417 jo->fp = fp;
418 jo->fifo.rindex = jo->fifo.xindex;
419 journal_create_threads(jo);
420
421 return(0);
422}
423
82eaef15
MD
424/*
425 * Disassociate a journal from a mount point and terminate its worker thread.
426 * A final termination record is written out before the file pointer is
427 * dropped.
428 */
2281065e 429static int
82eaef15
MD
430journal_remove_vfs_journal(struct mount *mp,
431 const struct mountctl_remove_journal *info)
2281065e
MD
432{
433 struct journal *jo;
434 int error;
435
436 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
437 if (bcmp(jo->id, info->id, sizeof(jo->id)) == 0)
438 break;
439 }
432b8263
MD
440 if (jo)
441 error = journal_destroy(mp, jo, info->flags);
442 else
443 error = EINVAL;
444 return (error);
445}
82eaef15 446
432b8263
MD
447/*
448 * Remove all journals associated with a mount point. Usually called
449 * by the umount code.
450 */
451void
452journal_remove_all_journals(struct mount *mp, int flags)
453{
454 struct journal *jo;
82eaef15 455
432b8263
MD
456 while ((jo = TAILQ_FIRST(&mp->mnt_jlist)) != NULL) {
457 journal_destroy(mp, jo, flags);
2281065e 458 }
432b8263
MD
459}
460
461static int
462journal_destroy(struct mount *mp, struct journal *jo, int flags)
463{
464 struct jrecord jrec;
465
466 TAILQ_REMOVE(&mp->mnt_jlist, jo, jentry);
467
468 jrecord_init(jo, &jrec, JREC_STREAMID_DISCONT);
469 jrecord_write(&jrec, JTYPE_DISASSOCIATE, 0);
470 jrecord_done(&jrec, 0);
471
500b6a22
MD
472 journal_destroy_threads(jo, flags);
473
432b8263
MD
474 if (jo->fp)
475 fdrop(jo->fp, curthread);
476 if (jo->fifo.membase)
477 free(jo->fifo.membase, M_JFIFO);
478 free(jo, M_JOURNAL);
479 return(0);
2281065e
MD
480}
481
482static int
483journal_resync_vfs_journal(struct mount *mp, const void *ctl)
484{
485 return(EINVAL);
486}
487
39b13188
MD
488static int
489journal_status_vfs_journal(struct mount *mp,
490 const struct mountctl_status_journal *info,
491 struct mountctl_journal_ret_status *rstat,
492 int buflen, int *res)
493{
494 struct journal *jo;
495 int error = 0;
496 int index;
497
498 index = 0;
499 *res = 0;
500 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
501 if (info->index == MC_JOURNAL_INDEX_ID) {
502 if (bcmp(jo->id, info->id, sizeof(jo->id)) != 0)
503 continue;
504 } else if (info->index >= 0) {
505 if (info->index < index)
506 continue;
507 } else if (info->index != MC_JOURNAL_INDEX_ALL) {
508 continue;
509 }
510 if (buflen < sizeof(*rstat)) {
511 if (*res)
512 rstat[-1].flags |= MC_JOURNAL_STATUS_MORETOCOME;
513 else
514 error = EINVAL;
515 break;
516 }
517 bzero(rstat, sizeof(*rstat));
518 rstat->recsize = sizeof(*rstat);
519 bcopy(jo->id, rstat->id, sizeof(jo->id));
520 rstat->index = index;
521 rstat->membufsize = jo->fifo.size;
3119bac5
MD
522 rstat->membufused = jo->fifo.windex - jo->fifo.xindex;
523 rstat->membufunacked = jo->fifo.rindex - jo->fifo.xindex;
39b13188 524 rstat->bytessent = jo->total_acked;
3119bac5 525 rstat->fifostalls = jo->fifostalls;
39b13188
MD
526 ++rstat;
527 ++index;
528 *res += sizeof(*rstat);
529 buflen -= sizeof(*rstat);
530 }
531 return(error);
532}
432b8263 533
500b6a22
MD
534static void
535journal_create_threads(struct journal *jo)
536{
537 jo->flags &= ~(MC_JOURNAL_STOP_REQ | MC_JOURNAL_STOP_IMM);
538 jo->flags |= MC_JOURNAL_WACTIVE;
539 lwkt_create(journal_wthread, jo, NULL, &jo->wthread,
540 TDF_STOPREQ, -1, "journal w:%.*s", JIDMAX, jo->id);
541 lwkt_setpri(&jo->wthread, TDPRI_KERN_DAEMON);
542 lwkt_schedule(&jo->wthread);
543
544 if (jo->flags & MC_JOURNAL_WANT_FULLDUPLEX) {
545 jo->flags |= MC_JOURNAL_RACTIVE;
546 lwkt_create(journal_rthread, jo, NULL, &jo->rthread,
547 TDF_STOPREQ, -1, "journal r:%.*s", JIDMAX, jo->id);
548 lwkt_setpri(&jo->rthread, TDPRI_KERN_DAEMON);
549 lwkt_schedule(&jo->rthread);
550 }
551}
552
553static void
554journal_destroy_threads(struct journal *jo, int flags)
555{
556 int wcount;
557
558 jo->flags |= MC_JOURNAL_STOP_REQ | (flags & MC_JOURNAL_STOP_IMM);
559 wakeup(&jo->fifo);
560 wcount = 0;
561 while (jo->flags & (MC_JOURNAL_WACTIVE | MC_JOURNAL_RACTIVE)) {
562 tsleep(jo, 0, "jwait", hz);
563 if (++wcount % 10 == 0) {
564 printf("Warning: journal %s waiting for descriptors to close\n",
565 jo->id);
566 }
567 }
568
569 /*
570 * XXX SMP - threads should move to cpu requesting the restart or
571 * termination before finishing up to properly interlock.
572 */
573 tsleep(jo, 0, "jwait", hz);
574 lwkt_free_thread(&jo->wthread);
575 if (jo->flags & MC_JOURNAL_WANT_FULLDUPLEX)
576 lwkt_free_thread(&jo->rthread);
577}
578
82eaef15
MD
579/*
580 * The per-journal worker thread is responsible for writing out the
581 * journal's FIFO to the target stream.
582 */
2281065e 583static void
432b8263 584journal_wthread(void *info)
2281065e
MD
585{
586 struct journal *jo = info;
82eaef15 587 struct journal_rawrecbeg *rawp;
2281065e
MD
588 int bytes;
589 int error;
82eaef15 590 int avail;
2281065e
MD
591 int res;
592
593 for (;;) {
82eaef15
MD
594 /*
595 * Calculate the number of bytes available to write. This buffer
596 * area may contain reserved records so we can't just write it out
597 * without further checks.
598 */
599 bytes = jo->fifo.windex - jo->fifo.rindex;
600
601 /*
602 * sleep if no bytes are available or if an incomplete record is
603 * encountered (it needs to be filled in before we can write it
604 * out), and skip any pad records that we encounter.
605 */
606 if (bytes == 0) {
2281065e
MD
607 if (jo->flags & MC_JOURNAL_STOP_REQ)
608 break;
82eaef15
MD
609 tsleep(&jo->fifo, 0, "jfifo", hz);
610 continue;
611 }
9578bde0
MD
612
613 /*
614 * Sleep if we can not go any further due to hitting an incomplete
615 * record. This case should occur rarely but may have to be better
616 * optimized XXX.
617 */
82eaef15
MD
618 rawp = (void *)(jo->fifo.membase + (jo->fifo.rindex & jo->fifo.mask));
619 if (rawp->begmagic == JREC_INCOMPLETEMAGIC) {
620 tsleep(&jo->fifo, 0, "jpad", hz);
621 continue;
622 }
9578bde0
MD
623
624 /*
625 * Skip any pad records. We do not write out pad records if we can
626 * help it.
9578bde0 627 */
82eaef15 628 if (rawp->streamid == JREC_STREAMID_PAD) {
3119bac5
MD
629 if ((jo->flags & MC_JOURNAL_WANT_FULLDUPLEX) == 0) {
630 if (jo->fifo.rindex == jo->fifo.xindex) {
631 jo->fifo.xindex += (rawp->recsize + 15) & ~15;
632 jo->total_acked += (rawp->recsize + 15) & ~15;
633 }
634 }
82eaef15 635 jo->fifo.rindex += (rawp->recsize + 15) & ~15;
9578bde0
MD
636 jo->total_acked += bytes;
637 KKASSERT(jo->fifo.windex - jo->fifo.rindex >= 0);
82eaef15
MD
638 continue;
639 }
640
641 /*
9578bde0
MD
642 * 'bytes' is the amount of data that can potentially be written out.
643 * Calculate 'res', the amount of data that can actually be written
644 * out. res is bounded either by hitting the end of the physical
645 * memory buffer or by hitting an incomplete record. Incomplete
646 * records often occur due to the way the space reservation model
647 * works.
82eaef15
MD
648 */
649 res = 0;
650 avail = jo->fifo.size - (jo->fifo.rindex & jo->fifo.mask);
651 while (res < bytes && rawp->begmagic == JREC_BEGMAGIC) {
652 res += (rawp->recsize + 15) & ~15;
653 if (res >= avail) {
654 KKASSERT(res == avail);
655 break;
656 }
9578bde0 657 rawp = (void *)((char *)rawp + ((rawp->recsize + 15) & ~15));
2281065e 658 }
82eaef15
MD
659
660 /*
661 * Issue the write and deal with any errors or other conditions.
662 * For now assume blocking I/O. Since we are record-aware the
663 * code cannot yet handle partial writes.
664 *
3119bac5
MD
665 * We bump rindex prior to issuing the write to avoid racing
666 * the acknowledgement coming back (which could prevent the ack
667 * from bumping xindex). Restarts are always based on xindex so
668 * we do not try to undo the rindex if an error occurs.
669 *
82eaef15
MD
670 * XXX EWOULDBLOCK/NBIO
671 * XXX notification on failure
672 * XXX permanent versus temporary failures
82eaef15
MD
673 * XXX two-way acknowledgement stream in the return direction / xindex
674 */
675 bytes = res;
3119bac5 676 jo->fifo.rindex += bytes;
82eaef15 677 error = fp_write(jo->fp,
3119bac5 678 jo->fifo.membase + ((jo->fifo.rindex - bytes) & jo->fifo.mask),
82eaef15 679 bytes, &res);
2281065e
MD
680 if (error) {
681 printf("journal_thread(%s) write, error %d\n", jo->id, error);
82eaef15 682 /* XXX */
2281065e 683 } else {
82eaef15 684 KKASSERT(res == bytes);
82eaef15
MD
685 }
686
687 /*
432b8263
MD
688 * Advance rindex. If the journal stream is not full duplex we also
689 * advance xindex, otherwise the rjournal thread is responsible for
690 * advancing xindex.
82eaef15 691 */
3119bac5 692 if ((jo->flags & MC_JOURNAL_WANT_FULLDUPLEX) == 0) {
432b8263 693 jo->fifo.xindex += bytes;
3119bac5
MD
694 jo->total_acked += bytes;
695 }
9578bde0 696 KKASSERT(jo->fifo.windex - jo->fifo.rindex >= 0);
432b8263
MD
697 if ((jo->flags & MC_JOURNAL_WANT_FULLDUPLEX) == 0) {
698 if (jo->flags & MC_JOURNAL_WWAIT) {
699 jo->flags &= ~MC_JOURNAL_WWAIT; /* XXX hysteresis */
700 wakeup(&jo->fifo.windex);
701 }
702 }
703 }
500b6a22 704 fp_shutdown(jo->fp, SHUT_WR);
432b8263
MD
705 jo->flags &= ~MC_JOURNAL_WACTIVE;
706 wakeup(jo);
707 wakeup(&jo->fifo.windex);
708}
709
710/*
711 * A second per-journal worker thread is created for two-way journaling
712 * streams to deal with the return acknowledgement stream.
713 */
714static void
715journal_rthread(void *info)
716{
717 struct journal_rawrecbeg *rawp;
718 struct journal_ackrecord ack;
719 struct journal *jo = info;
720 int64_t transid;
721 int error;
722 int count;
723 int bytes;
432b8263
MD
724
725 transid = 0;
726 error = 0;
727
728 for (;;) {
729 /*
730 * We have been asked to stop
731 */
732 if (jo->flags & MC_JOURNAL_STOP_REQ)
733 break;
734
735 /*
736 * If we have no active transaction id, get one from the return
737 * stream.
738 */
739 if (transid == 0) {
3119bac5
MD
740 error = fp_read(jo->fp, &ack, sizeof(ack), &count, 1);
741#if 0
742 printf("fp_read ack error %d count %d\n", error, count);
743#endif
744 if (error || count != sizeof(ack))
745 break;
432b8263
MD
746 if (error) {
747 printf("read error %d on receive stream\n", error);
748 break;
749 }
750 if (ack.rbeg.begmagic != JREC_BEGMAGIC ||
751 ack.rend.endmagic != JREC_ENDMAGIC
752 ) {
753 printf("bad begmagic or endmagic on receive stream\n");
754 break;
755 }
756 transid = ack.rbeg.transid;
2281065e 757 }
432b8263
MD
758
759 /*
760 * Calculate the number of unacknowledged bytes. If there are no
761 * unacknowledged bytes then unsent data was acknowledged, report,
762 * sleep a bit, and loop in that case. This should not happen
763 * normally. The ack record is thrown away.
764 */
765 bytes = jo->fifo.rindex - jo->fifo.xindex;
766
767 if (bytes == 0) {
3119bac5 768 printf("warning: unsent data acknowledged transid %08llx\n", transid);
432b8263
MD
769 tsleep(&jo->fifo.xindex, 0, "jrseq", hz);
770 transid = 0;
771 continue;
772 }
773
774 /*
3119bac5 775 * Since rindex has advanced, the record pointed to by xindex
432b8263
MD
776 * must be a valid record.
777 */
778 rawp = (void *)(jo->fifo.membase + (jo->fifo.xindex & jo->fifo.mask));
779 KKASSERT(rawp->begmagic == JREC_BEGMAGIC);
780 KKASSERT(rawp->recsize <= bytes);
781
782 /*
783 * The target can acknowledge several records at once.
784 */
785 if (rawp->transid < transid) {
3119bac5 786#if 1
432b8263 787 printf("ackskip %08llx/%08llx\n", rawp->transid, transid);
3119bac5 788#endif
432b8263 789 jo->fifo.xindex += (rawp->recsize + 15) & ~15;
3119bac5 790 jo->total_acked += (rawp->recsize + 15) & ~15;
432b8263
MD
791 if (jo->flags & MC_JOURNAL_WWAIT) {
792 jo->flags &= ~MC_JOURNAL_WWAIT; /* XXX hysteresis */
793 wakeup(&jo->fifo.windex);
794 }
795 continue;
796 }
797 if (rawp->transid == transid) {
3119bac5 798#if 1
432b8263 799 printf("ackskip %08llx/%08llx\n", rawp->transid, transid);
3119bac5 800#endif
432b8263 801 jo->fifo.xindex += (rawp->recsize + 15) & ~15;
3119bac5 802 jo->total_acked += (rawp->recsize + 15) & ~15;
432b8263
MD
803 if (jo->flags & MC_JOURNAL_WWAIT) {
804 jo->flags &= ~MC_JOURNAL_WWAIT; /* XXX hysteresis */
805 wakeup(&jo->fifo.windex);
806 }
807 transid = 0;
808 continue;
809 }
3119bac5 810 printf("warning: unsent data(2) acknowledged transid %08llx\n", transid);
432b8263 811 transid = 0;
2281065e 812 }
432b8263 813 jo->flags &= ~MC_JOURNAL_RACTIVE;
2281065e
MD
814 wakeup(jo);
815 wakeup(&jo->fifo.windex);
816}
817
9578bde0
MD
818/*
819 * This builds a pad record which the journaling thread will skip over. Pad
820 * records are required when we are unable to reserve sufficient stream space
821 * due to insufficient space at the end of the physical memory fifo.
432b8263
MD
822 *
823 * Even though the record is not transmitted, a normal transid must be
824 * assigned to it so link recovery operations after a failure work properly.
9578bde0 825 */
88c28735 826static
82eaef15 827void
432b8263 828journal_build_pad(struct journal_rawrecbeg *rawp, int recsize, int64_t transid)
2281065e 829{
82eaef15
MD
830 struct journal_rawrecend *rendp;
831
832 KKASSERT((recsize & 15) == 0 && recsize >= 16);
833
82eaef15
MD
834 rawp->streamid = JREC_STREAMID_PAD;
835 rawp->recsize = recsize; /* must be 16-byte aligned */
432b8263 836 rawp->transid = transid;
82eaef15
MD
837 /*
838 * WARNING, rendp may overlap rawp->seqno. This is necessary to
35238fa5 839 * allow PAD records to fit in 16 bytes. Use cpu_ccfence() to
82eaef15
MD
840 * hopefully cause the compiler to not make any assumptions.
841 */
82eaef15
MD
842 rendp = (void *)((char *)rawp + rawp->recsize - sizeof(*rendp));
843 rendp->endmagic = JREC_ENDMAGIC;
844 rendp->check = 0;
845 rendp->recsize = rawp->recsize;
9578bde0
MD
846
847 /*
848 * Set the begin magic last. This is what will allow the journal
35238fa5
MD
849 * thread to write the record out. Use a store fence to prevent
850 * compiler and cpu reordering of the writes.
9578bde0 851 */
35238fa5 852 cpu_sfence();
9578bde0 853 rawp->begmagic = JREC_BEGMAGIC;
2281065e
MD
854}
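
/*
 * Standalone sketch (not compiled): the 16-byte rounding used throughout
 * this file.  Every stream record occupies a multiple of 16 bytes in the
 * FIFO, so the smallest possible pad record is 16 bytes, which is why the
 * trailer of such a pad overlaps the tail of its header as noted above.
 */
#if 0
static int
align16_example(int recsize)
{
    int aligned = (recsize + 15) & ~15;     /* e.g. 1 -> 16, 16 -> 16, 17 -> 32 */

    return (aligned);
}
#endif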
855
82eaef15
MD
856/*
857 * Wake up the worker thread if the FIFO is more than half full or if
858 * someone is waiting for space to be freed up. Otherwise let the
859 * heartbeat deal with it. Being able to avoid waking up the worker
9578bde0 860 * is the key to the journal's cpu performance.
82eaef15
MD
861 */
862static __inline
2281065e 863void
82eaef15 864journal_commit_wakeup(struct journal *jo)
2281065e
MD
865{
866 int avail;
867
82eaef15
MD
868 avail = jo->fifo.size - (jo->fifo.windex - jo->fifo.xindex);
869 KKASSERT(avail >= 0);
870 if ((avail < (jo->fifo.size >> 1)) || (jo->flags & MC_JOURNAL_WWAIT))
871 wakeup(&jo->fifo);
872}
873
874/*
875 * Create a new BEGIN stream record with the specified streamid and the
876 * specified amount of payload space. *rawpp will be set to point to the
877 * base of the new stream record and a pointer to the base of the payload
878 * space will be returned. *rawpp does not need to be pre-NULLd prior to
432b8263 879 * making this call. The raw record header will be partially initialized.
82eaef15
MD
880 *
881 * A stream can be extended, aborted, or committed by other API calls
882 * below. This may result in a sequence of potentially disconnected
883 * stream records to be output to the journaling target. The first record
884 * (the one created by this function) will be marked JREC_STREAMCTL_BEGIN,
885 * while the last record on commit or abort will be marked JREC_STREAMCTL_END
886 * (and possibly also JREC_STREAMCTL_ABORTED). The last record could wind
887 * up being the same as the first, in which case the bits are all set in
888 * the first record.
889 *
890 * The stream record is created in an incomplete state by setting the begin
891 * magic to JREC_INCOMPLETEMAGIC. This prevents the worker thread from
892 * flushing the fifo past our record until we have finished populating it.
893 * Other threads can reserve and operate on their own space without stalling
894 * but the stream output will stall until we have completed operations. The
895 * memory FIFO is intended to be large enough to absorb such situations
896 * without stalling out other threads.
897 */
898static
899void *
900journal_reserve(struct journal *jo, struct journal_rawrecbeg **rawpp,
901 int16_t streamid, int bytes)
902{
903 struct journal_rawrecbeg *rawp;
904 int avail;
905 int availtoend;
906 int req;
907
908 /*
909 * Add header and trailer overheads to the passed payload. Note that
910 * the passed payload size need not be aligned in any way.
911 */
912 bytes += sizeof(struct journal_rawrecbeg);
913 bytes += sizeof(struct journal_rawrecend);
914
915 for (;;) {
916 /*
917 * First, check boundary conditions. If the request would wrap around
918 * we have to skip past the ending block and return to the beginning
919 * of the FIFO's buffer. Calculate 'req' which is the actual number
920 * of bytes being reserved, including wrap-around dead space.
921 *
88c28735
MD
922 * Neither 'bytes' nor 'req' is aligned.
923 *
82eaef15
MD
924 * Note that availtoend is not truncated to avail and so cannot be
925 * used to determine whether the reservation is possible by itself.
926 * Also, since all fifo ops are 16-byte aligned, we can check
927 * the size before calculating the aligned size.
928 */
929 availtoend = jo->fifo.size - (jo->fifo.windex & jo->fifo.mask);
88c28735 930 KKASSERT((availtoend & 15) == 0);
82eaef15
MD
931 if (bytes > availtoend)
932 req = bytes + availtoend; /* add pad to end */
933 else
934 req = bytes;
935
936 /*
937 * Next calculate the total available space and see if it is
938 * sufficient. We cannot overwrite previously buffered data
939 * past xindex because otherwise we would not be able to restart
940 * a broken link at the target's last point of commit.
941 */
942 avail = jo->fifo.size - (jo->fifo.windex - jo->fifo.xindex);
943 KKASSERT(avail >= 0 && (avail & 15) == 0);
944
945 if (avail < req) {
946 /* XXX MC_JOURNAL_STOP_IMM */
2281065e 947 jo->flags |= MC_JOURNAL_WWAIT;
3119bac5 948 ++jo->fifostalls;
2281065e
MD
949 tsleep(&jo->fifo.windex, 0, "jwrite", 0);
950 continue;
951 }
82eaef15
MD
952
953 /*
954 * Create a pad record for any dead space and create an incomplete
955 * record for the live space, then return a pointer to the
956 * contiguous buffer space that was requested.
957 *
958 * NOTE: The worker thread will not flush past an incomplete
959 * record, so the reserved space can be filled in at-will. The
960 * journaling code must also be aware that the reserved sections occurring
961 * after this one will also not be written out even if completed
962 * until this one is completed.
432b8263
MD
963 *
964 * The transaction id must accommodate real and potential pad creation.
82eaef15
MD
965 */
966 rawp = (void *)(jo->fifo.membase + (jo->fifo.windex & jo->fifo.mask));
967 if (req != bytes) {
432b8263
MD
968 journal_build_pad(rawp, availtoend, jo->transid);
969 ++jo->transid;
82eaef15
MD
970 rawp = (void *)jo->fifo.membase;
971 }
972 rawp->begmagic = JREC_INCOMPLETEMAGIC; /* updated by abort/commit */
973 rawp->recsize = bytes; /* (unaligned size) */
974 rawp->streamid = streamid | JREC_STREAMCTL_BEGIN;
432b8263
MD
975 rawp->transid = jo->transid;
976 jo->transid += 2;
82eaef15
MD
977
978 /*
979 * Issue a memory barrier to guarantee that the record data has been
980 * properly initialized before we advance the write index and return
981 * a pointer to the reserved record. Otherwise the worker thread
982 * could accidentally run past us.
983 *
984 * Note that stream records are always 16-byte aligned.
985 */
35238fa5 986 cpu_sfence();
82eaef15
MD
987 jo->fifo.windex += (req + 15) & ~15;
988 *rawpp = rawp;
989 return(rawp + 1);
990 }
991 /* not reached */
992 *rawpp = NULL;
993 return(NULL);
994}
995
996/*
143c4f15
MD
997 * Attempt to extend the stream record by <bytes> worth of payload space.
998 *
999 * If it is possible to extend the existing stream record no truncation
1000 * occurs and the record is extended as specified. A pointer to the
1001 * truncation offset within the payload space is returned.
82eaef15 1002 *
143c4f15
MD
1003 * If it is not possible to do this the existing stream record is truncated
1004 * and committed, and a new stream record of size <bytes> is created. A
1005 * pointer to the base of the new stream record's payload space is returned.
82eaef15 1006 *
143c4f15
MD
1007 * *rawpp is set to the new reservation in the case of a new record but
1008 * the caller cannot depend on a comparison with the old rawp to determine if
1009 * this case occurs because we could end up using the same memory FIFO
1010 * offset for the new stream record. Use *newstreamrecp instead.
82eaef15
MD
1011 */
1012static void *
1013journal_extend(struct journal *jo, struct journal_rawrecbeg **rawpp,
1014 int truncbytes, int bytes, int *newstreamrecp)
1015{
1016 struct journal_rawrecbeg *rawp;
1017 int16_t streamid;
1018 int availtoend;
1019 int avail;
1020 int osize;
1021 int nsize;
1022 int wbase;
1023 void *rptr;
1024
1025 *newstreamrecp = 0;
1026 rawp = *rawpp;
1027 osize = (rawp->recsize + 15) & ~15;
1028 nsize = (rawp->recsize + bytes + 15) & ~15;
1029 wbase = (char *)rawp - jo->fifo.membase;
1030
1031 /*
143c4f15
MD
1032 * If the aligned record size does not change we can trivially adjust
1033 * the record size.
82eaef15
MD
1034 */
1035 if (nsize == osize) {
1036 rawp->recsize += bytes;
143c4f15 1037 return((char *)(rawp + 1) + truncbytes);
82eaef15
MD
1038 }
1039
1040 /*
1041 * If the fifo's write index hasn't been modified since we made the
1042 * reservation and we do not hit any boundary conditions, we can
143c4f15 1043 * trivially make the record smaller or larger.
82eaef15
MD
1044 */
1045 if ((jo->fifo.windex & jo->fifo.mask) == wbase + osize) {
1046 availtoend = jo->fifo.size - wbase;
1047 avail = jo->fifo.size - (jo->fifo.windex - jo->fifo.xindex) + osize;
1048 KKASSERT((availtoend & 15) == 0);
1049 KKASSERT((avail & 15) == 0);
1050 if (nsize <= avail && nsize <= availtoend) {
1051 jo->fifo.windex += nsize - osize;
1052 rawp->recsize += bytes;
143c4f15 1053 return((char *)(rawp + 1) + truncbytes);
82eaef15
MD
1054 }
1055 }
1056
1057 /*
1058 * It was not possible to extend the buffer. Commit the current
1059 * buffer and create a new one. We manually clear the BEGIN mark that
1060 * journal_reserve() creates (because this is a continuing record, not
1061 * the start of a new stream).
1062 */
1063 streamid = rawp->streamid & JREC_STREAMID_MASK;
1064 journal_commit(jo, rawpp, truncbytes, 0);
1065 rptr = journal_reserve(jo, rawpp, streamid, bytes);
1066 rawp = *rawpp;
1067 rawp->streamid &= ~JREC_STREAMCTL_BEGIN;
1068 *newstreamrecp = 1;
1069 return(rptr);
1070}
1071
1072/*
1073 * Abort a journal record. If the transaction record represents a stream
1074 * BEGIN and we can reverse the fifo's write index we can simply reverse
1075 * index the entire record, as if it were never reserved in the first place.
1076 *
1077 * Otherwise we set the JREC_STREAMCTL_ABORTED bit and commit the record
1078 * with the payload truncated to 0 bytes.
1079 */
1080static void
1081journal_abort(struct journal *jo, struct journal_rawrecbeg **rawpp)
1082{
1083 struct journal_rawrecbeg *rawp;
1084 int osize;
1085
1086 rawp = *rawpp;
1087 osize = (rawp->recsize + 15) & ~15;
1088
1089 if ((rawp->streamid & JREC_STREAMCTL_BEGIN) &&
1090 (jo->fifo.windex & jo->fifo.mask) ==
1091 (char *)rawp - jo->fifo.membase + osize)
1092 {
1093 jo->fifo.windex -= osize;
1094 *rawpp = NULL;
1095 } else {
1096 rawp->streamid |= JREC_STREAMCTL_ABORTED;
1097 journal_commit(jo, rawpp, 0, 1);
1098 }
1099}
1100
1101/*
1102 * Commit a journal record and potentially truncate it to the specified
1103 * number of payload bytes. If you do not want to truncate the record,
1104 * simply pass -1 for the bytes parameter. Do not pass rawp->recsize, that
1105 * field includes header and trailer and will not be correct. Note that
1106 * passing 0 will truncate the entire data payload of the record.
1107 *
1108 * The logical stream is terminated by this function.
1109 *
1110 * If truncation occurs, and it is not possible to physically optimize the
1111 * memory FIFO due to other threads having reserved space after ours,
1112 * the remaining reserved space will be covered by a pad record.
1113 */
1114static void
1115journal_commit(struct journal *jo, struct journal_rawrecbeg **rawpp,
1116 int bytes, int closeout)
1117{
1118 struct journal_rawrecbeg *rawp;
1119 struct journal_rawrecend *rendp;
1120 int osize;
1121 int nsize;
1122
1123 rawp = *rawpp;
1124 *rawpp = NULL;
1125
1126 KKASSERT((char *)rawp >= jo->fifo.membase &&
1127 (char *)rawp + rawp->recsize <= jo->fifo.membase + jo->fifo.size);
1128 KKASSERT(((intptr_t)rawp & 15) == 0);
1129
1130 /*
1131 * Truncate the record if necessary. If the FIFO write index is still
1132 * at the end of our record we can optimally backindex it. Otherwise
1133 * we have to insert a pad record to cover the dead space.
82eaef15
MD
1134 *
1135 * We calculate osize which is the 16-byte-aligned original recsize.
1136 * We calculate nsize which is the 16-byte-aligned new recsize.
1137 *
1138 * Due to alignment issues or in case the passed truncation bytes is
88c28735
MD
1139 * the same as the original payload, nsize may be equal to osize even
1140 * if the committed bytes are less than the originally reserved bytes.
82eaef15
MD
1141 */
1142 if (bytes >= 0) {
1143 KKASSERT(bytes >= 0 && bytes <= rawp->recsize - sizeof(struct journal_rawrecbeg) - sizeof(struct journal_rawrecend));
1144 osize = (rawp->recsize + 15) & ~15;
1145 rawp->recsize = bytes + sizeof(struct journal_rawrecbeg) +
1146 sizeof(struct journal_rawrecend);
1147 nsize = (rawp->recsize + 15) & ~15;
88c28735 1148 KKASSERT(nsize <= osize);
82eaef15
MD
1149 if (osize == nsize) {
1150 /* do nothing */
1151 } else if ((jo->fifo.windex & jo->fifo.mask) == (char *)rawp - jo->fifo.membase + osize) {
1152 /* we are able to backindex the fifo */
1153 jo->fifo.windex -= osize - nsize;
1154 } else {
1155 /* we cannot backindex the fifo, emplace a pad in the dead space */
432b8263
MD
1156 journal_build_pad((void *)((char *)rawp + nsize), osize - nsize,
1157 rawp->transid + 1);
82eaef15 1158 }
2281065e 1159 }
82eaef15
MD
1160
1161 /*
1162 * Fill in the trailer. Note that unlike pad records, the trailer will
1163 * never overlap the header.
1164 */
1165 rendp = (void *)((char *)rawp +
1166 ((rawp->recsize + 15) & ~15) - sizeof(*rendp));
1167 rendp->endmagic = JREC_ENDMAGIC;
1168 rendp->recsize = rawp->recsize;
1169 rendp->check = 0; /* XXX check word, disabled for now */
1170
1171 /*
1172 * Fill in begmagic last. This will allow the worker thread to proceed.
1173 * Use a memory barrier to guarantee write ordering. Mark the stream
1174 * as terminated if closeout is set. This is the typical case.
1175 */
1176 if (closeout)
1177 rawp->streamid |= JREC_STREAMCTL_END;
35238fa5 1178 cpu_sfence(); /* memory and compiler barrier */
82eaef15
MD
1179 rawp->begmagic = JREC_BEGMAGIC;
1180
1181 journal_commit_wakeup(jo);
1182}
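
/*
 * Illustrative sketch only (not compiled): the reserve/commit pairing the
 * comments above describe.  journal_reserve() hands back a contiguous
 * payload area and leaves the record marked incomplete; the caller fills it
 * in and then either commits (optionally truncating the payload) or aborts.
 */
#if 0
static void
reserve_commit_example(struct journal *jo, const void *data, int bytes)
{
    struct journal_rawrecbeg *rawp;
    void *payload;

    payload = journal_reserve(jo, &rawp, JREC_STREAMID_DISCONT, bytes);
    bcopy(data, payload, bytes);
    journal_commit(jo, &rawp, bytes, 1);    /* close out the logical stream */
}
#endif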
1183
1184/************************************************************************
1185 * TRANSACTION SUPPORT ROUTINES *
1186 ************************************************************************
1187 *
1188 * JRECORD_*() - routines to create subrecord transactions and embed them
1189 * in the logical streams managed by the journal_*() routines.
1190 */
1191
1192static int16_t sid = JREC_STREAMID_JMIN;
1193
1194/*
1195 * Initialize the passed jrecord structure and start a new stream transaction
1196 * by reserving an initial build space in the journal's memory FIFO.
1197 */
1198static void
1199jrecord_init(struct journal *jo, struct jrecord *jrec, int16_t streamid)
1200{
1201 bzero(jrec, sizeof(*jrec));
1202 jrec->jo = jo;
1203 if (streamid < 0) {
1204 streamid = sid++; /* XXX need to track stream ids! */
1205 if (sid == JREC_STREAMID_JMAX)
1206 sid = JREC_STREAMID_JMIN;
1207 }
1208 jrec->streamid = streamid;
1209 jrec->stream_residual = JREC_DEFAULTSIZE;
1210 jrec->stream_reserved = jrec->stream_residual;
1211 jrec->stream_ptr =
1212 journal_reserve(jo, &jrec->rawp, streamid, jrec->stream_reserved);
1213}
1214
1215/*
1216 * Push a recursive record type. All pushes should have matching pops.
1217 * The old parent is returned and the newly pushed record becomes the
1218 * new parent. Note that the old parent's pointer may already be invalid
1219 * or may become invalid if jrecord_write() had to build a new stream
1220 * record, so the caller should not mess with the returned pointer in
1221 * any way other than to save it.
1222 */
1223static
1224struct journal_subrecord *
1225jrecord_push(struct jrecord *jrec, int16_t rectype)
1226{
1227 struct journal_subrecord *save;
1228
1229 save = jrec->parent;
1230 jrec->parent = jrecord_write(jrec, rectype|JMASK_NESTED, 0);
1231 jrec->last = NULL;
1232 KKASSERT(jrec->parent != NULL);
1233 ++jrec->pushcount;
1234 ++jrec->pushptrgood; /* cleared on flush */
1235 return(save);
1236}
1237
1238/*
1239 * Pop a previously pushed sub-transaction. We must set JMASK_LAST
1240 * on the last record written within the subtransaction. If the last
1241 * record written is not accessible or if the subtransaction is empty,
1242 * we must write out a pad record with JMASK_LAST set before popping.
1243 *
1244 * When popping a subtransaction the parent record's recsize field
1245 * will be properly set. If the parent pointer is no longer valid
1246 * (which can occur if the data has already been flushed out to the
1247 * stream), the protocol spec allows us to leave it 0.
1248 *
1249 * The saved parent pointer which we restore may or may not be valid,
1250 * and if not valid may or may not be NULL, depending on the value
1251 * of pushptrgood.
1252 */
1253static void
1254jrecord_pop(struct jrecord *jrec, struct journal_subrecord *save)
1255{
1256 struct journal_subrecord *last;
1257
1258 KKASSERT(jrec->pushcount > 0);
1259 KKASSERT(jrec->residual == 0);
1260
1261 /*
1262 * Set JMASK_LAST on the last record we wrote at the current
1263 * level. If last is NULL we either no longer have access to the
1264 * record or the subtransaction was empty and we must write out a pad
1265 * record.
1266 */
1267 if ((last = jrec->last) == NULL) {
1268 jrecord_write(jrec, JLEAF_PAD|JMASK_LAST, 0);
1269 last = jrec->last; /* reload after possible flush */
1270 } else {
1271 last->rectype |= JMASK_LAST;
1272 }
1273
1274 /*
1275 * pushptrgood tells us how many levels of parent record pointers
1276 * are valid. The jrec only stores the current parent record pointer
1277 * (and it is only valid if pushptrgood != 0). The higher level parent
1278 * record pointers are saved by the routines calling jrecord_push() and
1279 * jrecord_pop(). These pointers may become stale and we determine
1280 * that fact by tracking the count of valid parent pointers with
1281 * pushptrgood. Pointers become invalid when their related stream
1282 * record gets pushed out.
1283 *
b2f7ec6c
MD
1284 * If no pointer is available (the data has already been pushed out),
1285 * then no fixup of e.g. the length field is possible for non-leaf
1286 * nodes. The protocol allows for this situation by placing a larger
1287 * burden on the program scanning the stream on the other end.
1288 *
82eaef15
MD
1289 * [parentA]
1290 * [node X]
1291 * [parentB]
1292 * [node Y]
1293 * [node Z]
1294 * (pop B) see NOTE B
1295 * (pop A) see NOTE A
1296 *
1297 * NOTE B: This pop sets LAST in node Z if the node is still accessible,
1298 * else a PAD record is appended and LAST is set in that.
1299 *
1300 * This pop sets the record size in parentB if parentB is still
1301 * accessible, else the record size is left 0 (the scanner must
1302 * deal with that).
1303 *
1304 * This pop sets the new 'last' record to parentB, the pointer
1305 * to which may or may not still be accessible.
1306 *
1307 * NOTE A: This pop sets LAST in parentB if the node is still accessible,
1308 * else a PAD record is appended and LAST is set in that.
1309 *
1310 * This pop sets the record size in parentA if parentA is still
1311 * accessible, else the record size is left 0 (the scanner must
1312 * deal with that).
1313 *
1314 * This pop sets the new 'last' record to parentA, the pointer
1315 * to which may or may not still be accessible.
1316 *
1317 * Also note that the last record in the stream transaction, which in
1318 * the above example is parentA, does not currently have the LAST bit
1319 * set.
1320 *
1321 * The current parent becomes the last record relative to the
1322 * saved parent passed into us. Its validity is based on
1323 * whether pushptrgood is non-zero prior to decrementing. The saved
1324 * parent becomes the new parent, and its validity is based on whether
1325 * pushptrgood is non-zero after decrementing.
1326 *
1327 * The old jrec->parent may be NULL if it is no longer accessible.
1328 * If pushptrgood is non-zero, however, it is guaranteed to not
1329 * be NULL (since no flush occurred).
1330 */
1331 jrec->last = jrec->parent;
1332 --jrec->pushcount;
1333 if (jrec->pushptrgood) {
1334 KKASSERT(jrec->last != NULL && last != NULL);
1335 if (--jrec->pushptrgood == 0) {
1336 jrec->parent = NULL; /* 'save' contains garbage or NULL */
1337 } else {
1338 KKASSERT(save != NULL);
1339 jrec->parent = save; /* 'save' must not be NULL */
1340 }
1341
1342 /*
1343 * Set the record size in the old parent. 'last' still points to
1344 * the original last record in the subtransaction being popped,
1345 * jrec->last points to the old parent (which became the last
1346 * record relative to the new parent being popped into).
1347 */
1348 jrec->last->recsize = (char *)last + last->recsize - (char *)jrec->last;
1349 } else {
1350 jrec->parent = NULL;
1351 KKASSERT(jrec->last == NULL);
1352 }
1353}
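
/*
 * Illustrative sketch only (not compiled): the push/pop nesting from the
 * NOTE A / NOTE B example above expressed as calls.  The record types are
 * placeholders borrowed from the helpers later in this file; real callers
 * choose types appropriate to the operation being journaled.
 */
#if 0
static void
nesting_example(struct jrecord *jrec)
{
    void *parentA;
    void *parentB;
    uid_t uid = 0;
    gid_t gid = 0;

    parentA = jrecord_push(jrec, JTYPE_VATTR);           /* [parentA]    */
    jrecord_leaf(jrec, JLEAF_UID, &uid, sizeof(uid));     /*   [node X]   */
    parentB = jrecord_push(jrec, JTYPE_VATTR);            /*   [parentB]  */
    jrecord_leaf(jrec, JLEAF_UID, &uid, sizeof(uid));     /*     [node Y] */
    jrecord_leaf(jrec, JLEAF_GID, &gid, sizeof(gid));     /*     [node Z] */
    jrecord_pop(jrec, parentB);                           /* (pop B)      */
    jrecord_pop(jrec, parentA);                           /* (pop A)      */
}
#endif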
1354
b2f7ec6c
MD
1355/*
1356 * Write out a leaf record, including associated data.
1357 */
1358static
1359void
1360jrecord_leaf(struct jrecord *jrec, int16_t rectype, void *ptr, int bytes)
1361{
1362 jrecord_write(jrec, rectype, bytes);
1363 jrecord_data(jrec, ptr, bytes);
b2f7ec6c
MD
1364}
1365
82eaef15
MD
1366/*
1367 * Write a leaf record out and return a pointer to its base. The leaf
1368 * record may contain potentially megabytes of data which is supplied
1369 * in jrecord_data() calls. The exact amount must be specified in this
1370 * call.
b2f7ec6c
MD
1371 *
1372 * THE RETURNED SUBRECORD POINTER IS ONLY VALID IMMEDIATELY AFTER THE
1373 * CALL AND MAY BECOME INVALID AT ANY TIME. ONLY THE PUSH/POP CODE SHOULD
1374 * USE THE RETURN VALUE.
82eaef15
MD
1375 */
1376static
1377struct journal_subrecord *
1378jrecord_write(struct jrecord *jrec, int16_t rectype, int bytes)
1379{
1380 struct journal_subrecord *last;
1381 int pusheditout;
1382
1383 /*
1384 * Try to catch some obvious errors. Nesting records must specify a
1385 * size of 0, and there should be no left-overs from previous operations
1386 * (such as incomplete data writeouts).
1387 */
1388 KKASSERT(bytes == 0 || (rectype & JMASK_NESTED) == 0);
1389 KKASSERT(jrec->residual == 0);
1390
1391 /*
1392 * Check to see if the current stream record has enough room for
1393 * the new subrecord header. If it doesn't we extend the current
1394 * stream record.
1395 *
1396 * This may have the side effect of pushing out the current stream record
1397 * and creating a new one. We must adjust our stream tracking fields
1398 * accordingly.
1399 */
1400 if (jrec->stream_residual < sizeof(struct journal_subrecord)) {
1401 jrec->stream_ptr = journal_extend(jrec->jo, &jrec->rawp,
1402 jrec->stream_reserved - jrec->stream_residual,
1403 JREC_DEFAULTSIZE, &pusheditout);
1404 if (pusheditout) {
143c4f15
MD
1405 /*
1406 * If a pushout occurred, the pushed out stream record was
1407 * truncated as specified and the new record is exactly the
1408 * extension size specified.
1409 */
82eaef15
MD
1410 jrec->stream_reserved = JREC_DEFAULTSIZE;
1411 jrec->stream_residual = JREC_DEFAULTSIZE;
1412 jrec->parent = NULL; /* no longer accessible */
1413 jrec->pushptrgood = 0; /* restored parents in pops no good */
1414 } else {
143c4f15
MD
1415 /*
1416 * If no pushout occurred the stream record is NOT truncated and
1417 * IS extended.
1418 */
82eaef15
MD
1419 jrec->stream_reserved += JREC_DEFAULTSIZE;
1420 jrec->stream_residual += JREC_DEFAULTSIZE;
1421 }
1422 }
1423 last = (void *)jrec->stream_ptr;
1424 last->rectype = rectype;
1425 last->reserved = 0;
b7ef558f
MD
1426
1427 /*
1428 * We may not know the record size for recursive records and the
1429 * header may become unavailable due to limited FIFO space. Write
1430 * -1 to indicate this special case.
1431 */
1432 if ((rectype & JMASK_NESTED) && bytes == 0)
1433 last->recsize = -1;
1434 else
1435 last->recsize = sizeof(struct journal_subrecord) + bytes;
82eaef15
MD
1436 jrec->last = last;
1437 jrec->residual = bytes; /* remaining data to be posted */
1438 jrec->residual_align = -bytes & 7; /* post-data alignment required */
143c4f15
MD
1439 jrec->stream_ptr += sizeof(*last); /* current write pointer */
1440 jrec->stream_residual -= sizeof(*last); /* space remaining in stream */
82eaef15
MD
1441 return(last);
1442}
1443
1444/*
1445 * Write out the data associated with a leaf record. Any number of calls
1446 * to this routine may be made as long as the byte count adds up to the
1447 * amount originally specified in jrecord_write().
1448 *
1449 * The act of writing out the leaf data may result in numerous stream records
1450 * being pushed out. Callers should be aware that even the associated
1451 * subrecord header may become inaccessible due to stream record pushouts.
1452 */
1453static void
1454jrecord_data(struct jrecord *jrec, const void *buf, int bytes)
1455{
1456 int pusheditout;
1457 int extsize;
1458
1459 KKASSERT(bytes >= 0 && bytes <= jrec->residual);
1460
1461 /*
1462 * Push out stream records as long as there is insufficient room to hold
1463 * the remaining data.
1464 */
1465 while (jrec->stream_residual < bytes) {
1466 /*
1467 * Fill in any remaining space in the current stream record.
1468 */
1469 bcopy(buf, jrec->stream_ptr, jrec->stream_residual);
1470 buf = (const char *)buf + jrec->stream_residual;
1471 bytes -= jrec->stream_residual;
1472 /*jrec->stream_ptr += jrec->stream_residual;*/
82eaef15 1473 jrec->residual -= jrec->stream_residual;
9578bde0 1474 jrec->stream_residual = 0;
82eaef15
MD
1475
1476 /*
1477 * Try to extend the current stream record, but no more than 1/4
1478 * the size of the FIFO.
1479 */
1480 extsize = jrec->jo->fifo.size >> 2;
1481 if (extsize > bytes)
1482 extsize = (bytes + 15) & ~15;
1483
1484 jrec->stream_ptr = journal_extend(jrec->jo, &jrec->rawp,
1485 jrec->stream_reserved - jrec->stream_residual,
1486 extsize, &pusheditout);
1487 if (pusheditout) {
1488 jrec->stream_reserved = extsize;
1489 jrec->stream_residual = extsize;
1490 jrec->parent = NULL; /* no longer accessible */
1491 jrec->last = NULL; /* no longer accessible */
1492 jrec->pushptrgood = 0; /* restored parents in pops no good */
1493 } else {
1494 jrec->stream_reserved += extsize;
1495 jrec->stream_residual += extsize;
1496 }
1497 }
1498
1499 /*
1500 * Push out any remaining bytes into the current stream record.
1501 */
1502 if (bytes) {
1503 bcopy(buf, jrec->stream_ptr, bytes);
1504 jrec->stream_ptr += bytes;
1505 jrec->stream_residual -= bytes;
1506 jrec->residual -= bytes;
1507 }
1508
1509 /*
1510 * Handle data alignment requirements for the subrecord. Because the
1511 * stream record's data space is more strictly aligned, it must already
1512 * have sufficient space to hold any subrecord alignment slop.
1513 */
1514 if (jrec->residual == 0 && jrec->residual_align) {
1515 KKASSERT(jrec->residual_align <= jrec->stream_residual);
1516 bzero(jrec->stream_ptr, jrec->residual_align);
1517 jrec->stream_ptr += jrec->residual_align;
1518 jrec->stream_residual -= jrec->residual_align;
1519 jrec->residual_align = 0;
1520 }
1521}
1522
1523/*
9578bde0
MD
1524 * We are finished with the transaction. This closes the transaction created
1525 * by jrecord_init().
1526 *
1527 * NOTE: If abortit is not set then we must be at the top level with no
1528 * residual subrecord data left to output.
1529 *
1530 * If abortit is set then we can be in any state, all pushes will be
1531 * popped and it is ok for there to be residual data. This works
1532 * because the virtual stream itself is truncated. Scanners must deal
1533 * with this situation.
82eaef15
MD
1534 *
1535 * The stream record will be committed or aborted as specified and jrecord
1536 * resources will be cleaned up.
1537 */
1538static void
1539jrecord_done(struct jrecord *jrec, int abortit)
1540{
1541 KKASSERT(jrec->rawp != NULL);
1542
1543 if (abortit) {
1544 journal_abort(jrec->jo, &jrec->rawp);
1545 } else {
1546 KKASSERT(jrec->pushcount == 0 && jrec->residual == 0);
1547 journal_commit(jrec->jo, &jrec->rawp,
1548 jrec->stream_reserved - jrec->stream_residual, 1);
1549 }
1550
1551 /*
1552 * jrec should not be used beyond this point without another init,
1553 * but clean up some fields to ensure that we panic if it is.
1554 *
1555 * Note that jrec->rawp is NULLd out by journal_abort/journal_commit.
1556 */
1557 jrec->jo = NULL;
1558 jrec->stream_ptr = NULL;
1559}
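/*
 * Minimal sketch of the abort path described above: a transaction begun
 * with jrecord_init() can be discarded with jrecord_done(&jrec, 1) even
 * while pushes and residual subrecord data are still outstanding.  The
 * helper is illustrative only.
 */
#if 0
static void
jrecord_example_abort(struct journal *jo)
{
    struct jrecord jrec;

    jrecord_init(jo, &jrec, -1);
    jrecord_push(&jrec, JTYPE_WRITE);
    /* ... underlying operation fails before any data is journaled ... */
    jrecord_done(&jrec, 1);	/* abort: the virtual stream is truncated */
}
#endif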
1560
1561/************************************************************************
b2f7ec6c 1562 * LOW LEVEL RECORD SUPPORT ROUTINES *
1563 ************************************************************************
1564 *
 1565 * These routines create low-level recursive and leaf subrecords representing
1566 * common filesystem structures.
1567 */
1568
1569/*
1570 * Write out a filename path relative to the base of the mount point.
1571 * rectype is typically JLEAF_PATH{1,2,3,4}.
1572 */
1573static void
1574jrecord_write_path(struct jrecord *jrec, int16_t rectype, struct namecache *ncp)
1575{
1576 char buf[64]; /* local buffer if it fits, else malloced */
1577 char *base;
1578 int pathlen;
1579 int index;
1580 struct namecache *scan;
1581
1582 /*
1583 * Pass 1 - figure out the number of bytes required. Include terminating
1584 * \0 on last element and '/' separator on other elements.
1585 */
1586again:
1587 pathlen = 0;
1588 for (scan = ncp;
1589 scan && (scan->nc_flag & NCF_MOUNTPT) == 0;
1590 scan = scan->nc_parent
1591 ) {
1592 pathlen += scan->nc_nlen + 1;
1593 }
1594
1595 if (pathlen <= sizeof(buf))
1596 base = buf;
1597 else
1598 base = malloc(pathlen, M_TEMP, M_INTWAIT);
1599
1600 /*
1601 * Pass 2 - generate the path buffer
1602 */
1603 index = pathlen;
1604 for (scan = ncp;
1605 scan && (scan->nc_flag & NCF_MOUNTPT) == 0;
1606 scan = scan->nc_parent
1607 ) {
1608 if (scan->nc_nlen >= index) {
1609 if (base != buf)
1610 free(base, M_TEMP);
1611 goto again;
1612 }
1613 if (index == pathlen)
1614 base[--index] = 0;
1615 else
1616 base[--index] = '/';
1617 index -= scan->nc_nlen;
1618 bcopy(scan->nc_name, base + index, scan->nc_nlen);
1619 }
1620 jrecord_leaf(jrec, rectype, base + index, pathlen - index);
1621 if (base != buf)
1622 free(base, M_TEMP);
1623}
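/*
 * Illustrative example of the two passes above: for a namecache chain
 * c -> b -> a below the mount point, pass 1 computes pathlen = 6 and
 * pass 2 fills the buffer backwards, yielding "a/b/c" plus the
 * terminating \0 (6 bytes handed to jrecord_leaf()).
 */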
1624
1625/*
1626 * Write out a file attribute structure. While somewhat inefficient, using
1627 * a recursive data structure is the most portable and extensible way.
1628 */
1629static void
1630jrecord_write_vattr(struct jrecord *jrec, struct vattr *vat)
1631{
1632 void *save;
1633
1634 save = jrecord_push(jrec, JTYPE_VATTR);
1635 if (vat->va_type != VNON)
432b8263 1636 jrecord_leaf(jrec, JLEAF_VTYPE, &vat->va_type, sizeof(vat->va_type));
d0887c34 1637 if (vat->va_mode != (mode_t)VNOVAL)
432b8263 1638 jrecord_leaf(jrec, JLEAF_MODES, &vat->va_mode, sizeof(vat->va_mode));
1639 if (vat->va_nlink != VNOVAL)
1640 jrecord_leaf(jrec, JLEAF_NLINK, &vat->va_nlink, sizeof(vat->va_nlink));
1641 if (vat->va_uid != VNOVAL)
1642 jrecord_leaf(jrec, JLEAF_UID, &vat->va_uid, sizeof(vat->va_uid));
1643 if (vat->va_gid != VNOVAL)
1644 jrecord_leaf(jrec, JLEAF_GID, &vat->va_gid, sizeof(vat->va_gid));
1645 if (vat->va_fsid != VNOVAL)
1646 jrecord_leaf(jrec, JLEAF_FSID, &vat->va_fsid, sizeof(vat->va_fsid));
1647 if (vat->va_fileid != VNOVAL)
1648 jrecord_leaf(jrec, JLEAF_INUM, &vat->va_fileid, sizeof(vat->va_fileid));
1649 if (vat->va_size != VNOVAL)
1650 jrecord_leaf(jrec, JLEAF_SIZE, &vat->va_size, sizeof(vat->va_size));
1651 if (vat->va_atime.tv_sec != VNOVAL)
1652 jrecord_leaf(jrec, JLEAF_ATIME, &vat->va_atime, sizeof(vat->va_atime));
1653 if (vat->va_mtime.tv_sec != VNOVAL)
1654 jrecord_leaf(jrec, JLEAF_MTIME, &vat->va_mtime, sizeof(vat->va_mtime));
1655 if (vat->va_ctime.tv_sec != VNOVAL)
1656 jrecord_leaf(jrec, JLEAF_CTIME, &vat->va_ctime, sizeof(vat->va_ctime));
1657 if (vat->va_gen != VNOVAL)
1658 jrecord_leaf(jrec, JLEAF_GEN, &vat->va_gen, sizeof(vat->va_gen));
1659 if (vat->va_flags != VNOVAL)
1660 jrecord_leaf(jrec, JLEAF_FLAGS, &vat->va_flags, sizeof(vat->va_flags));
1661 if (vat->va_rdev != VNOVAL)
1662 jrecord_leaf(jrec, JLEAF_UDEV, &vat->va_rdev, sizeof(vat->va_rdev));
1663#if 0
1664 if (vat->va_filerev != VNOVAL)
1665 jrecord_leaf(jrec, JLEAF_FILEREV, &vat->va_filerev, sizeof(vat->va_filerev));
1666#endif
1667 jrecord_pop(jrec, save);
1668}
1669
1670/*
1671 * Write out the creds used to issue a file operation. If a process is
1672 * available write out additional tracking information related to the
1673 * process.
1674 *
1675 * XXX additional tracking info
1676 * XXX tty line info
1677 */
1678static void
1679jrecord_write_cred(struct jrecord *jrec, struct thread *td, struct ucred *cred)
1680{
1681 void *save;
1682 struct proc *p;
1683
1684 save = jrecord_push(jrec, JTYPE_CRED);
1685 jrecord_leaf(jrec, JLEAF_UID, &cred->cr_uid, sizeof(cred->cr_uid));
1686 jrecord_leaf(jrec, JLEAF_GID, &cred->cr_gid, sizeof(cred->cr_gid));
1687 if (td && (p = td->td_proc) != NULL) {
1688 jrecord_leaf(jrec, JLEAF_PID, &p->p_pid, sizeof(p->p_pid));
1689 jrecord_leaf(jrec, JLEAF_COMM, p->p_comm, sizeof(p->p_comm));
1690 }
1691 jrecord_pop(jrec, save);
1692}
1693
1694/*
1695 * Write out information required to identify a vnode
1696 *
1697 * XXX this needs work. We should write out the inode number as well,
 1698 * and in fact avoid writing out the file path for sequential writes
 1699 * occurring within e.g. a certain period of time.
1700 */
1701static void
1702jrecord_write_vnode_ref(struct jrecord *jrec, struct vnode *vp)
1703{
1704 struct namecache *ncp;
1705
1706 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1707 if ((ncp->nc_flag & (NCF_UNRESOLVED|NCF_DESTROYED)) == 0)
1708 break;
1709 }
1710 if (ncp)
1711 jrecord_write_path(jrec, JLEAF_PATH_REF, ncp);
1712}
1713
1714static void
1715jrecord_write_vnode_link(struct jrecord *jrec, struct vnode *vp,
1716 struct namecache *notncp)
1717{
1718 struct namecache *ncp;
1719
1720 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1721 if (ncp == notncp)
1722 continue;
1723 if ((ncp->nc_flag & (NCF_UNRESOLVED|NCF_DESTROYED)) == 0)
1724 break;
1725 }
1726 if (ncp)
1727 jrecord_write_path(jrec, JLEAF_PATH_REF, ncp);
1728}
1729
1730#if 0
1731/*
1732 * Write out the current contents of the file within the specified
1733 * range. This is typically called from within an UNDO section. A
1734 * locked vnode must be passed.
1735 */
1736static int
1737jrecord_write_filearea(struct jrecord *jrec, struct vnode *vp,
1738 off_t begoff, off_t endoff)
1739{
1740}
1741#endif
1742
1743/*
1744 * Write out the data represented by a pagelist
1745 */
1746static void
1747jrecord_write_pagelist(struct jrecord *jrec, int16_t rectype,
1748 struct vm_page **pglist, int *rtvals, int pgcount,
1749 off_t offset)
1750{
1751 struct msf_buf *msf;
1752 int error;
1753 int b;
1754 int i;
1755
1756 i = 0;
1757 while (i < pgcount) {
1758 /*
1759 * Find the next valid section. Skip any invalid elements
1760 */
1761 if (rtvals[i] != VM_PAGER_OK) {
1762 ++i;
1763 offset += PAGE_SIZE;
1764 continue;
1765 }
1766
1767 /*
1768 * Figure out how big the valid section is, capping I/O at what the
1769 * MSFBUF can represent.
1770 */
1771 b = i;
1772 while (i < pgcount && i - b != XIO_INTERNAL_PAGES &&
1773 rtvals[i] == VM_PAGER_OK
1774 ) {
1775 ++i;
1776 }
1777
1778 /*
1779 * And write it out.
1780 */
1781 if (i - b) {
1782 error = msf_map_pagelist(&msf, pglist + b, i - b, 0);
1783 if (error == 0) {
1784 printf("RECORD PUTPAGES %d\n", msf_buf_bytes(msf));
1785 jrecord_leaf(jrec, JLEAF_SEEKPOS, &offset, sizeof(offset));
1786 jrecord_leaf(jrec, rectype,
1787 msf_buf_kva(msf), msf_buf_bytes(msf));
1788 msf_buf_free(msf);
1789 } else {
1790 printf("jrecord_write_pagelist: mapping failure\n");
1791 }
1792 offset += (off_t)(i - b) << PAGE_SHIFT;
1793 }
1794 }
1795}
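/*
 * Illustrative walk-through of the loop above: with pgcount = 4,
 * rtvals = { OK, ERROR, OK, OK } and a starting offset of 0, two
 * SEEKPOS/data pairs are emitted: one for page 0 at offset 0 and one
 * for pages 2-3 at offset 2 * PAGE_SIZE.
 */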
1796
1797/*
9578bde0 1798 * Write out the data represented by a UIO.
b2f7ec6c 1799 */
1800struct jwuio_info {
1801 struct jrecord *jrec;
1802 int16_t rectype;
1803};
1804
1805static int jrecord_write_uio_callback(void *info, char *buf, int bytes);
1806
1807static void
1808jrecord_write_uio(struct jrecord *jrec, int16_t rectype, struct uio *uio)
1809{
1810 struct jwuio_info info = { jrec, rectype };
1811 int error;
1812
1813 if (uio->uio_segflg != UIO_NOCOPY) {
1814 jrecord_leaf(jrec, JLEAF_SEEKPOS, &uio->uio_offset,
1815 sizeof(uio->uio_offset));
1816 error = msf_uio_iterate(uio, jrecord_write_uio_callback, &info);
1817 if (error)
1818 printf("XXX warning uio iterate failed %d\n", error);
1819 }
1820}
1821
1822static int
1823jrecord_write_uio_callback(void *info_arg, char *buf, int bytes)
1824{
1825 struct jwuio_info *info = info_arg;
1826
1827 jrecord_leaf(info->jrec, info->rectype, buf, bytes);
1828 return(0);
1829}
1830
1831/************************************************************************
1832 * JOURNAL VNOPS *
1833 ************************************************************************
1834 *
1835 * These are function shims replacing the normal filesystem ops. We become
1836 * responsible for calling the underlying filesystem ops. We have the choice
1837 * of executing the underlying op first and then generating the journal entry,
1838 * or starting the journal entry, executing the underlying op, and then
1839 * either completing or aborting it.
1840 *
1841 * The journal is supposed to be a high-level entity, which generally means
 1842 * identifying files by name rather than by inode. Supplying both allows
1843 * the journal to be used both for inode-number-compatible 'mirrors' and
1844 * for simple filesystem replication.
1845 *
1846 * Writes are particularly difficult to deal with because a single write may
1847 * represent a hundred megabyte buffer or more, and both writes and truncations
1848 * require the 'old' data to be written out as well as the new data if the
 1849 * log is reversible. Other issues:
1850 *
1851 * - How to deal with operations on unlinked files (no path available),
1852 * but which may still be filesystem visible due to hard links.
1853 *
1854 * - How to deal with modifications made via a memory map.
1855 *
1856 * - Future cache coherency support will require cache coherency API calls
1857 * both prior to and after the call to the underlying VFS.
1858 *
1859 * ALSO NOTE: We do not have to shim compatibility VOPs like MKDIR which have
1860 * new VFS equivalents (NMKDIR).
1861 */
1862
1863/*
 1864 * Journal vop_setattr { a_vp, a_vap, a_cred, a_td }
1865 */
1866static
1867int
1868journal_setattr(struct vop_setattr_args *ap)
1869{
1870 struct mount *mp;
1871 struct journal *jo;
1872 struct jrecord jrec;
1873 void *save; /* warning, save pointers do not always remain valid */
1874 int error;
1875
1876 error = vop_journal_operate_ap(&ap->a_head);
1877 mp = ap->a_head.a_ops->vv_mount;
1878 if (error == 0) {
1879 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1880 jrecord_init(jo, &jrec, -1);
1881 save = jrecord_push(&jrec, JTYPE_SETATTR);
1882 jrecord_write_cred(&jrec, ap->a_td, ap->a_cred);
1883 jrecord_write_vnode_ref(&jrec, ap->a_vp);
1884 jrecord_write_vattr(&jrec, ap->a_vap);
1885 jrecord_pop(&jrec, save);
1886 jrecord_done(&jrec, 0);
1887 }
1888 }
1889 return (error);
1890}
1891
1892/*
1893 * Journal vop_write { a_vp, a_uio, a_ioflag, a_cred }
1894 */
1895static
1896int
1897journal_write(struct vop_write_args *ap)
1898{
1899 struct mount *mp;
1900 struct journal *jo;
1901 struct jrecord jrec;
1902 struct uio uio_copy;
1903 struct iovec uio_one_iovec;
1904 void *save; /* warning, save pointers do not always remain valid */
1905 int error;
1906
1907 /*
1908 * This is really nasty. UIO's don't retain sufficient information to
1909 * be reusable once they've gone through the VOP chain. The iovecs get
1910 * cleared, so we have to copy the UIO.
1911 *
1912 * XXX fix the UIO code to not destroy iov's during a scan so we can
1913 * reuse the uio over and over again.
1914 *
1915 * XXX UNDO code needs to journal the old data prior to the write.
1916 */
1917 uio_copy = *ap->a_uio;
1918 if (uio_copy.uio_iovcnt == 1) {
1919 uio_one_iovec = ap->a_uio->uio_iov[0];
1920 uio_copy.uio_iov = &uio_one_iovec;
1921 } else {
1922 uio_copy.uio_iov = malloc(uio_copy.uio_iovcnt * sizeof(struct iovec),
1923 M_JOURNAL, M_WAITOK);
1924 bcopy(ap->a_uio->uio_iov, uio_copy.uio_iov,
1925 uio_copy.uio_iovcnt * sizeof(struct iovec));
1926 }
1927
558b8e00 1928 error = vop_journal_operate_ap(&ap->a_head);
1929
1930 /*
1931 * XXX bad hack to figure out the offset for O_APPEND writes (note:
1932 * uio field state after the VFS operation).
1933 */
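 /*
  * Worked example of the back-calculation below: an O_APPEND write of 100
  * bytes to a 500 byte file leaves ap->a_uio->uio_offset at 600 and its
  * uio_resid at 0 after the VOP, while uio_copy still holds the original
  * resid of 100, so the journaled offset becomes 600 - (100 - 0) = 500,
  * the position the data actually landed at.
  */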
1934 uio_copy.uio_offset = ap->a_uio->uio_offset -
1935 (uio_copy.uio_resid - ap->a_uio->uio_resid);
1936
1937 mp = ap->a_head.a_ops->vv_mount;
1938 if (error == 0) {
1939 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1940 jrecord_init(jo, &jrec, -1);
1941 save = jrecord_push(&jrec, JTYPE_WRITE);
1942 jrecord_write_cred(&jrec, NULL, ap->a_cred);
1943 jrecord_write_vnode_ref(&jrec, ap->a_vp);
9578bde0 1944 jrecord_write_uio(&jrec, JLEAF_FILEDATA, &uio_copy);
1945 jrecord_pop(&jrec, save);
1946 jrecord_done(&jrec, 0);
1947 }
1948 }
1949
1950 if (uio_copy.uio_iov != &uio_one_iovec)
1951 free(uio_copy.uio_iov, M_JOURNAL);
1952
1953
1954 return (error);
1955}
1956
1957/*
1958 * Journal vop_fsync { a_vp, a_waitfor, a_td }
1959 */
1960static
1961int
1962journal_fsync(struct vop_fsync_args *ap)
1963{
1964 struct mount *mp;
1965 struct journal *jo;
1966 int error;
1967
1968 error = vop_journal_operate_ap(&ap->a_head);
1969 mp = ap->a_head.a_ops->vv_mount;
1970 if (error == 0) {
1971 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1972 /* XXX synchronize pending journal records */
1973 }
1974 }
1975 return (error);
1976}
1977
1978/*
1979 * Journal vop_putpages { a_vp, a_m, a_count, a_sync, a_rtvals, a_offset }
1980 *
1981 * note: a_count is in bytes.
b2f7ec6c 1982 */
1983static
1984int
1985journal_putpages(struct vop_putpages_args *ap)
1986{
1987 struct mount *mp;
1988 struct journal *jo;
1989 struct jrecord jrec;
1990 void *save; /* warning, save pointers do not always remain valid */
1991 int error;
1992
1993 error = vop_journal_operate_ap(&ap->a_head);
1994 mp = ap->a_head.a_ops->vv_mount;
143c4f15 1995 if (error == 0 && ap->a_count > 0) {
1996 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
1997 jrecord_init(jo, &jrec, -1);
1998 save = jrecord_push(&jrec, JTYPE_PUTPAGES);
b2f7ec6c 1999 jrecord_write_vnode_ref(&jrec, ap->a_vp);
2000 jrecord_write_pagelist(&jrec, JLEAF_FILEDATA,
2001 ap->a_m, ap->a_rtvals, btoc(ap->a_count), ap->a_offset);
2002 jrecord_pop(&jrec, save);
2003 jrecord_done(&jrec, 0);
2004 }
2005 }
2006 return (error);
2007}
2008
2009/*
2010 * Journal vop_setacl { a_vp, a_type, a_aclp, a_cred, a_td }
2011 */
2012static
2013int
2014journal_setacl(struct vop_setacl_args *ap)
2015{
2016 struct mount *mp;
2017 struct journal *jo;
2018 struct jrecord jrec;
2019 void *save; /* warning, save pointers do not always remain valid */
2020 int error;
2021
2022 error = vop_journal_operate_ap(&ap->a_head);
2023 mp = ap->a_head.a_ops->vv_mount;
2024 if (error == 0) {
2025 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2026 jrecord_init(jo, &jrec, -1);
2027 save = jrecord_push(&jrec, JTYPE_SETACL);
2028 jrecord_write_cred(&jrec, ap->a_td, ap->a_cred);
2029 jrecord_write_vnode_ref(&jrec, ap->a_vp);
2030 /* XXX type, aclp */
2031 jrecord_pop(&jrec, save);
2032 jrecord_done(&jrec, 0);
2033 }
2034 }
2035 return (error);
2036}
2037
2038/*
2039 * Journal vop_setextattr { a_vp, a_name, a_uio, a_cred, a_td }
2040 */
2041static
2042int
2043journal_setextattr(struct vop_setextattr_args *ap)
2044{
2045 struct mount *mp;
2046 struct journal *jo;
2047 struct jrecord jrec;
2048 void *save; /* warning, save pointers do not always remain valid */
2049 int error;
2050
2051 error = vop_journal_operate_ap(&ap->a_head);
2052 mp = ap->a_head.a_ops->vv_mount;
2053 if (error == 0) {
2054 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2055 jrecord_init(jo, &jrec, -1);
2056 save = jrecord_push(&jrec, JTYPE_SETEXTATTR);
2057 jrecord_write_cred(&jrec, ap->a_td, ap->a_cred);
2058 jrecord_write_vnode_ref(&jrec, ap->a_vp);
2059 jrecord_leaf(&jrec, JLEAF_ATTRNAME, ap->a_name, strlen(ap->a_name));
2060 jrecord_write_uio(&jrec, JLEAF_FILEDATA, ap->a_uio);
2061 jrecord_pop(&jrec, save);
2062 jrecord_done(&jrec, 0);
2063 }
2064 }
2065 return (error);
2066}
2067
2068/*
2069 * Journal vop_ncreate { a_ncp, a_vpp, a_cred, a_vap }
2070 */
2071static
2072int
2073journal_ncreate(struct vop_ncreate_args *ap)
2074{
2075 struct mount *mp;
2076 struct journal *jo;
2077 struct jrecord jrec;
2078 void *save; /* warning, save pointers do not always remain valid */
2079 int error;
2080
2081 error = vop_journal_operate_ap(&ap->a_head);
2082 mp = ap->a_head.a_ops->vv_mount;
2083 if (error == 0) {
2084 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2085 jrecord_init(jo, &jrec, -1);
2086 save = jrecord_push(&jrec, JTYPE_CREATE);
2087 jrecord_write_cred(&jrec, NULL, ap->a_cred);
2088 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
2089 if (*ap->a_vpp)
2090 jrecord_write_vnode_ref(&jrec, *ap->a_vpp);
d0887c34 2091 jrecord_write_vattr(&jrec, ap->a_vap);
2092 jrecord_pop(&jrec, save);
2093 jrecord_done(&jrec, 0);
2094 }
2095 }
2096 return (error);
2097}
2098
2099/*
2100 * Journal vop_nmknod { a_ncp, a_vpp, a_cred, a_vap }
2101 */
2102static
2103int
2104journal_nmknod(struct vop_nmknod_args *ap)
2105{
2106 struct mount *mp;
2107 struct journal *jo;
2108 struct jrecord jrec;
2109 void *save; /* warning, save pointers do not always remain valid */
2110 int error;
2111
2112 error = vop_journal_operate_ap(&ap->a_head);
2113 mp = ap->a_head.a_ops->vv_mount;
2114 if (error == 0) {
2115 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2116 jrecord_init(jo, &jrec, -1);
2117 save = jrecord_push(&jrec, JTYPE_MKNOD);
2118 jrecord_write_cred(&jrec, NULL, ap->a_cred);
2119 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
2120 jrecord_write_vattr(&jrec, ap->a_vap);
2121 if (*ap->a_vpp)
2122 jrecord_write_vnode_ref(&jrec, *ap->a_vpp);
2123 jrecord_pop(&jrec, save);
2124 jrecord_done(&jrec, 0);
2125 }
2126 }
2127 return (error);
2128}
2129
2130/*
2131 * Journal vop_nlink { a_ncp, a_vp, a_cred }
2132 */
2133static
2134int
2135journal_nlink(struct vop_nlink_args *ap)
2136{
2137 struct mount *mp;
2138 struct journal *jo;
2139 struct jrecord jrec;
2140 void *save; /* warning, save pointers do not always remain valid */
2141 int error;
2142
2143 error = vop_journal_operate_ap(&ap->a_head);
2144 mp = ap->a_head.a_ops->vv_mount;
2145 if (error == 0) {
2146 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2147 jrecord_init(jo, &jrec, -1);
2148 save = jrecord_push(&jrec, JTYPE_LINK);
2149 jrecord_write_cred(&jrec, NULL, ap->a_cred);
2150 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
b2f7ec6c 2151 /* XXX PATH to VP and inode number */
2152 /* XXX this call may not record the correct path when
2153 * multiple paths are available */
2154 jrecord_write_vnode_link(&jrec, ap->a_vp, ap->a_ncp);
2155 jrecord_pop(&jrec, save);
2156 jrecord_done(&jrec, 0);
2157 }
2158 }
2159 return (error);
2160}
2161
2162/*
 2163 * Journal vop_nsymlink { a_ncp, a_vpp, a_cred, a_vap, a_target }
2164 */
2165static
2166int
2167journal_nsymlink(struct vop_nsymlink_args *ap)
2168{
2169 struct mount *mp;
2170 struct journal *jo;
2171 struct jrecord jrec;
2172 void *save; /* warning, save pointers do not always remain valid */
2173 int error;
2174
2175 error = vop_journal_operate_ap(&ap->a_head);
2176 mp = ap->a_head.a_ops->vv_mount;
2177 if (error == 0) {
2178 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2179 jrecord_init(jo, &jrec, -1);
2180 save = jrecord_push(&jrec, JTYPE_SYMLINK);
2181 jrecord_write_cred(&jrec, NULL, ap->a_cred);
2182 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
2183 jrecord_leaf(&jrec, JLEAF_SYMLINKDATA,
2184 ap->a_target, strlen(ap->a_target));
2185 if (*ap->a_vpp)
2186 jrecord_write_vnode_ref(&jrec, *ap->a_vpp);
2187 jrecord_pop(&jrec, save);
2188 jrecord_done(&jrec, 0);
2189 }
2190 }
2191 return (error);
2192}
2193
2194/*
2195 * Journal vop_nwhiteout { a_ncp, a_cred, a_flags }
2196 */
558b8e00
MD
2197static
2198int
2199journal_nwhiteout(struct vop_nwhiteout_args *ap)
2200{
2201 struct mount *mp;
2202 struct journal *jo;
2203 struct jrecord jrec;
2204 void *save; /* warning, save pointers do not always remain valid */
2205 int error;
2206
2207 error = vop_journal_operate_ap(&ap->a_head);
2208 mp = ap->a_head.a_ops->vv_mount;
2209 if (error == 0) {
2210 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2211 jrecord_init(jo, &jrec, -1);
2212 save = jrecord_push(&jrec, JTYPE_WHITEOUT);
2213 jrecord_write_cred(&jrec, NULL, ap->a_cred);
2214 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
2215 jrecord_pop(&jrec, save);
2216 jrecord_done(&jrec, 0);
2217 }
2218 }
2219 return (error);
2220}
2221
2222/*
2223 * Journal vop_nremove { a_ncp, a_cred }
2224 */
2225static
2226int
2227journal_nremove(struct vop_nremove_args *ap)
2228{
2229 struct mount *mp;
2230 struct journal *jo;
2231 struct jrecord jrec;
2232 void *save; /* warning, save pointers do not always remain valid */
2233 int error;
2234
2235 error = vop_journal_operate_ap(&ap->a_head);
2236 mp = ap->a_head.a_ops->vv_mount;
2237 if (error == 0) {
2238 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2239 jrecord_init(jo, &jrec, -1);
2240 save = jrecord_push(&jrec, JTYPE_REMOVE);
2241 jrecord_write_cred(&jrec, NULL, ap->a_cred);
2242 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
2243 jrecord_pop(&jrec, save);
2244 jrecord_done(&jrec, 0);
2245 }
2246 }
2247 return (error);
2248}
2281065e 2249
2250/*
2251 * Journal vop_nmkdir { a_ncp, a_vpp, a_cred, a_vap }
2252 */
2253static
2254int
2255journal_nmkdir(struct vop_nmkdir_args *ap)
2256{
2257 struct mount *mp;
2258 struct journal *jo;
2259 struct jrecord jrec;
2260 void *save; /* warning, save pointers do not always remain valid */
2261 int error;
2262
2281065e 2263 error = vop_journal_operate_ap(&ap->a_head);
2264 mp = ap->a_head.a_ops->vv_mount;
2265 if (error == 0) {
2266 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2267 jrecord_init(jo, &jrec, -1);
2268 if (jo->flags & MC_JOURNAL_WANT_REVERSABLE) {
2269 save = jrecord_push(&jrec, JTYPE_UNDO);
2270 /* XXX undo operations */
2271 jrecord_pop(&jrec, save);
2272 }
2273#if 0
2274 if (jo->flags & MC_JOURNAL_WANT_AUDIT) {
2275 jrecord_write_audit(&jrec);
2276 }
2277#endif
2278 save = jrecord_push(&jrec, JTYPE_MKDIR);
2279 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
b2f7ec6c 2280 jrecord_write_cred(&jrec, NULL, ap->a_cred);
82eaef15 2281 jrecord_write_vattr(&jrec, ap->a_vap);
2282 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
2283 if (*ap->a_vpp)
2284 jrecord_write_vnode_ref(&jrec, *ap->a_vpp);
2285 jrecord_pop(&jrec, save);
2286 jrecord_done(&jrec, 0);
2287 }
2288 }
2281065e 2289 return (error);
2290}
2291
2292/*
2293 * Journal vop_nrmdir { a_ncp, a_cred }
2294 */
2295static
2296int
2297journal_nrmdir(struct vop_nrmdir_args *ap)
2298{
2299 struct mount *mp;
2300 struct journal *jo;
2301 struct jrecord jrec;
2302 void *save; /* warning, save pointers do not always remain valid */
2303 int error;
2304
2305 error = vop_journal_operate_ap(&ap->a_head);
2306 mp = ap->a_head.a_ops->vv_mount;
2307 if (error == 0) {
2308 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2309 jrecord_init(jo, &jrec, -1);
2310 save = jrecord_push(&jrec, JTYPE_RMDIR);
2311 jrecord_write_cred(&jrec, NULL, ap->a_cred);
2312 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_ncp);
2313 jrecord_pop(&jrec, save);
2314 jrecord_done(&jrec, 0);
2315 }
2316 }
2317 return (error);
2318}
2319
2320/*
2321 * Journal vop_nrename { a_fncp, a_tncp, a_cred }
2322 */
2323static
2324int
2325journal_nrename(struct vop_nrename_args *ap)
2326{
2327 struct mount *mp;
2328 struct journal *jo;
2329 struct jrecord jrec;
2330 void *save; /* warning, save pointers do not always remain valid */
2331 int error;
2332
2333 error = vop_journal_operate_ap(&ap->a_head);
2334 mp = ap->a_head.a_ops->vv_mount;
2335 if (error == 0) {
2336 TAILQ_FOREACH(jo, &mp->mnt_jlist, jentry) {
2337 jrecord_init(jo, &jrec, -1);
2338 save = jrecord_push(&jrec, JTYPE_RENAME);
2339 jrecord_write_cred(&jrec, NULL, ap->a_cred);
2340 jrecord_write_path(&jrec, JLEAF_PATH1, ap->a_fncp);
2341 jrecord_write_path(&jrec, JLEAF_PATH2, ap->a_tncp);
2342 jrecord_pop(&jrec, save);
2343 jrecord_done(&jrec, 0);
2344 }
2345 }
2346 return (error);
2347}
2348