kernel - Adjust UFS and HAMMER to use uiomovebp()
[dragonfly.git] / sys/vfs/hammer/hammer_vnops.c
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.102 2008/10/16 17:24:16 dillon Exp $
35  */
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/fcntl.h>
41 #include <sys/namecache.h>
42 #include <sys/vnode.h>
43 #include <sys/lockf.h>
44 #include <sys/event.h>
45 #include <sys/stat.h>
46 #include <sys/dirent.h>
47 #include <sys/file.h>
48 #include <vm/vm_extern.h>
49 #include <vm/swap_pager.h>
50 #include <vfs/fifofs/fifo.h>
51
52 #include "hammer.h"
53
54 /*
55  * USERFS VNOPS
56  */
57 /*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
58 static int hammer_vop_fsync(struct vop_fsync_args *);
59 static int hammer_vop_read(struct vop_read_args *);
60 static int hammer_vop_write(struct vop_write_args *);
61 static int hammer_vop_access(struct vop_access_args *);
62 static int hammer_vop_advlock(struct vop_advlock_args *);
63 static int hammer_vop_close(struct vop_close_args *);
64 static int hammer_vop_ncreate(struct vop_ncreate_args *);
65 static int hammer_vop_getattr(struct vop_getattr_args *);
66 static int hammer_vop_nresolve(struct vop_nresolve_args *);
67 static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
68 static int hammer_vop_nlink(struct vop_nlink_args *);
69 static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
70 static int hammer_vop_nmknod(struct vop_nmknod_args *);
71 static int hammer_vop_open(struct vop_open_args *);
72 static int hammer_vop_print(struct vop_print_args *);
73 static int hammer_vop_readdir(struct vop_readdir_args *);
74 static int hammer_vop_readlink(struct vop_readlink_args *);
75 static int hammer_vop_nremove(struct vop_nremove_args *);
76 static int hammer_vop_nrename(struct vop_nrename_args *);
77 static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
78 static int hammer_vop_markatime(struct vop_markatime_args *);
79 static int hammer_vop_setattr(struct vop_setattr_args *);
80 static int hammer_vop_strategy(struct vop_strategy_args *);
81 static int hammer_vop_bmap(struct vop_bmap_args *ap);
82 static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
83 static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
84 static int hammer_vop_ioctl(struct vop_ioctl_args *);
85 static int hammer_vop_mountctl(struct vop_mountctl_args *);
86 static int hammer_vop_kqfilter (struct vop_kqfilter_args *);
87
88 static int hammer_vop_fifoclose (struct vop_close_args *);
89 static int hammer_vop_fiforead (struct vop_read_args *);
90 static int hammer_vop_fifowrite (struct vop_write_args *);
91 static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
92
93 struct vop_ops hammer_vnode_vops = {
94         .vop_default =          vop_defaultop,
95         .vop_fsync =            hammer_vop_fsync,
96         .vop_getpages =         vop_stdgetpages,
97         .vop_putpages =         vop_stdputpages,
98         .vop_read =             hammer_vop_read,
99         .vop_write =            hammer_vop_write,
100         .vop_access =           hammer_vop_access,
101         .vop_advlock =          hammer_vop_advlock,
102         .vop_close =            hammer_vop_close,
103         .vop_ncreate =          hammer_vop_ncreate,
104         .vop_getattr =          hammer_vop_getattr,
105         .vop_inactive =         hammer_vop_inactive,
106         .vop_reclaim =          hammer_vop_reclaim,
107         .vop_nresolve =         hammer_vop_nresolve,
108         .vop_nlookupdotdot =    hammer_vop_nlookupdotdot,
109         .vop_nlink =            hammer_vop_nlink,
110         .vop_nmkdir =           hammer_vop_nmkdir,
111         .vop_nmknod =           hammer_vop_nmknod,
112         .vop_open =             hammer_vop_open,
113         .vop_pathconf =         vop_stdpathconf,
114         .vop_print =            hammer_vop_print,
115         .vop_readdir =          hammer_vop_readdir,
116         .vop_readlink =         hammer_vop_readlink,
117         .vop_nremove =          hammer_vop_nremove,
118         .vop_nrename =          hammer_vop_nrename,
119         .vop_nrmdir =           hammer_vop_nrmdir,
120         .vop_markatime =        hammer_vop_markatime,
121         .vop_setattr =          hammer_vop_setattr,
122         .vop_bmap =             hammer_vop_bmap,
123         .vop_strategy =         hammer_vop_strategy,
124         .vop_nsymlink =         hammer_vop_nsymlink,
125         .vop_nwhiteout =        hammer_vop_nwhiteout,
126         .vop_ioctl =            hammer_vop_ioctl,
127         .vop_mountctl =         hammer_vop_mountctl,
128         .vop_kqfilter =         hammer_vop_kqfilter
129 };
130
131 struct vop_ops hammer_spec_vops = {
132         .vop_default =          vop_defaultop,
133         .vop_fsync =            hammer_vop_fsync,
134         .vop_read =             vop_stdnoread,
135         .vop_write =            vop_stdnowrite,
136         .vop_access =           hammer_vop_access,
137         .vop_close =            hammer_vop_close,
138         .vop_markatime =        hammer_vop_markatime,
139         .vop_getattr =          hammer_vop_getattr,
140         .vop_inactive =         hammer_vop_inactive,
141         .vop_reclaim =          hammer_vop_reclaim,
142         .vop_setattr =          hammer_vop_setattr
143 };
144
145 struct vop_ops hammer_fifo_vops = {
146         .vop_default =          fifo_vnoperate,
147         .vop_fsync =            hammer_vop_fsync,
148         .vop_read =             hammer_vop_fiforead,
149         .vop_write =            hammer_vop_fifowrite,
150         .vop_access =           hammer_vop_access,
151         .vop_close =            hammer_vop_fifoclose,
152         .vop_markatime =        hammer_vop_markatime,
153         .vop_getattr =          hammer_vop_getattr,
154         .vop_inactive =         hammer_vop_inactive,
155         .vop_reclaim =          hammer_vop_reclaim,
156         .vop_setattr =          hammer_vop_setattr,
157         .vop_kqfilter =         hammer_vop_fifokqfilter
158 };
159
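/*
 * Helper: post a kqueue event on the vnode's knote list, but only when
 * one or more event flags are actually pending.
 */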
160 static __inline
161 void
162 hammer_knote(struct vnode *vp, int flags)
163 {
164         if (flags)
165                 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
166 }
167
168 #ifdef DEBUG_TRUNCATE
169 struct hammer_inode *HammerTruncIp;
170 #endif
171
172 static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
173                            struct vnode *dvp, struct ucred *cred,
174                            int flags, int isdir);
175 static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
176 static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
177
178 #if 0
179 static
180 int
181 hammer_vop_vnoperate(struct vop_generic_args *ap)
182 {
183         return (VOCALL(&hammer_vnode_vops, ap));
184 }
185 #endif
186
187 /*
188  * hammer_vop_fsync { vp, waitfor }
189  *
190  * fsync() an inode to disk and wait for it to be completely committed
191  * such that the information would not be undone if a crash occurred after
192  * return.
193  *
194  * NOTE: HAMMER's fsync()s are going to remain expensive until we implement
195  *       a REDO log.  A sysctl is provided to relax HAMMER's fsync()
196  *       operation.
197  *
198  *       Ultimately the combination of a REDO log and use of fast storage
199  *       to front-end cluster caches will make fsync fast, but it isn't
200  *       here yet.  And, in any case, we need real transactional
201  *       all-or-nothing features which are not restricted to a single file.
202  */
203 static
204 int
205 hammer_vop_fsync(struct vop_fsync_args *ap)
206 {
207         hammer_inode_t ip = VTOI(ap->a_vp);
208         hammer_mount_t hmp = ip->hmp;
209         int waitfor = ap->a_waitfor;
210         int mode;
211
212         lwkt_gettoken(&hmp->fs_token);
213
214         /*
215          * Fsync rule relaxation (default is either full synchronous flush
216          * or REDO semantics with synchronous flush).
217          */
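        /*
         * The relaxation level is selected by the hammer_fsync_mode
         * tunable (modes 0-4 handled below; any other value falls
         * through to the default case, a relaxed asynchronous flush).
         */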
218         if (ap->a_flags & VOP_FSYNC_SYSCALL) {
219                 switch(hammer_fsync_mode) {
220                 case 0:
221 mode0:
222                         /* no REDO, full synchronous flush */
223                         goto skip;
224                 case 1:
225 mode1:
226                         /* no REDO, full asynchronous flush */
227                         if (waitfor == MNT_WAIT)
228                                 waitfor = MNT_NOWAIT;
229                         goto skip;
230                 case 2:
231                         /* REDO semantics, synchronous flush */
232                         if (hmp->version < HAMMER_VOL_VERSION_FOUR)
233                                 goto mode0;
234                         mode = HAMMER_FLUSH_UNDOS_AUTO;
235                         break;
236                 case 3:
237                         /* REDO semantics, relaxed asynchronous flush */
238                         if (hmp->version < HAMMER_VOL_VERSION_FOUR)
239                                 goto mode1;
240                         mode = HAMMER_FLUSH_UNDOS_RELAXED;
241                         if (waitfor == MNT_WAIT)
242                                 waitfor = MNT_NOWAIT;
243                         break;
244                 case 4:
245                         /* ignore the fsync() system call */
246                         lwkt_reltoken(&hmp->fs_token);
247                         return(0);
248                 default:
249                         /* we have to do something */
250                         mode = HAMMER_FLUSH_UNDOS_RELAXED;
251                         if (waitfor == MNT_WAIT)
252                                 waitfor = MNT_NOWAIT;
253                         break;
254                 }
255
256                 /*
257                  * Fast fsync only needs to flush the UNDO/REDO fifo if
258                  * HAMMER_INODE_REDO is non-zero and the only modifications
259                  * made to the file are writes or write-extends.
260                  */
261                 if ((ip->flags & HAMMER_INODE_REDO) &&
262                     (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0
263                 ) {
264                         ++hammer_count_fsyncs;
265                         hammer_flusher_flush_undos(hmp, mode);
266                         ip->redo_count = 0;
267                         lwkt_reltoken(&hmp->fs_token);
268                         return(0);
269                 }
270
271                 /*
272                  * REDO is enabled by fsync(), the idea being we really only
273                  * want to lay down REDO records when programs are using
274                  * fsync() heavily.  The first fsync() on the file starts
275                  * the gravy train going and later fsync()s keep it hot by
276                  * resetting the redo_count.
277                  *
278                  * We weren't running REDOs before now so we have to fall
279                  * through and do a full fsync of what we have.
280                  */
281                 if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
282                     (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
283                         ip->flags |= HAMMER_INODE_REDO;
284                         ip->redo_count = 0;
285                 }
286         }
287 skip:
288
289         /*
290          * Do a full flush sequence.
291          *
292          * Attempt to release the vnode while waiting for the inode to
293          * finish flushing.  This can really mess up inactive->reclaim
294          * sequences so only do it if the vnode is active.
295          */
296         ++hammer_count_fsyncs;
297         vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
298         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
299         if (waitfor == MNT_WAIT) {
300                 if ((ap->a_vp->v_flag & VINACTIVE) == 0)
301                         vn_unlock(ap->a_vp);
302                 hammer_wait_inode(ip);
303                 if ((ap->a_vp->v_flag & VINACTIVE) == 0)
304                         vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
305         }
306         lwkt_reltoken(&hmp->fs_token);
307         return (ip->error);
308 }
309
310 /*
311  * hammer_vop_read { vp, uio, ioflag, cred }
312  *
313  * MPSAFE (for the cache-safe case; does not require fs_token)
314  */
315 static
316 int
317 hammer_vop_read(struct vop_read_args *ap)
318 {
319         struct hammer_transaction trans;
320         hammer_inode_t ip;
321         hammer_mount_t hmp;
322         off_t offset;
323         struct buf *bp;
324         struct uio *uio;
325         int error;
326         int n;
327         int seqcount;
328         int ioseqcount;
329         int blksize;
330         int bigread;
331         int got_fstoken;
332
333         if (ap->a_vp->v_type != VREG)
334                 return (EINVAL);
335         ip = VTOI(ap->a_vp);
336         hmp = ip->hmp;
337         error = 0;
338         uio = ap->a_uio;
339
340         /*
341          * Allow the UIO's size to override the sequential heuristic.
342          */
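        /*
         * ioseqcount is the read-ahead hint carried in the upper bits
         * of a_ioflag; use whichever of it or the UIO-derived estimate
         * is larger.
         */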
343         blksize = hammer_blocksize(uio->uio_offset);
344         seqcount = (uio->uio_resid + (BKVASIZE - 1)) / BKVASIZE;
345         ioseqcount = (ap->a_ioflag >> 16);
346         if (seqcount < ioseqcount)
347                 seqcount = ioseqcount;
348
349         /*
350          * If reading or writing a huge amount of data we have to break
351          * atomicity and allow the operation to be interrupted by a signal
352          * or it can DOS the machine.
353          */
354         bigread = (uio->uio_resid > 100 * 1024 * 1024);
355         got_fstoken = 0;
356
357         /*
358          * Access the data typically in HAMMER_BUFSIZE blocks via the
359          * buffer cache, but HAMMER may use a variable block size based
360          * on the offset.
361          *
362          * XXX Temporary hack, delay the start transaction while we remain
363          *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
364          *     locked-shared.
365          */
366         while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
367                 int64_t base_offset;
368                 int64_t file_limit;
369
370                 blksize = hammer_blocksize(uio->uio_offset);
371                 offset = (int)uio->uio_offset & (blksize - 1);
372                 base_offset = uio->uio_offset - offset;
373
374                 if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
375                         break;
376
377                 /*
378                  * MPSAFE
379                  */
380                 bp = getblk(ap->a_vp, base_offset, blksize, 0, 0);
381                 if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == B_CACHE) {
382                         bp->b_flags &= ~B_AGE;
383                         error = 0;
384                         goto skip;
385                 }
386                 if (ap->a_ioflag & IO_NRDELAY) {
387                         bqrelse(bp);
388                         return (EWOULDBLOCK);
389                 }
390
391                 /*
392                  * MPUNSAFE
393                  */
394                 if (got_fstoken == 0) {
395                         lwkt_gettoken(&hmp->fs_token);
396                         got_fstoken = 1;
397                         hammer_start_transaction(&trans, ip->hmp);
398                 }
399
400                 /*
401                  * NOTE: A valid bp has already been acquired, but was not
402                  *       B_CACHE.
403                  */
404                 if (hammer_cluster_enable) {
405                         /*
406                          * Use file_limit to prevent cluster_read() from
407                          * creating buffers of the wrong block size past
408                          * the demarc.
409                          */
410                         file_limit = ip->ino_data.size;
411                         if (base_offset < HAMMER_XDEMARC &&
412                             file_limit > HAMMER_XDEMARC) {
413                                 file_limit = HAMMER_XDEMARC;
414                         }
415                         error = cluster_readx(ap->a_vp,
416                                              file_limit, base_offset,
417                                              blksize, uio->uio_resid,
418                                              seqcount * BKVASIZE, &bp);
419                 } else {
420                         error = breadnx(ap->a_vp, base_offset, blksize,
421                                         NULL, NULL, 0, &bp);
422                 }
423                 if (error) {
424                         brelse(bp);
425                         break;
426                 }
427 skip:
428                 if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
429                         kprintf("doff %016jx read file %016jx@%016jx\n",
430                                 (intmax_t)bp->b_bio2.bio_offset,
431                                 (intmax_t)ip->obj_id,
432                                 (intmax_t)bp->b_loffset);
433                 }
434                 bp->b_flags &= ~B_IODEBUG;
435                 if (blksize == HAMMER_XBUFSIZE)
436                         bp->b_flags |= B_CLUSTEROK;
437
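                /*
                 * Figure out how much we can copy out of this buffer:
                 * limited by the block size, the remaining uio, and the
                 * file EOF.
                 */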
438                 n = blksize - offset;
439                 if (n > uio->uio_resid)
440                         n = uio->uio_resid;
441                 if (n > ip->ino_data.size - uio->uio_offset)
442                         n = (int)(ip->ino_data.size - uio->uio_offset);
443                 if (got_fstoken)
444                         lwkt_reltoken(&hmp->fs_token);
445
446                 /*
447                  * Set B_AGE, data has a lower priority than meta-data.
448                  *
449                  * Use a hold/unlock/drop sequence to run the uiomove
450                  * with the buffer unlocked, avoiding deadlocks against
451                  * read()s on mmap()'d spaces.
452                  */
453                 bp->b_flags |= B_AGE;
454                 error = uiomovebp(bp, (char *)bp->b_data + offset, n, uio);
455                 bqrelse(bp);
456
457                 if (got_fstoken)
458                         lwkt_gettoken(&hmp->fs_token);
459
460                 if (error)
461                         break;
462                 hammer_stats_file_read += n;
463         }
464
465         /*
466          * Try to update the atime with just the inode lock for maximum
467          * concurrency.  If we can't shortcut it we have to get the full
468          * blown transaction.
469          */
470         if (got_fstoken == 0 && hammer_update_atime_quick(ip) < 0) {
471                 lwkt_gettoken(&hmp->fs_token);
472                 got_fstoken = 1;
473                 hammer_start_transaction(&trans, ip->hmp);
474         }
475
476         if (got_fstoken) {
477                 if ((ip->flags & HAMMER_INODE_RO) == 0 &&
478                     (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
479                         ip->ino_data.atime = trans.time;
480                         hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
481                 }
482                 hammer_done_transaction(&trans);
483                 lwkt_reltoken(&hmp->fs_token);
484         }
485         return (error);
486 }
487
488 /*
489  * hammer_vop_write { vp, uio, ioflag, cred }
490  */
491 static
492 int
493 hammer_vop_write(struct vop_write_args *ap)
494 {
495         struct hammer_transaction trans;
496         struct hammer_inode *ip;
497         hammer_mount_t hmp;
498         thread_t td;
499         struct uio *uio;
500         int offset;
501         off_t base_offset;
502         int64_t cluster_eof;
503         struct buf *bp;
504         int kflags;
505         int error;
506         int n;
507         int flags;
508         int seqcount;
509         int bigwrite;
510
511         if (ap->a_vp->v_type != VREG)
512                 return (EINVAL);
513         ip = VTOI(ap->a_vp);
514         hmp = ip->hmp;
515         error = 0;
516         kflags = 0;
517         seqcount = ap->a_ioflag >> 16;
518
519         if (ip->flags & HAMMER_INODE_RO)
520                 return (EROFS);
521
522         /*
523          * Create a transaction to cover the operations we perform.
524          */
525         lwkt_gettoken(&hmp->fs_token);
526         hammer_start_transaction(&trans, hmp);
527         uio = ap->a_uio;
528
529         /*
530          * Check append mode
531          */
532         if (ap->a_ioflag & IO_APPEND)
533                 uio->uio_offset = ip->ino_data.size;
534
535         /*
536          * Check for illegal write offsets.  Valid range is 0...2^63-1.
537          *
538          * NOTE: the base_off assignment is required to work around what
539          * I consider to be a GCC-4 optimization bug.
540          */
541         if (uio->uio_offset < 0) {
542                 hammer_done_transaction(&trans);
543                 lwkt_reltoken(&hmp->fs_token);
544                 return (EFBIG);
545         }
546         base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
547         if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
548                 hammer_done_transaction(&trans);
549                 lwkt_reltoken(&hmp->fs_token);
550                 return (EFBIG);
551         }
552
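        /*
         * Enforce the per-process file size resource limit
         * (RLIMIT_FSIZE), delivering SIGXFSZ if the write would
         * exceed it.
         */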
553         if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
554             base_offset > td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
555                 hammer_done_transaction(&trans);
556                 lwkt_reltoken(&hmp->fs_token);
557                 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
558                 return (EFBIG);
559         }
560
561         /*
562          * If reading or writing a huge amount of data we have to break
563          * atomicity and allow the operation to be interrupted by a signal
564          * or it can DOS the machine.
565          *
566          * Preset redo_count so we stop generating REDOs earlier if the
567          * limit is exceeded.
568          */
569         bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
570         if ((ip->flags & HAMMER_INODE_REDO) &&
571             ip->redo_count < hammer_limit_redo) {
572                 ip->redo_count += uio->uio_resid;
573         }
574
575         /*
576          * Access the data typically in HAMMER_BUFSIZE blocks via the
577          * buffer cache, but HAMMER may use a variable block size based
578          * on the offset.
579          */
580         while (uio->uio_resid > 0) {
581                 int fixsize = 0;
582                 int blksize;
583                 int blkmask;
584                 int trivial;
585                 int endofblk;
586                 off_t nsize;
587
588                 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
589                         break;
590                 if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
591                         break;
592
593                 blksize = hammer_blocksize(uio->uio_offset);
594
595                 /*
596                  * Do not allow HAMMER to blow out the buffer cache.  Very
597                  * large UIOs can lock out other processes due to bwillwrite()
598                  * mechanics.
599                  *
600                  * The hammer inode is not locked during these operations.
601                  * The vnode is locked which can interfere with the pageout
602                  * daemon for non-UIO_NOCOPY writes but should not interfere
603                  * with the buffer cache.  Even so, we cannot afford to
604                  * allow the pageout daemon to build up too many dirty buffer
605                  * cache buffers.
606                  *
607                  * Only call this if we aren't being recursively called from
608                  * a virtual disk device (vn), else we may deadlock.
609                  */
610                 if ((ap->a_ioflag & IO_RECURSE) == 0)
611                         bwillwrite(blksize);
612
613                 /*
614                  * Control the number of pending records associated with
615                  * this inode.  If too many have accumulated start a
616                  * flush.  Try to maintain a pipeline with the flusher.
617                  *
618                  * NOTE: It is possible for other sources to grow the
619                  *       records but not necessarily issue another flush,
620                  *       so use a timeout and ensure that a re-flush occurs.
621                  */
622                 if (ip->rsv_recs >= hammer_limit_inode_recs) {
623                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
624                         while (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
625                                 ip->flags |= HAMMER_INODE_RECSW;
626                                 tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
627                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
628                         }
629                 }
630
631 #if 0
632                 /*
633                  * Do not allow HAMMER to blow out system memory by
634                  * accumulating too many records.   Records are so well
635                  * decoupled from the buffer cache that it is possible
636                  * for userland to push data out to the media via
637                  * direct-write, but build up the records queued to the
638                  * backend faster than the backend can flush them out.
639                  * HAMMER has hit its write limit but the frontend has
640                  * no pushback to slow it down.
641                  */
642                 if (hmp->rsv_recs > hammer_limit_recs / 2) {
643                         /*
644                          * Get the inode on the flush list
645                          */
646                         if (ip->rsv_recs >= 64)
647                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
648                         else if (ip->rsv_recs >= 16)
649                                 hammer_flush_inode(ip, 0);
650
651                         /*
652                          * Keep the flusher going if the system keeps
653                          * queueing records.
654                          */
655                         delta = hmp->count_newrecords -
656                                 hmp->last_newrecords;
657                         if (delta < 0 || delta > hammer_limit_recs / 2) {
658                                 hmp->last_newrecords = hmp->count_newrecords;
659                                 hammer_sync_hmp(hmp, MNT_NOWAIT);
660                         }
661
662                         /*
663                          * If we have gotten behind, start slowing
664                          * down the writers.
665                          */
666                         delta = (hmp->rsv_recs - hammer_limit_recs) *
667                                 hz / hammer_limit_recs;
668                         if (delta > 0)
669                                 tsleep(&trans, 0, "hmrslo", delta);
670                 }
671 #endif
672
673                 /*
674                  * Calculate the blocksize at the current offset and figure
675                  * out how much we can actually write.
676                  */
677                 blkmask = blksize - 1;
678                 offset = (int)uio->uio_offset & blkmask;
679                 base_offset = uio->uio_offset & ~(int64_t)blkmask;
680                 n = blksize - offset;
681                 if (n > uio->uio_resid) {
682                         n = uio->uio_resid;
683                         endofblk = 0;
684                 } else {
685                         endofblk = 1;
686                 }
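                /*
                 * If the write extends the file, grow the VM/buffer
                 * view of the file first with nvextendbuf().  Remember
                 * (fixsize) that we may have to truncate it back on
                 * error.
                 */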
687                 nsize = uio->uio_offset + n;
688                 if (nsize > ip->ino_data.size) {
689                         if (uio->uio_offset > ip->ino_data.size)
690                                 trivial = 0;
691                         else
692                                 trivial = 1;
693                         nvextendbuf(ap->a_vp,
694                                     ip->ino_data.size,
695                                     nsize,
696                                     hammer_blocksize(ip->ino_data.size),
697                                     hammer_blocksize(nsize),
698                                     hammer_blockoff(ip->ino_data.size),
699                                     hammer_blockoff(nsize),
700                                     trivial);
701                         fixsize = 1;
702                         kflags |= NOTE_EXTEND;
703                 }
704
705                 if (uio->uio_segflg == UIO_NOCOPY) {
706                         /*
707                          * Issuing a write with the same data backing the
708                          * buffer.  Instantiate the buffer to collect the
709                          * backing vm pages, then read-in any missing bits.
710                          *
711                          * This case is used by vop_stdputpages().
712                          */
713                         bp = getblk(ap->a_vp, base_offset,
714                                     blksize, GETBLK_BHEAVY, 0);
715                         if ((bp->b_flags & B_CACHE) == 0) {
716                                 bqrelse(bp);
717                                 error = bread(ap->a_vp, base_offset,
718                                               blksize, &bp);
719                         }
720                 } else if (offset == 0 && uio->uio_resid >= blksize) {
721                         /*
722                          * Even though we are entirely overwriting the buffer
723                          * we may still have to zero it out to avoid a 
724                          * mmap/write visibility issue.
725                          */
726                         bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
727                         if ((bp->b_flags & B_CACHE) == 0)
728                                 vfs_bio_clrbuf(bp);
729                 } else if (base_offset >= ip->ino_data.size) {
730                         /*
731                          * If the base offset of the buffer is beyond the
732                          * file EOF, we don't have to issue a read.
733                          */
734                         bp = getblk(ap->a_vp, base_offset,
735                                     blksize, GETBLK_BHEAVY, 0);
736                         vfs_bio_clrbuf(bp);
737                 } else {
738                         /*
739                          * Partial overwrite, read in any missing bits then
740                          * replace the portion being written.
741                          */
742                         error = bread(ap->a_vp, base_offset, blksize, &bp);
743                         if (error == 0)
744                                 bheavy(bp);
745                 }
746                 if (error == 0) {
747                         lwkt_reltoken(&hmp->fs_token);
748                         error = uiomovebp(bp, bp->b_data + offset, n, uio);
749                         lwkt_gettoken(&hmp->fs_token);
750                 }
751
752                 /*
753                  * Generate REDO records if enabled and redo_count will not
754                  * exceed the limit.
755                  *
756                  * If redo_count exceeds the limit we stop generating records
757                  * and clear HAMMER_INODE_REDO.  This will cause the next
758                  * fsync() to do a full meta-data sync instead of just an
759                  * UNDO/REDO fifo update.
760                  *
761                  * When clearing HAMMER_INODE_REDO any pre-existing REDOs
762                  * will still be tracked.  The tracks will be terminated
763                  * when the related meta-data (including possible data
764                  * modifications which are not tracked via REDO) is
765                  * flushed.
766                  */
767                 if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
768                         if (ip->redo_count < hammer_limit_redo) {
769                                 bp->b_flags |= B_VFSFLAG1;
770                                 error = hammer_generate_redo(&trans, ip,
771                                                      base_offset + offset,
772                                                      HAMMER_REDO_WRITE,
773                                                      bp->b_data + offset,
774                                                      (size_t)n);
775                         } else {
776                                 ip->flags &= ~HAMMER_INODE_REDO;
777                         }
778                 }
779
780                 /*
781                  * If we screwed up we have to undo any VM size changes we
782                  * made.
783                  */
784                 if (error) {
785                         brelse(bp);
786                         if (fixsize) {
787                                 nvtruncbuf(ap->a_vp, ip->ino_data.size,
788                                           hammer_blocksize(ip->ino_data.size),
789                                           hammer_blockoff(ip->ino_data.size));
790                         }
791                         break;
792                 }
793                 kflags |= NOTE_WRITE;
794                 hammer_stats_file_write += n;
795                 if (blksize == HAMMER_XBUFSIZE)
796                         bp->b_flags |= B_CLUSTEROK;
797                 if (ip->ino_data.size < uio->uio_offset) {
798                         ip->ino_data.size = uio->uio_offset;
799                         flags = HAMMER_INODE_SDIRTY;
800                 } else {
801                         flags = 0;
802                 }
803                 ip->ino_data.mtime = trans.time;
804                 flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
805                 hammer_modify_inode(&trans, ip, flags);
806
807                 /*
808                  * Once we dirty the buffer any cached zone-X offset
809                  * becomes invalid.  HAMMER NOTE: no-history mode cannot 
810                  * allow overwriting over the same data sector unless
811                  * we provide UNDOs for the old data, which we don't.
812                  */
813                 bp->b_bio2.bio_offset = NOOFFSET;
814
815                 /*
816                  * Final buffer disposition.
817                  *
818                  * Because meta-data updates are deferred, HAMMER is
819                  * especially sensitive to excessive bdwrite()s because
820                  * the I/O stream is not broken up by disk reads.  So the
821                  * buffer cache simply cannot keep up.
822                  *
823                  * WARNING!  blksize is variable.  cluster_write() is
824                  *           expected to not blow up if it encounters
825                  *           buffers that do not match the passed blksize.
826                  *
827                  * NOTE!  Hammer shouldn't need to bawrite()/cluster_write().
828                  *        The ip->rsv_recs check should burst-flush the data.
829                  *        If we queue it immediately the buf could be left
830                  *        locked on the device queue for a very long time.
831                  *
832                  *        However, failing to flush a dirty buffer out when
833                  *        issued from the pageout daemon can result in a low
834                  *        memory deadlock against bio_page_alloc(), so we
835                  *        have to bawrite() on IO_ASYNC as well.
836                  *
837                  * NOTE!  To avoid degenerate stalls due to mismatched block
838                  *        sizes we only honor IO_DIRECT on the write which
839                  *        abuts the end of the buffer.  However, we must
840                  *        honor IO_SYNC in case someone is silly enough to
841                  *        configure a HAMMER file as swap, or when HAMMER
842                  *        is serving NFS (for commits).  Ick ick.
843                  */
844                 bp->b_flags |= B_AGE;
845                 if (blksize == HAMMER_XBUFSIZE)
846                         bp->b_flags |= B_CLUSTEROK;
847
848                 if (ap->a_ioflag & IO_SYNC) {
849                         bwrite(bp);
850                 } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
851                         bawrite(bp);
852                 } else if (ap->a_ioflag & IO_ASYNC) {
853                         bawrite(bp);
854                 } else if (hammer_cluster_enable &&
855                            !(ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
856                         if (base_offset < HAMMER_XDEMARC)
857                                 cluster_eof = hammer_blockdemarc(base_offset,
858                                                          ip->ino_data.size);
859                         else
860                                 cluster_eof = ip->ino_data.size;
861                         cluster_write(bp, cluster_eof, blksize, seqcount);
862                 } else {
863                         bdwrite(bp);
864                 }
865         }
866         hammer_done_transaction(&trans);
867         hammer_knote(ap->a_vp, kflags);
868         lwkt_reltoken(&hmp->fs_token);
869         return (error);
870 }
871
872 /*
873  * hammer_vop_access { vp, mode, cred }
874  *
875  * MPSAFE - does not require fs_token
876  */
877 static
878 int
879 hammer_vop_access(struct vop_access_args *ap)
880 {
881         struct hammer_inode *ip = VTOI(ap->a_vp);
882         uid_t uid;
883         gid_t gid;
884         int error;
885
886         ++hammer_stats_file_iopsr;
887         uid = hammer_to_unix_xid(&ip->ino_data.uid);
888         gid = hammer_to_unix_xid(&ip->ino_data.gid);
889
890         error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
891                                   ip->ino_data.uflags);
892         return (error);
893 }
894
895 /*
896  * hammer_vop_advlock { vp, id, op, fl, flags }
897  *
898  * MPSAFE - does not require fs_token
899  */
900 static
901 int
902 hammer_vop_advlock(struct vop_advlock_args *ap)
903 {
904         hammer_inode_t ip = VTOI(ap->a_vp);
905
906         return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
907 }
908
909 /*
910  * hammer_vop_close { vp, fflag }
911  *
912  * We can only sync-on-close for normal closes.  XXX disabled for now.
913  */
914 static
915 int
916 hammer_vop_close(struct vop_close_args *ap)
917 {
918 #if 0
919         struct vnode *vp = ap->a_vp;
920         hammer_inode_t ip = VTOI(vp);
921         int waitfor;
922         if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
923                 if (vn_islocked(vp) == LK_EXCLUSIVE &&
924                     (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
925                         if (ip->flags & HAMMER_INODE_CLOSESYNC)
926                                 waitfor = MNT_WAIT;
927                         else
928                                 waitfor = MNT_NOWAIT;
929                         ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
930                                        HAMMER_INODE_CLOSEASYNC);
931                         VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
932                 }
933         }
934 #endif
935         return (vop_stdclose(ap));
936 }
937
938 /*
939  * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
940  *
941  * The operating system has already ensured that the directory entry
942  * does not exist and done all appropriate namespace locking.
943  */
944 static
945 int
946 hammer_vop_ncreate(struct vop_ncreate_args *ap)
947 {
948         struct hammer_transaction trans;
949         struct hammer_inode *dip;
950         struct hammer_inode *nip;
951         struct nchandle *nch;
952         hammer_mount_t hmp;
953         int error;
954
955         nch = ap->a_nch;
956         dip = VTOI(ap->a_dvp);
957         hmp = dip->hmp;
958
959         if (dip->flags & HAMMER_INODE_RO)
960                 return (EROFS);
961         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
962                 return (error);
963
964         /*
965          * Create a transaction to cover the operations we perform.
966          */
967         lwkt_gettoken(&hmp->fs_token);
968         hammer_start_transaction(&trans, hmp);
969         ++hammer_stats_file_iopsw;
970
971         /*
972          * Create a new filesystem object of the requested type.  The
973          * returned inode will be referenced and shared-locked to prevent
974          * it from being moved to the flusher.
975          */
976         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
977                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
978                                     NULL, &nip);
979         if (error) {
980                 hkprintf("hammer_create_inode error %d\n", error);
981                 hammer_done_transaction(&trans);
982                 *ap->a_vpp = NULL;
983                 lwkt_reltoken(&hmp->fs_token);
984                 return (error);
985         }
986
987         /*
988          * Add the new filesystem object to the directory.  This will also
989          * bump the inode's link count.
990          */
991         error = hammer_ip_add_directory(&trans, dip,
992                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
993                                         nip);
994         if (error)
995                 hkprintf("hammer_ip_add_directory error %d\n", error);
996
997         /*
998          * Finish up.
999          */
1000         if (error) {
1001                 hammer_rel_inode(nip, 0);
1002                 hammer_done_transaction(&trans);
1003                 *ap->a_vpp = NULL;
1004         } else {
1005                 error = hammer_get_vnode(nip, ap->a_vpp);
1006                 hammer_done_transaction(&trans);
1007                 hammer_rel_inode(nip, 0);
1008                 if (error == 0) {
1009                         cache_setunresolved(ap->a_nch);
1010                         cache_setvp(ap->a_nch, *ap->a_vpp);
1011                 }
1012                 hammer_knote(ap->a_dvp, NOTE_WRITE);
1013         }
1014         lwkt_reltoken(&hmp->fs_token);
1015         return (error);
1016 }
1017
1018 /*
1019  * hammer_vop_getattr { vp, vap }
1020  *
1021  * Retrieve an inode's attribute information.  When accessing inodes
1022  * historically we fake the atime field to ensure consistent results.
1023  * The atime field is stored in the B-Tree element and allowed to be
1024  * updated without cycling the element.
1025  *
1026  * MPSAFE - does not require fs_token
1027  */
1028 static
1029 int
1030 hammer_vop_getattr(struct vop_getattr_args *ap)
1031 {
1032         struct hammer_inode *ip = VTOI(ap->a_vp);
1033         struct vattr *vap = ap->a_vap;
1034
1035         /*
1036          * We want the fsid to be different when accessing a filesystem
1037          * with different as-of's so programs like diff don't think
1038          * the files are the same.
1039          *
1040          * We also want the fsid to be the same when comparing snapshots,
1041          * or when comparing mirrors (which might be backed by different
1042          * physical devices).  HAMMER fsids are based on the PFS's
1043          * shared_uuid field.
1044          *
1045          * XXX there is a chance of collision here.  The va_fsid reported
1046          * by stat is different from the more involved fsid used in the
1047          * mount structure.
1048          */
1049         ++hammer_stats_file_iopsr;
1050         hammer_lock_sh(&ip->lock);
1051         vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
1052                        (u_int32_t)(ip->obj_asof >> 32);
1053
1054         vap->va_fileid = ip->ino_leaf.base.obj_id;
1055         vap->va_mode = ip->ino_data.mode;
1056         vap->va_nlink = ip->ino_data.nlinks;
1057         vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
1058         vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
1059         vap->va_rmajor = 0;
1060         vap->va_rminor = 0;
1061         vap->va_size = ip->ino_data.size;
1062
1063         /*
1064          * Special case for @@PFS softlinks.  The actual size of the
1065          * expanded softlink is "@@0x%016llx:%05d" == 26 bytes,
1066          * or for MAX_TID is    "@@-1:%05d" == 10 bytes.
1067          */
1068         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
1069             ip->ino_data.size == 10 &&
1070             ip->obj_asof == HAMMER_MAX_TID &&
1071             ip->obj_localization == 0 &&
1072             strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
1073                     if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
1074                             vap->va_size = 26;
1075                     else
1076                             vap->va_size = 10;
1077         }
1078
1079         /*
1080          * We must provide a consistent atime and mtime for snapshots
1081          * so people can do a 'tar cf - ... | md5' on them and get
1082          * consistent results.
1083          */
1084         if (ip->flags & HAMMER_INODE_RO) {
1085                 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
1086                 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
1087         } else {
1088                 hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
1089                 hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
1090         }
1091         hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
1092         vap->va_flags = ip->ino_data.uflags;
1093         vap->va_gen = 1;        /* hammer inums are unique for all time */
1094         vap->va_blocksize = HAMMER_BUFSIZE;
1095         if (ip->ino_data.size >= HAMMER_XDEMARC) {
1096                 vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
1097                                 ~HAMMER_XBUFMASK64;
1098         } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
1099                 vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
1100                                 ~HAMMER_BUFMASK64;
1101         } else {
1102                 vap->va_bytes = (ip->ino_data.size + 15) & ~15;
1103         }
1104
1105         vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
1106         vap->va_filerev = 0;    /* XXX */
1107         vap->va_uid_uuid = ip->ino_data.uid;
1108         vap->va_gid_uuid = ip->ino_data.gid;
1109         vap->va_fsid_uuid = ip->hmp->fsid;
1110         vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
1111                           VA_FSID_UUID_VALID;
1112
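        /*
         * Device nodes additionally report their raw major/minor numbers.
         */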
1113         switch (ip->ino_data.obj_type) {
1114         case HAMMER_OBJTYPE_CDEV:
1115         case HAMMER_OBJTYPE_BDEV:
1116                 vap->va_rmajor = ip->ino_data.rmajor;
1117                 vap->va_rminor = ip->ino_data.rminor;
1118                 break;
1119         default:
1120                 break;
1121         }
1122         hammer_unlock(&ip->lock);
1123         return(0);
1124 }
1125
1126 /*
1127  * hammer_vop_nresolve { nch, dvp, cred }
1128  *
1129  * Locate the requested directory entry.
1130  */
1131 static
1132 int
1133 hammer_vop_nresolve(struct vop_nresolve_args *ap)
1134 {
1135         struct hammer_transaction trans;
1136         struct namecache *ncp;
1137         hammer_mount_t hmp;
1138         hammer_inode_t dip;
1139         hammer_inode_t ip;
1140         hammer_tid_t asof;
1141         struct hammer_cursor cursor;
1142         struct vnode *vp;
1143         int64_t namekey;
1144         int error;
1145         int i;
1146         int nlen;
1147         int flags;
1148         int ispfs;
1149         int64_t obj_id;
1150         u_int32_t localization;
1151         u_int32_t max_iterations;
1152
1153         /*
1154          * Misc initialization, plus handle as-of name extensions.  Look for
1155          * the '@@' extension.  Note that as-of files and directories cannot
1156          * be modified.
1157          */
1158         dip = VTOI(ap->a_dvp);
1159         ncp = ap->a_nch->ncp;
1160         asof = dip->obj_asof;
1161         localization = dip->obj_localization;   /* for code consistency */
1162         nlen = ncp->nc_nlen;
1163         flags = dip->flags & HAMMER_INODE_RO;
1164         ispfs = 0;
1165         hmp = dip->hmp;
1166
1167         lwkt_gettoken(&hmp->fs_token);
1168         hammer_simple_transaction(&trans, hmp);
1169         ++hammer_stats_file_iopsr;
1170
1171         for (i = 0; i < nlen; ++i) {
1172                 if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
1173                         error = hammer_str_to_tid(ncp->nc_name + i + 2,
1174                                                   &ispfs, &asof, &localization);
1175                         if (error != 0) {
1176                                 i = nlen;
1177                                 break;
1178                         }
1179                         if (asof != HAMMER_MAX_TID)
1180                                 flags |= HAMMER_INODE_RO;
1181                         break;
1182                 }
1183         }
1184         nlen = i;
1185
1186         /*
1187          * If this is a PFS softlink we dive into the PFS
1188          */
1189         if (ispfs && nlen == 0) {
1190                 ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
1191                                       asof, localization,
1192                                       flags, &error);
1193                 if (error == 0) {
1194                         error = hammer_get_vnode(ip, &vp);
1195                         hammer_rel_inode(ip, 0);
1196                 } else {
1197                         vp = NULL;
1198                 }
1199                 if (error == 0) {
1200                         vn_unlock(vp);
1201                         cache_setvp(ap->a_nch, vp);
1202                         vrele(vp);
1203                 }
1204                 goto done;
1205         }
1206
1207         /*
1208          * If there is no path component, the time extension is relative to dip.
1209          * e.g. "fubar/@@<snapshot>"
1210          *
1211          * "." is handled by the kernel, but ".@@<snapshot>" is not.
1212          * e.g. "fubar/.@@<snapshot>"
1213          *
1214          * ".." is handled by the kernel.  We do not currently handle
1215          * "..@<snapshot>".
1216          */
1217         if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
1218                 ip = hammer_get_inode(&trans, dip, dip->obj_id,
1219                                       asof, dip->obj_localization,
1220                                       flags, &error);
1221                 if (error == 0) {
1222                         error = hammer_get_vnode(ip, &vp);
1223                         hammer_rel_inode(ip, 0);
1224                 } else {
1225                         vp = NULL;
1226                 }
1227                 if (error == 0) {
1228                         vn_unlock(vp);
1229                         cache_setvp(ap->a_nch, vp);
1230                         vrele(vp);
1231                 }
1232                 goto done;
1233         }
1234
1235         /*
1236          * Calculate the namekey and setup the key range for the scan.  This
1237          * works kinda like a chained hash table where the lower 32 bits
1238          * of the namekey synthesize the chain.
1239          *
1240          * The key range is inclusive of both key_beg and key_end.
1241          */
1242         namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
1243                                            &max_iterations);
1244
1245         error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
1246         cursor.key_beg.localization = dip->obj_localization +
1247                                       hammer_dir_localization(dip);
1248         cursor.key_beg.obj_id = dip->obj_id;
1249         cursor.key_beg.key = namekey;
1250         cursor.key_beg.create_tid = 0;
1251         cursor.key_beg.delete_tid = 0;
1252         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1253         cursor.key_beg.obj_type = 0;
1254
1255         cursor.key_end = cursor.key_beg;
1256         cursor.key_end.key += max_iterations;
1257         cursor.asof = asof;
1258         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1259
1260         /*
1261          * Scan all matching records (the chain), locate the one matching
1262          * the requested path component.
1263          *
1264          * The hammer_ip_*() functions merge in-memory records with on-disk
1265          * records for the purposes of the search.
1266          */
1267         obj_id = 0;
1268         localization = HAMMER_DEF_LOCALIZATION;
1269
1270         if (error == 0) {
1271                 error = hammer_ip_first(&cursor);
1272                 while (error == 0) {
1273                         error = hammer_ip_resolve_data(&cursor);
1274                         if (error)
1275                                 break;
1276                         if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
1277                             bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
1278                                 obj_id = cursor.data->entry.obj_id;
1279                                 localization = cursor.data->entry.localization;
1280                                 break;
1281                         }
1282                         error = hammer_ip_next(&cursor);
1283                 }
1284         }
1285         hammer_done_cursor(&cursor);
1286
1287         /*
1288          * Lookup the obj_id.  This should always succeed.  If it does not
1289          * the filesystem may be damaged and we return a dummy inode.
1290          */
1291         if (error == 0) {
1292                 ip = hammer_get_inode(&trans, dip, obj_id,
1293                                       asof, localization,
1294                                       flags, &error);
1295                 if (error == ENOENT) {
1296                         kprintf("HAMMER: WARNING: Missing "
1297                                 "inode for dirent \"%s\"\n"
1298                                 "\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
1299                                 ncp->nc_name,
1300                                 (long long)obj_id, (long long)asof,
1301                                 localization);
1302                         error = 0;
1303                         ip = hammer_get_dummy_inode(&trans, dip, obj_id,
1304                                                     asof, localization,
1305                                                     flags, &error);
1306                 }
1307                 if (error == 0) {
1308                         error = hammer_get_vnode(ip, &vp);
1309                         hammer_rel_inode(ip, 0);
1310                 } else {
1311                         vp = NULL;
1312                 }
1313                 if (error == 0) {
1314                         vn_unlock(vp);
1315                         cache_setvp(ap->a_nch, vp);
1316                         vrele(vp);
1317                 }
1318         } else if (error == ENOENT) {
1319                 cache_setvp(ap->a_nch, NULL);
1320         }
1321 done:
1322         hammer_done_transaction(&trans);
1323         lwkt_reltoken(&hmp->fs_token);
1324         return (error);
1325 }
1326
1327 /*
1328  * hammer_vop_nlookupdotdot { dvp, vpp, cred }
1329  *
1330  * Locate the parent directory of a directory vnode.
1331  *
1332  * dvp is referenced but not locked.  *vpp must be returned referenced and
1333  * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
1334  * at the root; instead it could indicate that the directory we were in was
1335  * removed.
1336  *
1337  * NOTE: as-of sequences are not linked into the directory structure.  If
1338  * we are at the root with a different asof than the mount point, reload
1339  * the same directory with the mount point's asof.   I'm not sure what this
1340  * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
1341  * get confused, but it hasn't been tested.
1342  */
1343 static
1344 int
1345 hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1346 {
1347         struct hammer_transaction trans;
1348         struct hammer_inode *dip;
1349         struct hammer_inode *ip;
1350         hammer_mount_t hmp;
1351         int64_t parent_obj_id;
1352         u_int32_t parent_obj_localization;
1353         hammer_tid_t asof;
1354         int error;
1355
1356         dip = VTOI(ap->a_dvp);
1357         asof = dip->obj_asof;
1358         hmp = dip->hmp;
1359
1360         /*
1361          * Who's our parent?  This could be the root of a pseudo-filesystem
1362          * whose parent is in another localization domain.
1363          */
1364         lwkt_gettoken(&hmp->fs_token);
1365         parent_obj_id = dip->ino_data.parent_obj_id;
1366         if (dip->obj_id == HAMMER_OBJID_ROOT)
1367                 parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
1368         else
1369                 parent_obj_localization = dip->obj_localization;
1370
1371         if (parent_obj_id == 0) {
1372                 if (dip->obj_id == HAMMER_OBJID_ROOT &&
1373                    asof != hmp->asof) {
1374                         parent_obj_id = dip->obj_id;
1375                         asof = hmp->asof;
1376                         *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
1377                         ksnprintf(*ap->a_fakename, 19, "0x%016llx",
1378                                   (long long)dip->obj_asof);
1379                 } else {
1380                         *ap->a_vpp = NULL;
1381                         lwkt_reltoken(&hmp->fs_token);
1382                         return ENOENT;
1383                 }
1384         }
1385
1386         hammer_simple_transaction(&trans, hmp);
1387         ++hammer_stats_file_iopsr;
1388
1389         ip = hammer_get_inode(&trans, dip, parent_obj_id,
1390                               asof, parent_obj_localization,
1391                               dip->flags, &error);
1392         if (ip) {
1393                 error = hammer_get_vnode(ip, ap->a_vpp);
1394                 hammer_rel_inode(ip, 0);
1395         } else {
1396                 *ap->a_vpp = NULL;
1397         }
1398         hammer_done_transaction(&trans);
1399         lwkt_reltoken(&hmp->fs_token);
1400         return (error);
1401 }
1402
1403 /*
1404  * hammer_vop_nlink { nch, dvp, vp, cred }
1405  */
1406 static
1407 int
1408 hammer_vop_nlink(struct vop_nlink_args *ap)
1409 {
1410         struct hammer_transaction trans;
1411         struct hammer_inode *dip;
1412         struct hammer_inode *ip;
1413         struct nchandle *nch;
1414         hammer_mount_t hmp;
1415         int error;
1416
1417         if (ap->a_dvp->v_mount != ap->a_vp->v_mount)    
1418                 return(EXDEV);
1419
1420         nch = ap->a_nch;
1421         dip = VTOI(ap->a_dvp);
1422         ip = VTOI(ap->a_vp);
1423         hmp = dip->hmp;
1424
1425         if (dip->obj_localization != ip->obj_localization)
1426                 return(EXDEV);
1427
1428         if (dip->flags & HAMMER_INODE_RO)
1429                 return (EROFS);
1430         if (ip->flags & HAMMER_INODE_RO)
1431                 return (EROFS);
1432         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1433                 return (error);
1434
1435         /*
1436          * Create a transaction to cover the operations we perform.
1437          */
1438         lwkt_gettoken(&hmp->fs_token);
1439         hammer_start_transaction(&trans, hmp);
1440         ++hammer_stats_file_iopsw;
1441
1442         /*
1443          * Add the filesystem object to the directory.  Note that neither
1444          * dip nor ip are referenced or locked, but their vnodes are
1445          * referenced.  This function will bump the inode's link count.
1446          */
1447         error = hammer_ip_add_directory(&trans, dip,
1448                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
1449                                         ip);
1450
1451         /*
1452          * Finish up.
1453          */
1454         if (error == 0) {
1455                 cache_setunresolved(nch);
1456                 cache_setvp(nch, ap->a_vp);
1457         }
1458         hammer_done_transaction(&trans);
1459         hammer_knote(ap->a_vp, NOTE_LINK);
1460         hammer_knote(ap->a_dvp, NOTE_WRITE);
1461         lwkt_reltoken(&hmp->fs_token);
1462         return (error);
1463 }
1464
1465 /*
1466  * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1467  *
1468  * The operating system has already ensured that the directory entry
1469  * does not exist and done all appropriate namespace locking.
1470  */
1471 static
1472 int
1473 hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
1474 {
1475         struct hammer_transaction trans;
1476         struct hammer_inode *dip;
1477         struct hammer_inode *nip;
1478         struct nchandle *nch;
1479         hammer_mount_t hmp;
1480         int error;
1481
1482         nch = ap->a_nch;
1483         dip = VTOI(ap->a_dvp);
1484         hmp = dip->hmp;
1485
1486         if (dip->flags & HAMMER_INODE_RO)
1487                 return (EROFS);
1488         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1489                 return (error);
1490
1491         /*
1492          * Create a transaction to cover the operations we perform.
1493          */
1494         lwkt_gettoken(&hmp->fs_token);
1495         hammer_start_transaction(&trans, hmp);
1496         ++hammer_stats_file_iopsw;
1497
1498         /*
1499          * Create a new filesystem object of the requested type.  The
1500          * returned inode will be referenced but not locked.
1501          */
1502         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1503                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1504                                     NULL, &nip);
1505         if (error) {
1506                 hkprintf("hammer_mkdir error %d\n", error);
1507                 hammer_done_transaction(&trans);
1508                 *ap->a_vpp = NULL;
1509                 lwkt_reltoken(&hmp->fs_token);
1510                 return (error);
1511         }
1512         /*
1513          * Add the new filesystem object to the directory.  This will also
1514          * bump the inode's link count.
1515          */
1516         error = hammer_ip_add_directory(&trans, dip,
1517                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
1518                                         nip);
1519         if (error)
1520                 hkprintf("hammer_mkdir (add) error %d\n", error);
1521
1522         /*
1523          * Finish up.
1524          */
1525         if (error) {
1526                 hammer_rel_inode(nip, 0);
1527                 *ap->a_vpp = NULL;
1528         } else {
1529                 error = hammer_get_vnode(nip, ap->a_vpp);
1530                 hammer_rel_inode(nip, 0);
1531                 if (error == 0) {
1532                         cache_setunresolved(ap->a_nch);
1533                         cache_setvp(ap->a_nch, *ap->a_vpp);
1534                 }
1535         }
1536         hammer_done_transaction(&trans);
1537         if (error == 0)
1538                 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1539         lwkt_reltoken(&hmp->fs_token);
1540         return (error);
1541 }
1542
1543 /*
1544  * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1545  *
1546  * The operating system has already ensured that the directory entry
1547  * does not exist and done all appropriate namespace locking.
1548  */
1549 static
1550 int
1551 hammer_vop_nmknod(struct vop_nmknod_args *ap)
1552 {
1553         struct hammer_transaction trans;
1554         struct hammer_inode *dip;
1555         struct hammer_inode *nip;
1556         struct nchandle *nch;
1557         hammer_mount_t hmp;
1558         int error;
1559
1560         nch = ap->a_nch;
1561         dip = VTOI(ap->a_dvp);
1562         hmp = dip->hmp;
1563
1564         if (dip->flags & HAMMER_INODE_RO)
1565                 return (EROFS);
1566         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1567                 return (error);
1568
1569         /*
1570          * Create a transaction to cover the operations we perform.
1571          */
1572         lwkt_gettoken(&hmp->fs_token);
1573         hammer_start_transaction(&trans, hmp);
1574         ++hammer_stats_file_iopsw;
1575
1576         /*
1577          * Create a new filesystem object of the requested type.  The
1578          * returned inode will be referenced but not locked.
1579          *
1580          * If mknod specifies a directory, a pseudo-fs is created.
1581          */
1582         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1583                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1584                                     NULL, &nip);
1585         if (error) {
1586                 hammer_done_transaction(&trans);
1587                 *ap->a_vpp = NULL;
1588                 lwkt_reltoken(&hmp->fs_token);
1589                 return (error);
1590         }
1591
1592         /*
1593          * Add the new filesystem object to the directory.  This will also
1594          * bump the inode's link count.
1595          */
1596         error = hammer_ip_add_directory(&trans, dip,
1597                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
1598                                         nip);
1599
1600         /*
1601          * Finish up.
1602          */
1603         if (error) {
1604                 hammer_rel_inode(nip, 0);
1605                 *ap->a_vpp = NULL;
1606         } else {
1607                 error = hammer_get_vnode(nip, ap->a_vpp);
1608                 hammer_rel_inode(nip, 0);
1609                 if (error == 0) {
1610                         cache_setunresolved(ap->a_nch);
1611                         cache_setvp(ap->a_nch, *ap->a_vpp);
1612                 }
1613         }
1614         hammer_done_transaction(&trans);
1615         if (error == 0)
1616                 hammer_knote(ap->a_dvp, NOTE_WRITE);
1617         lwkt_reltoken(&hmp->fs_token);
1618         return (error);
1619 }
1620
1621 /*
1622  * hammer_vop_open { vp, mode, cred, fp }
1623  *
1624  * MPSAFE (does not require fs_token)
1625  */
1626 static
1627 int
1628 hammer_vop_open(struct vop_open_args *ap)
1629 {
1630         hammer_inode_t ip;
1631
1632         ++hammer_stats_file_iopsr;
1633         ip = VTOI(ap->a_vp);
1634
1635         if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
1636                 return (EROFS);
1637         return(vop_stdopen(ap));
1638 }
1639
1640 /*
1641  * hammer_vop_print { vp }
1642  */
1643 static
1644 int
1645 hammer_vop_print(struct vop_print_args *ap)
1646 {
1647         return EOPNOTSUPP;
1648 }
1649
1650 /*
1651  * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
1652  */
1653 static
1654 int
1655 hammer_vop_readdir(struct vop_readdir_args *ap)
1656 {
1657         struct hammer_transaction trans;
1658         struct hammer_cursor cursor;
1659         struct hammer_inode *ip;
1660         hammer_mount_t hmp;
1661         struct uio *uio;
1662         hammer_base_elm_t base;
1663         int error;
1664         int cookie_index;
1665         int ncookies;
1666         off_t *cookies;
1667         off_t saveoff;
1668         int r;
1669         int dtype;
1670
1671         ++hammer_stats_file_iopsr;
1672         ip = VTOI(ap->a_vp);
1673         uio = ap->a_uio;
1674         saveoff = uio->uio_offset;
1675         hmp = ip->hmp;
1676
1677         if (ap->a_ncookies) {
1678                 ncookies = uio->uio_resid / 16 + 1;
1679                 if (ncookies > 1024)
1680                         ncookies = 1024;
1681                 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1682                 cookie_index = 0;
1683         } else {
1684                 ncookies = -1;
1685                 cookies = NULL;
1686                 cookie_index = 0;
1687         }
1688
1689         lwkt_gettoken(&hmp->fs_token);
1690         hammer_simple_transaction(&trans, hmp);
1691
1692         /*
1693          * Handle artificial entries
1694          *
1695          * It should be noted that the minimum value for a directory
1696          * hash key on-media is 0x0000000100000000, so we can use anything
1697          * less than that to represent our 'special' key space.
1698          */
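             /*
              * The synthesized "." and ".." entries below use offsets 0
              * and 1 within that reserved key space; real directory
              * records are returned by the B-Tree scan that follows.
              */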
1699         error = 0;
1700         if (saveoff == 0) {
1701                 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1702                 if (r)
1703                         goto done;
1704                 if (cookies)
1705                         cookies[cookie_index] = saveoff;
1706                 ++saveoff;
1707                 ++cookie_index;
1708                 if (cookie_index == ncookies)
1709                         goto done;
1710         }
1711         if (saveoff == 1) {
1712                 if (ip->ino_data.parent_obj_id) {
1713                         r = vop_write_dirent(&error, uio,
1714                                              ip->ino_data.parent_obj_id,
1715                                              DT_DIR, 2, "..");
1716                 } else {
1717                         r = vop_write_dirent(&error, uio,
1718                                              ip->obj_id, DT_DIR, 2, "..");
1719                 }
1720                 if (r)
1721                         goto done;
1722                 if (cookies)
1723                         cookies[cookie_index] = saveoff;
1724                 ++saveoff;
1725                 ++cookie_index;
1726                 if (cookie_index == ncookies)
1727                         goto done;
1728         }
1729
1730         /*
1731          * Key range (begin and end inclusive) to scan.  Directory keys
1732          * directly translate to a 64 bit 'seek' position.
1733          */
1734         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1735         cursor.key_beg.localization = ip->obj_localization +
1736                                       hammer_dir_localization(ip);
1737         cursor.key_beg.obj_id = ip->obj_id;
1738         cursor.key_beg.create_tid = 0;
1739         cursor.key_beg.delete_tid = 0;
1740         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1741         cursor.key_beg.obj_type = 0;
1742         cursor.key_beg.key = saveoff;
1743
1744         cursor.key_end = cursor.key_beg;
1745         cursor.key_end.key = HAMMER_MAX_KEY;
1746         cursor.asof = ip->obj_asof;
1747         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1748
1749         error = hammer_ip_first(&cursor);
1750
1751         while (error == 0) {
1752                 error = hammer_ip_resolve_data(&cursor);
1753                 if (error)
1754                         break;
1755                 base = &cursor.leaf->base;
1756                 saveoff = base->key;
1757                 KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
1758
1759                 if (base->obj_id != ip->obj_id)
1760                         panic("readdir: bad record at %p", cursor.node);
1761
1762                 /*
1763                  * Convert pseudo-filesystems into softlinks
1764                  */
1765                 dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
1766                 r = vop_write_dirent(
1767                              &error, uio, cursor.data->entry.obj_id,
1768                              dtype,
1769                              cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
1770                              (void *)cursor.data->entry.name);
1771                 if (r)
1772                         break;
1773                 ++saveoff;
1774                 if (cookies)
1775                         cookies[cookie_index] = base->key;
1776                 ++cookie_index;
1777                 if (cookie_index == ncookies)
1778                         break;
1779                 error = hammer_ip_next(&cursor);
1780         }
1781         hammer_done_cursor(&cursor);
1782
1783 done:
1784         hammer_done_transaction(&trans);
1785
1786         if (ap->a_eofflag)
1787                 *ap->a_eofflag = (error == ENOENT);
1788         uio->uio_offset = saveoff;
1789         if (error && cookie_index == 0) {
1790                 if (error == ENOENT)
1791                         error = 0;
1792                 if (cookies) {
1793                         kfree(cookies, M_TEMP);
1794                         *ap->a_ncookies = 0;
1795                         *ap->a_cookies = NULL;
1796                 }
1797         } else {
1798                 if (error == ENOENT)
1799                         error = 0;
1800                 if (cookies) {
1801                         *ap->a_ncookies = cookie_index;
1802                         *ap->a_cookies = cookies;
1803                 }
1804         }
1805         lwkt_reltoken(&hmp->fs_token);
1806         return(error);
1807 }
1808
1809 /*
1810  * hammer_vop_readlink { vp, uio, cred }
1811  */
1812 static
1813 int
1814 hammer_vop_readlink(struct vop_readlink_args *ap)
1815 {
1816         struct hammer_transaction trans;
1817         struct hammer_cursor cursor;
1818         struct hammer_inode *ip;
1819         hammer_mount_t hmp;
1820         char buf[32];
1821         u_int32_t localization;
1822         hammer_pseudofs_inmem_t pfsm;
1823         int error;
1824
1825         ip = VTOI(ap->a_vp);
1826         hmp = ip->hmp;
1827
1828         lwkt_gettoken(&hmp->fs_token);
1829
1830         /*
1831          * Shortcut if the symlink data was stuffed into ino_data.
1832          *
1833          * Also expand special "@@PFS%05d" softlinks (expansion only
1834          * occurs for non-historical (current) accesses made from the
1835          * primary filesystem).
1836          */
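             /*
              * Illustrative example based on the formatting below: a
              * softlink whose stored body is "@@PFS00005" reads back as
              * "@@-1:00005" if PFS #5 is a master, or as
              * "@@0x<sync_end_tid>:00005" if it is a slave.
              */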
1837         if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
1838                 char *ptr;
1839                 int bytes;
1840
1841                 ptr = ip->ino_data.ext.symlink;
1842                 bytes = (int)ip->ino_data.size;
1843                 if (bytes == 10 &&
1844                     ip->obj_asof == HAMMER_MAX_TID &&
1845                     ip->obj_localization == 0 &&
1846                     strncmp(ptr, "@@PFS", 5) == 0) {
1847                         hammer_simple_transaction(&trans, hmp);
1848                         bcopy(ptr + 5, buf, 5);
1849                         buf[5] = 0;
1850                         localization = strtoul(buf, NULL, 10) << 16;
1851                         pfsm = hammer_load_pseudofs(&trans, localization,
1852                                                     &error);
1853                         if (error == 0) {
1854                                 if (pfsm->pfsd.mirror_flags &
1855                                     HAMMER_PFSD_SLAVE) {
1856                                         /* vap->va_size == 26 */
1857                                         ksnprintf(buf, sizeof(buf),
1858                                                   "@@0x%016llx:%05d",
1859                                                   (long long)pfsm->pfsd.sync_end_tid,
1860                                                   localization >> 16);
1861                                 } else {
1862                                         /* vap->va_size == 10 */
1863                                         ksnprintf(buf, sizeof(buf),
1864                                                   "@@-1:%05d",
1865                                                   localization >> 16);
1866 #if 0
1867                                         ksnprintf(buf, sizeof(buf),
1868                                                   "@@0x%016llx:%05d",
1869                                                   (long long)HAMMER_MAX_TID,
1870                                                   localization >> 16);
1871 #endif
1872                                 }
1873                                 ptr = buf;
1874                                 bytes = strlen(buf);
1875                         }
1876                         if (pfsm)
1877                                 hammer_rel_pseudofs(hmp, pfsm);
1878                         hammer_done_transaction(&trans);
1879                 }
1880                 error = uiomove(ptr, bytes, ap->a_uio);
1881                 lwkt_reltoken(&hmp->fs_token);
1882                 return(error);
1883         }
1884
1885         /*
1886          * Long version
1887          */
1888         hammer_simple_transaction(&trans, hmp);
1889         ++hammer_stats_file_iopsr;
1890         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1891
1892         /*
1893          * Key range (begin and end inclusive) to scan.  Directory keys
1894          * directly translate to a 64 bit 'seek' position.
1895          */
1896         cursor.key_beg.localization = ip->obj_localization +
1897                                       HAMMER_LOCALIZE_MISC;
1898         cursor.key_beg.obj_id = ip->obj_id;
1899         cursor.key_beg.create_tid = 0;
1900         cursor.key_beg.delete_tid = 0;
1901         cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1902         cursor.key_beg.obj_type = 0;
1903         cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
1904         cursor.asof = ip->obj_asof;
1905         cursor.flags |= HAMMER_CURSOR_ASOF;
1906
1907         error = hammer_ip_lookup(&cursor);
1908         if (error == 0) {
1909                 error = hammer_ip_resolve_data(&cursor);
1910                 if (error == 0) {
1911                         KKASSERT(cursor.leaf->data_len >=
1912                                  HAMMER_SYMLINK_NAME_OFF);
1913                         error = uiomove(cursor.data->symlink.name,
1914                                         cursor.leaf->data_len -
1915                                                 HAMMER_SYMLINK_NAME_OFF,
1916                                         ap->a_uio);
1917                 }
1918         }
1919         hammer_done_cursor(&cursor);
1920         hammer_done_transaction(&trans);
1921         lwkt_reltoken(&hmp->fs_token);
1922         return(error);
1923 }
1924
1925 /*
1926  * hammer_vop_nremove { nch, dvp, cred }
1927  */
1928 static
1929 int
1930 hammer_vop_nremove(struct vop_nremove_args *ap)
1931 {
1932         struct hammer_transaction trans;
1933         struct hammer_inode *dip;
1934         hammer_mount_t hmp;
1935         int error;
1936
1937         dip = VTOI(ap->a_dvp);
1938         hmp = dip->hmp;
1939
1940         if (hammer_nohistory(dip) == 0 &&
1941             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
1942                 return (error);
1943         }
1944
1945         lwkt_gettoken(&hmp->fs_token);
1946         hammer_start_transaction(&trans, hmp);
1947         ++hammer_stats_file_iopsw;
1948         error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
1949         hammer_done_transaction(&trans);
1950         if (error == 0)
1951                 hammer_knote(ap->a_dvp, NOTE_WRITE);
1952         lwkt_reltoken(&hmp->fs_token);
1953         return (error);
1954 }
1955
1956 /*
1957  * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1958  */
1959 static
1960 int
1961 hammer_vop_nrename(struct vop_nrename_args *ap)
1962 {
1963         struct hammer_transaction trans;
1964         struct namecache *fncp;
1965         struct namecache *tncp;
1966         struct hammer_inode *fdip;
1967         struct hammer_inode *tdip;
1968         struct hammer_inode *ip;
1969         hammer_mount_t hmp;
1970         struct hammer_cursor cursor;
1971         int64_t namekey;
1972         u_int32_t max_iterations;
1973         int nlen, error;
1974
1975         if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount) 
1976                 return(EXDEV);
1977         if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1978                 return(EXDEV);
1979
1980         fdip = VTOI(ap->a_fdvp);
1981         tdip = VTOI(ap->a_tdvp);
1982         fncp = ap->a_fnch->ncp;
1983         tncp = ap->a_tnch->ncp;
1984         ip = VTOI(fncp->nc_vp);
1985         KKASSERT(ip != NULL);
1986
1987         hmp = ip->hmp;
1988
1989         if (fdip->obj_localization != tdip->obj_localization)
1990                 return(EXDEV);
1991         if (fdip->obj_localization != ip->obj_localization)
1992                 return(EXDEV);
1993
1994         if (fdip->flags & HAMMER_INODE_RO)
1995                 return (EROFS);
1996         if (tdip->flags & HAMMER_INODE_RO)
1997                 return (EROFS);
1998         if (ip->flags & HAMMER_INODE_RO)
1999                 return (EROFS);
2000         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
2001                 return (error);
2002
2003         lwkt_gettoken(&hmp->fs_token);
2004         hammer_start_transaction(&trans, hmp);
2005         ++hammer_stats_file_iopsw;
2006
2007         /*
2008          * Remove tncp from the target directory and then link ip as
2009          * tncp. XXX pass trans to dounlink
2010          *
2011          * Force the inode sync-time to match the transaction so it is
2012          * in-sync with the creation of the target directory entry.
2013          */
2014         error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
2015                                 ap->a_cred, 0, -1);
2016         if (error == 0 || error == ENOENT) {
2017                 error = hammer_ip_add_directory(&trans, tdip,
2018                                                 tncp->nc_name, tncp->nc_nlen,
2019                                                 ip);
2020                 if (error == 0) {
2021                         ip->ino_data.parent_obj_id = tdip->obj_id;
2022                         ip->ino_data.ctime = trans.time;
2023                         hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
2024                 }
2025         }
2026         if (error)
2027                 goto failed; /* XXX */
2028
2029         /*
2030          * Locate the record in the originating directory and remove it.
2031          *
2032          * Calculate the namekey and setup the key range for the scan.  This
2033          * works kinda like a chained hash table where the lower 32 bits
2034          * of the namekey synthesize the chain.
2035          *
2036          * The key range is inclusive of both key_beg and key_end.
2037          */
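             /*
              * This is the same chained-hash directory scan used by
              * hammer_vop_nresolve() above.  The retry: label re-runs the
              * scan from scratch when the directory-entry deletion below
              * returns EDEADLK.
              */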
2038         namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
2039                                            &max_iterations);
2040 retry:
2041         hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
2042         cursor.key_beg.localization = fdip->obj_localization +
2043                                       hammer_dir_localization(fdip);
2044         cursor.key_beg.obj_id = fdip->obj_id;
2045         cursor.key_beg.key = namekey;
2046         cursor.key_beg.create_tid = 0;
2047         cursor.key_beg.delete_tid = 0;
2048         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2049         cursor.key_beg.obj_type = 0;
2050
2051         cursor.key_end = cursor.key_beg;
2052         cursor.key_end.key += max_iterations;
2053         cursor.asof = fdip->obj_asof;
2054         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2055
2056         /*
2057          * Scan all matching records (the chain), locate the one matching
2058          * the requested path component.
2059          *
2060          * The hammer_ip_*() functions merge in-memory records with on-disk
2061          * records for the purposes of the search.
2062          */
2063         error = hammer_ip_first(&cursor);
2064         while (error == 0) {
2065                 if (hammer_ip_resolve_data(&cursor) != 0)
2066                         break;
2067                 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2068                 KKASSERT(nlen > 0);
2069                 if (fncp->nc_nlen == nlen &&
2070                     bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
2071                         break;
2072                 }
2073                 error = hammer_ip_next(&cursor);
2074         }
2075
2076         /*
2077          * If all is ok we have to get the inode so we can adjust nlinks.
2078          *
2079          * WARNING: hammer_ip_del_directory() may have to terminate the
2080          * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
2081          * twice.
2082          */
2083         if (error == 0)
2084                 error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
2085
2086         /*
2087          * XXX A deadlock here will break rename's atomicity for the purposes
2088          * of crash recovery.
2089          */
2090         if (error == EDEADLK) {
2091                 hammer_done_cursor(&cursor);
2092                 goto retry;
2093         }
2094
2095         /*
2096          * Cleanup and tell the kernel that the rename succeeded.
2097          *
2098          * NOTE: ip->vp, if non-NULL, cannot be directly referenced
2099          *       without formally acquiring the vp since the vp might
2100          *       have zero refs on it, or be in the middle of a reclaim,
2101          *       etc.
2102          */
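             /*
              * The loop below therefore acquires a fully referenced vnode
              * via hammer_get_vnode() before posting NOTE_RENAME, retrying
              * if the vnode is reclaimed out from under us.
              */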
2103         hammer_done_cursor(&cursor);
2104         if (error == 0) {
2105                 cache_rename(ap->a_fnch, ap->a_tnch);
2106                 hammer_knote(ap->a_fdvp, NOTE_WRITE);
2107                 hammer_knote(ap->a_tdvp, NOTE_WRITE);
2108                 while (ip->vp) {
2109                         struct vnode *vp;
2110
2111                         error = hammer_get_vnode(ip, &vp);
2112                         if (error == 0 && vp) {
2113                                 vn_unlock(vp);
2114                                 hammer_knote(ip->vp, NOTE_RENAME);
2115                                 vrele(vp);
2116                                 break;
2117                         }
2118                         kprintf("Debug: HAMMER ip/vp race2 avoided\n");
2119                 }
2120         }
2121
2122 failed:
2123         hammer_done_transaction(&trans);
2124         lwkt_reltoken(&hmp->fs_token);
2125         return (error);
2126 }
2127
2128 /*
2129  * hammer_vop_nrmdir { nch, dvp, cred }
2130  */
2131 static
2132 int
2133 hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
2134 {
2135         struct hammer_transaction trans;
2136         struct hammer_inode *dip;
2137         hammer_mount_t hmp;
2138         int error;
2139
2140         dip = VTOI(ap->a_dvp);
2141         hmp = dip->hmp;
2142
2143         if (hammer_nohistory(dip) == 0 &&
2144             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2145                 return (error);
2146         }
2147
2148         lwkt_gettoken(&hmp->fs_token);
2149         hammer_start_transaction(&trans, hmp);
2150         ++hammer_stats_file_iopsw;
2151         error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
2152         hammer_done_transaction(&trans);
2153         if (error == 0)
2154                 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
2155         lwkt_reltoken(&hmp->fs_token);
2156         return (error);
2157 }
2158
2159 /*
2160  * hammer_vop_markatime { vp, cred }
2161  */
2162 static
2163 int
2164 hammer_vop_markatime(struct vop_markatime_args *ap)
2165 {
2166         struct hammer_transaction trans;
2167         struct hammer_inode *ip;
2168         hammer_mount_t hmp;
2169
2170         ip = VTOI(ap->a_vp);
2171         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2172                 return (EROFS);
2173         if (ip->flags & HAMMER_INODE_RO)
2174                 return (EROFS);
2175         hmp = ip->hmp;
2176         if (hmp->mp->mnt_flag & MNT_NOATIME)
2177                 return (0);
2178         lwkt_gettoken(&hmp->fs_token);
2179         hammer_start_transaction(&trans, hmp);
2180         ++hammer_stats_file_iopsw;
2181
2182         ip->ino_data.atime = trans.time;
2183         hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
2184         hammer_done_transaction(&trans);
2185         hammer_knote(ap->a_vp, NOTE_ATTRIB);
2186         lwkt_reltoken(&hmp->fs_token);
2187         return (0);
2188 }
2189
2190 /*
2191  * hammer_vop_setattr { vp, vap, cred }
2192  */
2193 static
2194 int
2195 hammer_vop_setattr(struct vop_setattr_args *ap)
2196 {
2197         struct hammer_transaction trans;
2198         struct hammer_inode *ip;
2199         struct vattr *vap;
2200         hammer_mount_t hmp;
2201         int modflags;
2202         int error;
2203         int truncating;
2204         int blksize;
2205         int kflags;
2206 #if 0
2207         int64_t aligned_size;
2208 #endif
2209         u_int32_t flags;
2210
2211         vap = ap->a_vap;
2212         ip = ap->a_vp->v_data;
2213         modflags = 0;
2214         kflags = 0;
2215         hmp = ip->hmp;
2216
2217         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2218                 return(EROFS);
2219         if (ip->flags & HAMMER_INODE_RO)
2220                 return (EROFS);
2221         if (hammer_nohistory(ip) == 0 &&
2222             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2223                 return (error);
2224         }
2225
2226         lwkt_gettoken(&hmp->fs_token);
2227         hammer_start_transaction(&trans, hmp);
2228         ++hammer_stats_file_iopsw;
2229         error = 0;
2230
2231         if (vap->va_flags != VNOVAL) {
2232                 flags = ip->ino_data.uflags;
2233                 error = vop_helper_setattr_flags(&flags, vap->va_flags,
2234                                          hammer_to_unix_xid(&ip->ino_data.uid),
2235                                          ap->a_cred);
2236                 if (error == 0) {
2237                         if (ip->ino_data.uflags != flags) {
2238                                 ip->ino_data.uflags = flags;
2239                                 ip->ino_data.ctime = trans.time;
2240                                 modflags |= HAMMER_INODE_DDIRTY;
2241                                 kflags |= NOTE_ATTRIB;
2242                         }
2243                         if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2244                                 error = 0;
2245                                 goto done;
2246                         }
2247                 }
2248                 goto done;
2249         }
2250         if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2251                 error = EPERM;
2252                 goto done;
2253         }
2254         if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
2255                 mode_t cur_mode = ip->ino_data.mode;
2256                 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2257                 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2258                 uuid_t uuid_uid;
2259                 uuid_t uuid_gid;
2260
2261                 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
2262                                          ap->a_cred,
2263                                          &cur_uid, &cur_gid, &cur_mode);
2264                 if (error == 0) {
2265                         hammer_guid_to_uuid(&uuid_uid, cur_uid);
2266                         hammer_guid_to_uuid(&uuid_gid, cur_gid);
2267                         if (bcmp(&uuid_uid, &ip->ino_data.uid,
2268                                  sizeof(uuid_uid)) ||
2269                             bcmp(&uuid_gid, &ip->ino_data.gid,
2270                                  sizeof(uuid_gid)) ||
2271                             ip->ino_data.mode != cur_mode
2272                         ) {
2273                                 ip->ino_data.uid = uuid_uid;
2274                                 ip->ino_data.gid = uuid_gid;
2275                                 ip->ino_data.mode = cur_mode;
2276                                 ip->ino_data.ctime = trans.time;
2277                                 modflags |= HAMMER_INODE_DDIRTY;
2278                         }
2279                         kflags |= NOTE_ATTRIB;
2280                 }
2281         }
2282         while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
2283                 switch(ap->a_vp->v_type) {
2284                 case VREG:
2285                         if (vap->va_size == ip->ino_data.size)
2286                                 break;
2287
2288                         /*
2289                          * Log the operation if in fast-fsync mode or if
2290                          * there are unterminated redo write records present.
2291                          *
2292                          * The second check is needed so the recovery code
2293                          * properly truncates write redos even if the nominal
2294                          * REDO operation is turned off due to excessive
2295                          * writes, because the related records might be
2296                          * destroyed and never lay down a TERM_WRITE.
2297                          */
2298                         if ((ip->flags & HAMMER_INODE_REDO) ||
2299                             (ip->flags & HAMMER_INODE_RDIRTY)) {
2300                                 error = hammer_generate_redo(&trans, ip,
2301                                                              vap->va_size,
2302                                                              HAMMER_REDO_TRUNC,
2303                                                              NULL, 0);
2304                         }
2305                         blksize = hammer_blocksize(vap->va_size);
2306
2307                         /*
2308                          * XXX break atomicity; we can deadlock the backend
2309                          * if we do not release the lock.  Probably not a
2310                          * big deal here.
2311                          */
2312                         if (vap->va_size < ip->ino_data.size) {
2313                                 nvtruncbuf(ap->a_vp, vap->va_size,
2314                                            blksize,
2315                                            hammer_blockoff(vap->va_size));
2316                                 truncating = 1;
2317                                 kflags |= NOTE_WRITE;
2318                         } else {
2319                                 nvextendbuf(ap->a_vp,
2320                                             ip->ino_data.size,
2321                                             vap->va_size,
2322                                             hammer_blocksize(ip->ino_data.size),
2323                                             hammer_blocksize(vap->va_size),
2324                                             hammer_blockoff(ip->ino_data.size),
2325                                             hammer_blockoff(vap->va_size),
2326                                             0);
2327                                 truncating = 0;
2328                                 kflags |= NOTE_WRITE | NOTE_EXTEND;
2329                         }
2330                         ip->ino_data.size = vap->va_size;
2331                         ip->ino_data.mtime = trans.time;
2332                         /* XXX safe to use SDIRTY instead of DDIRTY here? */
2333                         modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2334
2335                         /*
2336                          * On-media truncation is cached in the inode until
2337                          * the inode is synchronized.  We must immediately
2338                          * handle any frontend records.
2339                          */
2340                         if (truncating) {
2341                                 hammer_ip_frontend_trunc(ip, vap->va_size);
2342 #ifdef DEBUG_TRUNCATE
2343                                 if (HammerTruncIp == NULL)
2344                                         HammerTruncIp = ip;
2345 #endif
2346                                 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2347                                         ip->flags |= HAMMER_INODE_TRUNCATED;
2348                                         ip->trunc_off = vap->va_size;
2349 #ifdef DEBUG_TRUNCATE
2350                                         if (ip == HammerTruncIp)
2351                                         kprintf("truncate1 %016llx\n",
2352                                                 (long long)ip->trunc_off);
2353 #endif
2354                                 } else if (ip->trunc_off > vap->va_size) {
2355                                         ip->trunc_off = vap->va_size;
2356 #ifdef DEBUG_TRUNCATE
2357                                         if (ip == HammerTruncIp)
2358                                         kprintf("truncate2 %016llx\n",
2359                                                 (long long)ip->trunc_off);
2360 #endif
2361                                 } else {
2362 #ifdef DEBUG_TRUNCATE
2363                                         if (ip == HammerTruncIp)
2364                                         kprintf("truncate3 %016llx (ignored)\n",
2365                                                 (long long)vap->va_size);
2366 #endif
2367                                 }
2368                         }
2369
2370 #if 0
2371                         /*
2372                          * When truncating, nvtruncbuf() may have cleaned out
2373                          * a portion of the last block on-disk in the buffer
2374                          * cache.  We must clean out any frontend records
2375                          * for blocks beyond the new last block.
2376                          */
2377                         aligned_size = (vap->va_size + (blksize - 1)) &
2378                                        ~(int64_t)(blksize - 1);
2379                         if (truncating && vap->va_size < aligned_size) {
2380                                 aligned_size -= blksize;
2381                                 hammer_ip_frontend_trunc(ip, aligned_size);
2382                         }
2383 #endif
2384                         break;
2385                 case VDATABASE:
2386                         if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2387                                 ip->flags |= HAMMER_INODE_TRUNCATED;
2388                                 ip->trunc_off = vap->va_size;
2389                         } else if (ip->trunc_off > vap->va_size) {
2390                                 ip->trunc_off = vap->va_size;
2391                         }
2392                         hammer_ip_frontend_trunc(ip, vap->va_size);
2393                         ip->ino_data.size = vap->va_size;
2394                         ip->ino_data.mtime = trans.time;
2395                         modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2396                         kflags |= NOTE_ATTRIB;
2397                         break;
2398                 default:
2399                         error = EINVAL;
2400                         goto done;
2401                 }
2402                 break;
2403         }
2404         if (vap->va_atime.tv_sec != VNOVAL) {
2405                 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
2406                 modflags |= HAMMER_INODE_ATIME;
2407                 kflags |= NOTE_ATTRIB;
2408         }
2409         if (vap->va_mtime.tv_sec != VNOVAL) {
2410                 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
2411                 modflags |= HAMMER_INODE_MTIME;
2412                 kflags |= NOTE_ATTRIB;
2413         }
2414         if (vap->va_mode != (mode_t)VNOVAL) {
2415                 mode_t   cur_mode = ip->ino_data.mode;
2416                 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2417                 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2418
2419                 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2420                                          cur_uid, cur_gid, &cur_mode);
2421                 if (error == 0 && ip->ino_data.mode != cur_mode) {
2422                         ip->ino_data.mode = cur_mode;
2423                         ip->ino_data.ctime = trans.time;
2424                         modflags |= HAMMER_INODE_DDIRTY;
2425                         kflags |= NOTE_ATTRIB;
2426                 }
2427         }
2428 done:
2429         if (error == 0)
2430                 hammer_modify_inode(&trans, ip, modflags);
2431         hammer_done_transaction(&trans);
2432         hammer_knote(ap->a_vp, kflags);
2433         lwkt_reltoken(&hmp->fs_token);
2434         return (error);
2435 }
2436
2437 /*
2438  * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2439  */
2440 static
2441 int
2442 hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
2443 {
2444         struct hammer_transaction trans;
2445         struct hammer_inode *dip;
2446         struct hammer_inode *nip;
2447         hammer_record_t record;
2448         struct nchandle *nch;
2449         hammer_mount_t hmp;
2450         int error;
2451         int bytes;
2452
2453         ap->a_vap->va_type = VLNK;
2454
2455         nch = ap->a_nch;
2456         dip = VTOI(ap->a_dvp);
2457         hmp = dip->hmp;
2458
2459         if (dip->flags & HAMMER_INODE_RO)
2460                 return (EROFS);
2461         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
2462                 return (error);
2463
2464         /*
2465          * Create a transaction to cover the operations we perform.
2466          */
2467         lwkt_gettoken(&hmp->fs_token);
2468         hammer_start_transaction(&trans, hmp);
2469         ++hammer_stats_file_iopsw;
2470
2471         /*
2472          * Create a new filesystem object of the requested type.  The
2473          * returned inode will be referenced but not locked.
2474          */
2475
2476         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
2477                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2478                                     NULL, &nip);
2479         if (error) {
2480                 hammer_done_transaction(&trans);
2481                 *ap->a_vpp = NULL;
2482                 lwkt_reltoken(&hmp->fs_token);
2483                 return (error);
2484         }
2485
2486         /*
2487          * Add a record representing the symlink.  symlink stores the link
2488          * as pure data, not a string, and is not \0 terminated.
2489          */
2490         if (error == 0) {
2491                 bytes = strlen(ap->a_target);
2492
2493                 if (bytes <= HAMMER_INODE_BASESYMLEN) {
2494                         bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2495                 } else {
2496                         record = hammer_alloc_mem_record(nip, bytes);
2497                         record->type = HAMMER_MEM_RECORD_GENERAL;
2498
2499                         record->leaf.base.localization = nip->obj_localization +
2500                                                          HAMMER_LOCALIZE_MISC;
2501                         record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2502                         record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2503                         record->leaf.data_len = bytes;
2504                         KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2505                         bcopy(ap->a_target, record->data->symlink.name, bytes);
2506                         error = hammer_ip_add_record(&trans, record);
2507                 }
2508
2509                 /*
2510                  * Set the file size to the length of the link.
2511                  */
2512                 if (error == 0) {
2513                         nip->ino_data.size = bytes;
2514                         hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
2515                 }
2516         }
2517         if (error == 0)
2518                 error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
2519                                                 nch->ncp->nc_nlen, nip);
2520
2521         /*
2522          * Finish up.
2523          */
2524         if (error) {
2525                 hammer_rel_inode(nip, 0);
2526                 *ap->a_vpp = NULL;
2527         } else {
2528                 error = hammer_get_vnode(nip, ap->a_vpp);
2529                 hammer_rel_inode(nip, 0);
2530                 if (error == 0) {
2531                         cache_setunresolved(ap->a_nch);
2532                         cache_setvp(ap->a_nch, *ap->a_vpp);
2533                         hammer_knote(ap->a_dvp, NOTE_WRITE);
2534                 }
2535         }
2536         hammer_done_transaction(&trans);
2537         lwkt_reltoken(&hmp->fs_token);
2538         return (error);
2539 }
2540
2541 /*
2542  * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2543  */
2544 static
2545 int
2546 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
2547 {
2548         struct hammer_transaction trans;
2549         struct hammer_inode *dip;
2550         hammer_mount_t hmp;
2551         int error;
2552
2553         dip = VTOI(ap->a_dvp);
2554         hmp = dip->hmp;
2555
2556         if (hammer_nohistory(dip) == 0 &&
2557             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
2558                 return (error);
2559         }
2560
2561         lwkt_gettoken(&hmp->fs_token);
2562         hammer_start_transaction(&trans, hmp);
2563         ++hammer_stats_file_iopsw;
2564         error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
2565                                 ap->a_cred, ap->a_flags, -1);
2566         hammer_done_transaction(&trans);
2567         lwkt_reltoken(&hmp->fs_token);
2568
2569         return (error);
2570 }
2571
2572 /*
2573  * hammer_vop_ioctl { vp, command, data, fflag, cred }
2574  */
2575 static
2576 int
2577 hammer_vop_ioctl(struct vop_ioctl_args *ap)
2578 {
2579         struct hammer_inode *ip = ap->a_vp->v_data;
2580         hammer_mount_t hmp = ip->hmp;
2581         int error;
2582
2583         ++hammer_stats_file_iopsr;
2584         lwkt_gettoken(&hmp->fs_token);
2585         error = hammer_ioctl(ip, ap->a_command, ap->a_data,
2586                              ap->a_fflag, ap->a_cred);
2587         lwkt_reltoken(&hmp->fs_token);
2588         return (error);
2589 }
2590
2591 static
2592 int
2593 hammer_vop_mountctl(struct vop_mountctl_args *ap)
2594 {
2595         static const struct mountctl_opt extraopt[] = {
2596                 { HMNT_NOHISTORY,       "nohistory" },
2597                 { HMNT_MASTERID,        "master" },
2598                 { 0, NULL}
2599
2600         };
2601         struct hammer_mount *hmp;
2602         struct mount *mp;
2603         int usedbytes;
2604         int error;
2605
2606         error = 0;
2607         usedbytes = 0;
2608         mp = ap->a_head.a_ops->head.vv_mount;
2609         KKASSERT(mp->mnt_data != NULL);
2610         hmp = (struct hammer_mount *)mp->mnt_data;
2611
2612         lwkt_gettoken(&hmp->fs_token);
2613
2614         switch(ap->a_op) {
2615         case MOUNTCTL_SET_EXPORT:
2616                 if (ap->a_ctllen != sizeof(struct export_args))
2617                         error = EINVAL;
2618                 else
2619                         error = hammer_vfs_export(mp, ap->a_op,
2620                                       (const struct export_args *)ap->a_ctl);
2621                 break;
2622         case MOUNTCTL_MOUNTFLAGS:
2623         {
2624                 /*
2625                  * Call standard mountctl VOP function
2626                  * so we get user mount flags.
2627                  */
2628                 error = vop_stdmountctl(ap);
2629                 if (error)
2630                         break;
2631
2632                 usedbytes = *ap->a_res;
2633
2634                 if (usedbytes > 0 && usedbytes < ap->a_buflen) {
2635                         usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
2636                                                     ap->a_buf,
2637                                                     ap->a_buflen - usedbytes,
2638                                                     &error);
2639                 }
2640
2641                 *ap->a_res += usedbytes;
2642                 break;
2643         }
2644         default:
2645                 error = vop_stdmountctl(ap);
2646                 break;
2647         }
2648         lwkt_reltoken(&hmp->fs_token);
2649         return(error);
2650 }
2651
2652 /*
2653  * hammer_vop_strategy { vp, bio }
2654  *
2655  * Strategy call, used for regular file read & write only.  Note that the
2656  * bp may represent a cluster.
2657  *
2658  * To simplify operation and allow better optimizations in the future,
2659  * this code does not make any assumptions with regards to buffer alignment
2660  * or size.
2661  */
2662 static
2663 int
2664 hammer_vop_strategy(struct vop_strategy_args *ap)
2665 {
2666         struct buf *bp;
2667         int error;
2668
2669         bp = ap->a_bio->bio_buf;
2670
2671         switch(bp->b_cmd) {
2672         case BUF_CMD_READ:
2673                 error = hammer_vop_strategy_read(ap);
2674                 break;
2675         case BUF_CMD_WRITE:
2676                 error = hammer_vop_strategy_write(ap);
2677                 break;
2678         default:
2679                 bp->b_error = error = EINVAL;
2680                 bp->b_flags |= B_ERROR;
2681                 biodone(ap->a_bio);
2682                 break;
2683         }
2684
2685         /* hammer_dump_dedup_cache(((hammer_inode_t)ap->a_vp->v_data)->hmp); */
2686
2687         return (error);
2688 }
2689
2690 /*
2691  * Read from a regular file.  Iterate the related records and fill in the
2692  * BIO/BUF.  Gaps are zero-filled.
2693  *
2694  * The support code in hammer_object.c should be used to deal with mixed
2695  * in-memory and on-disk records.
2696  *
2697  * NOTE: Can be called from the cluster code with an oversized buf.
2698  *
2699  * XXX atime update
2700  */
2701 static
2702 int
2703 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2704 {
2705         struct hammer_transaction trans;
2706         struct hammer_inode *ip;
2707         struct hammer_inode *dip;
2708         hammer_mount_t hmp;
2709         struct hammer_cursor cursor;
2710         hammer_base_elm_t base;
2711         hammer_off_t disk_offset;
2712         struct bio *bio;
2713         struct bio *nbio;
2714         struct buf *bp;
2715         int64_t rec_offset;
2716         int64_t ran_end;
2717         int64_t tmp64;
2718         int error;
2719         int boff;
2720         int roff;
2721         int n;
2722         int isdedupable;
2723
2724         bio = ap->a_bio;
2725         bp = bio->bio_buf;
2726         ip = ap->a_vp->v_data;
2727         hmp = ip->hmp;
2728
2729         /*
2730          * The zone-2 disk offset may have been set by the cluster code via
2731          * a BMAP operation, or else should be NOOFFSET.
2732          *
2733          * Checking the high bits for a match against zone-2 should suffice.
2734          *
2735          * In cases where a lot of data duplication is present it may be
2736          * more beneficial to drop through and double-buffer through the
2737          * device.
2738          */
2739         nbio = push_bio(bio);
2740         if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
2741             HAMMER_ZONE_LARGE_DATA) {
2742                 if (hammer_double_buffer == 0) {
2743                         lwkt_gettoken(&hmp->fs_token);
2744                         error = hammer_io_direct_read(hmp, nbio, NULL);
2745                         lwkt_reltoken(&hmp->fs_token);
2746                         return (error);
2747                 }
2748
2749                 /*
2750                  * Try to shortcut requests for double_buffer mode too.
2751                  * Since this mode runs through the device buffer cache,
2752                  * only compatible buffer sizes (meaning those generated
2753                  * by normal filesystem buffers) are legal.
2754                  */
2755                 if (hammer_live_dedup == 0 && (bp->b_flags & B_PAGING) == 0) {
2756                         error = hammer_io_indirect_read(hmp, nbio, NULL);
2757                         return (error);
2758                 }
2759         }
2760
2761         /*
2762          * Well, that sucked.  Do it the hard way.  If all the stars are
2763          * aligned we may still be able to issue a direct-read.
2764          */
2765         lwkt_gettoken(&hmp->fs_token);
2766         hammer_simple_transaction(&trans, hmp);
2767         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2768
2769         /*
2770          * Key range (begin and end inclusive) to scan.  Note that the keys
2771          * stored in the actual records represent BASE+LEN, not BASE.  The
2772          * first record containing bio_offset will have a key > bio_offset.
2773          */
2774         cursor.key_beg.localization = ip->obj_localization +
2775                                       HAMMER_LOCALIZE_MISC;
2776         cursor.key_beg.obj_id = ip->obj_id;
2777         cursor.key_beg.create_tid = 0;
2778         cursor.key_beg.delete_tid = 0;
2779         cursor.key_beg.obj_type = 0;
2780         cursor.key_beg.key = bio->bio_offset + 1;
2781         cursor.asof = ip->obj_asof;
2782         cursor.flags |= HAMMER_CURSOR_ASOF;
2783
2784         cursor.key_end = cursor.key_beg;
2785         KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2786 #if 0
2787         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
2788                 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2789                 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2790                 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2791         } else
2792 #endif
2793         {
2794                 ran_end = bio->bio_offset + bp->b_bufsize;
2795                 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2796                 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
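                      /*
                       * Clamp key_end.key to the maximum key if adding
                       * MAXPHYS to ran_end would overflow (tmp64 is used
                       * for the check, see the GCC-4 note below).
                       */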
2797                 tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
2798                 if (tmp64 < ran_end)
2799                         cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2800                 else
2801                         cursor.key_end.key = ran_end + MAXPHYS + 1;
2802         }
2803         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2804
2805         /*
2806          * Set NOSWAPCACHE for cursor data extraction if double buffering
2807          * is disabled, or if the file is not marked cacheable via chflags
2808          * and vm.swapcache_use_chflags is enabled.
2809          */
2810         if (hammer_double_buffer == 0 ||
2811             ((ap->a_vp->v_flag & VSWAPCACHE) == 0 &&
2812              vm_swapcache_use_chflags)) {
2813                 cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;
2814         }
2815
2816         error = hammer_ip_first(&cursor);
2817         boff = 0;
2818
2819         while (error == 0) {
2820                 /*
2821                  * Get the base file offset of the record.  The key for
2822                  * data records is (base + bytes) rather than (base).
2823                  */
2824                 base = &cursor.leaf->base;
2825                 rec_offset = base->key - cursor.leaf->data_len;
2826
2827                 /*
2828                  * Calculate the gap, if any, and zero-fill it.
2829                  *
2830                  * n is the offset of the start of the record versus our
2831                  * current seek offset in the bio.
2832                  */
2833                 n = (int)(rec_offset - (bio->bio_offset + boff));
2834                 if (n > 0) {
2835                         if (n > bp->b_bufsize - boff)
2836                                 n = bp->b_bufsize - boff;
2837                         bzero((char *)bp->b_data + boff, n);
2838                         boff += n;
2839                         n = 0;
2840                 }
2841
2842                 /*
2843                  * Calculate the data offset in the record and the number
2844                  * of bytes we can copy.
2845                  *
2846                  * There are two degenerate cases.  First, boff may already
2847                  * be at bp->b_bufsize.  Secondly, the data offset within
2848                  * the record may exceed the record's size.
2849                  */
2850                 roff = -n;
2851                 rec_offset += roff;
2852                 n = cursor.leaf->data_len - roff;
2853                 if (n <= 0) {
2854                         kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
2855                         n = 0;
2856                 } else if (n > bp->b_bufsize - boff) {
2857                         n = bp->b_bufsize - boff;
2858                 }
2859
2860                 /*
2861                  * Deal with cached truncations.  This cool bit of code
2862                  * allows truncate()/ftruncate() to avoid having to sync
2863                  * the file.
2864                  *
2865                  * If the frontend is truncated then all backend records are
2866                  * subject to the frontend's truncation.
2867                  *
2868                  * If the backend is truncated then backend records on-disk
2869                  * (but not in-memory) are subject to the backend's
2870                  * truncation.  In-memory records owned by the backend
2871                  * represent data written after the truncation point on the
2872                  * backend and must not be truncated.
2873                  *
2874                  * Truncate operations deal with frontend buffer cache
2875                  * buffers and frontend-owned in-memory records synchronously.
2876                  */
2877                 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2878                         if (hammer_cursor_ondisk(&cursor)/* ||
2879                             cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
2880                                 if (ip->trunc_off <= rec_offset)
2881                                         n = 0;
2882                                 else if (ip->trunc_off < rec_offset + n)
2883                                         n = (int)(ip->trunc_off - rec_offset);
2884                         }
2885                 }
2886                 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2887                         if (hammer_cursor_ondisk(&cursor)) {
2888                                 if (ip->sync_trunc_off <= rec_offset)
2889                                         n = 0;
2890                                 else if (ip->sync_trunc_off < rec_offset + n)
2891                                         n = (int)(ip->sync_trunc_off - rec_offset);
2892                         }
2893                 }
2894
2895                 /*
2896                  * Try to issue a direct read into our bio if possible,
2897                  * otherwise resolve the element data into a hammer_buffer
2898                  * and copy.
2899                  *
2900                  * The buffer on-disk should be zeroed past any real
2901                  * truncation point, but may not be for any synthesized
2902                  * truncation point from above.
2903                  *
2904                  * NOTE: disk_offset is only valid if the cursor data is
2905                  *       on-disk.
2906                  */
2907                 disk_offset = cursor.leaf->data_offset + roff;
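                      /*
                       * The copy can be bypassed only if the request covers
                       * the entire bp (boff == 0 and n spans the whole
                       * buffer), the record data is on-media, and the media
                       * offset is HAMMER buffer aligned.
                       */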
2908                 isdedupable = (boff == 0 && n == bp->b_bufsize &&
2909                                hammer_cursor_ondisk(&cursor) &&
2910                                ((int)disk_offset & HAMMER_BUFMASK) == 0);
2911
2912                 if (isdedupable && hammer_double_buffer == 0) {
2913                         /*
2914                          * Direct read case
2915                          */
2916                         KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2917                                  HAMMER_ZONE_LARGE_DATA);
2918                         nbio->bio_offset = disk_offset;
2919                         error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
2920                         if (hammer_live_dedup && error == 0)
2921                                 hammer_dedup_cache_add(ip, cursor.leaf);
2922                         goto done;
2923                 } else if (isdedupable) {
2924                         /*
2925                          * Async I/O case for reading from backing store
2926                          * and copying the data to the filesystem buffer.
2927                          * live-dedup has to verify the data anyway if it
2928                          * gets a hit later so we can just add the entry
2929                          * now.
2930                          */
2931                         KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2932                                  HAMMER_ZONE_LARGE_DATA);
2933                         nbio->bio_offset = disk_offset;
2934                         if (hammer_live_dedup)
2935                                 hammer_dedup_cache_add(ip, cursor.leaf);
2936                         error = hammer_io_indirect_read(hmp, nbio, cursor.leaf);
2937                         goto done;
2938                 } else if (n) {
2939                         error = hammer_ip_resolve_data(&cursor);
2940                         if (error == 0) {
2941                                 if (hammer_live_dedup && isdedupable)
2942                                         hammer_dedup_cache_add(ip, cursor.leaf);
2943                                 bcopy((char *)cursor.data + roff,
2944                                       (char *)bp->b_data + boff, n);
2945                         }
2946                 }
2947                 if (error)
2948                         break;
2949
2950                 /*
2951                  * We have to be sure that the only elements added to the
2952                  * dedup cache are those which are already on-media.
2953                  */
2954                 if (hammer_live_dedup && hammer_cursor_ondisk(&cursor))
2955                         hammer_dedup_cache_add(ip, cursor.leaf);
2956
2957                 /*
2958                  * Iterate until we have filled the request.
2959                  */
2960                 boff += n;
2961                 if (boff == bp->b_bufsize)
2962                         break;
2963                 error = hammer_ip_next(&cursor);
2964         }
2965
2966         /*
2967          * There may have been a gap after the last record
2968          */
2969         if (error == ENOENT)
2970                 error = 0;
2971         if (error == 0 && boff != bp->b_bufsize) {
2972                 KKASSERT(boff < bp->b_bufsize);
2973                 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2974                 /* boff = bp->b_bufsize; */
2975         }
2976
2977         /*
2978          * Disallow swapcache operation on the vnode buffer if double
2979          * buffering is enabled, the swapcache will get the data via
2980          * the block device buffer.
2981          */
2982         if (hammer_double_buffer)
2983                 bp->b_flags |= B_NOTMETA;
2984
2985         /*
2986          * Cleanup
2987          */
2988         bp->b_resid = 0;
2989         bp->b_error = error;
2990         if (error)
2991                 bp->b_flags |= B_ERROR;
2992         biodone(ap->a_bio);
2993
2994 done:
2995         /*
2996          * Cache the b-tree node for the last data read in cache[1].
2997          *
2998          * If we hit the file EOF then also cache the node in the
2999  * governing directory's cache[3]; it will be used to initialize
3000          * the inode's cache[1] for any inodes looked up via the directory.
3001          *
3002          * This doesn't reduce disk accesses since the B-Tree chain is
3003          * likely cached, but it does reduce cpu overhead when looking
3004          * up file offsets for cpdup/tar/cpio style iterations.
3005          */
3006         if (cursor.node)
3007                 hammer_cache_node(&ip->cache[1], cursor.node);
3008         if (ran_end >= ip->ino_data.size) {
3009                 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
3010                                         ip->obj_asof, ip->obj_localization);
3011                 if (dip) {
3012                         hammer_cache_node(&dip->cache[3], cursor.node);
3013                         hammer_rel_inode(dip, 0);
3014                 }
3015         }
3016         hammer_done_cursor(&cursor);
3017         hammer_done_transaction(&trans);
3018         lwkt_reltoken(&hmp->fs_token);
3019         return(error);
3020 }
3021
3022 /*
3023  * BMAP operation - used to support cluster_read() only.
3024  *
3025  * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
3026  *
3027  * This routine may return EOPNOTSUPP if the operation is not supported for
3028  * the specified offset.  The contents of the pointer arguments do not
3029  * need to be initialized in that case. 
3030  *
3031  * If a disk address is available and properly aligned return 0 with 
3032  * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
3033  * to the run-length relative to that offset.  Callers may assume that
3034  * *doffsetp is valid if 0 is returned even when *runp is small, so
3035  * return EOPNOTSUPP instead if the run is not sufficiently large.
3036  */
3037 static
3038 int
3039 hammer_vop_bmap(struct vop_bmap_args *ap)
3040 {
3041         struct hammer_transaction trans;
3042         struct hammer_inode *ip;
3043         hammer_mount_t hmp;
3044         struct hammer_cursor cursor;
3045         hammer_base_elm_t base;
3046         int64_t rec_offset;
3047         int64_t ran_end;
3048         int64_t tmp64;
3049         int64_t base_offset;
3050         int64_t base_disk_offset;
3051         int64_t last_offset;
3052         hammer_off_t last_disk_offset;
3053         hammer_off_t disk_offset;
3054         int     rec_len;
3055         int     error;
3056         int     blksize;
3057
3058         ++hammer_stats_file_iopsr;
3059         ip = ap->a_vp->v_data;
3060         hmp = ip->hmp;
3061
3062         /*
3063          * We can only BMAP regular files.  We can't BMAP database files,
3064          * directories, etc.
3065          */
3066         if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
3067                 return(EOPNOTSUPP);
3068
3069         /*
3070          * bmap is typically called with runp/runb both NULL when used
3071          * for writing.  We do not support BMAP for writing atm.
3072          */
3073         if (ap->a_cmd != BUF_CMD_READ)
3074                 return(EOPNOTSUPP);
3075
3076         /*
3077          * Scan the B-Tree to acquire blockmap addresses, then translate
3078          * to raw addresses.
3079          */
3080         lwkt_gettoken(&hmp->fs_token);
3081         hammer_simple_transaction(&trans, hmp);
3082 #if 0
3083         kprintf("bmap_beg %016llx ip->cache %p\n",
3084                 (long long)ap->a_loffset, ip->cache[1]);
3085 #endif
3086         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
3087
3088         /*
3089          * Key range (begin and end inclusive) to scan.  Note that the keys
3090          * stored in the actual records represent BASE+LEN, not BASE.  The
3091          * first record containing bio_offset will have a key > bio_offset.
3092          */
3093         cursor.key_beg.localization = ip->obj_localization +
3094                                       HAMMER_LOCALIZE_MISC;
3095         cursor.key_beg.obj_id = ip->obj_id;
3096         cursor.key_beg.create_tid = 0;
3097         cursor.key_beg.delete_tid = 0;
3098         cursor.key_beg.obj_type = 0;
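              /*
               * If the caller asked for a backwards run (a_runb), start the
               * scan up to MAXPHYS before the requested offset so the
               * backwards run length can be computed.
               */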
3099         if (ap->a_runb)
3100                 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
3101         else
3102                 cursor.key_beg.key = ap->a_loffset + 1;
3103         if (cursor.key_beg.key < 0)
3104                 cursor.key_beg.key = 0;
3105         cursor.asof = ip->obj_asof;
3106         cursor.flags |= HAMMER_CURSOR_ASOF;
3107
3108         cursor.key_end = cursor.key_beg;
3109         KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
3110
3111         ran_end = ap->a_loffset + MAXPHYS;
3112         cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
3113         cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
3114         tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
3115         if (tmp64 < ran_end)
3116                 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
3117         else
3118                 cursor.key_end.key = ran_end + MAXPHYS + 1;
3119
3120         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
3121
3122         error = hammer_ip_first(&cursor);
3123         base_offset = last_offset = 0;
3124         base_disk_offset = last_disk_offset = 0;
3125
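              /*
               * base_offset/base_disk_offset mark the start of the current
               * contiguous run, last_offset/last_disk_offset track where the
               * run currently ends as records are scanned.
               */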
3126         while (error == 0) {
3127                 /*
3128                  * Get the base file offset of the record.  The key for
3129                  * data records is (base + bytes) rather than (base).
3130                  *
3131                  * NOTE: rec_offset + rec_len may exceed the end-of-file.
3132                  * The extra bytes should be zero on-disk and the BMAP op
3133                  * should still be ok.
3134                  */
3135                 base = &cursor.leaf->base;
3136                 rec_offset = base->key - cursor.leaf->data_len;
3137                 rec_len    = cursor.leaf->data_len;
3138
3139                 /*
3140                  * Incorporate any cached truncation.
3141                  *
3142                  * NOTE: Modifications to rec_len based on synthesized
3143                  * truncation points remove the guarantee that any extended
3144                  * data on disk is zero (since the truncations may not have
3145                  * taken place on-media yet).
3146                  */
3147                 if (ip->flags & HAMMER_INODE_TRUNCATED) {
3148                         if (hammer_cursor_ondisk(&cursor) ||
3149                             cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
3150                                 if (ip->trunc_off <= rec_offset)
3151                                         rec_len = 0;
3152                                 else if (ip->trunc_off < rec_offset + rec_len)
3153                                         rec_len = (int)(ip->trunc_off - rec_offset);
3154                         }
3155                 }
3156                 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
3157                         if (hammer_cursor_ondisk(&cursor)) {
3158                                 if (ip->sync_trunc_off <= rec_offset)
3159                                         rec_len = 0;
3160                                 else if (ip->sync_trunc_off < rec_offset + rec_len)
3161                                         rec_len = (int)(ip->sync_trunc_off - rec_offset);
3162                         }
3163                 }
3164
3165                 /*
3166                  * Accumulate information.  If we have hit a discontiguous
3167                  * block, reset base_offset unless we are already beyond the
3168                  * requested offset.  If we are, that's it, we stop.
3169                  */
3170                 if (error)
3171                         break;
3172                 if (hammer_cursor_ondisk(&cursor)) {
3173                         disk_offset = cursor.leaf->data_offset;
3174                         if (rec_offset != last_offset ||
3175                             disk_offset != last_disk_offset) {
3176                                 if (rec_offset > ap->a_loffset)
3177                                         break;
3178                                 base_offset = rec_offset;
3179                                 base_disk_offset = disk_offset;
3180                         }
3181                         last_offset = rec_offset + rec_len;
3182                         last_disk_offset = disk_offset + rec_len;
3183
3184                         if (hammer_live_dedup)
3185                                 hammer_dedup_cache_add(ip, cursor.leaf);
3186                 }
3187                 
3188                 error = hammer_ip_next(&cursor);
3189         }
3190
3191 #if 0
3192         kprintf("BMAP %016llx:  %016llx - %016llx\n",
3193                 (long long)ap->a_loffset,
3194                 (long long)base_offset,
3195                 (long long)last_offset);
3196         kprintf("BMAP %16s:  %016llx - %016llx\n", "",
3197                 (long long)base_disk_offset,
3198                 (long long)last_disk_offset);
3199 #endif
3200
3201         if (cursor.node) {
3202                 hammer_cache_node(&ip->cache[1], cursor.node);
3203 #if 0
3204                 kprintf("bmap_end2 %016llx ip->cache %p\n",
3205                         (long long)ap->a_loffset, ip->cache[1]);
3206 #endif
3207         }
3208         hammer_done_cursor(&cursor);
3209         hammer_done_transaction(&trans);
3210         lwkt_reltoken(&hmp->fs_token);
3211
3212         /*
3213          * If we couldn't find any records or the records we did find were
3214          * all behind the requested offset, return failure.  A forward
3215          * truncation can leave a hole w/ no on-disk records.
3216          */
3217         if (last_offset == 0 || last_offset < ap->a_loffset)
3218                 return (EOPNOTSUPP);
3219
3220         /*
3221          * Figure out the block size at the requested offset and adjust
3222          * our limits so the cluster_read() does not create inappropriately
3223          * sized buffer cache buffers.
3224          */
3225         blksize = hammer_blocksize(ap->a_loffset);
3226         if (hammer_blocksize(base_offset) != blksize) {
3227                 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
3228         }
3229         if (last_offset != ap->a_loffset &&
3230             hammer_blocksize(last_offset - 1) != blksize) {
3231                 last_offset = hammer_blockdemarc(ap->a_loffset,
3232                                                  last_offset - 1);
3233         }
3234
3235         /*
3236          * Returning EOPNOTSUPP simply prevents the direct-IO optimization
3237          * from occurring.
3238          */
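         /*
          * Translate the requested logical offset into the corresponding
          * disk offset within the run found above.
          */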
3239         disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
3240
3241         if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
3242                 /*
3243                  * Only large-data zones can be direct-IOd
3244                  */
3245                 error = EOPNOTSUPP;
3246         } else if ((disk_offset & HAMMER_BUFMASK) ||
3247                    (last_offset - ap->a_loffset) < blksize) {
3248                 /*
3249                  * doffsetp is not aligned or the forward run size does
3250                  * not cover a whole buffer, disallow the direct I/O.
3251                  */
3252                 error = EOPNOTSUPP;
3253         } else {
3254                 /*
3255                  * We're good.
3256                  */
3257                 *ap->a_doffsetp = disk_offset;
3258                 if (ap->a_runb) {
3259                         *ap->a_runb = ap->a_loffset - base_offset;
3260                         KKASSERT(*ap->a_runb >= 0);
3261                 }
3262                 if (ap->a_runp) {
3263                         *ap->a_runp = last_offset - ap->a_loffset;
3264                         KKASSERT(*ap->a_runp >= 0);
3265                 }
3266                 error = 0;
3267         }
3268         return(error);
3269 }
3270
3271 /*
3272  * Write to a regular file.   Because this is a strategy call the OS is
3273  * trying to actually get data onto the media.
3274  */
3275 static
3276 int
3277 hammer_vop_strategy_write(struct vop_strategy_args *ap)
3278 {
3279         hammer_record_t record;
3280         hammer_mount_t hmp;
3281         hammer_inode_t ip;
3282         struct bio *bio;
3283         struct buf *bp;
3284         int blksize;
3285         int bytes;
3286         int error;
3287
3288         bio = ap->a_bio;
3289         bp = bio->bio_buf;
3290         ip = ap->a_vp->v_data;
3291         hmp = ip->hmp;
3292
3293         blksize = hammer_blocksize(bio->bio_offset);
3294         KKASSERT(bp->b_bufsize == blksize);
3295
3296         if (ip->flags & HAMMER_INODE_RO) {
3297                 bp->b_error = EROFS;
3298                 bp->b_flags |= B_ERROR;
3299                 biodone(ap->a_bio);
3300                 return(EROFS);
3301         }
3302
3303         lwkt_gettoken(&hmp->fs_token);
3304
3305         /*
3306          * Disallow swapcache operation on the vnode buffer if double
3307          * buffering is enabled, the swapcache will get the data via
3308          * the block device buffer.
3309          */
3310         if (hammer_double_buffer)
3311                 bp->b_flags |= B_NOTMETA;
3312
3313         /*
3314          * Interlock with inode destruction (no in-kernel or directory
3315          * topology visibility).  If we queue new IO while trying to
3316          * destroy the inode we can deadlock the vtrunc call in
3317          * hammer_inode_unloadable_check().
3318          *
3319          * Besides, there's no point flushing a bp associated with an
3320          * inode that is being destroyed on-media and has no kernel
3321          * references.
3322          */
3323         if ((ip->flags | ip->sync_flags) &
3324             (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
3325                 bp->b_resid = 0;
3326                 biodone(ap->a_bio);
3327                 lwkt_reltoken(&hmp->fs_token);
3328                 return(0);
3329         }
3330
3331         /*
3332          * Reserve space and issue a direct-write from the front-end. 
3333          * NOTE: The direct_io code will hammer_bread/bcopy smaller
3334          * allocations.
3335          *
3336          * An in-memory record will be installed to reference the storage
3337          * until the flusher can get to it.
3338          *
3339          * Since we own the high level bio the front-end will not try to
3340          * do a direct-read until the write completes.
3341          *
3342          * NOTE: The only time we do not reserve a full-sized buffer's
3343          * worth of data is if the file is small.  We do not try to
3344          * allocate a fragment (from the small-data zone) at the end of
3345          * an otherwise large file as this can lead to wildly separated
3346          * data.
3347          */
3348         KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
3349         KKASSERT(bio->bio_offset < ip->ino_data.size);
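         /*
          * Reserve a full buffer unless the file is small enough to fit
          * entirely within the first half-buffer, in which case reserve
          * just the file size rounded up to a 16-byte boundary.
          */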
3350         if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
3351                 bytes = bp->b_bufsize;
3352         else
3353                 bytes = ((int)ip->ino_data.size + 15) & ~15;
3354
3355         record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
3356                                     bytes, &error);
3357
3358         /*
3359          * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
3360          * in hammer_vop_write().  We must flag the record so the proper
3361          * REDO_TERM_WRITE entry is generated during the flush.
3362          */
3363         if (record) {
3364                 if (bp->b_flags & B_VFSFLAG1) {
3365                         record->flags |= HAMMER_RECF_REDO;
3366                         bp->b_flags &= ~B_VFSFLAG1;
3367                 }
3368                 if (record->flags & HAMMER_RECF_DEDUPED) {
3369                         bp->b_resid = 0;
3370                         hammer_ip_replace_bulk(hmp, record);
3371                         biodone(ap->a_bio);
3372                 } else {
3373                         hammer_io_direct_write(hmp, bio, record);
3374                 }
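                      /*
                       * Kick off a flush of the inode if too many in-memory
                       * records have been reserved.
                       */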
3375                 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
3376                         hammer_flush_inode(ip, 0);
3377         } else {
3378                 bp->b_bio2.bio_offset = NOOFFSET;
3379                 bp->b_error = error;
3380                 bp->b_flags |= B_ERROR;
3381                 biodone(ap->a_bio);
3382         }
3383         lwkt_reltoken(&hmp->fs_token);
3384         return(error);
3385 }
3386
3387 /*
3388  * dounlink - disconnect a directory entry
3389  *
3390  * XXX whiteout support not really in yet
3391  */
3392 static int
3393 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
3394                 struct vnode *dvp, struct ucred *cred, 
3395                 int flags, int isdir)
3396 {
3397         struct namecache *ncp;
3398         hammer_inode_t dip;
3399         hammer_inode_t ip;
3400         hammer_mount_t hmp;
3401         struct hammer_cursor cursor;
3402         int64_t namekey;
3403         u_int32_t max_iterations;
3404         int nlen, error;
3405
3406         /*
3407          * Calculate the namekey and setup the key range for the scan.  This
3408          * works kinda like a chained hash table where the lower 32 bits
3409          * of the namekey synthesize the chain.
3410          *
3411          * The key range is inclusive of both key_beg and key_end.
3412          */
3413         dip = VTOI(dvp);
3414         ncp = nch->ncp;
3415         hmp = dip->hmp;
3416
3417         if (dip->flags & HAMMER_INODE_RO)
3418                 return (EROFS);
3419
3420         namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
3421                                            &max_iterations);
3422 retry:
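         /*
          * The directory scan restarts here when EDEADLK is returned,
          * which is used below for cursor deadlock avoidance.
          */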
3423         hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
3424         cursor.key_beg.localization = dip->obj_localization +
3425                                       hammer_dir_localization(dip);
3426         cursor.key_beg.obj_id = dip->obj_id;
3427         cursor.key_beg.key = namekey;
3428         cursor.key_beg.create_tid = 0;
3429         cursor.key_beg.delete_tid = 0;
3430         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
3431         cursor.key_beg.obj_type = 0;
3432
3433         cursor.key_end = cursor.key_beg;
3434         cursor.key_end.key += max_iterations;
3435         cursor.asof = dip->obj_asof;
3436         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
3437
3438         /*
3439          * Scan all matching records (the chain), locate the one matching
3440          * the requested path component.  info->last_error contains the
3441          * error code on search termination and could be 0, ENOENT, or
3442          * something else.
3443          *
3444          * The hammer_ip_*() functions merge in-memory records with on-disk
3445          * records for the purposes of the search.
3446          */
3447         error = hammer_ip_first(&cursor);
3448
3449         while (error == 0) {
3450                 error = hammer_ip_resolve_data(&cursor);
3451                 if (error)
3452                         break;
3453                 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
3454                 KKASSERT(nlen > 0);
3455                 if (ncp->nc_nlen == nlen &&
3456                     bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
3457                         break;
3458                 }
3459                 error = hammer_ip_next(&cursor);
3460         }
3461
3462         /*
3463          * If all is ok we have to get the inode so we can adjust nlinks.
3464          * To avoid a deadlock with the flusher we must release the inode
3465          * lock on the directory when acquiring the inode for the entry.
3466          *
3467          * If the target is a directory, it must be empty.
3468          */
3469         if (error == 0) {
3470                 hammer_unlock(&cursor.ip->lock);
3471                 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
3472                                       hmp->asof,
3473                                       cursor.data->entry.localization,
3474                                       0, &error);
3475                 hammer_lock_sh(&cursor.ip->lock);
3476                 if (error == ENOENT) {
3477                         kprintf("HAMMER: WARNING: Removing "
3478                                 "dirent w/missing inode \"%s\"\n"
3479                                 "\tobj_id = %016llx\n",
3480                                 ncp->nc_name,
3481                                 (long long)cursor.data->entry.obj_id);
3482                         error = 0;
3483                 }
3484
3485                 /*
3486                  * If isdir >= 0 we validate that the entry is or is not a
3487                  * directory.  If isdir < 0 we don't care.
3488                  */
3489                 if (error == 0 && isdir >= 0 && ip) {
3490                         if (isdir &&
3491                             ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3492                                 error = ENOTDIR;
3493                         } else if (isdir == 0 &&
3494                             ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3495                                 error = EISDIR;
3496                         }
3497                 }
3498
3499                 /*
3500                  * If we are trying to remove a directory the directory must
3501                  * be empty.
3502                  *
3503                  * The check directory code can loop and deadlock/retry.  Our
3504                  * own cursor's node locks must be released to avoid a 3-way
3505                  * deadlock with the flusher if the check directory code
3506                  * blocks.
3507                  *
3508                  * If any changes whatsoever have been made to the cursor
3509                  * set EDEADLK and retry.
3510                  *
3511                  * WARNING: See warnings in hammer_unlock_cursor()
3512                  *          function.
3513                  */
3514                 if (error == 0 && ip && ip->ino_data.obj_type ==
3515                                         HAMMER_OBJTYPE_DIRECTORY) {
3516                         hammer_unlock_cursor(&cursor);
3517                         error = hammer_ip_check_directory_empty(trans, ip);
3518                         hammer_lock_cursor(&cursor);
3519                         if (cursor.flags & HAMMER_CURSOR_RETEST) {
3520                                 kprintf("HAMMER: Warning: avoided deadlock "
3521                                         "on rmdir '%s'\n",
3522                                         ncp->nc_name);
3523                                 error = EDEADLK;
3524                         }
3525                 }
3526
3527                 /*
3528                  * Delete the directory entry.
3529                  *
3530                  * WARNING: hammer_ip_del_directory() may have to terminate
3531                  * the cursor to avoid a deadlock.  It is ok to call
3532                  * hammer_done_cursor() twice.
3533                  */
3534                 if (error == 0) {
3535                         error = hammer_ip_del_directory(trans, &cursor,
3536                                                         dip, ip);
3537                 }
3538                 hammer_done_cursor(&cursor);
3539                 if (error == 0) {
3540                         cache_setunresolved(nch);
3541                         cache_setvp(nch, NULL);
3542
3543                         /*
3544                          * NOTE: ip->vp, if non-NULL, cannot be directly
3545                          *       referenced without formally acquiring the
3546                          *       vp since the vp might have zero refs on it,
3547                          *       or in the middle of a reclaim, etc.
3548                          *
3549                          * NOTE: The cache_setunresolved() can rip the vp
3550                          *       out from under us since the vp may not have
3551                          *       any refs, in which case ip->vp will be NULL
3552                          *       from the outset.
3553                          */
3554                         while (ip && ip->vp) {
3555                                 struct vnode *vp;
3556
3557                                 error = hammer_get_vnode(ip, &vp);
3558                                 if (error == 0 && vp) {
3559                                         vn_unlock(vp);
3560                                         hammer_knote(ip->vp, NOTE_DELETE);
3561                                         cache_inval_vp(ip->vp, CINV_DESTROY);
3562                                         vrele(vp);
3563                                         break;
3564                                 }
3565                                 kprintf("Debug: HAMMER ip/vp race1 avoided\n");
3566                         }
3567                 }
3568                 if (ip)
3569                         hammer_rel_inode(ip, 0);
3570         } else {
3571                 hammer_done_cursor(&cursor);
3572         }
3573         if (error == EDEADLK)
3574                 goto retry;
3575
3576         return (error);
3577 }
3578
3579 /************************************************************************
3580  *                          FIFO AND SPECFS OPS                         *
3581  ************************************************************************
3582  *
3583  */
3584 static int
3585 hammer_vop_fifoclose (struct vop_close_args *ap)
3586 {
3587         /* XXX update itimes */
3588         return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3589 }
3590
3591 static int
3592 hammer_vop_fiforead (struct vop_read_args *ap)
3593 {
3594         int error;
3595
3596         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3597         /* XXX update access time */
3598         return (error);
3599 }
3600
3601 static int
3602 hammer_vop_fifowrite (struct vop_write_args *ap)
3603 {
3604         int error;
3605
3606         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3607         /* XXX update access time */
3608         return (error);
3609 }
3610
3611 static
3612 int
3613 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3614 {
3615         int error;
3616
3617         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3618         if (error)
3619                 error = hammer_vop_kqfilter(ap);
3620         return(error);
3621 }
3622
3623 /************************************************************************
3624  *                          KQFILTER OPS                                *
3625  ************************************************************************
3626  *
3627  */
3628 static void filt_hammerdetach(struct knote *kn);
3629 static int filt_hammerread(struct knote *kn, long hint);
3630 static int filt_hammerwrite(struct knote *kn, long hint);
3631 static int filt_hammervnode(struct knote *kn, long hint);
3632
3633 static struct filterops hammerread_filtops =
3634         { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammerread };
3635 static struct filterops hammerwrite_filtops =
3636         { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammerwrite };
3637 static struct filterops hammervnode_filtops =
3638         { FILTEROP_ISFD, NULL, filt_hammerdetach, filt_hammervnode };
3639
3640 static
3641 int
3642 hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3643 {
3644         struct vnode *vp = ap->a_vp;
3645         struct knote *kn = ap->a_kn;
3646
3647         switch (kn->kn_filter) {
3648         case EVFILT_READ:
3649                 kn->kn_fop = &hammerread_filtops;
3650                 break;
3651         case EVFILT_WRITE:
3652                 kn->kn_fop = &hammerwrite_filtops;
3653                 break;
3654         case EVFILT_VNODE:
3655                 kn->kn_fop = &hammervnode_filtops;
3656                 break;
3657         default:
3658                 return (EOPNOTSUPP);
3659         }
3660
3661         kn->kn_hook = (caddr_t)vp;
3662
3663         knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3664
3665         return(0);
3666 }
3667
3668 static void
3669 filt_hammerdetach(struct knote *kn)
3670 {
3671         struct vnode *vp = (void *)kn->kn_hook;
3672
3673         knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3674 }
3675
3676 static int
3677 filt_hammerread(struct knote *kn, long hint)
3678 {
3679         struct vnode *vp = (void *)kn->kn_hook;
3680         hammer_inode_t ip = VTOI(vp);
3681         hammer_mount_t hmp = ip->hmp;
3682         off_t off;
3683
3684         if (hint == NOTE_REVOKE) {
3685                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3686                 return(1);
3687         }
3688         lwkt_gettoken(&hmp->fs_token);  /* XXX use per-ip-token */
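              /*
               * Report the number of bytes available past the current file
               * offset, clamped so it fits in kn_data.
               */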
3689         off = ip->ino_data.size - kn->kn_fp->f_offset;
3690         kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
3691         lwkt_reltoken(&hmp->fs_token);
3692         if (kn->kn_sfflags & NOTE_OLDAPI)
3693                 return(1);
3694         return (kn->kn_data != 0);
3695 }
3696
3697 static int
3698 filt_hammerwrite(struct knote *kn, long hint)
3699 {
3700         if (hint == NOTE_REVOKE)
3701                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3702         kn->kn_data = 0;
3703         return (1);
3704 }
3705
3706 static int
3707 filt_hammervnode(struct knote *kn, long hint)
3708 {
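         /*
          * Latch any event bits the caller asked for; NOTE_REVOKE forces
          * EOF/NODATA.
          */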
3709         if (kn->kn_sfflags & hint)
3710                 kn->kn_fflags |= hint;
3711         if (hint == NOTE_REVOKE) {
3712                 kn->kn_flags |= (EV_EOF | EV_NODATA);
3713                 return (1);
3714         }
3715         return (kn->kn_fflags != 0);
3716 }
3717