/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/mountctl.h>
#include <sys/namecache.h>
#include <sys/buf2.h>
#include <vfs/fifofs/fifo.h>

#include "hammer.h"

/*
 * USERFS VNOPS
 */
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_markatime(struct vop_markatime_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_bmap(struct vop_bmap_args *ap);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);
static int hammer_vop_kqfilter (struct vop_kqfilter_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);
static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);

struct vop_ops hammer_vnode_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_getpages =         vop_stdgetpages,
        .vop_putpages =         vop_stdputpages,
        .vop_read =             hammer_vop_read,
        .vop_write =            hammer_vop_write,
        .vop_access =           hammer_vop_access,
        .vop_advlock =          hammer_vop_advlock,
        .vop_close =            hammer_vop_close,
        .vop_ncreate =          hammer_vop_ncreate,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_nresolve =         hammer_vop_nresolve,
        .vop_nlookupdotdot =    hammer_vop_nlookupdotdot,
        .vop_nlink =            hammer_vop_nlink,
        .vop_nmkdir =           hammer_vop_nmkdir,
        .vop_nmknod =           hammer_vop_nmknod,
        .vop_open =             hammer_vop_open,
        .vop_pathconf =         vop_stdpathconf,
        .vop_print =            hammer_vop_print,
        .vop_readdir =          hammer_vop_readdir,
        .vop_readlink =         hammer_vop_readlink,
        .vop_nremove =          hammer_vop_nremove,
        .vop_nrename =          hammer_vop_nrename,
        .vop_nrmdir =           hammer_vop_nrmdir,
        .vop_markatime =        hammer_vop_markatime,
        .vop_setattr =          hammer_vop_setattr,
        .vop_bmap =             hammer_vop_bmap,
        .vop_strategy =         hammer_vop_strategy,
        .vop_nsymlink =         hammer_vop_nsymlink,
        .vop_nwhiteout =        hammer_vop_nwhiteout,
        .vop_ioctl =            hammer_vop_ioctl,
        .vop_mountctl =         hammer_vop_mountctl,
        .vop_kqfilter =         hammer_vop_kqfilter
};

struct vop_ops hammer_spec_vops = {
        .vop_default =          vop_defaultop,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             vop_stdnoread,
        .vop_write =            vop_stdnowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_close,
        .vop_markatime =        hammer_vop_markatime,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
        .vop_default =          fifo_vnoperate,
        .vop_fsync =            hammer_vop_fsync,
        .vop_read =             hammer_vop_fiforead,
        .vop_write =            hammer_vop_fifowrite,
        .vop_access =           hammer_vop_access,
        .vop_close =            hammer_vop_fifoclose,
        .vop_markatime =        hammer_vop_markatime,
        .vop_getattr =          hammer_vop_getattr,
        .vop_inactive =         hammer_vop_inactive,
        .vop_reclaim =          hammer_vop_reclaim,
        .vop_setattr =          hammer_vop_setattr,
        .vop_kqfilter =         hammer_vop_fifokqfilter
};
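
/*
 * Dispatch note: ops not listed in a table fall through to its
 * .vop_default entry: vop_defaultop for the regular and spec tables,
 * and fifo_vnoperate (the fifofs switch) for the fifo table.
 *
 * hammer_knote() below posts coalesced kqueue notes (e.g. NOTE_WRITE,
 * NOTE_EXTEND) against a vnode; a zero flags value is a no-op.
 */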

static __inline
void
hammer_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                           struct vnode *dvp, struct ucred *cred,
                           int flags, int isdir);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);

/*
 * hammer_vop_fsync { vp, waitfor }
 *
 * fsync() an inode to disk and wait for it to be completely committed
 * such that the information would not be undone if a crash occurred
 * after return.
 *
 * NOTE: HAMMER's fsync()'s are going to remain expensive until we implement
 *       a REDO log.  A sysctl is provided to relax HAMMER's fsync()
 *       operation.
 *
 *       Ultimately the combination of a REDO log and use of fast storage
 *       to front-end cluster caches will make fsync fast, but it ain't
 *       here yet.  And, in any case, we need real transactional
 *       all-or-nothing features which are not restricted to a single file.
 */
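/*
 * Reader's summary of the hammer_fsync_mode values handled in the
 * switch below (presumably exposed as the vfs.hammer.fsync_mode sysctl):
 *
 *      0 - no REDO, full synchronous flush
 *      1 - no REDO, full asynchronous flush
 *      2 - REDO semantics with synchronous flush
 *      3 - REDO semantics with relaxed asynchronous flush
 *      4 - ignore the fsync() system call entirely
 */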
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        hammer_mount_t hmp = ip->hmp;
        int waitfor = ap->a_waitfor;
        int mode;

        lwkt_gettoken(&hmp->fs_token);

        /*
         * Fsync rule relaxation (default is either full synchronous flush
         * or REDO semantics with synchronous flush).
         */
        if (ap->a_flags & VOP_FSYNC_SYSCALL) {
                switch(hammer_fsync_mode) {
                case 0:
mode0:
                        /* no REDO, full synchronous flush */
                        goto skip;
                case 1:
mode1:
                        /* no REDO, full asynchronous flush */
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        goto skip;
                case 2:
                        /* REDO semantics, synchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode0;
                        mode = HAMMER_FLUSH_UNDOS_AUTO;
                        break;
                case 3:
                        /* REDO semantics, relaxed asynchronous flush */
                        if (hmp->version < HAMMER_VOL_VERSION_FOUR)
                                goto mode1;
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                case 4:
                        /* ignore the fsync() system call */
                        lwkt_reltoken(&hmp->fs_token);
                        return(0);
                default:
                        /* we have to do something */
                        mode = HAMMER_FLUSH_UNDOS_RELAXED;
                        if (waitfor == MNT_WAIT)
                                waitfor = MNT_NOWAIT;
                        break;
                }

                /*
                 * Fast fsync only needs to flush the UNDO/REDO fifo if
                 * HAMMER_INODE_REDO is non-zero and the only modifications
                 * made to the file are writes or write-extends.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) &&
                    (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0) {
                        ++hammer_count_fsyncs;
                        hammer_flusher_flush_undos(hmp, mode);
                        ip->redo_count = 0;
                        if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
                                vclrisdirty(ip->vp);
                        lwkt_reltoken(&hmp->fs_token);
                        return(0);
                }

                /*
                 * REDO is enabled by fsync(), the idea being we really only
                 * want to lay down REDO records when programs are using
                 * fsync() heavily.  The first fsync() on the file starts
                 * the gravy train going and later fsync()s keep it hot by
                 * resetting the redo_count.
                 *
                 * We weren't running REDOs before now so we have to fall
                 * through and do a full fsync of what we have.
                 */
                if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
                    (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
                        ip->flags |= HAMMER_INODE_REDO;
                        ip->redo_count = 0;
                }
        }
skip:

        /*
         * Do a full flush sequence.
         *
         * Attempt to release the vnode while waiting for the inode to
         * finish flushing.  This can really mess up inactive->reclaim
         * sequences so only do it if the vnode is active.
         *
         * WARNING! The VX lock functions must be used.  vn_lock() will
         *          fail when this is part of a VOP_RECLAIM sequence.
         */
        ++hammer_count_fsyncs;
        vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        if (waitfor == MNT_WAIT) {
                int dorelock;

                if ((ap->a_vp->v_flag & VRECLAIMED) == 0) {
                        vx_unlock(ap->a_vp);
                        dorelock = 1;
                } else {
                        dorelock = 0;
                }
                hammer_wait_inode(ip);
                if (dorelock)
                        vx_lock(ap->a_vp);
        }
        if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
                vclrisdirty(ip->vp);
        lwkt_reltoken(&hmp->fs_token);
        return (ip->error);
}

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 *
 * MPSAFE (the cache-hit path does not require fs_token)
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        off_t offset;
        struct buf *bp;
        struct uio *uio;
        int error;
        int n;
        int seqcount;
        int ioseqcount;
        int blksize;
        int bigread;
        int got_trans;
        size_t resid;

        if (ap->a_vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        got_trans = 0;
        uio = ap->a_uio;

        /*
         * Attempt to shortcut directly to the VM object using lwbufs.
         * This is much faster than instantiating buffer cache buffers.
         */
        resid = uio->uio_resid;
        error = vop_helper_read_shortcut(ap);
        hammer_stats_file_read += resid - uio->uio_resid;
        if (error)
                return (error);
        if (uio->uio_resid == 0)
                goto finished;

        /*
         * Allow the UIO's size to override the sequential heuristic.
         */
        blksize = hammer_blocksize(uio->uio_offset);
        seqcount = (uio->uio_resid + (MAXBSIZE - 1)) / MAXBSIZE;
        ioseqcount = (ap->a_ioflag >> 16);
        if (seqcount < ioseqcount)
                seqcount = ioseqcount;
        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         */
        bigread = (uio->uio_resid > 100 * 1024 * 1024);

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         *
         * XXX Temporary hack, delay the start transaction while we remain
         *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
         *     locked-shared.
         */
        while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
                int64_t base_offset;
                int64_t file_limit;

                blksize = hammer_blocksize(uio->uio_offset);
                offset = (int)uio->uio_offset & (blksize - 1);
                base_offset = uio->uio_offset - offset;
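
                /*
                 * Worked example of the masking above (illustrative
                 * numbers, assuming a 16KB small-file block size):
                 * uio_offset 20000 -> offset 3616, base_offset 16384,
                 * and at most blksize - offset bytes move this pass.
                 */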

                if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
                        break;

                /*
                 * MPSAFE
                 */
                bp = getblk(ap->a_vp, base_offset, blksize, 0, 0);
                if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == B_CACHE) {
                        bp->b_flags &= ~B_AGE;
                        error = 0;
                        goto skip;
                }
                if (ap->a_ioflag & IO_NRDELAY) {
                        bqrelse(bp);
                        return (EWOULDBLOCK);
                }

                /*
                 * MPUNSAFE
                 */
                if (got_trans == 0) {
                        hammer_start_transaction(&trans, ip->hmp);
                        got_trans = 1;
                }

                /*
                 * NOTE: A valid bp has already been acquired, but was not
                 *       B_CACHE.
                 */
                if (hammer_cluster_enable) {
                        /*
                         * Use file_limit to prevent cluster_read() from
                         * creating buffers of the wrong block size past
                         * the demarc.
                         */
                        file_limit = ip->ino_data.size;
                        if (base_offset < HAMMER_XDEMARC &&
                            file_limit > HAMMER_XDEMARC) {
                                file_limit = HAMMER_XDEMARC;
                        }
                        error = cluster_readx(ap->a_vp,
                                             file_limit, base_offset,
                                             blksize, B_NOTMETA,
                                             uio->uio_resid,
                                             seqcount * MAXBSIZE,
                                             &bp);
                } else {
                        error = breadnx(ap->a_vp, base_offset,
                                        blksize, B_NOTMETA,
                                        NULL, NULL, 0, &bp);
                }
                if (error) {
                        brelse(bp);
                        break;
                }
skip:
                if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IOISSUED)) {
                        hdkprintf("zone2_offset %016jx read file %016jx@%016jx\n",
                                (intmax_t)bp->b_bio2.bio_offset,
                                (intmax_t)ip->obj_id,
                                (intmax_t)bp->b_loffset);
                }
                bp->b_flags &= ~B_IOISSUED;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;

                n = blksize - offset;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > ip->ino_data.size - uio->uio_offset)
                        n = (int)(ip->ino_data.size - uio->uio_offset);

                /*
                 * Set B_AGE, data has a lower priority than meta-data.
                 *
                 * Use a hold/unlock/drop sequence to run the uiomove
                 * with the buffer unlocked, avoiding deadlocks against
                 * read()s on mmap()'d spaces.
                 */
                bp->b_flags |= B_AGE;
                error = uiomovebp(bp, (char *)bp->b_data + offset, n, uio);
                bqrelse(bp);

                if (error)
                        break;
                hammer_stats_file_read += n;
        }

finished:

        /*
         * Try to update the atime with just the inode lock for maximum
         * concurrency.  If we can't shortcut it we have to get the full
         * blown transaction.
         */
        if (got_trans == 0 && hammer_update_atime_quick(ip) < 0) {
                hammer_start_transaction(&trans, ip->hmp);
                got_trans = 1;
        }

        if (got_trans) {
                if ((ip->flags & HAMMER_INODE_RO) == 0 &&
                    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
                        lwkt_gettoken(&hmp->fs_token);
                        ip->ino_data.atime = trans.time;
                        hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
                        hammer_done_transaction(&trans);
                        lwkt_reltoken(&hmp->fs_token);
                } else {
                        hammer_done_transaction(&trans);
                }
        }
        return (error);
}

/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t ip;
        hammer_mount_t hmp;
        thread_t td;
        struct vnode *vp;
        struct uio *uio;
        int offset;
        off_t base_offset;
        int64_t cluster_eof;
        struct buf *bp;
        int kflags;
        int error;
        int n;
        int flags;
        int seqcount;
        int bigwrite;

        vp = ap->a_vp;
        if (vp->v_type != VREG)
                return (EINVAL);
        ip = VTOI(ap->a_vp);
        hmp = ip->hmp;
        error = 0;
        kflags = 0;
        seqcount = ap->a_ioflag >> 16;

        if (ip->flags & HAMMER_INODE_RO)
                return (EROFS);

        /*
         * Create a transaction to cover the operations we perform.
         */
        hammer_start_transaction(&trans, hmp);
        uio = ap->a_uio;

        /*
         * Use v_lastwrite_ts if file not open for writing
         * (i.e. a late msync)
         */
        if (uio->uio_segflg == UIO_NOCOPY) {
                if (vp->v_flag & VLASTWRITETS) {
                        trans.time = vp->v_lastwrite_ts.tv_sec * 1000000 +
                                     vp->v_lastwrite_ts.tv_nsec / 1000;
                } else {
                        trans.time = ip->ino_data.mtime;
                }
        } else {
                vclrflags(vp, VLASTWRITETS);
        }

        /*
         * Check append mode
         */
        if (ap->a_ioflag & IO_APPEND)
                uio->uio_offset = ip->ino_data.size;

        /*
         * Check for illegal write offsets.  Valid range is 0...2^63-1.
         *
         * NOTE: the base_offset assignment is required to work around what
         * I consider to be a GCC-4 optimization bug.
         */
        if (uio->uio_offset < 0) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
        base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
        if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
                hammer_done_transaction(&trans);
                return (EFBIG);
        }
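
        /*
         * Note: a wrap past 2^63-1 in offset + resid shows up above as a
         * nonzero resid whose end offset fails to advance past uio_offset,
         * which is why that combination is rejected with EFBIG.
         */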

        if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
            base_offset > td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                hammer_done_transaction(&trans);
                lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
                return (EFBIG);
        }

        /*
         * If reading or writing a huge amount of data we have to break
         * atomicity and allow the operation to be interrupted by a signal
         * or it can DOS the machine.
         *
         * Preset redo_count so we stop generating REDOs earlier if the
         * limit is exceeded.
         *
         * redo_count is heuristic, SMP races are ok
         */
        bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
        if ((ip->flags & HAMMER_INODE_REDO) &&
            ip->redo_count < hammer_limit_redo) {
                ip->redo_count += uio->uio_resid;
        }

        /*
         * Access the data typically in HAMMER_BUFSIZE blocks via the
         * buffer cache, but HAMMER may use a variable block size based
         * on the offset.
         */
        while (uio->uio_resid > 0) {
                int fixsize = 0;
                int blksize;
                int blkmask;
                int trivial;
                int endofblk;
                off_t nsize;

                if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
                        break;
                if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
                        break;

                blksize = hammer_blocksize(uio->uio_offset);

                /*
                 * Control the number of pending records associated with
                 * this inode.  If too many have accumulated start a
                 * flush.  Try to maintain a pipeline with the flusher.
                 *
                 * NOTE: It is possible for other sources to grow the
                 *       records but not necessarily issue another flush,
                 *       so use a timeout and ensure that a re-flush occurs.
                 */
                if (ip->rsv_recs >= hammer_limit_inode_recs) {
                        lwkt_gettoken(&hmp->fs_token);
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        while (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
                                ip->flags |= HAMMER_INODE_RECSW;
                                tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
                                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                        }
                        lwkt_reltoken(&hmp->fs_token);
                }

                /*
                 * Do not allow HAMMER to blow out the buffer cache.  Very
                 * large UIOs can lockout other processes due to bwillwrite()
                 * mechanics.
                 *
                 * The hammer inode is not locked during these operations.
                 * The vnode is locked which can interfere with the pageout
                 * daemon for non-UIO_NOCOPY writes but should not interfere
                 * with the buffer cache.  Even so, we cannot afford to
                 * allow the pageout daemon to build up too many dirty buffer
                 * cache buffers.
                 *
                 * Only call this if we aren't being recursively called from
                 * a virtual disk device (vn), else we may deadlock.
                 */
                if ((ap->a_ioflag & IO_RECURSE) == 0)
                        bwillwrite(blksize);

                /*
                 * Calculate the blocksize at the current offset and figure
                 * out how much we can actually write.
                 */
                blkmask = blksize - 1;
                offset = (int)uio->uio_offset & blkmask;
                base_offset = uio->uio_offset & ~(int64_t)blkmask;
                n = blksize - offset;
                if (n > uio->uio_resid) {
                        n = uio->uio_resid;
                        endofblk = 0;
                } else {
                        endofblk = 1;
                }
                nsize = uio->uio_offset + n;
                if (nsize > ip->ino_data.size) {
                        if (uio->uio_offset > ip->ino_data.size)
                                trivial = 0;
                        else
                                trivial = 1;
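                        /*
                         * trivial is nonzero when the write begins at or
                         * before the old EOF, i.e. the extended region is
                         * covered by this write; nvextendbuf() can then
                         * presumably skip zero-filling an intervening gap.
                         */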
                        nvextendbuf(ap->a_vp,
                                    ip->ino_data.size,
                                    nsize,
                                    hammer_blocksize(ip->ino_data.size),
                                    hammer_blocksize(nsize),
                                    hammer_blockoff(ip->ino_data.size),
                                    hammer_blockoff(nsize),
                                    trivial);
                        fixsize = 1;
                        kflags |= NOTE_EXTEND;
                }

                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ap->a_vp, base_offset,
                                              blksize, &bp);
                        }
                } else if (offset == 0 && uio->uio_resid >= blksize) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else if (base_offset >= ip->ino_data.size) {
                        /*
                         * If the base offset of the buffer is beyond the
                         * file EOF, we don't have to issue a read.
                         */
                        bp = getblk(ap->a_vp, base_offset,
                                    blksize, GETBLK_BHEAVY, 0);
                        vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         */
                        error = bread(ap->a_vp, base_offset, blksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }
                if (error == 0)
                        error = uiomovebp(bp, bp->b_data + offset, n, uio);

                lwkt_gettoken(&hmp->fs_token);

                /*
                 * Generate REDO records if enabled and redo_count will not
                 * exceed the limit.
                 *
                 * If redo_count exceeds the limit we stop generating records
                 * and clear HAMMER_INODE_REDO.  This will cause the next
                 * fsync() to do a full meta-data sync instead of just an
                 * UNDO/REDO fifo update.
                 *
                 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
                 * will still be tracked.  The tracks will be terminated
                 * when the related meta-data (including possible data
                 * modifications which are not tracked via REDO) is
                 * flushed.
                 */
                if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
                        if (ip->redo_count < hammer_limit_redo) {
                                bp->b_flags |= B_VFSFLAG1;
                                error = hammer_generate_redo(&trans, ip,
                                                     base_offset + offset,
                                                     HAMMER_REDO_WRITE,
                                                     bp->b_data + offset,
                                                     (size_t)n);
                        } else {
                                ip->flags &= ~HAMMER_INODE_REDO;
                        }
                }

                /*
                 * If we screwed up we have to undo any VM size changes we
                 * made.
                 */
                if (error) {
                        brelse(bp);
                        if (fixsize) {
                                nvtruncbuf(ap->a_vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size),
                                          0);
                        }
                        lwkt_reltoken(&hmp->fs_token);
                        break;
                }
                kflags |= NOTE_WRITE;
                hammer_stats_file_write += n;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;
                if (ip->ino_data.size < uio->uio_offset) {
                        ip->ino_data.size = uio->uio_offset;
                        flags = HAMMER_INODE_SDIRTY;
                } else {
                        flags = 0;
                }
                ip->ino_data.mtime = trans.time;
                flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
                hammer_modify_inode(&trans, ip, flags);

                /*
                 * Once we dirty the buffer any cached zone-X offset
                 * becomes invalid.  HAMMER NOTE: no-history mode cannot
                 * allow overwriting the same data sector unless we
                 * provide UNDOs for the old data, which we don't.
                 */
                bp->b_bio2.bio_offset = NOOFFSET;

                lwkt_reltoken(&hmp->fs_token);

                /*
                 * Final buffer disposition.
                 *
                 * Because meta-data updates are deferred, HAMMER is
                 * especially sensitive to excessive bdwrite()s because
                 * the I/O stream is not broken up by disk reads.  So the
                 * buffer cache simply cannot keep up.
                 *
                 * WARNING!  blksize is variable.  cluster_write() is
                 *           expected to not blow up if it encounters
                 *           buffers that do not match the passed blksize.
                 *
                 * NOTE!  Hammer shouldn't need to bawrite()/cluster_write().
                 *        The ip->rsv_recs check should burst-flush the data.
                 *        If we queue it immediately the buf could be left
                 *        locked on the device queue for a very long time.
                 *
                 *        However, failing to flush a dirty buffer out when
                 *        issued from the pageout daemon can result in a low
                 *        memory deadlock against bio_page_alloc(), so we
                 *        have to bawrite() on IO_ASYNC as well.
                 *
                 * NOTE!  To avoid degenerate stalls due to mismatched block
                 *        sizes we only honor IO_DIRECT on the write which
                 *        abuts the end of the buffer.  However, we must
                 *        honor IO_SYNC in case someone is silly enough to
                 *        configure a HAMMER file as swap, or when HAMMER
                 *        is serving NFS (for commits).  Ick ick.
                 */
                bp->b_flags |= B_AGE;
                if (blksize == HAMMER_XBUFSIZE)
                        bp->b_flags |= B_CLUSTEROK;

                if (ap->a_ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
                        bawrite(bp);
                } else if (ap->a_ioflag & IO_ASYNC) {
                        bawrite(bp);
                } else if (hammer_cluster_enable &&
                           !(ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
                        if (base_offset < HAMMER_XDEMARC)
                                cluster_eof = hammer_blockdemarc(base_offset,
                                                         ip->ino_data.size);
                        else
                                cluster_eof = ip->ino_data.size;
                        cluster_write(bp, cluster_eof, blksize, seqcount);
                } else {
                        bdwrite(bp);
                }
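
                /*
                 * Recap of the ladder above: IO_SYNC -> bwrite(); IO_DIRECT
                 * at end-of-block or IO_ASYNC -> bawrite(); clusterable
                 * writes -> cluster_write(); everything else -> bdwrite().
                 */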
        }
        hammer_done_transaction(&trans);
        hammer_knote(ap->a_vp, kflags);

        return (error);
}

/*
 * hammer_vop_access { vp, mode, cred }
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        uid = hammer_to_unix_xid(&ip->ino_data.uid);
        gid = hammer_to_unix_xid(&ip->ino_data.gid);

        error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
                                  ip->ino_data.uflags);
        return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);

        return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 *
 * We can only sync-on-close for normal closes.  XXX disabled for now.
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
#if 0
        struct vnode *vp = ap->a_vp;
        hammer_inode_t ip = VTOI(vp);
        int waitfor;
        if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
                if (vn_islocked(vp) == LK_EXCLUSIVE &&
                    (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
                        if (ip->flags & HAMMER_INODE_CLOSESYNC)
                                waitfor = MNT_WAIT;
                        else
                                waitfor = MNT_NOWAIT;
                        ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
                                       HAMMER_INODE_CLOSEASYNC);
                        VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
                }
        }
#endif
        return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
        struct hammer_transaction trans;
        hammer_inode_t dip;
        hammer_inode_t nip;
        struct nchandle *nch;
        hammer_mount_t hmp;
        int error;

        nch = ap->a_nch;
        dip = VTOI(ap->a_dvp);
        hmp = dip->hmp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);
        if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
                return (error);

        /*
         * Create a transaction to cover the operations we perform.
         */
        lwkt_gettoken(&hmp->fs_token);
        hammer_start_transaction(&trans, hmp);

        /*
         * Create a new filesystem object of the requested type.  The
         * returned inode will be referenced and shared-locked to prevent
         * it from being moved to the flusher.
         */
        error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
                                    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
                                    NULL, &nip);
        if (error) {
                hkprintf("hammer_create_inode error %d\n", error);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
                lwkt_reltoken(&hmp->fs_token);
                return (error);
        }

        /*
         * Add the new filesystem object to the directory.  This will also
         * bump the inode's link count.
         */
        error = hammer_ip_add_direntry(&trans, dip,
                                        nch->ncp->nc_name, nch->ncp->nc_nlen,
                                        nip);
        if (error)
                hkprintf("hammer_ip_add_direntry error %d\n", error);

        /*
         * Finish up.
         */
        if (error) {
                hammer_rel_inode(nip, 0);
                hammer_done_transaction(&trans);
                *ap->a_vpp = NULL;
        } else {
                error = hammer_get_vnode(nip, ap->a_vpp);
                hammer_done_transaction(&trans);
                hammer_rel_inode(nip, 0);
                if (error == 0) {
                        cache_setunresolved(ap->a_nch);
                        cache_setvp(ap->a_nch, *ap->a_vpp);
                }
                hammer_knote(ap->a_dvp, NOTE_WRITE);
        }
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 *
 * MPSAFE - does not require fs_token
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
        hammer_inode_t ip = VTOI(ap->a_vp);
        struct vattr *vap = ap->a_vap;

        /*
         * We want the fsid to be different when accessing a filesystem
         * with different as-of's so programs like diff don't think
         * the files are the same.
         *
         * We also want the fsid to be the same when comparing snapshots,
         * or when comparing mirrors (which might be backed by different
         * physical devices).  HAMMER fsids are based on the PFS's
         * shared_uuid field.
         *
         * XXX there is a chance of collision here.  The va_fsid reported
         * by stat is different from the more involved fsid used in the
         * mount structure.
         */
        hammer_lock_sh(&ip->lock);
        vap->va_fsid = ip->pfsm->fsid_udev ^ (uint32_t)ip->obj_asof ^
                       (uint32_t)(ip->obj_asof >> 32);

        vap->va_fileid = ip->ino_leaf.base.obj_id;
        vap->va_mode = ip->ino_data.mode;
        vap->va_nlink = ip->ino_data.nlinks;
        vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
        vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->ino_data.size;

        /*
         * Special case for @@PFS softlinks.  The actual size of the
         * expanded softlink is "@@0x%016llx:%05d" == 26 bytes,
         * or for MAX_TID it is "@@-1:%05d" == 10 bytes.
         *
         * Note that the userspace hammer command does not allow users to
         * create a @@PFS softlink under another existing PFS (id != 0),
         * so the ip localization here for a @@PFS softlink is always 0.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
            ip->ino_data.size == 10 &&
            ip->obj_asof == HAMMER_MAX_TID &&
            ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
            strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
                if (hammer_is_pfs_slave(&ip->pfsm->pfsd))
                        vap->va_size = 26;
                else
                        vap->va_size = 10;
        }

        /*
         * We must provide a consistent atime and mtime for snapshots
         * so people can do a 'tar cf - ... | md5' on them and get
         * consistent results.
         */
        if (ip->flags & HAMMER_INODE_RO) {
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
        } else {
                hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
                hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
        }
        hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
        vap->va_flags = ip->ino_data.uflags;
        vap->va_gen = 1;        /* hammer inums are unique for all time */
        vap->va_blocksize = HAMMER_BUFSIZE;
        if (ip->ino_data.size >= HAMMER_XDEMARC) {
                vap->va_bytes = HAMMER_XBUFSIZE64_DOALIGN(ip->ino_data.size);
        } else if (ip->ino_data.size > HAMMER_HBUFSIZE) {
                vap->va_bytes = HAMMER_BUFSIZE64_DOALIGN(ip->ino_data.size);
        } else {
                vap->va_bytes = HAMMER_DATA_DOALIGN(ip->ino_data.size);
        }
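
        /*
         * The va_bytes tiers above mirror HAMMER's buffer sizing: files at
         * or past the HAMMER_XDEMARC demarcation round up to the extra-large
         * buffer size, mid-sized files to the standard buffer size, and
         * small files only to the data alignment.
         */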

        vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
        vap->va_filerev = 0;    /* XXX */
        vap->va_uid_uuid = ip->ino_data.uid;
        vap->va_gid_uuid = ip->ino_data.gid;
        vap->va_fsid_uuid = ip->hmp->fsid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        switch (ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                vap->va_rmajor = ip->ino_data.rmajor;
                vap->va_rminor = ip->ino_data.rminor;
                break;
        default:
                break;
        }
        hammer_unlock(&ip->lock);
        return(0);
}

/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
        struct hammer_transaction trans;
        struct namecache *ncp;
        hammer_mount_t hmp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        hammer_tid_t asof;
        struct hammer_cursor cursor;
        struct vnode *vp;
        int64_t namekey;
        int error;
        int i;
        int nlen;
        int flags;
        int ispfs;
        int64_t obj_id;
        uint32_t localization;
        uint32_t max_iterations;

        /*
         * Misc initialization, plus handle as-of name extensions.  Look for
         * the '@@' extension.  Note that as-of files and directories cannot
         * be modified.
         */
        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        asof = dip->obj_asof;
        localization = dip->obj_localization;   /* for code consistency */
        nlen = ncp->nc_nlen;
        flags = dip->flags & HAMMER_INODE_RO;
        ispfs = 0;
        hmp = dip->hmp;

        lwkt_gettoken(&hmp->fs_token);
        hammer_simple_transaction(&trans, hmp);

        for (i = 0; i < nlen; ++i) {
                if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
                        error = hammer_str_to_tid(ncp->nc_name + i + 2,
                                                  &ispfs, &asof, &localization);
                        if (error != 0) {
                                i = nlen;
                                break;
                        }
                        if (asof != HAMMER_MAX_TID)
                                flags |= HAMMER_INODE_RO;
                        break;
                }
        }
        nlen = i;
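
        /*
         * Example: resolving "fubar@@0x00000001061a8ba0" leaves nlen == 5
         * ("fubar") with asof taken from the hex tid, while a bare as-of
         * extension with no leading name (nlen == 0) is handled by the
         * PFS/self dives below.  (Illustrative tid value only.)
         */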

        /*
         * If this is a PFS we dive into the PFS root inode
         */
        if (ispfs && nlen == 0) {
                ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
                                      asof, localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * If there is no path component the time extension is relative to dip.
         * e.g. "fubar/@@<snapshot>"
         *
         * "." is handled by the kernel, but ".@@<snapshot>" is not.
         * e.g. "fubar/.@@<snapshot>"
         *
         * ".." is handled by the kernel.  We do not currently handle
         * "..@@<snapshot>".
         */
        if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
                ip = hammer_get_inode(&trans, dip, dip->obj_id,
                                      asof, dip->obj_localization,
                                      flags, &error);
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
                goto done;
        }

        /*
         * Calculate the namekey and setup the key range for the scan.  This
         * works kinda like a chained hash table where the lower 32 bits
         * of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        namekey = hammer_direntry_namekey(dip, ncp->nc_name, nlen,
                                           &max_iterations);

        error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg.localization = dip->obj_localization |
                                      hammer_dir_localization(dip);
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key += max_iterations;
        cursor.asof = asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
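
        /*
         * Name hash collisions are absorbed by scanning the inclusive key
         * range [namekey, namekey + max_iterations]; the loop below still
         * byte-compares each candidate entry's name before accepting it.
         */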

        /*
         * Scan all matching records (the chain), locate the one matching
         * the requested path component.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        obj_id = 0;
        localization = HAMMER_DEF_LOCALIZATION;

        if (error == 0) {
                error = hammer_ip_first(&cursor);
                while (error == 0) {
                        error = hammer_ip_resolve_data(&cursor);
                        if (error)
                                break;
                        if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
                            bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                                obj_id = cursor.data->entry.obj_id;
                                localization = cursor.data->entry.localization;
                                break;
                        }
                        error = hammer_ip_next(&cursor);
                }
        }
        hammer_done_cursor(&cursor);

        /*
         * Lookup the obj_id.  This should always succeed.  If it does not
         * the filesystem may be damaged and we return a dummy inode.
         */
        if (error == 0) {
                ip = hammer_get_inode(&trans, dip, obj_id,
                                      asof, localization,
                                      flags, &error);
                if (error == ENOENT) {
                        hkprintf("WARNING: Missing inode for dirent \"%s\"\n"
                                "\tobj_id = %016jx, asof=%016jx, lo=%08x\n",
                                ncp->nc_name,
                                (intmax_t)obj_id, (intmax_t)asof,
                                localization);
                        error = 0;
                        ip = hammer_get_dummy_inode(&trans, dip, obj_id,
                                                    asof, localization,
                                                    flags, &error);
                }
                if (error == 0) {
                        error = hammer_get_vnode(ip, &vp);
                        hammer_rel_inode(ip, 0);
                } else {
                        vp = NULL;
                }
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                        vrele(vp);
                }
        } else if (error == ENOENT) {
                cache_setvp(ap->a_nch, NULL);
        }
done:
        hammer_done_transaction(&trans);
        lwkt_reltoken(&hmp->fs_token);
        return (error);
}

/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 indicates that we are at the root.
 *
1310  * NOTE: as-of sequences are not linked into the directory structure.  If
1311          * we are at the root with a different asof than the mount point, reload
1312          * the same directory with the mount point's asof.  I'm not sure what this
1313  * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
1314  * get confused, but it hasn't been tested.
1315  */
1316 static
1317 int
1318 hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1319 {
1320         struct hammer_transaction trans;
1321         hammer_inode_t dip;
1322         hammer_inode_t ip;
1323         hammer_mount_t hmp;
1324         int64_t parent_obj_id;
1325         uint32_t parent_obj_localization;
1326         hammer_tid_t asof;
1327         int error;
1328
1329         dip = VTOI(ap->a_dvp);
1330         asof = dip->obj_asof;
1331         hmp = dip->hmp;
1332
1333         /*
1334          * Who's our parent?  This could be the root of a pseudo-filesystem
1335          * whose parent is in another localization domain.
1336          */
1337         lwkt_gettoken(&hmp->fs_token);
1338         parent_obj_id = dip->ino_data.parent_obj_id;
1339         if (dip->obj_id == HAMMER_OBJID_ROOT)
1340                 parent_obj_localization = HAMMER_DEF_LOCALIZATION;
1341         else
1342                 parent_obj_localization = dip->obj_localization;
1343
1344         /*
1345          * dip is probably a PFS root when dip->ino_data.parent_obj_id is 0.
1346          */
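             /*
              * Note (illustrative): in the historical-root case below we
              * re-enter the same directory at the mount's asof and hand
              * back a synthetic ".." name of the form "0x%016jx" (18
              * characters plus a NUL, hence the 19 byte allocation).
              */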
1347         if (parent_obj_id == 0) {
1348                 if (dip->obj_id == HAMMER_OBJID_ROOT &&
1349                    asof != hmp->asof) {
1350                         parent_obj_id = dip->obj_id;
1351                         asof = hmp->asof;
1352                         *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
1353                         ksnprintf(*ap->a_fakename, 19, "0x%016jx",
1354                                   (intmax_t)dip->obj_asof);
1355                 } else {
1356                         *ap->a_vpp = NULL;
1357                         lwkt_reltoken(&hmp->fs_token);
1358                         return ENOENT;
1359                 }
1360         }
1361
1362         hammer_simple_transaction(&trans, hmp);
1363
1364         ip = hammer_get_inode(&trans, dip, parent_obj_id,
1365                               asof, parent_obj_localization,
1366                               dip->flags, &error);
1367         if (ip) {
1368                 error = hammer_get_vnode(ip, ap->a_vpp);
1369                 hammer_rel_inode(ip, 0);
1370         } else {
1371                 *ap->a_vpp = NULL;
1372         }
1373         hammer_done_transaction(&trans);
1374         lwkt_reltoken(&hmp->fs_token);
1375         return (error);
1376 }
1377
1378 /*
1379  * hammer_vop_nlink { nch, dvp, vp, cred }
1380  */
1381 static
1382 int
1383 hammer_vop_nlink(struct vop_nlink_args *ap)
1384 {
1385         struct hammer_transaction trans;
1386         hammer_inode_t dip;
1387         hammer_inode_t ip;
1388         struct nchandle *nch;
1389         hammer_mount_t hmp;
1390         int error;
1391
1392         if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1393                 return(EXDEV);
1394
1395         nch = ap->a_nch;
1396         dip = VTOI(ap->a_dvp);
1397         ip = VTOI(ap->a_vp);
1398         hmp = dip->hmp;
1399
1400         if (dip->obj_localization != ip->obj_localization)
1401                 return(EXDEV);
1402
1403         if (dip->flags & HAMMER_INODE_RO)
1404                 return (EROFS);
1405         if (ip->flags & HAMMER_INODE_RO)
1406                 return (EROFS);
1407         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1408                 return (error);
1409
1410         /*
1411          * Create a transaction to cover the operations we perform.
1412          */
1413         lwkt_gettoken(&hmp->fs_token);
1414         hammer_start_transaction(&trans, hmp);
1415
1416         /*
1417          * Add the filesystem object to the directory.  Note that neither
1418          * dip nor ip is referenced or locked, but their vnodes are
1419          * referenced.  This function will bump the inode's link count.
1420          */
1421         error = hammer_ip_add_direntry(&trans, dip,
1422                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
1423                                         ip);
1424
1425         /*
1426          * Finish up.
1427          */
1428         if (error == 0) {
1429                 cache_setunresolved(nch);
1430                 cache_setvp(nch, ap->a_vp);
1431         }
1432         hammer_done_transaction(&trans);
1433         hammer_knote(ap->a_vp, NOTE_LINK);
1434         hammer_knote(ap->a_dvp, NOTE_WRITE);
1435         lwkt_reltoken(&hmp->fs_token);
1436         return (error);
1437 }
1438
1439 /*
1440  * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1441  *
1442  * The operating system has already ensured that the directory entry
1443  * does not exist and done all appropriate namespace locking.
1444  */
1445 static
1446 int
1447 hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
1448 {
1449         struct hammer_transaction trans;
1450         hammer_inode_t dip;
1451         hammer_inode_t nip;
1452         struct nchandle *nch;
1453         hammer_mount_t hmp;
1454         int error;
1455
1456         nch = ap->a_nch;
1457         dip = VTOI(ap->a_dvp);
1458         hmp = dip->hmp;
1459
1460         if (dip->flags & HAMMER_INODE_RO)
1461                 return (EROFS);
1462         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1463                 return (error);
1464
1465         /*
1466          * Create a transaction to cover the operations we perform.
1467          */
1468         lwkt_gettoken(&hmp->fs_token);
1469         hammer_start_transaction(&trans, hmp);
1470
1471         /*
1472          * Create a new filesystem object of the requested type.  The
1473          * returned inode will be referenced but not locked.
1474          */
1475         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1476                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1477                                     NULL, &nip);
1478         if (error) {
1479                 hammer_done_transaction(&trans);
1480                 *ap->a_vpp = NULL;
1481                 lwkt_reltoken(&hmp->fs_token);
1482                 return (error);
1483         }
1484         /*
1485          * Add the new filesystem object to the directory.  This will also
1486          * bump the inode's link count.
1487          */
1488         error = hammer_ip_add_direntry(&trans, dip,
1489                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
1490                                         nip);
1491         if (error)
1492                 hkprintf("hammer_mkdir (add) error %d\n", error);
1493
1494         /*
1495          * Finish up.
1496          */
1497         if (error) {
1498                 hammer_rel_inode(nip, 0);
1499                 *ap->a_vpp = NULL;
1500         } else {
1501                 error = hammer_get_vnode(nip, ap->a_vpp);
1502                 hammer_rel_inode(nip, 0);
1503                 if (error == 0) {
1504                         cache_setunresolved(ap->a_nch);
1505                         cache_setvp(ap->a_nch, *ap->a_vpp);
1506                 }
1507         }
1508         hammer_done_transaction(&trans);
1509         if (error == 0)
1510                 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1511         lwkt_reltoken(&hmp->fs_token);
1512         return (error);
1513 }
1514
1515 /*
1516  * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1517  *
1518  * The operating system has already ensured that the directory entry
1519  * does not exist and done all appropriate namespace locking.
1520  */
1521 static
1522 int
1523 hammer_vop_nmknod(struct vop_nmknod_args *ap)
1524 {
1525         struct hammer_transaction trans;
1526         hammer_inode_t dip;
1527         hammer_inode_t nip;
1528         struct nchandle *nch;
1529         hammer_mount_t hmp;
1530         int error;
1531
1532         nch = ap->a_nch;
1533         dip = VTOI(ap->a_dvp);
1534         hmp = dip->hmp;
1535
1536         if (dip->flags & HAMMER_INODE_RO)
1537                 return (EROFS);
1538         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1539                 return (error);
1540
1541         /*
1542          * Create a transaction to cover the operations we perform.
1543          */
1544         lwkt_gettoken(&hmp->fs_token);
1545         hammer_start_transaction(&trans, hmp);
1546
1547         /*
1548          * Create a new filesystem object of the requested type.  The
1549          * returned inode will be referenced but not locked.
1550          *
1551          * If mknod specifies a directory, a pseudo-fs is created.
1552          */
1553         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1554                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1555                                     NULL, &nip);
1556         if (error) {
1557                 hammer_done_transaction(&trans);
1558                 *ap->a_vpp = NULL;
1559                 lwkt_reltoken(&hmp->fs_token);
1560                 return (error);
1561         }
1562
1563         /*
1564          * Add the new filesystem object to the directory.  This will also
1565          * bump the inode's link count.
1566          */
1567         error = hammer_ip_add_direntry(&trans, dip,
1568                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
1569                                         nip);
1570
1571         /*
1572          * Finish up.
1573          */
1574         if (error) {
1575                 hammer_rel_inode(nip, 0);
1576                 *ap->a_vpp = NULL;
1577         } else {
1578                 error = hammer_get_vnode(nip, ap->a_vpp);
1579                 hammer_rel_inode(nip, 0);
1580                 if (error == 0) {
1581                         cache_setunresolved(ap->a_nch);
1582                         cache_setvp(ap->a_nch, *ap->a_vpp);
1583                 }
1584         }
1585         hammer_done_transaction(&trans);
1586         if (error == 0)
1587                 hammer_knote(ap->a_dvp, NOTE_WRITE);
1588         lwkt_reltoken(&hmp->fs_token);
1589         return (error);
1590 }
1591
1592 /*
1593  * hammer_vop_open { vp, mode, cred, fp }
1594  *
1595  * MPSAFE (does not require fs_token)
1596  */
1597 static
1598 int
1599 hammer_vop_open(struct vop_open_args *ap)
1600 {
1601         hammer_inode_t ip;
1602
1603         ip = VTOI(ap->a_vp);
1604
1605         if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
1606                 return (EROFS);
1607         return(vop_stdopen(ap));
1608 }
1609
1610 /*
1611  * hammer_vop_print { vp }
1612  */
1613 static
1614 int
1615 hammer_vop_print(struct vop_print_args *ap)
1616 {
1617         return EOPNOTSUPP;
1618 }
1619
1620 /*
1621  * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
1622  */
1623 static
1624 int
1625 hammer_vop_readdir(struct vop_readdir_args *ap)
1626 {
1627         struct hammer_transaction trans;
1628         struct hammer_cursor cursor;
1629         hammer_inode_t ip;
1630         hammer_mount_t hmp;
1631         struct uio *uio;
1632         hammer_base_elm_t base;
1633         int error;
1634         int cookie_index;
1635         int ncookies;
1636         off_t *cookies;
1637         off_t saveoff;
1638         int r;
1639         int dtype;
1640
1641         ip = VTOI(ap->a_vp);
1642         uio = ap->a_uio;
1643         saveoff = uio->uio_offset;
1644         hmp = ip->hmp;
1645
1646         if (ap->a_ncookies) {
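                     /*
                      * Rough sizing heuristic: assume each returned dirent
                      * consumes at least ~16 bytes of uio space, and clamp
                      * the cookie array to a sane maximum.
                      */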
1647                 ncookies = uio->uio_resid / 16 + 1;
1648                 if (ncookies > 1024)
1649                         ncookies = 1024;
1650                 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1651                 cookie_index = 0;
1652         } else {
1653                 ncookies = -1;
1654                 cookies = NULL;
1655                 cookie_index = 0;
1656         }
1657
1658         lwkt_gettoken(&hmp->fs_token);
1659         hammer_simple_transaction(&trans, hmp);
1660
1661         /*
1662          * Handle artificial entries
1663          *
1664          * It should be noted that the minimum value for a directory
1665          * hash key on-media is 0x0000000100000000, so we can use anything
1666          * less than that to represent our 'special' key space.
1667          */
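             /*
              * For example (illustrative): "." is emitted below at pseudo
              * key 0 and ".." at pseudo key 1, so a resumed uio_offset or
              * cookie below 0x0000000100000000 can never collide with a
              * real on-media directory entry.
              */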
1668         error = 0;
1669         if (saveoff == 0) {
1670                 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1671                 if (r)
1672                         goto done;
1673                 if (cookies)
1674                         cookies[cookie_index] = saveoff;
1675                 ++saveoff;
1676                 ++cookie_index;
1677                 if (cookie_index == ncookies)
1678                         goto done;
1679         }
1680         if (saveoff == 1) {
1681                 if (ip->ino_data.parent_obj_id) {
1682                         r = vop_write_dirent(&error, uio,
1683                                              ip->ino_data.parent_obj_id,
1684                                              DT_DIR, 2, "..");
1685                 } else {
1686                         r = vop_write_dirent(&error, uio,
1687                                              ip->obj_id, DT_DIR, 2, "..");
1688                 }
1689                 if (r)
1690                         goto done;
1691                 if (cookies)
1692                         cookies[cookie_index] = saveoff;
1693                 ++saveoff;
1694                 ++cookie_index;
1695                 if (cookie_index == ncookies)
1696                         goto done;
1697         }
1698
1699         /*
1700          * Key range (begin and end inclusive) to scan.  Directory keys
1701          * directly translate to a 64 bit 'seek' position.
1702          */
1703         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1704         cursor.key_beg.localization = ip->obj_localization |
1705                                       hammer_dir_localization(ip);
1706         cursor.key_beg.obj_id = ip->obj_id;
1707         cursor.key_beg.create_tid = 0;
1708         cursor.key_beg.delete_tid = 0;
1709         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1710         cursor.key_beg.obj_type = 0;
1711         cursor.key_beg.key = saveoff;
1712
1713         cursor.key_end = cursor.key_beg;
1714         cursor.key_end.key = HAMMER_MAX_KEY;
1715         cursor.asof = ip->obj_asof;
1716         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1717
1718         error = hammer_ip_first(&cursor);
1719
1720         while (error == 0) {
1721                 error = hammer_ip_resolve_data(&cursor);
1722                 if (error)
1723                         break;
1724                 base = &cursor.leaf->base;
1725                 saveoff = base->key;
1726                 KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
1727
1728                 if (base->obj_id != ip->obj_id)
1729                         hpanic("bad record at %p", cursor.node);
1730
1731                 dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
1732                 r = vop_write_dirent(
1733                              &error, uio, cursor.data->entry.obj_id,
1734                              dtype,
1735                              cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
1736                              (void *)cursor.data->entry.name);
1737                 if (r)
1738                         break;
1739                 ++saveoff;
1740                 if (cookies)
1741                         cookies[cookie_index] = base->key;
1742                 ++cookie_index;
1743                 if (cookie_index == ncookies)
1744                         break;
1745                 error = hammer_ip_next(&cursor);
1746         }
1747         hammer_done_cursor(&cursor);
1748
1749 done:
1750         hammer_done_transaction(&trans);
1751
1752         if (ap->a_eofflag)
1753                 *ap->a_eofflag = (error == ENOENT);
1754         uio->uio_offset = saveoff;
1755         if (error && cookie_index == 0) {
1756                 if (error == ENOENT)
1757                         error = 0;
1758                 if (cookies) {
1759                         kfree(cookies, M_TEMP);
1760                         *ap->a_ncookies = 0;
1761                         *ap->a_cookies = NULL;
1762                 }
1763         } else {
1764                 if (error == ENOENT)
1765                         error = 0;
1766                 if (cookies) {
1767                         *ap->a_ncookies = cookie_index;
1768                         *ap->a_cookies = cookies;
1769                 }
1770         }
1771         lwkt_reltoken(&hmp->fs_token);
1772         return(error);
1773 }
1774
1775 /*
1776  * hammer_vop_readlink { vp, uio, cred }
1777  */
1778 static
1779 int
1780 hammer_vop_readlink(struct vop_readlink_args *ap)
1781 {
1782         struct hammer_transaction trans;
1783         struct hammer_cursor cursor;
1784         hammer_inode_t ip;
1785         hammer_mount_t hmp;
1786         char buf[32];
1787         uint32_t localization;
1788         hammer_pseudofs_inmem_t pfsm;
1789         int error;
1790
1791         ip = VTOI(ap->a_vp);
1792         hmp = ip->hmp;
1793
1794         lwkt_gettoken(&hmp->fs_token);
1795
1796         /*
1797          * Shortcut if the symlink data was stuffed into ino_data.
1798          *
1799          * Also expand special "@@PFS%05d" softlinks (expansion only
1800          * occurs for non-historical (current) accesses made from the
1801          * primary filesystem).
1802          *
1803          * Note that the userspace hammer command does not allow users to
1804          * create a @@PFS softlink inside another existing PFS (id != 0),
1805          * so the ip localization here for a @@PFS softlink is always 0.
1806          */
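             /*
              * For example, a "@@PFS00005" softlink expands below to
              * "@@-1:00005" when PFS #5 is a master and to
              * "@@0x<sync_end_tid>:00005" when it is a slave.
              */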
1807         if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
1808                 char *ptr;
1809                 int bytes;
1810
1811                 ptr = ip->ino_data.ext.symlink;
1812                 bytes = (int)ip->ino_data.size;
1813                 if (bytes == 10 &&
1814                     ip->obj_asof == HAMMER_MAX_TID &&
1815                     ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
1816                     strncmp(ptr, "@@PFS", 5) == 0) {
1817                         hammer_simple_transaction(&trans, hmp);
1818                         bcopy(ptr + 5, buf, 5);
1819                         buf[5] = 0;
1820                         localization = pfs_to_lo(strtoul(buf, NULL, 10));
1821                         pfsm = hammer_load_pseudofs(&trans, localization,
1822                                                     &error);
1823                         if (error == 0) {
1824                                 if (hammer_is_pfs_slave(&pfsm->pfsd)) {
1825                                         /* vap->va_size == 26 */
1826                                         ksnprintf(buf, sizeof(buf),
1827                                                   "@@0x%016jx:%05d",
1828                                                   (intmax_t)pfsm->pfsd.sync_end_tid,
1829                                                   lo_to_pfs(localization));
1830                                 } else {
1831                                         /* vap->va_size == 10 */
1832                                         ksnprintf(buf, sizeof(buf),
1833                                                   "@@-1:%05d",
1834                                                   lo_to_pfs(localization));
1835                                 }
1836                                 ptr = buf;
1837                                 bytes = strlen(buf);
1838                         }
1839                         if (pfsm)
1840                                 hammer_rel_pseudofs(hmp, pfsm);
1841                         hammer_done_transaction(&trans);
1842                 }
1843                 error = uiomove(ptr, bytes, ap->a_uio);
1844                 lwkt_reltoken(&hmp->fs_token);
1845                 return(error);
1846         }
1847
1848         /*
1849          * Long version
1850          */
1851         hammer_simple_transaction(&trans, hmp);
1852         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1853
1854         /*
1855          * Key range to scan.  The symlink data is stored in a single
1856          * HAMMER_RECTYPE_FIX record keyed by HAMMER_FIXKEY_SYMLINK.
1857          */
1858         cursor.key_beg.localization = ip->obj_localization |
1859                                       HAMMER_LOCALIZE_MISC;
1860         cursor.key_beg.obj_id = ip->obj_id;
1861         cursor.key_beg.create_tid = 0;
1862         cursor.key_beg.delete_tid = 0;
1863         cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1864         cursor.key_beg.obj_type = 0;
1865         cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
1866         cursor.asof = ip->obj_asof;
1867         cursor.flags |= HAMMER_CURSOR_ASOF;
1868
1869         error = hammer_ip_lookup(&cursor);
1870         if (error == 0) {
1871                 error = hammer_ip_resolve_data(&cursor);
1872                 if (error == 0) {
1873                         KKASSERT(cursor.leaf->data_len >=
1874                                  HAMMER_SYMLINK_NAME_OFF);
1875                         error = uiomove(cursor.data->symlink.name,
1876                                         cursor.leaf->data_len -
1877                                                 HAMMER_SYMLINK_NAME_OFF,
1878                                         ap->a_uio);
1879                 }
1880         }
1881         hammer_done_cursor(&cursor);
1882         hammer_done_transaction(&trans);
1883         lwkt_reltoken(&hmp->fs_token);
1884         return(error);
1885 }
1886
1887 /*
1888  * hammer_vop_nremove { nch, dvp, cred }
1889  */
1890 static
1891 int
1892 hammer_vop_nremove(struct vop_nremove_args *ap)
1893 {
1894         struct hammer_transaction trans;
1895         hammer_inode_t dip;
1896         hammer_mount_t hmp;
1897         int error;
1898
1899         dip = VTOI(ap->a_dvp);
1900         hmp = dip->hmp;
1901
1902         if (hammer_nohistory(dip) == 0 &&
1903             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
1904                 return (error);
1905         }
1906
1907         lwkt_gettoken(&hmp->fs_token);
1908         hammer_start_transaction(&trans, hmp);
1909         error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
1910         hammer_done_transaction(&trans);
1911         if (error == 0)
1912                 hammer_knote(ap->a_dvp, NOTE_WRITE);
1913         lwkt_reltoken(&hmp->fs_token);
1914         return (error);
1915 }
1916
1917 /*
1918  * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1919  */
1920 static
1921 int
1922 hammer_vop_nrename(struct vop_nrename_args *ap)
1923 {
1924         struct hammer_transaction trans;
1925         struct namecache *fncp;
1926         struct namecache *tncp;
1927         hammer_inode_t fdip;
1928         hammer_inode_t tdip;
1929         hammer_inode_t ip;
1930         hammer_mount_t hmp;
1931         struct hammer_cursor cursor;
1932         int64_t namekey;
1933         uint32_t max_iterations;
1934         int nlen, error;
1935
1936         if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1937                 return(EXDEV);
1938         if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1939                 return(EXDEV);
1940
1941         fdip = VTOI(ap->a_fdvp);
1942         tdip = VTOI(ap->a_tdvp);
1943         fncp = ap->a_fnch->ncp;
1944         tncp = ap->a_tnch->ncp;
1945         ip = VTOI(fncp->nc_vp);
1946         KKASSERT(ip != NULL);
1947
1948         hmp = ip->hmp;
1949
1950         if (fdip->obj_localization != tdip->obj_localization)
1951                 return(EXDEV);
1952         if (fdip->obj_localization != ip->obj_localization)
1953                 return(EXDEV);
1954
1955         if (fdip->flags & HAMMER_INODE_RO)
1956                 return (EROFS);
1957         if (tdip->flags & HAMMER_INODE_RO)
1958                 return (EROFS);
1959         if (ip->flags & HAMMER_INODE_RO)
1960                 return (EROFS);
1961         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1962                 return (error);
1963
1964         lwkt_gettoken(&hmp->fs_token);
1965         hammer_start_transaction(&trans, hmp);
1966
1967         /*
1968          * Remove tncp from the target directory and then link ip as
1969          * tncp. XXX pass trans to dounlink
1970          *
1971          * Force the inode sync-time to match the transaction so it is
1972          * in-sync with the creation of the target directory entry.
1973          */
1974         error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
1975                                 ap->a_cred, 0, -1);
1976         if (error == 0 || error == ENOENT) {
1977                 error = hammer_ip_add_direntry(&trans, tdip,
1978                                                 tncp->nc_name, tncp->nc_nlen,
1979                                                 ip);
1980                 if (error == 0) {
1981                         ip->ino_data.parent_obj_id = tdip->obj_id;
1982                         ip->ino_data.ctime = trans.time;
1983                         hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
1984                 }
1985         }
1986         if (error)
1987                 goto failed; /* XXX */
1988
1989         /*
1990          * Locate the record in the originating directory and remove it.
1991          *
1992          * Calculate the namekey and set up the key range for the scan.  This
1993          * works kinda like a chained hash table where the lower 32 bits
1994          * of the namekey synthesize the chain.
1995          *
1996          * The key range is inclusive of both key_beg and key_end.
1997          */
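             /*
              * A minimal sketch of the idea (hypothetical pseudo-helpers,
              * for illustration only):
              *
              *      key = namekey(name);             <- low 32 bits pick the chain
              *      for (k = key; k <= key + max_iterations; ++k)
              *              if (name_at(k) == name)  <- walk the collision chain
              *                      return(k);
              *
              * The cursor scan below performs the same walk over B-Tree
              * elements in [key_beg, key_end] instead of hash slots.
              */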
1998         namekey = hammer_direntry_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
1999                                            &max_iterations);
2000 retry:
2001         hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
2002         cursor.key_beg.localization = fdip->obj_localization |
2003                                       hammer_dir_localization(fdip);
2004         cursor.key_beg.obj_id = fdip->obj_id;
2005         cursor.key_beg.key = namekey;
2006         cursor.key_beg.create_tid = 0;
2007         cursor.key_beg.delete_tid = 0;
2008         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2009         cursor.key_beg.obj_type = 0;
2010
2011         cursor.key_end = cursor.key_beg;
2012         cursor.key_end.key += max_iterations;
2013         cursor.asof = fdip->obj_asof;
2014         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2015
2016         /*
2017          * Scan all matching records (the chain) and locate the one matching
2018          * the requested path component.
2019          *
2020          * The hammer_ip_*() functions merge in-memory records with on-disk
2021          * records for the purposes of the search.
2022          */
2023         error = hammer_ip_first(&cursor);
2024         while (error == 0) {
2025                 if (hammer_ip_resolve_data(&cursor) != 0)
2026                         break;
2027                 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2028                 KKASSERT(nlen > 0);
2029                 if (fncp->nc_nlen == nlen &&
2030                     bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
2031                         break;
2032                 }
2033                 error = hammer_ip_next(&cursor);
2034         }
2035
2036         /*
2037          * If all is ok we have to get the inode so we can adjust nlinks.
2038          *
2039          * WARNING: hammer_ip_del_direntry() may have to terminate the
2040          * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
2041          * twice.
2042          */
2043         if (error == 0)
2044                 error = hammer_ip_del_direntry(&trans, &cursor, fdip, ip);
2045
2046         /*
2047          * XXX A deadlock here will break rename's atomicity for the purposes
2048          * of crash recovery.
2049          */
2050         if (error == EDEADLK) {
2051                 hammer_done_cursor(&cursor);
2052                 goto retry;
2053         }
2054
2055         /*
2056          * Cleanup and tell the kernel that the rename succeeded.
2057          *
2058          * NOTE: ip->vp, if non-NULL, cannot be directly referenced
2059          *       without formally acquiring the vp since the vp might
2060          *       have zero refs on it, or in the middle of a reclaim,
2061          *       etc.
2062          */
2063         hammer_done_cursor(&cursor);
2064         if (error == 0) {
2065                 cache_rename(ap->a_fnch, ap->a_tnch);
2066                 hammer_knote(ap->a_fdvp, NOTE_WRITE);
2067                 hammer_knote(ap->a_tdvp, NOTE_WRITE);
2068                 while (ip->vp) {
2069                         struct vnode *vp;
2070
2071                         error = hammer_get_vnode(ip, &vp);
2072                         if (error == 0 && vp) {
2073                                 vn_unlock(vp);
2074                                 hammer_knote(ip->vp, NOTE_RENAME);
2075                                 vrele(vp);
2076                                 break;
2077                         }
2078                         hdkprintf("ip/vp race2 avoided\n");
2079                 }
2080         }
2081
2082 failed:
2083         hammer_done_transaction(&trans);
2084         lwkt_reltoken(&hmp->fs_token);
2085         return (error);
2086 }
2087
2088 /*
2089  * hammer_vop_nrmdir { nch, dvp, cred }
2090  */
2091 static
2092 int
2093 hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
2094 {
2095         struct hammer_transaction trans;
2096         hammer_inode_t dip;
2097         hammer_mount_t hmp;
2098         int error;
2099
2100         dip = VTOI(ap->a_dvp);
2101         hmp = dip->hmp;
2102
2103         if (hammer_nohistory(dip) == 0 &&
2104             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2105                 return (error);
2106         }
2107
2108         lwkt_gettoken(&hmp->fs_token);
2109         hammer_start_transaction(&trans, hmp);
2110         error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
2111         hammer_done_transaction(&trans);
2112         if (error == 0)
2113                 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
2114         lwkt_reltoken(&hmp->fs_token);
2115         return (error);
2116 }
2117
2118 /*
2119  * hammer_vop_markatime { vp, cred }
2120  */
2121 static
2122 int
2123 hammer_vop_markatime(struct vop_markatime_args *ap)
2124 {
2125         struct hammer_transaction trans;
2126         hammer_inode_t ip;
2127         hammer_mount_t hmp;
2128
2129         ip = VTOI(ap->a_vp);
2130         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2131                 return (EROFS);
2132         if (ip->flags & HAMMER_INODE_RO)
2133                 return (EROFS);
2134         hmp = ip->hmp;
2135         if (hmp->mp->mnt_flag & MNT_NOATIME)
2136                 return (0);
2137         lwkt_gettoken(&hmp->fs_token);
2138         hammer_start_transaction(&trans, hmp);
2139
2140         ip->ino_data.atime = trans.time;
2141         hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
2142         hammer_done_transaction(&trans);
2143         hammer_knote(ap->a_vp, NOTE_ATTRIB);
2144         lwkt_reltoken(&hmp->fs_token);
2145         return (0);
2146 }
2147
2148 /*
2149  * hammer_vop_setattr { vp, vap, cred }
2150  */
2151 static
2152 int
2153 hammer_vop_setattr(struct vop_setattr_args *ap)
2154 {
2155         struct hammer_transaction trans;
2156         hammer_inode_t ip;
2157         struct vattr *vap;
2158         hammer_mount_t hmp;
2159         int modflags;
2160         int error;
2161         int truncating;
2162         int blksize;
2163         int kflags;
2164 #if 0
2165         int64_t aligned_size;
2166 #endif
2167         uint32_t flags;
2168
2169         vap = ap->a_vap;
2170         ip = ap->a_vp->v_data;
2171         modflags = 0;
2172         kflags = 0;
2173         hmp = ip->hmp;
2174
2175         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2176                 return(EROFS);
2177         if (ip->flags & HAMMER_INODE_RO)
2178                 return (EROFS);
2179         if (hammer_nohistory(ip) == 0 &&
2180             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2181                 return (error);
2182         }
2183
2184         lwkt_gettoken(&hmp->fs_token);
2185         hammer_start_transaction(&trans, hmp);
2186         error = 0;
2187
2188         if (vap->va_flags != VNOVAL) {
2189                 flags = ip->ino_data.uflags;
2190                 error = vop_helper_setattr_flags(&flags, vap->va_flags,
2191                                          hammer_to_unix_xid(&ip->ino_data.uid),
2192                                          ap->a_cred);
2193                 if (error == 0) {
2194                         if (ip->ino_data.uflags != flags) {
2195                                 ip->ino_data.uflags = flags;
2196                                 ip->ino_data.ctime = trans.time;
2197                                 modflags |= HAMMER_INODE_DDIRTY;
2198                                 kflags |= NOTE_ATTRIB;
2199                         }
2200                         if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2201                                 error = 0;
2202                                 goto done;
2203                         }
2204                 }
2205                 goto done;
2206         }
2207         if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2208                 error = EPERM;
2209                 goto done;
2210         }
2211         if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
2212                 mode_t cur_mode = ip->ino_data.mode;
2213                 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2214                 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2215                 hammer_uuid_t uuid_uid;
2216                 hammer_uuid_t uuid_gid;
2217
2218                 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
2219                                          ap->a_cred,
2220                                          &cur_uid, &cur_gid, &cur_mode);
2221                 if (error == 0) {
2222                         hammer_guid_to_uuid(&uuid_uid, cur_uid);
2223                         hammer_guid_to_uuid(&uuid_gid, cur_gid);
2224                         if (kuuid_compare(&uuid_uid, &ip->ino_data.uid) ||
2225                             kuuid_compare(&uuid_gid, &ip->ino_data.gid) ||
2226                             ip->ino_data.mode != cur_mode) {
2227                                 ip->ino_data.uid = uuid_uid;
2228                                 ip->ino_data.gid = uuid_gid;
2229                                 ip->ino_data.mode = cur_mode;
2230                                 ip->ino_data.ctime = trans.time;
2231                                 modflags |= HAMMER_INODE_DDIRTY;
2232                         }
2233                         kflags |= NOTE_ATTRIB;
2234                 }
2235         }
2236         while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
2237                 switch(ap->a_vp->v_type) {
2238                 case VREG:
2239                         if (vap->va_size == ip->ino_data.size)
2240                                 break;
2241
2242                         /*
2243                          * Log the operation if in fast-fsync mode or if
2244                          * there are unterminated redo write records present.
2245                          *
2246                          * The second check is needed so the recovery code
2247                          * properly truncates write redos even if nominal
2248          * REDO operations are turned off due to excessive
2249                          * writes, because the related records might be
2250                          * destroyed and never lay down a TERM_WRITE.
2251                          */
2252                         if ((ip->flags & HAMMER_INODE_REDO) ||
2253                             (ip->flags & HAMMER_INODE_RDIRTY)) {
2254                                 error = hammer_generate_redo(&trans, ip,
2255                                                              vap->va_size,
2256                                                              HAMMER_REDO_TRUNC,
2257                                                              NULL, 0);
2258                         }
2259                         blksize = hammer_blocksize(vap->va_size);
2260
2261                         /*
2262                          * XXX breaks atomicity; we can deadlock the backend
2263                          * if we do not release the lock.  Probably not a
2264                          * big deal here.
2265                          */
2266                         if (vap->va_size < ip->ino_data.size) {
2267                                 nvtruncbuf(ap->a_vp, vap->va_size,
2268                                            blksize,
2269                                            hammer_blockoff(vap->va_size),
2270                                            0);
2271                                 truncating = 1;
2272                                 kflags |= NOTE_WRITE;
2273                         } else {
2274                                 nvextendbuf(ap->a_vp,
2275                                             ip->ino_data.size,
2276                                             vap->va_size,
2277                                             hammer_blocksize(ip->ino_data.size),
2278                                             hammer_blocksize(vap->va_size),
2279                                             hammer_blockoff(ip->ino_data.size),
2280                                             hammer_blockoff(vap->va_size),
2281                                             0);
2282                                 truncating = 0;
2283                                 kflags |= NOTE_WRITE | NOTE_EXTEND;
2284                         }
2285                         ip->ino_data.size = vap->va_size;
2286                         ip->ino_data.mtime = trans.time;
2287                         /* XXX safe to use SDIRTY instead of DDIRTY here? */
2288                         modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2289                         vclrflags(ap->a_vp, VLASTWRITETS);
2290
2291                         /*
2292                          * On-media truncation is cached in the inode until
2293                          * the inode is synchronized.  We must immediately
2294                          * handle any frontend records.
2295                          */
2296                         if (truncating) {
2297                                 hammer_ip_frontend_trunc(ip, vap->va_size);
2298                                 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2299                                         ip->flags |= HAMMER_INODE_TRUNCATED;
2300                                         ip->trunc_off = vap->va_size;
2301                                         hammer_inode_dirty(ip);
2302                                 } else if (ip->trunc_off > vap->va_size) {
2303                                         ip->trunc_off = vap->va_size;
2304                                 }
2305                         }
2306
2307 #if 0
2308                         /*
2309                          * When truncating, nvtruncbuf() may have cleaned out
2310                          * a portion of the last block on-disk in the buffer
2311                          * cache.  We must clean out any frontend records
2312                          * for blocks beyond the new last block.
2313                          */
2314                         aligned_size = (vap->va_size + (blksize - 1)) &
2315                                        ~(int64_t)(blksize - 1);
2316                         if (truncating && vap->va_size < aligned_size) {
2317                                 aligned_size -= blksize;
2318                                 hammer_ip_frontend_trunc(ip, aligned_size);
2319                         }
2320 #endif
2321                         break;
2322                 case VDATABASE:
2323                         if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2324                                 ip->flags |= HAMMER_INODE_TRUNCATED;
2325                                 ip->trunc_off = vap->va_size;
2326                                 hammer_inode_dirty(ip);
2327                         } else if (ip->trunc_off > vap->va_size) {
2328                                 ip->trunc_off = vap->va_size;
2329                         }
2330                         hammer_ip_frontend_trunc(ip, vap->va_size);
2331                         ip->ino_data.size = vap->va_size;
2332                         ip->ino_data.mtime = trans.time;
2333                         modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2334                         vclrflags(ap->a_vp, VLASTWRITETS);
2335                         kflags |= NOTE_ATTRIB;
2336                         break;
2337                 default:
2338                         error = EINVAL;
2339                         goto done;
2340                 }
2341                 break;
2342         }
2343         if (vap->va_atime.tv_sec != VNOVAL) {
2344                 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
2345                 modflags |= HAMMER_INODE_ATIME;
2346                 kflags |= NOTE_ATTRIB;
2347         }
2348         if (vap->va_mtime.tv_sec != VNOVAL) {
2349                 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
2350                 modflags |= HAMMER_INODE_MTIME;
2351                 kflags |= NOTE_ATTRIB;
2352                 vclrflags(ap->a_vp, VLASTWRITETS);
2353         }
2354         if (vap->va_mode != (mode_t)VNOVAL) {
2355                 mode_t   cur_mode = ip->ino_data.mode;
2356                 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2357                 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2358
2359                 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2360                                          cur_uid, cur_gid, &cur_mode);
2361                 if (error == 0 && ip->ino_data.mode != cur_mode) {
2362                         ip->ino_data.mode = cur_mode;
2363                         ip->ino_data.ctime = trans.time;
2364                         modflags |= HAMMER_INODE_DDIRTY;
2365                         kflags |= NOTE_ATTRIB;
2366                 }
2367         }
2368 done:
2369         if (error == 0)
2370                 hammer_modify_inode(&trans, ip, modflags);
2371         hammer_done_transaction(&trans);
2372         hammer_knote(ap->a_vp, kflags);
2373         lwkt_reltoken(&hmp->fs_token);
2374         return (error);
2375 }
2376
2377 /*
2378  * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2379  */
2380 static
2381 int
2382 hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
2383 {
2384         struct hammer_transaction trans;
2385         hammer_inode_t dip;
2386         hammer_inode_t nip;
2387         hammer_record_t record;
2388         struct nchandle *nch;
2389         hammer_mount_t hmp;
2390         int error;
2391         int bytes;
2392
2393         ap->a_vap->va_type = VLNK;
2394
2395         nch = ap->a_nch;
2396         dip = VTOI(ap->a_dvp);
2397         hmp = dip->hmp;
2398
2399         if (dip->flags & HAMMER_INODE_RO)
2400                 return (EROFS);
2401         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
2402                 return (error);
2403
2404         /*
2405          * Create a transaction to cover the operations we perform.
2406          */
2407         lwkt_gettoken(&hmp->fs_token);
2408         hammer_start_transaction(&trans, hmp);
2409
2410         /*
2411          * Create a new filesystem object of the requested type.  The
2412          * returned inode will be referenced but not locked.
2413          */
2414
2415         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
2416                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2417                                     NULL, &nip);
2418         if (error) {
2419                 hammer_done_transaction(&trans);
2420                 *ap->a_vpp = NULL;
2421                 lwkt_reltoken(&hmp->fs_token);
2422                 return (error);
2423         }
2424
2425         /*
2426          * Add a record representing the symlink.  The symlink is stored
2427          * as pure data, not a string, and is not \0-terminated.
2428          */
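             /*
              * For example, a short target such as "@@PFS00000" (10 bytes)
              * fits in the inline ino_data.ext.symlink area below, while a
              * longer target is laid down as a separate HAMMER_RECTYPE_FIX
              * record.
              */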
2429         if (error == 0) {
2430                 bytes = strlen(ap->a_target);
2431
2432                 if (bytes <= HAMMER_INODE_BASESYMLEN) {
2433                         bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2434                 } else {
2435                         record = hammer_alloc_mem_record(nip, bytes);
2436                         record->type = HAMMER_MEM_RECORD_GENERAL;
2437
2438                         record->leaf.base.localization = nip->obj_localization |
2439                                                          HAMMER_LOCALIZE_MISC;
2440                         record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2441                         record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2442                         record->leaf.data_len = bytes;
2443                         KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2444                         bcopy(ap->a_target, record->data->symlink.name, bytes);
2445                         error = hammer_ip_add_record(&trans, record);
2446                 }
2447
2448                 /*
2449                  * Set the file size to the length of the link.
2450                  */
2451                 if (error == 0) {
2452                         nip->ino_data.size = bytes;
2453                         hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
2454                 }
2455         }
2456         if (error == 0)
2457                 error = hammer_ip_add_direntry(&trans, dip, nch->ncp->nc_name,
2458                                                 nch->ncp->nc_nlen, nip);
2459
2460         /*
2461          * Finish up.
2462          */
2463         if (error) {
2464                 hammer_rel_inode(nip, 0);
2465                 *ap->a_vpp = NULL;
2466         } else {
2467                 error = hammer_get_vnode(nip, ap->a_vpp);
2468                 hammer_rel_inode(nip, 0);
2469                 if (error == 0) {
2470                         cache_setunresolved(ap->a_nch);
2471                         cache_setvp(ap->a_nch, *ap->a_vpp);
2472                         hammer_knote(ap->a_dvp, NOTE_WRITE);
2473                 }
2474         }
2475         hammer_done_transaction(&trans);
2476         lwkt_reltoken(&hmp->fs_token);
2477         return (error);
2478 }
2479
2480 /*
2481  * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2482  */
2483 static
2484 int
2485 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
2486 {
2487         struct hammer_transaction trans;
2488         hammer_inode_t dip;
2489         hammer_mount_t hmp;
2490         int error;
2491
2492         dip = VTOI(ap->a_dvp);
2493         hmp = dip->hmp;
2494
2495         if (hammer_nohistory(dip) == 0 &&
2496             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
2497                 return (error);
2498         }
2499
2500         lwkt_gettoken(&hmp->fs_token);
2501         hammer_start_transaction(&trans, hmp);
2502         error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
2503                                 ap->a_cred, ap->a_flags, -1);
2504         hammer_done_transaction(&trans);
2505         lwkt_reltoken(&hmp->fs_token);
2506
2507         return (error);
2508 }
2509
2510 /*
2511  * hammer_vop_ioctl { vp, command, data, fflag, cred }
2512  */
2513 static
2514 int
2515 hammer_vop_ioctl(struct vop_ioctl_args *ap)
2516 {
2517         hammer_inode_t ip = ap->a_vp->v_data;
2518         hammer_mount_t hmp = ip->hmp;
2519         int error;
2520
2521         lwkt_gettoken(&hmp->fs_token);
2522         error = hammer_ioctl(ip, ap->a_command, ap->a_data,
2523                              ap->a_fflag, ap->a_cred);
2524         lwkt_reltoken(&hmp->fs_token);
2525         return (error);
2526 }
2527
2528 static
2529 int
2530 hammer_vop_mountctl(struct vop_mountctl_args *ap)
2531 {
2532         static const struct mountctl_opt extraopt[] = {
2533                 { HMNT_NOHISTORY,       "nohistory" },
2534                 { HMNT_MASTERID,        "master" },
2535                 { HMNT_NOMIRROR,        "nomirror" },
2536                 { 0, NULL }
2538         };
2539         hammer_mount_t hmp;
2540         struct mount *mp;
2541         int usedbytes;
2542         int error;
2543
2544         error = 0;
2545         usedbytes = 0;
2546         mp = ap->a_head.a_ops->head.vv_mount;
2547         KKASSERT(mp->mnt_data != NULL);
2548         hmp = (hammer_mount_t)mp->mnt_data;
2549
2550         lwkt_gettoken(&hmp->fs_token);
2551
2552         switch(ap->a_op) {
2553         case MOUNTCTL_SET_EXPORT:
2554                 if (ap->a_ctllen != sizeof(struct export_args))
2555                         error = EINVAL;
2556                 else
2557                         error = hammer_vfs_export(mp, ap->a_op,
2558                                       (const struct export_args *)ap->a_ctl);
2559                 break;
2560         case MOUNTCTL_MOUNTFLAGS:
2561                 /*
2562                  * Call standard mountctl VOP function
2563                  * so we get user mount flags.
2564                  */
2565                 error = vop_stdmountctl(ap);
2566                 if (error)
2567                         break;
2568
2569                 usedbytes = *ap->a_res;
2570
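                     /*
                      * Append the HAMMER-specific flag strings (see the
                      * extraopt table above) after the standard flags when
                      * buffer space remains.
                      */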
2571                 if (usedbytes > 0 && usedbytes < ap->a_buflen) {
2572                         usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
2573                                                     ap->a_buf,
2574                                                     ap->a_buflen - usedbytes,
2575                                                     &error);
2576                 }
2577
2578                 *ap->a_res += usedbytes;
2579                 break;
2580         default:
2581                 error = vop_stdmountctl(ap);
2582                 break;
2583         }
2584         lwkt_reltoken(&hmp->fs_token);
2585         return(error);
2586 }
2587
2588 /*
2589  * hammer_vop_strategy { vp, bio }
2590  *
2591  * Strategy call, used for regular file read & write only.  Note that the
2592  * bp may represent a cluster.
2593  *
2594  * To simplify operation and allow better optimizations in the future,
2595  * this code does not make any assumptions with regard to buffer alignment
2596  * or size.
2597  */
2598 static
2599 int
2600 hammer_vop_strategy(struct vop_strategy_args *ap)
2601 {
2602         struct buf *bp;
2603         int error;
2604
2605         bp = ap->a_bio->bio_buf;
2606
2607         switch(bp->b_cmd) {
2608         case BUF_CMD_READ:
2609                 error = hammer_vop_strategy_read(ap);
2610                 break;
2611         case BUF_CMD_WRITE:
2612                 error = hammer_vop_strategy_write(ap);
2613                 break;
2614         default:
2615                 bp->b_error = error = EINVAL;
2616                 bp->b_flags |= B_ERROR;
2617                 biodone(ap->a_bio);
2618                 break;
2619         }
2620         return (error);
2621 }
2622
2623 /*
2624  * Read from a regular file.  Iterate the related records and fill in the
2625  * BIO/BUF.  Gaps are zero-filled.
2626  *
2627  * The support code in hammer_object.c should be used to deal with mixed
2628  * in-memory and on-disk records.
2629  *
2630  * NOTE: Can be called from the cluster code with an oversized buf.
2631  *
2632  * XXX atime update
2633  */
2634 static
2635 int
2636 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2637 {
2638         struct hammer_transaction trans;
2639         hammer_inode_t ip;
2640         hammer_inode_t dip;
2641         hammer_mount_t hmp;
2642         struct hammer_cursor cursor;
2643         hammer_base_elm_t base;
2644         hammer_off_t disk_offset;
2645         struct bio *bio;
2646         struct bio *nbio;
2647         struct buf *bp;
2648         int64_t rec_offset;
2649         int64_t ran_end;
2650         int64_t tmp64;
2651         int error;
2652         int boff;
2653         int roff;
2654         int n;
2655         int isdedupable;
2656
2657         bio = ap->a_bio;
2658         bp = bio->bio_buf;
2659         ip = ap->a_vp->v_data;
2660         hmp = ip->hmp;
2661
2662         /*
2663          * The zone-2 disk offset may have been set by the cluster code via
2664          * a BMAP operation, or else should be NOOFFSET.
2665          *
2666          * Checking the high bits for a match against zone-2 should suffice.
2667          *
2668          * In cases where a lot of data duplication is present it may be
2669          * more beneficial to drop through and double-buffer through the
2670          * device.
2671          */
2672         nbio = push_bio(bio);
2673         if (hammer_is_zone_large_data(nbio->bio_offset)) {
2674                 if (hammer_double_buffer == 0) {
2675                         lwkt_gettoken(&hmp->fs_token);
2676                         error = hammer_io_direct_read(hmp, nbio, NULL);
2677                         lwkt_reltoken(&hmp->fs_token);
2678                         return (error);
2679                 }
2680
2681                 /*
2682                  * Try to shortcut requests for double_buffer mode too.
2683                  * Since this mode runs through the device buffer cache,
2684                  * only compatible buffer sizes (meaning those generated
2685                  * by normal filesystem buffers) are legal.
2686                  */
2687                 if ((bp->b_flags & B_PAGING) == 0) {
2688                         lwkt_gettoken(&hmp->fs_token);
2689                         error = hammer_io_indirect_read(hmp, nbio, NULL);
2690                         lwkt_reltoken(&hmp->fs_token);
2691                         return (error);
2692                 }
2693         }
2694
2695         /*
2696          * Well, that sucked.  Do it the hard way.  If all the stars are
2697          * aligned we may still be able to issue a direct-read.
2698          */
2699         lwkt_gettoken(&hmp->fs_token);
2700         hammer_simple_transaction(&trans, hmp);
2701         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2702
2703         /*
2704          * Key range (begin and end inclusive) to scan.  Note that the keys
2705          * stored in the actual records represent BASE+LEN, not BASE.  The
2706          * first record containing bio_offset will have a key > bio_offset.
2707          */
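             /*
              * Example (illustrative): a data record covering file offsets
              * [0x4000, 0x8000) carries key 0x8000 (BASE+LEN), so a read
              * at bio_offset 0x4000 begins scanning at key 0x4001 and
              * correctly picks that record up.
              */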
2708         cursor.key_beg.localization = ip->obj_localization |
2709                                       HAMMER_LOCALIZE_MISC;
2710         cursor.key_beg.obj_id = ip->obj_id;
2711         cursor.key_beg.create_tid = 0;
2712         cursor.key_beg.delete_tid = 0;
2713         cursor.key_beg.obj_type = 0;
2714         cursor.key_beg.key = bio->bio_offset + 1;
2715         cursor.asof = ip->obj_asof;
2716         cursor.flags |= HAMMER_CURSOR_ASOF;
2717
2718         cursor.key_end = cursor.key_beg;
2719         KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2720 #if 0
2721         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
2722                 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2723                 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2724                 cursor.key_end.key = HAMMER_MAX_KEY;
2725         } else
2726 #endif
2727         {
2728                 ran_end = bio->bio_offset + bp->b_bufsize;
2729                 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2730                 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2731                 tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
2732                 if (tmp64 < ran_end)
2733                         cursor.key_end.key = HAMMER_MAX_KEY;
2734                 else
2735                         cursor.key_end.key = ran_end + MAXPHYS + 1;
2736         }
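        /*
         * Editorial note: ran_end + MAXPHYS + 1 can wrap the signed
         * 64-bit key space when ran_end is near HAMMER_MAX_KEY; the
         * tmp64 comparison above detects the wrap and clamps the end
         * key to HAMMER_MAX_KEY instead.
         */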
2737         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2738
2739         /*
2740          * Set NOSWAPCACHE for cursor data extraction if double buffering
2741          * is disabled, or if the file is not marked cacheable via chflags
2742          * and vm.swapcache_use_chflags is enabled.
2743          */
2744         if (hammer_double_buffer == 0 ||
2745             ((ap->a_vp->v_flag & VSWAPCACHE) == 0 &&
2746              vm_swapcache_use_chflags)) {
2747                 cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;
2748         }
2749
2750         error = hammer_ip_first(&cursor);
2751         boff = 0;
2752
2753         while (error == 0) {
2754                 /*
2755                  * Get the base file offset of the record.  The key for
2756                  * data records is (base + bytes) rather than (base).
2757                  */
2758                 base = &cursor.leaf->base;
2759                 rec_offset = base->key - cursor.leaf->data_len;
2760
2761                 /*
2762                  * Calculate the gap, if any, and zero-fill it.
2763                  *
2764                  * n is the offset of the start of the record versus our
2765                  * current seek offset in the bio.
2766                  */
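                /*
                 * Worked example (editorial, illustrative numbers): with
                 * bio_offset 61440, boff 0, and a record starting at
                 * rec_offset 65536, n computes to 4096; that gap is
                 * zero-filled and boff advances to 4096 before any record
                 * data is copied.
                 */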
2767                 n = (int)(rec_offset - (bio->bio_offset + boff));
2768                 if (n > 0) {
2769                         if (n > bp->b_bufsize - boff)
2770                                 n = bp->b_bufsize - boff;
2771                         bzero((char *)bp->b_data + boff, n);
2772                         boff += n;
2773                         n = 0;
2774                 }
2775
2776                 /*
2777                  * Calculate the data offset in the record and the number
2778                  * of bytes we can copy.
2779                  *
2780                  * There are two degenerate cases.  First, boff may already
2781                  * be at bp->b_bufsize.  Second, the data offset within
2782                  * the record may exceed the record's size.
2783                  */
2784                 roff = -n;
2785                 rec_offset += roff;
2786                 n = cursor.leaf->data_len - roff;
2787                 if (n <= 0) {
2788                         hdkprintf("bad n=%d roff=%d\n", n, roff);
2789                         n = 0;
2790                 } else if (n > bp->b_bufsize - boff) {
2791                         n = bp->b_bufsize - boff;
2792                 }
2793
2794                 /*
2795                  * Deal with cached truncations.  This cool bit of code
2796                  * allows truncate()/ftruncate() to avoid having to sync
2797                  * the file.
2798                  *
2799                  * If the frontend is truncated then all backend records are
2800                  * subject to the frontend's truncation.
2801                  *
2802                  * If the backend is truncated then backend records on-disk
2803                  * (but not in-memory) are subject to the backend's
2804                  * truncation.  In-memory records owned by the backend
2805                  * represent data written after the truncation point on the
2806                  * backend and must not be truncated.
2807                  *
2808                  * Truncate operations deal with frontend buffer cache
2809                  * buffers and frontend-owned in-memory records synchronously.
2810                  */
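                /*
                 * Illustrative numbers (editorial): if ip->trunc_off is
                 * 70000 and the current record spans [65536, 81920), n is
                 * clipped to 70000 - 65536 = 4464 so no post-truncation
                 * data leaks into the read.
                 */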
2811                 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2812                         if (hammer_cursor_ondisk(&cursor)/* ||
2813                             cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
2814                                 if (ip->trunc_off <= rec_offset)
2815                                         n = 0;
2816                                 else if (ip->trunc_off < rec_offset + n)
2817                                         n = (int)(ip->trunc_off - rec_offset);
2818                         }
2819                 }
2820                 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2821                         if (hammer_cursor_ondisk(&cursor)) {
2822                                 if (ip->sync_trunc_off <= rec_offset)
2823                                         n = 0;
2824                                 else if (ip->sync_trunc_off < rec_offset + n)
2825                                         n = (int)(ip->sync_trunc_off - rec_offset);
2826                         }
2827                 }
2828
2829                 /*
2830                  * Try to issue a direct read into our bio if possible,
2831                  * otherwise resolve the element data into a hammer_buffer
2832                  * and copy.
2833                  *
2834                  * The buffer on-disk should be zeroed past any real
2835                  * truncation point, but may not be for any synthesized
2836                  * truncation point from above.
2837                  *
2838                  * NOTE: disk_offset is only valid if the cursor data is
2839                  *       on-disk.
2840                  */
2841                 disk_offset = cursor.leaf->data_offset + roff;
2842                 isdedupable = (boff == 0 && n == bp->b_bufsize &&
2843                                hammer_cursor_ondisk(&cursor) &&
2844                                ((int)disk_offset & HAMMER_BUFMASK) == 0);
2845
2846                 if (isdedupable && hammer_double_buffer == 0) {
2847                         /*
2848                          * Direct read case
2849                          */
2850                         KKASSERT(hammer_is_zone_large_data(disk_offset));
2851                         nbio->bio_offset = disk_offset;
2852                         error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
2853                         goto done;
2854                 } else if (isdedupable) {
2855                         /*
2856                          * Async I/O case for reading from backing store
2857                          * and copying the data to the filesystem buffer.
2858                          */
2859                         KKASSERT(hammer_is_zone_large_data(disk_offset));
2860                         nbio->bio_offset = disk_offset;
2861                         error = hammer_io_indirect_read(hmp, nbio, cursor.leaf);
2862                         goto done;
2863                 } else if (n) {
2864                         error = hammer_ip_resolve_data(&cursor);
2865                         if (error == 0) {
2866                                 bcopy((char *)cursor.data + roff,
2867                                       (char *)bp->b_data + boff, n);
2868                         }
2869                 }
2870                 if (error)
2871                         break;
2872
2873                 /*
2874                  * Iterate until we have filled the request.
2875                  */
2876                 boff += n;
2877                 if (boff == bp->b_bufsize)
2878                         break;
2879                 error = hammer_ip_next(&cursor);
2880         }
2881
2882         /*
2883          * There may have been a gap after the last record
2884          */
2885         if (error == ENOENT)
2886                 error = 0;
2887         if (error == 0 && boff != bp->b_bufsize) {
2888                 KKASSERT(boff < bp->b_bufsize);
2889                 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2890                 /* boff = bp->b_bufsize; */
2891         }
2892
2893         /*
2894          * Disallow swapcache operation on the vnode buffer if double
2895          * buffering is enabled; the swapcache will get the data via
2896          * the block device buffer.
2897          */
2898         if (hammer_double_buffer)
2899                 bp->b_flags |= B_NOTMETA;
2900
2901         /*
2902          * Cleanup
2903          */
2904         bp->b_resid = 0;
2905         bp->b_error = error;
2906         if (error)
2907                 bp->b_flags |= B_ERROR;
2908         biodone(ap->a_bio);
2909
2910 done:
2911         /*
2912          * Cache the b-tree node for the last data read in cache[1].
2913          *
2914          * If we hit the file EOF then also cache the node in the
2915          * governing directory's cache[3]; it will be used to initialize
2916          * the new inode's cache[1] for any inodes looked up via the directory.
2917          *
2918          * This doesn't reduce disk accesses since the B-Tree chain is
2919          * likely cached, but it does reduce cpu overhead when looking
2920          * up file offsets for cpdup/tar/cpio style iterations.
2921          */
2922         if (cursor.node)
2923                 hammer_cache_node(&ip->cache[1], cursor.node);
2924         if (ran_end >= ip->ino_data.size) {
2925                 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
2926                                         ip->obj_asof, ip->obj_localization);
2927                 if (dip) {
2928                         hammer_cache_node(&dip->cache[3], cursor.node);
2929                         hammer_rel_inode(dip, 0);
2930                 }
2931         }
2932         hammer_done_cursor(&cursor);
2933         hammer_done_transaction(&trans);
2934         lwkt_reltoken(&hmp->fs_token);
2935         return(error);
2936 }
2937
2938 /*
2939  * BMAP operation - used to support cluster_read() only.
2940  *
2941  * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
2942  *
2943  * This routine may return EOPNOTSUPP if the operation is not supported for
2944  * the specified offset.  The contents of the pointer arguments do not
2945  * need to be initialized in that case.
2946  *
2947  * If a disk address is available and properly aligned return 0 with
2948  * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
2949  * to the run-length relative to that offset.  Callers may assume that
2950  * *doffsetp is valid if 0 is returned, even when *runp is small, so
2951  * return EOPNOTSUPP instead of 0 if the run is not sufficiently large.
2952  */
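/*
 * Editorial sketch of the caller-side contract, assuming a cluster_read()
 * style consumer (illustrative only, not code from this file):
 *
 *      off_t doffset;
 *      int runp, runb;
 *
 *      if (VOP_BMAP(vp, loffset, &doffset, &runp, &runb,
 *                   BUF_CMD_READ) == 0) {
 *              // doffset is the zone-2 address backing loffset and
 *              // [loffset - runb, loffset + runp) is contiguous media.
 *      }
 */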
2953 static
2954 int
2955 hammer_vop_bmap(struct vop_bmap_args *ap)
2956 {
2957         struct hammer_transaction trans;
2958         hammer_inode_t ip;
2959         hammer_mount_t hmp;
2960         struct hammer_cursor cursor;
2961         hammer_base_elm_t base;
2962         int64_t rec_offset;
2963         int64_t ran_end;
2964         int64_t tmp64;
2965         int64_t base_offset;
2966         int64_t base_disk_offset;
2967         int64_t last_offset;
2968         hammer_off_t last_disk_offset;
2969         hammer_off_t disk_offset;
2970         int     rec_len;
2971         int     error;
2972         int     blksize;
2973
2974         ip = ap->a_vp->v_data;
2975         hmp = ip->hmp;
2976
2977         /*
2978          * We can only BMAP regular files.  We can't BMAP database files,
2979          * directories, etc.
2980          */
2981         if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
2982                 return(EOPNOTSUPP);
2983
2984         /*
2985          * bmap is typically called with runp/runb both NULL when used
2986          * for writing.  We do not support BMAP for writing at the moment.
2987          */
2988         if (ap->a_cmd != BUF_CMD_READ)
2989                 return(EOPNOTSUPP);
2990
2991         /*
2992          * Scan the B-Tree to acquire blockmap addresses, then translate
2993          * to raw addresses.
2994          */
2995         lwkt_gettoken(&hmp->fs_token);
2996         hammer_simple_transaction(&trans, hmp);
2997
2998         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2999
3000         /*
3001          * Key range (begin and end inclusive) to scan.  Note that the keys
3002          * stored in the actual records represent BASE+LEN, not BASE.  The
3003          * first record containing ap->a_loffset will have a key > that offset.
3004          */
3005         cursor.key_beg.localization = ip->obj_localization |
3006                                       HAMMER_LOCALIZE_MISC;
3007         cursor.key_beg.obj_id = ip->obj_id;
3008         cursor.key_beg.create_tid = 0;
3009         cursor.key_beg.delete_tid = 0;
3010         cursor.key_beg.obj_type = 0;
3011         if (ap->a_runb)
3012                 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
3013         else
3014                 cursor.key_beg.key = ap->a_loffset + 1;
3015         if (cursor.key_beg.key < 0)
3016                 cursor.key_beg.key = 0;
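        /*
         * Editorial note: when the caller wants a backward run (*runb),
         * the scan starts up to MAXPHYS - 1 bytes before the requested
         * offset so the loop below can accumulate contiguous records on
         * both sides of ap->a_loffset.
         */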
3017         cursor.asof = ip->obj_asof;
3018         cursor.flags |= HAMMER_CURSOR_ASOF;
3019
3020         cursor.key_end = cursor.key_beg;
3021         KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
3022
3023         ran_end = ap->a_loffset + MAXPHYS;
3024         cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
3025         cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
3026         tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
3027         if (tmp64 < ran_end)
3028                 cursor.key_end.key = HAMMER_MAX_KEY;
3029         else
3030                 cursor.key_end.key = ran_end + MAXPHYS + 1;
3031
3032         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
3033
3034         error = hammer_ip_first(&cursor);
3035         base_offset = last_offset = 0;
3036         base_disk_offset = last_disk_offset = 0;
3037
3038         while (error == 0) {
3039                 /*
3040                  * Get the base file offset of the record.  The key for
3041                  * data records is (base + bytes) rather than (base).
3042                  *
3043                  * NOTE: rec_offset + rec_len may exceed the end-of-file.
3044                  * The extra bytes should be zero on-disk and the BMAP op
3045                  * should still be ok.
3046                  */
3047                 base = &cursor.leaf->base;
3048                 rec_offset = base->key - cursor.leaf->data_len;
3049                 rec_len    = cursor.leaf->data_len;
3050
3051                 /*
3052                  * Incorporate any cached truncation.
3053                  *
3054                  * NOTE: Modifications to rec_len based on synthesized
3055                  * truncation points remove the guarantee that any extended
3056                  * data on disk is zero (since the truncations may not have
3057                  * taken place on-media yet).
3058                  */
3059                 if (ip->flags & HAMMER_INODE_TRUNCATED) {
3060                         if (hammer_cursor_ondisk(&cursor) ||
3061                             cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
3062                                 if (ip->trunc_off <= rec_offset)
3063                                         rec_len = 0;
3064                                 else if (ip->trunc_off < rec_offset + rec_len)
3065                                         rec_len = (int)(ip->trunc_off - rec_offset);
3066                         }
3067                 }
3068                 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
3069                         if (hammer_cursor_ondisk(&cursor)) {
3070                                 if (ip->sync_trunc_off <= rec_offset)
3071                                         rec_len = 0;
3072                                 else if (ip->sync_trunc_off < rec_offset + rec_len)
3073                                         rec_len = (int)(ip->sync_trunc_off - rec_offset);
3074                         }
3075                 }
3076
3077                 /*
3078                  * Accumulate information.  If we have hit a discontiguous
3079                  * block reset base_offset unless we are already beyond the
3080                  * requested offset.  If we are, that's it, we stop.
3081                  */
3082                 if (error)
3083                         break;
3084                 if (hammer_cursor_ondisk(&cursor)) {
3085                         disk_offset = cursor.leaf->data_offset;
3086                         if (rec_offset != last_offset ||
3087                             disk_offset != last_disk_offset) {
3088                                 if (rec_offset > ap->a_loffset)
3089                                         break;
3090                                 base_offset = rec_offset;
3091                                 base_disk_offset = disk_offset;
3092                         }
3093                         last_offset = rec_offset + rec_len;
3094                         last_disk_offset = disk_offset + rec_len;
3095                 }
3096                 error = hammer_ip_next(&cursor);
3097         }
3098
3099         if (cursor.node)
3100                 hammer_cache_node(&ip->cache[1], cursor.node);
3101
3102         hammer_done_cursor(&cursor);
3103         hammer_done_transaction(&trans);
3104         lwkt_reltoken(&hmp->fs_token);
3105
3106         /*
3107          * If we couldn't find any records or the records we did find were
3108          * all behind the requested offset, return failure.  A forward
3109          * truncation can leave a hole w/ no on-disk records.
3110          */
3111         if (last_offset == 0 || last_offset < ap->a_loffset)
3112                 return (EOPNOTSUPP);
3113
3114         /*
3115          * Figure out the block size at the requested offset and adjust
3116          * our limits so the cluster_read() does not create inappropriately
3117          * sized buffer cache buffers.
3118          */
3119         blksize = hammer_blocksize(ap->a_loffset);
3120         if (hammer_blocksize(base_offset) != blksize) {
3121                 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
3122         }
3123         if (last_offset != ap->a_loffset &&
3124             hammer_blocksize(last_offset - 1) != blksize) {
3125                 last_offset = hammer_blockdemarc(ap->a_loffset,
3126                                                  last_offset - 1);
3127         }
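        /*
         * Editorial note: HAMMER uses a smaller buffer size near the
         * front of a file than in its interior (16KB versus 64KB in the
         * shipped configuration), so a run straddling the demarcation is
         * clipped above to keep cluster_read() from constructing buffers
         * of the wrong size.
         */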
3128
3129         /*
3130          * Returning EOPNOTSUPP simply prevents the direct-IO optimization
3131          * from occurring.
3132          */
3133         disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
3134
3135         if (!hammer_is_zone_large_data(disk_offset)) {
3136                 /*
3137                  * Only large-data zones can be direct-IOd
3138                  */
3139                 error = EOPNOTSUPP;
3140         } else if ((disk_offset & HAMMER_BUFMASK) ||
3141                    (last_offset - ap->a_loffset) < blksize) {
3142                 /*
3143                  * doffsetp is not aligned or the forward run size does
3144                  * not cover a whole buffer, disallow the direct I/O.
3145                  */
3146                 error = EOPNOTSUPP;
3147         } else {
3148                 /*
3149                  * We're good.
3150                  */
3151                 *ap->a_doffsetp = disk_offset;
3152                 if (ap->a_runb) {
3153                         *ap->a_runb = ap->a_loffset - base_offset;
3154                         KKASSERT(*ap->a_runb >= 0);
3155                 }
3156                 if (ap->a_runp) {
3157                         *ap->a_runp = last_offset - ap->a_loffset;
3158                         KKASSERT(*ap->a_runp >= 0);
3159                 }
3160                 error = 0;
3161         }
3162         return(error);
3163 }
3164
3165 /*
3166  * Write to a regular file.  Because this is a strategy call, the OS is
3167  * trying to actually get data onto the media.
3168  */
3169 static
3170 int
3171 hammer_vop_strategy_write(struct vop_strategy_args *ap)
3172 {
3173         hammer_record_t record;
3174         hammer_mount_t hmp;
3175         hammer_inode_t ip;
3176         struct bio *bio;
3177         struct buf *bp;
3178         int blksize __debugvar;
3179         int bytes;
3180         int error;
3181
3182         bio = ap->a_bio;
3183         bp = bio->bio_buf;
3184         ip = ap->a_vp->v_data;
3185         hmp = ip->hmp;
3186
3187         blksize = hammer_blocksize(bio->bio_offset);
3188         KKASSERT(bp->b_bufsize == blksize);
3189
3190         if (ip->flags & HAMMER_INODE_RO) {
3191                 bp->b_error = EROFS;
3192                 bp->b_flags |= B_ERROR;
3193                 biodone(ap->a_bio);
3194                 return(EROFS);
3195         }
3196
3197         lwkt_gettoken(&hmp->fs_token);
3198
3199         /*
3200          * Disallow swapcache operation on the vnode buffer if double
3201          * buffering is enabled; the swapcache will get the data via
3202          * the block device buffer.
3203          */
3204         if (hammer_double_buffer)
3205                 bp->b_flags |= B_NOTMETA;
3206
3207         /*
3208          * Interlock with inode destruction (no in-kernel or directory
3209          * topology visibility).  If we queue new IO while trying to
3210          * destroy the inode we can deadlock the vtrunc call in
3211          * destroy the inode, we can deadlock the vtrunc call in
3212          *
3213          * Besides, there's no point flushing a bp associated with an
3214          * inode that is being destroyed on-media and has no kernel
3215          * references.
3216          */
3217         if ((ip->flags | ip->sync_flags) &
3218             (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
3219                 bp->b_resid = 0;
3220                 biodone(ap->a_bio);
3221                 lwkt_reltoken(&hmp->fs_token);
3222                 return(0);
3223         }
3224
3225         /*
3226          * Reserve space and issue a direct-write from the front-end.
3227          * NOTE: The direct_io code will hammer_bread/bcopy smaller
3228          * allocations.
3229          *
3230          * An in-memory record will be installed to reference the storage
3231          * until the flusher can get to it.
3232          *
3233          * Since we own the high level bio the front-end will not try to
3234          * do a direct-read until the write completes.
3235          *
3236          * NOTE: The only time we do not reserve a full-sized buffer's
3237          * worth of data is if the file is small.  We do not try to
3238          * allocate a fragment (from the small-data zone) at the end of
3239          * an otherwise large file as this can lead to wildly separated
3240          * data.
3241          */
3242         KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
3243         KKASSERT(bio->bio_offset < ip->ino_data.size);
3244         if (bio->bio_offset || ip->ino_data.size > HAMMER_HBUFSIZE)
3245                 bytes = bp->b_bufsize;
3246         else
3247                 bytes = HAMMER_DATA_DOALIGN_WITH(int, ip->ino_data.size);
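        /*
         * Illustrative example (editorial): a 1000-byte file writing its
         * only buffer at offset 0 reserves just the aligned file size
         * (e.g. 1008 bytes with 16-byte data alignment) from the
         * small-data zone rather than a full-sized buffer.
         */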
3248
3249         record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
3250                                     bytes, &error);
3251
3252         /*
3253          * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
3254          * in hammer_vop_write().  We must flag the record so the proper
3255          * REDO_TERM_WRITE entry is generated during the flush.
3256          */
3257         if (record) {
3258                 if (bp->b_flags & B_VFSFLAG1) {
3259                         record->flags |= HAMMER_RECF_REDO;
3260                         bp->b_flags &= ~B_VFSFLAG1;
3261                 }
3262                 hammer_io_direct_write(hmp, bio, record);
3263                 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
3264                         hammer_flush_inode(ip, 0);
3265         } else {
3266                 bp->b_bio2.bio_offset = NOOFFSET;
3267                 bp->b_error = error;
3268                 bp->b_flags |= B_ERROR;
3269                 biodone(ap->a_bio);
3270         }
3271         lwkt_reltoken(&hmp->fs_token);
3272         return(error);
3273 }
3274
3275 /*
3276  * dounlink - disconnect a directory entry
3277  *
3278  * XXX whiteout support not really in yet
3279  */
3280 static int
3281 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
3282                 struct vnode *dvp, struct ucred *cred,
3283                 int flags, int isdir)
3284 {
3285         struct namecache *ncp;
3286         hammer_inode_t dip;
3287         hammer_inode_t ip;
3288         hammer_mount_t hmp;
3289         struct hammer_cursor cursor;
3290         int64_t namekey;
3291         uint32_t max_iterations;
3292         int nlen, error;
3293
3294         /*
3295          * Calculate the namekey and setup the key range for the scan.  This
3296          * works kinda like a chained hash table where the lower 32 bits
3297          * of the namekey synthesize the chain.
3298          *
3299          * The key range is inclusive of both key_beg and key_end.
3300          */
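        /*
         * Editorial example: a name hashing to, say, namekey
         * 0x1234567800000000 chains its collisions in the low 32 bits,
         * so the cursor below scans [namekey, namekey + max_iterations]
         * inclusive to find the matching entry.
         */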
3301         dip = VTOI(dvp);
3302         ncp = nch->ncp;
3303         hmp = dip->hmp;
3304
3305         if (dip->flags & HAMMER_INODE_RO)
3306                 return (EROFS);
3307
3308         namekey = hammer_direntry_namekey(dip, ncp->nc_name, ncp->nc_nlen,
3309                                            &max_iterations);
3310 retry:
3311         hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
3312         cursor.key_beg.localization = dip->obj_localization |
3313                                       hammer_dir_localization(dip);
3314         cursor.key_beg.obj_id = dip->obj_id;
3315         cursor.key_beg.key = namekey;
3316         cursor.key_beg.create_tid = 0;
3317         cursor.key_beg.delete_tid = 0;
3318         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
3319         cursor.key_beg.obj_type = 0;
3320
3321         cursor.key_end = cursor.key_beg;
3322         cursor.key_end.key += max_iterations;
3323         cursor.asof = dip->obj_asof;
3324         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
3325
3326         /*
3327          * Scan all matching records (the chain), locate the one matching
3328          * the requested path component.  The error code remaining when
3329          * the search terminates could be 0, ENOENT, or
3330          * something else.
3331          *
3332          * The hammer_ip_*() functions merge in-memory records with on-disk
3333          * records for the purposes of the search.
3334          */
3335         error = hammer_ip_first(&cursor);
3336
3337         while (error == 0) {
3338                 error = hammer_ip_resolve_data(&cursor);
3339                 if (error)
3340                         break;
3341                 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
3342                 KKASSERT(nlen > 0);
3343                 if (ncp->nc_nlen == nlen &&
3344                     bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
3345                         break;
3346                 }
3347                 error = hammer_ip_next(&cursor);
3348         }
3349
3350         /*
3351          * If all is ok we have to get the inode so we can adjust nlinks.
3352          * To avoid a deadlock with the flusher we must release the inode
3353          * lock on the directory when acquiring the inode for the entry.
3354          *
3355          * If the target is a directory, it must be empty.
3356          */
3357         if (error == 0) {
3358                 hammer_unlock(&cursor.ip->lock);
3359                 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
3360                                       hmp->asof,
3361                                       cursor.data->entry.localization,
3362                                       0, &error);
3363                 hammer_lock_sh(&cursor.ip->lock);
3364                 if (error == ENOENT) {
3365                         hkprintf("WARNING: Removing dirent w/missing inode "
3366                                 "\"%s\"\n"
3367                                 "\tobj_id = %016jx\n",
3368                                 ncp->nc_name,
3369                                 (intmax_t)cursor.data->entry.obj_id);
3370                         error = 0;
3371                 }
3372
3373                 /*
3374                  * If isdir >= 0 we validate that the entry is or is not a
3375                  * directory.  If isdir < 0 we don't care.
3376                  */
3377                 if (error == 0 && isdir >= 0 && ip) {
3378                         if (isdir &&
3379                             ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3380                                 error = ENOTDIR;
3381                         } else if (isdir == 0 &&
3382                             ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3383                                 error = EISDIR;
3384                         }
3385                 }
3386
3387                 /*
3388                  * If we are trying to remove a directory the directory must
3389                  * be empty.
3390                  *
3391                  * The check directory code can loop and deadlock/retry.  Our
3392                  * own cursor's node locks must be released to avoid a 3-way
3393                  * deadlock with the flusher if the check directory code
3394                  * blocks.
3395                  *
3396          * If any changes whatsoever have been made to the cursor,
3397                  * set EDEADLK and retry.
3398                  *
3399                  * WARNING: See warnings in hammer_unlock_cursor()
3400                  *          function.
3401                  */
3402                 if (error == 0 && ip && ip->ino_data.obj_type ==
3403                                         HAMMER_OBJTYPE_DIRECTORY) {
3404                         hammer_unlock_cursor(&cursor);
3405                         error = hammer_ip_check_directory_empty(trans, ip);
3406                         hammer_lock_cursor(&cursor);
3407                         if (cursor.flags & HAMMER_CURSOR_RETEST) {
3408                                 hkprintf("Warning: avoided deadlock "
3409                                         "on rmdir '%s'\n",
3410                                         ncp->nc_name);
3411                                 error = EDEADLK;
3412                         }
3413                 }
3414
3415                 /*
3416                  * Delete the directory entry.
3417                  *
3418                  * WARNING: hammer_ip_del_direntry() may have to terminate
3419                  * the cursor to avoid a deadlock.  It is ok to call
3420                  * hammer_done_cursor() twice.
3421                  */
3422                 if (error == 0) {
3423                         error = hammer_ip_del_direntry(trans, &cursor,
3424                                                         dip, ip);
3425                 }
3426                 hammer_done_cursor(&cursor);
3427                 if (error == 0) {
3428                         /*
3429                          * Tell the namecache that we are now unlinked.
3430                          */
3431                         cache_unlink(nch);
3432
3433                         /*
3434                          * NOTE: ip->vp, if non-NULL, cannot be directly
3435                          *       referenced without formally acquiring the
3436                          *       vp since the vp might have zero refs on it,
3437                          *       or in the middle of a reclaim, etc.
3438                          *
3439                          * NOTE: The cache_setunresolved() can rip the vp
3440                          *       out from under us since the vp may not have
3441                          *       any refs, in which case ip->vp will be NULL
3442                          *       from the outset.
3443                          */
3444                         while (ip && ip->vp) {
3445                                 struct vnode *vp;
3446
3447                                 error = hammer_get_vnode(ip, &vp);
3448                                 if (error == 0 && vp) {
3449                                         vn_unlock(vp);
3450                                         hammer_knote(ip->vp, NOTE_DELETE);
3451 #if 0
3452                                         /*
3453                                          * Don't do this, it can deadlock
3454                                          * on concurrent rm's of hardlinks.
3455                                          * Shouldn't be needed any more.
3456                                          */
3457                                         cache_inval_vp(ip->vp, CINV_DESTROY);
3458 #endif
3459                                         vrele(vp);
3460                                         break;
3461                                 }
3462                                 hdkprintf("ip/vp race1 avoided\n");
3463                         }
3464                 }
3465                 if (ip)
3466                         hammer_rel_inode(ip, 0);
3467         } else {
3468                 hammer_done_cursor(&cursor);
3469         }
3470         if (error == EDEADLK)
3471                 goto retry;
3472
3473         return (error);
3474 }
3475
3476 /************************************************************************
3477  *                          FIFO AND SPECFS OPS                         *
3478  ************************************************************************
3479  *
3480  */
3481 static int
3482 hammer_vop_fifoclose(struct vop_close_args *ap)
3483 {
3484         /* XXX update itimes */
3485         return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3486 }
3487
3488 static int
3489 hammer_vop_fiforead(struct vop_read_args *ap)
3490 {
3491         int error;
3492
3493         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3494         /* XXX update access time */
3495         return (error);
3496 }
3497
3498 static int
3499 hammer_vop_fifowrite(struct vop_write_args *ap)
3500 {
3501         int error;
3502
3503         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3504         /* XXX update access time */
3505         return (error);
3506 }
3507
3508 static
3509 int
3510 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3511 {
3512         int error;
3513
3514         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3515         if (error)
3516                 error = hammer_vop_kqfilter(ap);
3517         return(error);
3518 }
3519
3520 /************************************************************************
3521  *                          KQFILTER OPS                                *
3522  ************************************************************************
3523  *
3524  */
3525 static void filt_hammerdetach(struct knote *kn);
3526 static int filt_hammerread(struct knote *kn, long hint);
3527 static int filt_hammerwrite(struct knote *kn, long hint);
3528 static int filt_hammervnode(struct knote *kn, long hint);
3529
3530 static struct filterops hammerread_filtops =
3531         { FILTEROP_ISFD | FILTEROP_MPSAFE,
3532           NULL, filt_hammerdetach, filt_hammerread };
3533 static struct filterops hammerwrite_filtops =
3534         { FILTEROP_ISFD | FILTEROP_MPSAFE,
3535           NULL, filt_hammerdetach, filt_hammerwrite };
3536 static struct filterops hammervnode_filtops =
3537         { FILTEROP_ISFD | FILTEROP_MPSAFE,
3538           NULL, filt_hammerdetach, filt_hammervnode };
3539
3540 static
3541 int
3542 hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3543 {
3544         struct vnode *vp = ap->a_vp;
3545         struct knote *kn = ap->a_kn;
3546
3547         switch (kn->kn_filter) {
3548         case EVFILT_READ:
3549                 kn->kn_fop = &hammerread_filtops;
3550                 break;
3551         case EVFILT_WRITE:
3552                 kn->kn_fop = &hammerwrite_filtops;
3553                 break;
3554         case EVFILT_VNODE:
3555                 kn->kn_fop = &hammervnode_filtops;
3556                 break;
3557         default:
3558                 return (EOPNOTSUPP);
3559         }
3560
3561         kn->kn_hook = (caddr_t)vp;
3562
3563         knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3564
3565         return(0);
3566 }
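/*
 * Editorial sketch of how these filterops are reached from userland
 * (illustrative only; fd, kq and the error handling are assumed):
 *
 *      struct kevent kev;
 *
 *      EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *             NOTE_DELETE | NOTE_WRITE, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * A subsequent unlink of the watched HAMMER file fires NOTE_DELETE via
 * hammer_knote() and is reported through filt_hammervnode() above.
 */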
3567
3568 static void
3569 filt_hammerdetach(struct knote *kn)
3570 {
3571         struct vnode *vp = (void *)kn->kn_hook;
3572
3573         knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3574 }
3575
3576 static int
3577 filt_hammerread(struct knote *kn, long hint)
3578 {
3579         struct vnode *vp = (void *)kn->kn_hook;
3580         hammer_inode_t ip = VTOI(vp);
3581         hammer_mount_t hmp = ip->hmp;
3582         off_t off;
3583
3584         if (hint == NOTE_REVOKE) {
3585                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3586                 return(1);
3587         }
3588         lwkt_gettoken(&hmp->fs_token);  /* XXX use per-ip-token */
3589         off = ip->ino_data.size - kn->kn_fp->f_offset;
3590         kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
3591         lwkt_reltoken(&hmp->fs_token);
3592         if (kn->kn_sfflags & NOTE_OLDAPI)
3593                 return(1);
3594         return (kn->kn_data != 0);
3595 }
3596
3597 static int
3598 filt_hammerwrite(struct knote *kn, long hint)
3599 {
3600         if (hint == NOTE_REVOKE)
3601                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3602         kn->kn_data = 0;
3603         return (1);
3604 }
3605
3606 static int
3607 filt_hammervnode(struct knote *kn, long hint)
3608 {
3609         if (kn->kn_sfflags & hint)
3610                 kn->kn_fflags |= hint;
3611         if (hint == NOTE_REVOKE) {
3612                 kn->kn_flags |= (EV_EOF | EV_NODATA);
3613                 return (1);
3614         }
3615         return (kn->kn_fflags != 0);
3616 }
3617