sys/vfs/hammer: Add hammer_is_pfs_{master|slave|deleted}()
[dragonfly.git] / sys / vfs / hammer / hammer_vnops.c
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34
35 #include <sys/mountctl.h>
36 #include <sys/namecache.h>
37 #include <sys/buf2.h>
38 #include <vfs/fifofs/fifo.h>
39
40 #include "hammer.h"
41
42 /*
43  * USERFS VNOPS
44  */
45 static int hammer_vop_fsync(struct vop_fsync_args *);
46 static int hammer_vop_read(struct vop_read_args *);
47 static int hammer_vop_write(struct vop_write_args *);
48 static int hammer_vop_access(struct vop_access_args *);
49 static int hammer_vop_advlock(struct vop_advlock_args *);
50 static int hammer_vop_close(struct vop_close_args *);
51 static int hammer_vop_ncreate(struct vop_ncreate_args *);
52 static int hammer_vop_getattr(struct vop_getattr_args *);
53 static int hammer_vop_nresolve(struct vop_nresolve_args *);
54 static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
55 static int hammer_vop_nlink(struct vop_nlink_args *);
56 static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
57 static int hammer_vop_nmknod(struct vop_nmknod_args *);
58 static int hammer_vop_open(struct vop_open_args *);
59 static int hammer_vop_print(struct vop_print_args *);
60 static int hammer_vop_readdir(struct vop_readdir_args *);
61 static int hammer_vop_readlink(struct vop_readlink_args *);
62 static int hammer_vop_nremove(struct vop_nremove_args *);
63 static int hammer_vop_nrename(struct vop_nrename_args *);
64 static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
65 static int hammer_vop_markatime(struct vop_markatime_args *);
66 static int hammer_vop_setattr(struct vop_setattr_args *);
67 static int hammer_vop_strategy(struct vop_strategy_args *);
68 static int hammer_vop_bmap(struct vop_bmap_args *ap);
69 static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
70 static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
71 static int hammer_vop_ioctl(struct vop_ioctl_args *);
72 static int hammer_vop_mountctl(struct vop_mountctl_args *);
73 static int hammer_vop_kqfilter (struct vop_kqfilter_args *);
74
75 static int hammer_vop_fifoclose (struct vop_close_args *);
76 static int hammer_vop_fiforead (struct vop_read_args *);
77 static int hammer_vop_fifowrite (struct vop_write_args *);
78 static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
79
80 struct vop_ops hammer_vnode_vops = {
81         .vop_default =          vop_defaultop,
82         .vop_fsync =            hammer_vop_fsync,
83         .vop_getpages =         vop_stdgetpages,
84         .vop_putpages =         vop_stdputpages,
85         .vop_read =             hammer_vop_read,
86         .vop_write =            hammer_vop_write,
87         .vop_access =           hammer_vop_access,
88         .vop_advlock =          hammer_vop_advlock,
89         .vop_close =            hammer_vop_close,
90         .vop_ncreate =          hammer_vop_ncreate,
91         .vop_getattr =          hammer_vop_getattr,
92         .vop_inactive =         hammer_vop_inactive,
93         .vop_reclaim =          hammer_vop_reclaim,
94         .vop_nresolve =         hammer_vop_nresolve,
95         .vop_nlookupdotdot =    hammer_vop_nlookupdotdot,
96         .vop_nlink =            hammer_vop_nlink,
97         .vop_nmkdir =           hammer_vop_nmkdir,
98         .vop_nmknod =           hammer_vop_nmknod,
99         .vop_open =             hammer_vop_open,
100         .vop_pathconf =         vop_stdpathconf,
101         .vop_print =            hammer_vop_print,
102         .vop_readdir =          hammer_vop_readdir,
103         .vop_readlink =         hammer_vop_readlink,
104         .vop_nremove =          hammer_vop_nremove,
105         .vop_nrename =          hammer_vop_nrename,
106         .vop_nrmdir =           hammer_vop_nrmdir,
107         .vop_markatime =        hammer_vop_markatime,
108         .vop_setattr =          hammer_vop_setattr,
109         .vop_bmap =             hammer_vop_bmap,
110         .vop_strategy =         hammer_vop_strategy,
111         .vop_nsymlink =         hammer_vop_nsymlink,
112         .vop_nwhiteout =        hammer_vop_nwhiteout,
113         .vop_ioctl =            hammer_vop_ioctl,
114         .vop_mountctl =         hammer_vop_mountctl,
115         .vop_kqfilter =         hammer_vop_kqfilter
116 };
117
118 struct vop_ops hammer_spec_vops = {
119         .vop_default =          vop_defaultop,
120         .vop_fsync =            hammer_vop_fsync,
121         .vop_read =             vop_stdnoread,
122         .vop_write =            vop_stdnowrite,
123         .vop_access =           hammer_vop_access,
124         .vop_close =            hammer_vop_close,
125         .vop_markatime =        hammer_vop_markatime,
126         .vop_getattr =          hammer_vop_getattr,
127         .vop_inactive =         hammer_vop_inactive,
128         .vop_reclaim =          hammer_vop_reclaim,
129         .vop_setattr =          hammer_vop_setattr
130 };
131
132 struct vop_ops hammer_fifo_vops = {
133         .vop_default =          fifo_vnoperate,
134         .vop_fsync =            hammer_vop_fsync,
135         .vop_read =             hammer_vop_fiforead,
136         .vop_write =            hammer_vop_fifowrite,
137         .vop_access =           hammer_vop_access,
138         .vop_close =            hammer_vop_fifoclose,
139         .vop_markatime =        hammer_vop_markatime,
140         .vop_getattr =          hammer_vop_getattr,
141         .vop_inactive =         hammer_vop_inactive,
142         .vop_reclaim =          hammer_vop_reclaim,
143         .vop_setattr =          hammer_vop_setattr,
144         .vop_kqfilter =         hammer_vop_fifokqfilter
145 };
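/*
 * Editor's note (illustration, not part of the original source): three
 * dispatch tables are defined because only regular vnodes get HAMMER's
 * full data path.  Device vnodes reject data I/O outright (vop_stdnoread/
 * vop_stdnowrite) and fifos route it through fifofs, while both still
 * borrow HAMMER's attribute, fsync and close handling.  Slots left unset
 * fall through to the .vop_default entry.
 */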
146
147 static __inline
148 void
149 hammer_knote(struct vnode *vp, int flags)
150 {
151         if (flags)
152                 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
153 }
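/*
 * Editor's note (illustration, not part of the original source): callers
 * accumulate NOTE_* bits (NOTE_WRITE, NOTE_EXTEND, ...) in a local kflags
 * variable across a whole VOP and post them once on the way out, e.g. the
 * hammer_knote(ap->a_vp, kflags) call at the end of hammer_vop_write().
 * The if (flags) test above makes the common no-op case free.
 */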
154
155 static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
156                            struct vnode *dvp, struct ucred *cred,
157                            int flags, int isdir);
158 static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
159 static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
160
161 /*
162  * hammer_vop_fsync { vp, waitfor }
163  *
164  * fsync() an inode to disk and wait for it to be completely committed
165  * such that the information would not be undone if a crash occurred after
166  * return.
167  *
168  * NOTE: HAMMER's fsync()s are going to remain expensive until we implement
169  *       a REDO log.  A sysctl is provided to relax HAMMER's fsync()
170  *       operation.
171  *
172  *       Ultimately the combination of a REDO log and use of fast storage
173  *       to front-end cluster caches will make fsync fast, but it ain't
174  *       here yet.  And, in any case, we need real transactional
175  *       all-or-nothing features which are not restricted to a single file.
176  */
177 static
178 int
179 hammer_vop_fsync(struct vop_fsync_args *ap)
180 {
181         hammer_inode_t ip = VTOI(ap->a_vp);
182         hammer_mount_t hmp = ip->hmp;
183         int waitfor = ap->a_waitfor;
184         int mode;
185
186         lwkt_gettoken(&hmp->fs_token);
187
188         /*
189          * Fsync rule relaxation (default is either full synchronous flush
190          * or REDO semantics with synchronous flush).
191          */
192         if (ap->a_flags & VOP_FSYNC_SYSCALL) {
193                 switch(hammer_fsync_mode) {
194                 case 0:
195 mode0:
196                         /* no REDO, full synchronous flush */
197                         goto skip;
198                 case 1:
199 mode1:
200                         /* no REDO, full asynchronous flush */
201                         if (waitfor == MNT_WAIT)
202                                 waitfor = MNT_NOWAIT;
203                         goto skip;
204                 case 2:
205                         /* REDO semantics, synchronous flush */
206                         if (hmp->version < HAMMER_VOL_VERSION_FOUR)
207                                 goto mode0;
208                         mode = HAMMER_FLUSH_UNDOS_AUTO;
209                         break;
210                 case 3:
211                         /* REDO semantics, relaxed asynchronous flush */
212                         if (hmp->version < HAMMER_VOL_VERSION_FOUR)
213                                 goto mode1;
214                         mode = HAMMER_FLUSH_UNDOS_RELAXED;
215                         if (waitfor == MNT_WAIT)
216                                 waitfor = MNT_NOWAIT;
217                         break;
218                 case 4:
219                         /* ignore the fsync() system call */
220                         lwkt_reltoken(&hmp->fs_token);
221                         return(0);
222                 default:
223                         /* we have to do something */
224                         mode = HAMMER_FLUSH_UNDOS_RELAXED;
225                         if (waitfor == MNT_WAIT)
226                                 waitfor = MNT_NOWAIT;
227                         break;
228                 }
229
230                 /*
231                  * Fast fsync only needs to flush the UNDO/REDO fifo if
232                  * HAMMER_INODE_REDO is non-zero and the only modifications
233                  * made to the file are write or write-extends.
234                  */
235                 if ((ip->flags & HAMMER_INODE_REDO) &&
236                     (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0) {
237                         ++hammer_count_fsyncs;
238                         hammer_flusher_flush_undos(hmp, mode);
239                         ip->redo_count = 0;
240                         if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
241                                 vclrisdirty(ip->vp);
242                         lwkt_reltoken(&hmp->fs_token);
243                         return(0);
244                 }
245
246                 /*
247                  * REDO is enabled by fsync(), the idea being we really only
248                  * want to lay down REDO records when programs are using
249                  * fsync() heavily.  The first fsync() on the file starts
250                  * the gravy train going and later fsync()s keep it hot by
251                  * resetting the redo_count.
252                  *
253                  * We weren't running REDOs before now so we have to fall
254                  * through and do a full fsync of what we have.
255                  */
256                 if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
257                     (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
258                         ip->flags |= HAMMER_INODE_REDO;
259                         ip->redo_count = 0;
260                 }
261         }
262 skip:
263
264         /*
265          * Do a full flush sequence.
266          *
267          * Attempt to release the vnode while waiting for the inode to
268          * finish flushing.  This can really mess up inactive->reclaim
269          * sequences so only do it if the vnode is active.
270          *
271          * WARNING! The VX lock functions must be used.  vn_lock() will
272          *          fail when this is part of a VOP_RECLAIM sequence.
273          */
274         ++hammer_count_fsyncs;
275         vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
276         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
277         if (waitfor == MNT_WAIT) {
278                 int dorelock;
279
280                 if ((ap->a_vp->v_flag & VRECLAIMED) == 0) {
281                         vx_unlock(ap->a_vp);
282                         dorelock = 1;
283                 } else {
284                         dorelock = 0;
285                 }
286                 hammer_wait_inode(ip);
287                 if (dorelock)
288                         vx_lock(ap->a_vp);
289         }
290         if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
291                 vclrisdirty(ip->vp);
292         lwkt_reltoken(&hmp->fs_token);
293         return (ip->error);
294 }
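/*
 * Editor's illustration (not part of the original source): the
 * hammer_fsync_mode dispatch above, reduced to a standalone helper so the
 * mode table is visible at a glance.  A sketch only; the helper name and
 * the EXAMPLE_* return encoding are hypothetical.
 */
#if 0
#define EXAMPLE_FULL_FLUSH	(-1)	/* no REDO, do a full flush */
#define EXAMPLE_IGNORE_FSYNC	(-2)	/* fsync() becomes a no-op */

static int
example_fsync_policy(int fsync_mode, uint32_t vol_version, int *waitfor)
{
	switch (fsync_mode) {
	case 0:				/* no REDO, synchronous flush */
		return (EXAMPLE_FULL_FLUSH);
	case 1:				/* no REDO, asynchronous flush */
		if (*waitfor == MNT_WAIT)
			*waitfor = MNT_NOWAIT;
		return (EXAMPLE_FULL_FLUSH);
	case 2:				/* REDO, synchronous flush */
		if (vol_version < HAMMER_VOL_VERSION_FOUR)
			return (EXAMPLE_FULL_FLUSH);	/* degrade to mode 0 */
		return (HAMMER_FLUSH_UNDOS_AUTO);
	case 4:				/* ignore the fsync() system call */
		return (EXAMPLE_IGNORE_FSYNC);
	case 3:				/* REDO, relaxed asynchronous flush */
		if (vol_version < HAMMER_VOL_VERSION_FOUR) {
			if (*waitfor == MNT_WAIT)
				*waitfor = MNT_NOWAIT;
			return (EXAMPLE_FULL_FLUSH);	/* degrade to mode 1 */
		}
		/* FALLTHROUGH */
	default:			/* unknown modes act like mode 3 */
		if (*waitfor == MNT_WAIT)
			*waitfor = MNT_NOWAIT;
		return (HAMMER_FLUSH_UNDOS_RELAXED);
	}
}
#endif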
295
296 /*
297  * hammer_vop_read { vp, uio, ioflag, cred }
298  *
299  * MPSAFE (the cached-data path does not require fs_token)
300  */
301 static
302 int
303 hammer_vop_read(struct vop_read_args *ap)
304 {
305         struct hammer_transaction trans;
306         hammer_inode_t ip;
307         hammer_mount_t hmp;
308         off_t offset;
309         struct buf *bp;
310         struct uio *uio;
311         int error;
312         int n;
313         int seqcount;
314         int ioseqcount;
315         int blksize;
316         int bigread;
317         int got_trans;
318         size_t resid;
319
320         if (ap->a_vp->v_type != VREG)
321                 return (EINVAL);
322         ip = VTOI(ap->a_vp);
323         hmp = ip->hmp;
324         error = 0;
325         got_trans = 0;
326         uio = ap->a_uio;
327
328         /*
329          * Attempt to shortcut directly to the VM object using lwbufs.
330          * This is much faster than instantiating buffer cache buffers.
331          */
332         resid = uio->uio_resid;
333         error = vop_helper_read_shortcut(ap);
334         hammer_stats_file_read += resid - uio->uio_resid;
335         if (error)
336                 return (error);
337         if (uio->uio_resid == 0)
338                 goto finished;
339
340         /*
341          * Allow the UIO's size to override the sequential heuristic.
342          */
343         blksize = hammer_blocksize(uio->uio_offset);
344         seqcount = (uio->uio_resid + (BKVASIZE - 1)) / BKVASIZE;
345         ioseqcount = (ap->a_ioflag >> 16);
346         if (seqcount < ioseqcount)
347                 seqcount = ioseqcount;
348
349         /*
350          * If reading or writing a huge amount of data we have to break
351          * atomicity and allow the operation to be interrupted by a signal
352          * or it can DOS the machine.
353          */
354         bigread = (uio->uio_resid > 100 * 1024 * 1024);
355
356         /*
357          * Access the data typically in HAMMER_BUFSIZE blocks via the
358          * buffer cache, but HAMMER may use a variable block size based
359          * on the offset.
360          *
361          * XXX Temporary hack, delay the start transaction while we remain
362          *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
363          *     locked-shared.
364          */
365         while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
366                 int64_t base_offset;
367                 int64_t file_limit;
368
369                 blksize = hammer_blocksize(uio->uio_offset);
370                 offset = (int)uio->uio_offset & (blksize - 1);
371                 base_offset = uio->uio_offset - offset;
372
373                 if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
374                         break;
375
376                 /*
377                  * MPSAFE
378                  */
379                 bp = getblk(ap->a_vp, base_offset, blksize, 0, 0);
380                 if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == B_CACHE) {
381                         bp->b_flags &= ~B_AGE;
382                         error = 0;
383                         goto skip;
384                 }
385                 if (ap->a_ioflag & IO_NRDELAY) {
386                         bqrelse(bp);
387                         return (EWOULDBLOCK);
388                 }
389
390                 /*
391                  * MPUNSAFE
392                  */
393                 if (got_trans == 0) {
394                         hammer_start_transaction(&trans, ip->hmp);
395                         got_trans = 1;
396                 }
397
398                 /*
399                  * NOTE: A valid bp has already been acquired, but was not
400                  *       B_CACHE.
401                  */
402                 if (hammer_cluster_enable) {
403                         /*
404                          * Use file_limit to prevent cluster_read() from
405                          * creating buffers of the wrong block size past
406                          * the demarc.
407                          */
408                         file_limit = ip->ino_data.size;
409                         if (base_offset < HAMMER_XDEMARC &&
410                             file_limit > HAMMER_XDEMARC) {
411                                 file_limit = HAMMER_XDEMARC;
412                         }
413                         error = cluster_readx(ap->a_vp,
414                                              file_limit, base_offset,
415                                              blksize, uio->uio_resid,
416                                              seqcount * BKVASIZE, &bp);
417                 } else {
418                         error = breadnx(ap->a_vp, base_offset, blksize,
419                                         NULL, NULL, 0, &bp);
420                 }
421                 if (error) {
422                         brelse(bp);
423                         break;
424                 }
425 skip:
426                 if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
427                         hdkprintf("zone2_offset %016jx read file %016jx@%016jx\n",
428                                 (intmax_t)bp->b_bio2.bio_offset,
429                                 (intmax_t)ip->obj_id,
430                                 (intmax_t)bp->b_loffset);
431                 }
432                 bp->b_flags &= ~B_IODEBUG;
433                 if (blksize == HAMMER_XBUFSIZE)
434                         bp->b_flags |= B_CLUSTEROK;
435
436                 n = blksize - offset;
437                 if (n > uio->uio_resid)
438                         n = uio->uio_resid;
439                 if (n > ip->ino_data.size - uio->uio_offset)
440                         n = (int)(ip->ino_data.size - uio->uio_offset);
441
442                 /*
443                  * Set B_AGE, data has a lower priority than meta-data.
444                  *
445                  * Use a hold/unlock/drop sequence to run the uiomove
446                  * with the buffer unlocked, avoiding deadlocks against
447                  * read()s on mmap()'d spaces.
448                  */
449                 bp->b_flags |= B_AGE;
450                 error = uiomovebp(bp, (char *)bp->b_data + offset, n, uio);
451                 bqrelse(bp);
452
453                 if (error)
454                         break;
455                 hammer_stats_file_read += n;
456         }
457
458 finished:
459
460         /*
461          * Try to update the atime with just the inode lock for maximum
462          * concurrency.  If we can't shortcut it we have to get the full
463          * blown transaction.
464          */
465         if (got_trans == 0 && hammer_update_atime_quick(ip) < 0) {
466                 hammer_start_transaction(&trans, ip->hmp);
467                 got_trans = 1;
468         }
469
470         if (got_trans) {
471                 if ((ip->flags & HAMMER_INODE_RO) == 0 &&
472                     (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
473                         lwkt_gettoken(&hmp->fs_token);
474                         ip->ino_data.atime = trans.time;
475                         hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
476                         hammer_done_transaction(&trans);
477                         lwkt_reltoken(&hmp->fs_token);
478                 } else {
479                         hammer_done_transaction(&trans);
480                 }
481         }
482         return (error);
483 }
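/*
 * Editor's illustration (not part of the original source): the buffer
 * addressing math used in the read loop above.  HAMMER block sizes are
 * powers of two, so the intra-block offset and the block-aligned base
 * offset reduce to mask operations.  The helper name is hypothetical.
 */
#if 0
static void
example_block_math(off_t uio_offset)
{
	int blksize;		/* variable: depends on the file offset */
	int offset;		/* byte offset within the block */
	off_t base_offset;	/* block-aligned offset passed to getblk() */

	blksize = hammer_blocksize(uio_offset);
	offset = (int)uio_offset & (blksize - 1);
	base_offset = uio_offset - offset; /* == uio_offset & ~(blksize - 1) */
}
#endif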
484
485 /*
486  * hammer_vop_write { vp, uio, ioflag, cred }
487  */
488 static
489 int
490 hammer_vop_write(struct vop_write_args *ap)
491 {
492         struct hammer_transaction trans;
493         struct hammer_inode *ip;
494         hammer_mount_t hmp;
495         thread_t td;
496         struct uio *uio;
497         int offset;
498         off_t base_offset;
499         int64_t cluster_eof;
500         struct buf *bp;
501         int kflags;
502         int error;
503         int n;
504         int flags;
505         int seqcount;
506         int bigwrite;
507
508         if (ap->a_vp->v_type != VREG)
509                 return (EINVAL);
510         ip = VTOI(ap->a_vp);
511         hmp = ip->hmp;
512         error = 0;
513         kflags = 0;
514         seqcount = ap->a_ioflag >> 16;
515
516         if (ip->flags & HAMMER_INODE_RO)
517                 return (EROFS);
518
519         /*
520          * Create a transaction to cover the operations we perform.
521          */
522         hammer_start_transaction(&trans, hmp);
523         uio = ap->a_uio;
524
525         /*
526          * Check append mode
527          */
528         if (ap->a_ioflag & IO_APPEND)
529                 uio->uio_offset = ip->ino_data.size;
530
531         /*
532          * Check for illegal write offsets.  Valid range is 0...2^63-1.
533          *
534          * NOTE: the base_offset assignment is required to work around what
535          * I consider to be a GCC-4 optimization bug.
536          */
537         if (uio->uio_offset < 0) {
538                 hammer_done_transaction(&trans);
539                 return (EFBIG);
540         }
541         base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
542         if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
543                 hammer_done_transaction(&trans);
544                 return (EFBIG);
545         }
546
547         if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
548             base_offset > td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
549                 hammer_done_transaction(&trans);
550                 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
551                 return (EFBIG);
552         }
553
554         /*
555          * If reading or writing a huge amount of data we have to break
556          * atomicity and allow the operation to be interrupted by a signal
557          * or it can DOS the machine.
558          *
559          * Preset redo_count so we stop generating REDOs earlier if the
560          * limit is exceeded.
561          *
562          * redo_count is a heuristic; SMP races are ok
563          */
564         bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
565         if ((ip->flags & HAMMER_INODE_REDO) &&
566             ip->redo_count < hammer_limit_redo) {
567                 ip->redo_count += uio->uio_resid;
568         }
569
570         /*
571          * Access the data typically in HAMMER_BUFSIZE blocks via the
572          * buffer cache, but HAMMER may use a variable block size based
573          * on the offset.
574          */
575         while (uio->uio_resid > 0) {
576                 int fixsize = 0;
577                 int blksize;
578                 int blkmask;
579                 int trivial;
580                 int endofblk;
581                 off_t nsize;
582
583                 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
584                         break;
585                 if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
586                         break;
587
588                 blksize = hammer_blocksize(uio->uio_offset);
589
590                 /*
591                  * Control the number of pending records associated with
592                  * this inode.  If too many have accumulated start a
593                  * flush.  Try to maintain a pipeline with the flusher.
594                  *
595                  * NOTE: It is possible for other sources to grow the
596                  *       records but not necessarily issue another flush,
597                  *       so use a timeout and ensure that a re-flush occurs.
598                  */
599                 if (ip->rsv_recs >= hammer_limit_inode_recs) {
600                         lwkt_gettoken(&hmp->fs_token);
601                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
602                         while (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
603                                 ip->flags |= HAMMER_INODE_RECSW;
604                                 tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
605                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
606                         }
607                         lwkt_reltoken(&hmp->fs_token);
608                 }
609
610                 /*
611                  * Do not allow HAMMER to blow out the buffer cache.  Very
612                  * large UIOs can lock out other processes due to bwillwrite()
613                  * mechanics.
614                  *
615                  * The hammer inode is not locked during these operations.
616                  * The vnode is locked which can interfere with the pageout
617                  * daemon for non-UIO_NOCOPY writes but should not interfere
618                  * with the buffer cache.  Even so, we cannot afford to
619                  * allow the pageout daemon to build up too many dirty buffer
620                  * cache buffers.
621                  *
622                  * Only call this if we aren't being recursively called from
623                  * a virtual disk device (vn), else we may deadlock.
624                  */
625                 if ((ap->a_ioflag & IO_RECURSE) == 0)
626                         bwillwrite(blksize);
627
628                 /*
629                  * Calculate the blocksize at the current offset and figure
630                  * out how much we can actually write.
631                  */
632                 blkmask = blksize - 1;
633                 offset = (int)uio->uio_offset & blkmask;
634                 base_offset = uio->uio_offset & ~(int64_t)blkmask;
635                 n = blksize - offset;
636                 if (n > uio->uio_resid) {
637                         n = uio->uio_resid;
638                         endofblk = 0;
639                 } else {
640                         endofblk = 1;
641                 }
642                 nsize = uio->uio_offset + n;
643                 if (nsize > ip->ino_data.size) {
644                         if (uio->uio_offset > ip->ino_data.size)
645                                 trivial = 0;
646                         else
647                                 trivial = 1;
648                         nvextendbuf(ap->a_vp,
649                                     ip->ino_data.size,
650                                     nsize,
651                                     hammer_blocksize(ip->ino_data.size),
652                                     hammer_blocksize(nsize),
653                                     hammer_blockoff(ip->ino_data.size),
654                                     hammer_blockoff(nsize),
655                                     trivial);
656                         fixsize = 1;
657                         kflags |= NOTE_EXTEND;
658                 }
659
660                 if (uio->uio_segflg == UIO_NOCOPY) {
661                         /*
662                          * Issuing a write with the same data backing the
663                          * buffer.  Instantiate the buffer to collect the
664                          * backing vm pages, then read in any missing bits.
665                          *
666                          * This case is used by vop_stdputpages().
667                          */
668                         bp = getblk(ap->a_vp, base_offset,
669                                     blksize, GETBLK_BHEAVY, 0);
670                         if ((bp->b_flags & B_CACHE) == 0) {
671                                 bqrelse(bp);
672                                 error = bread(ap->a_vp, base_offset,
673                                               blksize, &bp);
674                         }
675                 } else if (offset == 0 && uio->uio_resid >= blksize) {
676                         /*
677                          * Even though we are entirely overwriting the buffer
678                          * we may still have to zero it out to avoid a
679                          * mmap/write visibility issue.
680                          */
681                         bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
682                         if ((bp->b_flags & B_CACHE) == 0)
683                                 vfs_bio_clrbuf(bp);
684                 } else if (base_offset >= ip->ino_data.size) {
685                         /*
686                          * If the base offset of the buffer is beyond the
687                          * file EOF, we don't have to issue a read.
688                          */
689                         bp = getblk(ap->a_vp, base_offset,
690                                     blksize, GETBLK_BHEAVY, 0);
691                         vfs_bio_clrbuf(bp);
692                 } else {
693                         /*
694                          * Partial overwrite, read in any missing bits then
695                          * replace the portion being written.
696                          */
697                         error = bread(ap->a_vp, base_offset, blksize, &bp);
698                         if (error == 0)
699                                 bheavy(bp);
700                 }
701                 if (error == 0)
702                         error = uiomovebp(bp, bp->b_data + offset, n, uio);
703
704                 lwkt_gettoken(&hmp->fs_token);
705
706                 /*
707                  * Generate REDO records if enabled and redo_count will not
708                  * exceed the limit.
709                  *
710                  * If redo_count exceeds the limit we stop generating records
711                  * and clear HAMMER_INODE_REDO.  This will cause the next
712                  * fsync() to do a full meta-data sync instead of just an
713                  * UNDO/REDO fifo update.
714                  *
715                  * When clearing HAMMER_INODE_REDO any pre-existing REDOs
716                  * will still be tracked.  The tracks will be terminated
717                  * when the related meta-data (including possible data
718                  * modifications which are not tracked via REDO) is
719                  * flushed.
720                  */
721                 if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
722                         if (ip->redo_count < hammer_limit_redo) {
723                                 bp->b_flags |= B_VFSFLAG1;
724                                 error = hammer_generate_redo(&trans, ip,
725                                                      base_offset + offset,
726                                                      HAMMER_REDO_WRITE,
727                                                      bp->b_data + offset,
728                                                      (size_t)n);
729                         } else {
730                                 ip->flags &= ~HAMMER_INODE_REDO;
731                         }
732                 }
733
734                 /*
735                  * If we screwed up we have to undo any VM size changes we
736                  * made.
737                  */
738                 if (error) {
739                         brelse(bp);
740                         if (fixsize) {
741                                 nvtruncbuf(ap->a_vp, ip->ino_data.size,
742                                           hammer_blocksize(ip->ino_data.size),
743                                           hammer_blockoff(ip->ino_data.size),
744                                           0);
745                         }
746                         lwkt_reltoken(&hmp->fs_token);
747                         break;
748                 }
749                 kflags |= NOTE_WRITE;
750                 hammer_stats_file_write += n;
751                 if (blksize == HAMMER_XBUFSIZE)
752                         bp->b_flags |= B_CLUSTEROK;
753                 if (ip->ino_data.size < uio->uio_offset) {
754                         ip->ino_data.size = uio->uio_offset;
755                         flags = HAMMER_INODE_SDIRTY;
756                 } else {
757                         flags = 0;
758                 }
759                 ip->ino_data.mtime = trans.time;
760                 flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
761                 hammer_modify_inode(&trans, ip, flags);
762
763                 /*
764                  * Once we dirty the buffer any cached zone-X offset
765                  * becomes invalid.  HAMMER NOTE: no-history mode cannot
766                  * allow overwriting the same data sector unless
767                  * we provide UNDOs for the old data, which we don't.
768                  */
769                 bp->b_bio2.bio_offset = NOOFFSET;
770
771                 lwkt_reltoken(&hmp->fs_token);
772
773                 /*
774                  * Final buffer disposition.
775                  *
776                  * Because meta-data updates are deferred, HAMMER is
777                  * especially sensitive to excessive bdwrite()s because
778                  * the I/O stream is not broken up by disk reads.  So the
779                  * buffer cache simply cannot keep up.
780                  *
781                  * WARNING!  blksize is variable.  cluster_write() is
782                  *           expected to not blow up if it encounters
783                  *           buffers that do not match the passed blksize.
784                  *
785                  * NOTE!  Hammer shouldn't need to bawrite()/cluster_write().
786                  *        The ip->rsv_recs check should burst-flush the data.
787                  *        If we queue it immediately the buf could be left
788                  *        locked on the device queue for a very long time.
789                  *
790                  *        However, failing to flush a dirty buffer out when
791                  *        issued from the pageout daemon can result in a low
792                  *        memory deadlock against bio_page_alloc(), so we
793                  *        have to bawrite() on IO_ASYNC as well.
794                  *
795                  * NOTE!  To avoid degenerate stalls due to mismatched block
796                  *        sizes we only honor IO_DIRECT on the write which
797                  *        abuts the end of the buffer.  However, we must
798                  *        honor IO_SYNC in case someone is silly enough to
799                  *        configure a HAMMER file as swap, or when HAMMER
800                  *        is serving NFS (for commits).  Ick ick.
801                  */
802                 bp->b_flags |= B_AGE;
803                 if (blksize == HAMMER_XBUFSIZE)
804                         bp->b_flags |= B_CLUSTEROK;
805
806                 if (ap->a_ioflag & IO_SYNC) {
807                         bwrite(bp);
808                 } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
809                         bawrite(bp);
810                 } else if (ap->a_ioflag & IO_ASYNC) {
811                         bawrite(bp);
812                 } else if (hammer_cluster_enable &&
813                            !(ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
814                         if (base_offset < HAMMER_XDEMARC)
815                                 cluster_eof = hammer_blockdemarc(base_offset,
816                                                          ip->ino_data.size);
817                         else
818                                 cluster_eof = ip->ino_data.size;
819                         cluster_write(bp, cluster_eof, blksize, seqcount);
820                 } else {
821                         bdwrite(bp);
822                 }
823         }
824         hammer_done_transaction(&trans);
825         hammer_knote(ap->a_vp, kflags);
826
827         return (error);
828 }
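/*
 * Editor's illustration (not part of the original source): the buffer
 * disposition ladder at the bottom of the write loop above, reduced to
 * its decision order.  The clustering branch is omitted; the helper name
 * is hypothetical.
 */
#if 0
static void
example_buffer_disposition(struct buf *bp, int ioflag, int endofblk)
{
	if (ioflag & IO_SYNC) {
		bwrite(bp);	/* must be honored: swap, NFS commits */
	} else if ((ioflag & IO_DIRECT) && endofblk) {
		bawrite(bp);	/* IO_DIRECT only honored at end-of-block */
	} else if (ioflag & IO_ASYNC) {
		bawrite(bp);	/* pageout daemon: avoid low-memory deadlock */
	} else {
		bdwrite(bp);	/* default: delayed write (or cluster_write) */
	}
}
#endif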
829
830 /*
831  * hammer_vop_access { vp, mode, cred }
832  *
833  * MPSAFE - does not require fs_token
834  */
835 static
836 int
837 hammer_vop_access(struct vop_access_args *ap)
838 {
839         struct hammer_inode *ip = VTOI(ap->a_vp);
840         uid_t uid;
841         gid_t gid;
842         int error;
843
844         ++hammer_stats_file_iopsr;
845         uid = hammer_to_unix_xid(&ip->ino_data.uid);
846         gid = hammer_to_unix_xid(&ip->ino_data.gid);
847
848         error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
849                                   ip->ino_data.uflags);
850         return (error);
851 }
852
853 /*
854  * hammer_vop_advlock { vp, id, op, fl, flags }
855  *
856  * MPSAFE - does not require fs_token
857  */
858 static
859 int
860 hammer_vop_advlock(struct vop_advlock_args *ap)
861 {
862         hammer_inode_t ip = VTOI(ap->a_vp);
863
864         return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
865 }
866
867 /*
868  * hammer_vop_close { vp, fflag }
869  *
870  * We can only sync-on-close for normal closes.  XXX disabled for now.
871  */
872 static
873 int
874 hammer_vop_close(struct vop_close_args *ap)
875 {
876 #if 0
877         struct vnode *vp = ap->a_vp;
878         hammer_inode_t ip = VTOI(vp);
879         int waitfor;
880         if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
881                 if (vn_islocked(vp) == LK_EXCLUSIVE &&
882                     (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
883                         if (ip->flags & HAMMER_INODE_CLOSESYNC)
884                                 waitfor = MNT_WAIT;
885                         else
886                                 waitfor = MNT_NOWAIT;
887                         ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
888                                        HAMMER_INODE_CLOSEASYNC);
889                         VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
890                 }
891         }
892 #endif
893         return (vop_stdclose(ap));
894 }
895
896 /*
897  * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
898  *
899  * The operating system has already ensured that the directory entry
900  * does not exist and done all appropriate namespace locking.
901  */
902 static
903 int
904 hammer_vop_ncreate(struct vop_ncreate_args *ap)
905 {
906         struct hammer_transaction trans;
907         struct hammer_inode *dip;
908         struct hammer_inode *nip;
909         struct nchandle *nch;
910         hammer_mount_t hmp;
911         int error;
912
913         nch = ap->a_nch;
914         dip = VTOI(ap->a_dvp);
915         hmp = dip->hmp;
916
917         if (dip->flags & HAMMER_INODE_RO)
918                 return (EROFS);
919         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
920                 return (error);
921
922         /*
923          * Create a transaction to cover the operations we perform.
924          */
925         lwkt_gettoken(&hmp->fs_token);
926         hammer_start_transaction(&trans, hmp);
927         ++hammer_stats_file_iopsw;
928
929         /*
930          * Create a new filesystem object of the requested type.  The
931          * returned inode will be referenced and shared-locked to prevent
932          * it from being moved to the flusher.
933          */
934         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
935                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
936                                     NULL, &nip);
937         if (error) {
938                 hkprintf("hammer_create_inode error %d\n", error);
939                 hammer_done_transaction(&trans);
940                 *ap->a_vpp = NULL;
941                 lwkt_reltoken(&hmp->fs_token);
942                 return (error);
943         }
944
945         /*
946          * Add the new filesystem object to the directory.  This will also
947          * bump the inode's link count.
948          */
949         error = hammer_ip_add_directory(&trans, dip,
950                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
951                                         nip);
952         if (error)
953                 hkprintf("hammer_ip_add_directory error %d\n", error);
954
955         /*
956          * Finish up.
957          */
958         if (error) {
959                 hammer_rel_inode(nip, 0);
960                 hammer_done_transaction(&trans);
961                 *ap->a_vpp = NULL;
962         } else {
963                 error = hammer_get_vnode(nip, ap->a_vpp);
964                 hammer_done_transaction(&trans);
965                 hammer_rel_inode(nip, 0);
966                 if (error == 0) {
967                         cache_setunresolved(ap->a_nch);
968                         cache_setvp(ap->a_nch, *ap->a_vpp);
969                 }
970                 hammer_knote(ap->a_dvp, NOTE_WRITE);
971         }
972         lwkt_reltoken(&hmp->fs_token);
973         return (error);
974 }
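/*
 * Editor's note (illustration, not part of the original source): every
 * modifying VOP in this file uses the same bracket as hammer_vop_ncreate()
 * above: take fs_token, open a transaction, do the B-Tree/inode work,
 * close the transaction, release the token.  Hypothetical skeleton:
 */
#if 0
static int
example_modifying_vop(hammer_mount_t hmp)
{
	struct hammer_transaction trans;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	hammer_start_transaction(&trans, hmp);
	error = 0;		/* ... create/link/remove objects here ... */
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
#endif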
975
976 /*
977  * hammer_vop_getattr { vp, vap }
978  *
979  * Retrieve an inode's attribute information.  When accessing inodes
980  * historically we fake the atime field to ensure consistent results.
981  * The atime field is stored in the B-Tree element and allowed to be
982  * updated without cycling the element.
983  *
984  * MPSAFE - does not require fs_token
985  */
986 static
987 int
988 hammer_vop_getattr(struct vop_getattr_args *ap)
989 {
990         struct hammer_inode *ip = VTOI(ap->a_vp);
991         struct vattr *vap = ap->a_vap;
992
993         /*
994          * We want the fsid to be different when accessing a filesystem
995          * with different as-of's so programs like diff don't think
996          * the files are the same.
997          *
998          * We also want the fsid to be the same when comparing snapshots,
999          * or when comparing mirrors (which might be backed by different
1000          * physical devices).  HAMMER fsids are based on the PFS's
1001          * shared_uuid field.
1002          *
1003          * XXX there is a chance of collision here.  The va_fsid reported
1004          * by stat is different from the more involved fsid used in the
1005          * mount structure.
1006          */
1007         ++hammer_stats_file_iopsr;
1008         hammer_lock_sh(&ip->lock);
1009         vap->va_fsid = ip->pfsm->fsid_udev ^ (uint32_t)ip->obj_asof ^
1010                        (uint32_t)(ip->obj_asof >> 32);
1011
1012         vap->va_fileid = ip->ino_leaf.base.obj_id;
1013         vap->va_mode = ip->ino_data.mode;
1014         vap->va_nlink = ip->ino_data.nlinks;
1015         vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
1016         vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
1017         vap->va_rmajor = 0;
1018         vap->va_rminor = 0;
1019         vap->va_size = ip->ino_data.size;
1020
1021         /*
1022          * Special case for @@PFS softlinks.  The actual size of the
1023          * expanded softlink is "@@0x%016llx:%05d" == 26 bytes.
1024          * or for MAX_TID is    "@@-1:%05d" == 10 bytes.
1025          *
1026          * Note that the userspace hammer command does not allow users to
1027          * create a @@PFS softlink under another PFS (id != 0), so the
1028          * inode localization for a @@PFS softlink is always 0.
1029          */
1030         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
1031             ip->ino_data.size == 10 &&
1032             ip->obj_asof == HAMMER_MAX_TID &&
1033             ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
1034             strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
1035                 if (hammer_is_pfs_slave(&ip->pfsm->pfsd))
1036                         vap->va_size = 26;
1037                 else
1038                         vap->va_size = 10;
1039         }
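        /*
         * Editor's note (illustration, not part of the original source):
         * the sizes work out as "@@" + "0x" + 16 hex digits + ":" + 5
         * PFS-id digits = 26 bytes for a master/slave PFS link, and
         * "@@" + "-1" + ":" + 5 digits = 10 bytes for a MAX_TID link.
         */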
1040
1041         /*
1042          * We must provide a consistent atime and mtime for snapshots
1043          * so people can do a 'tar cf - ... | md5' on them and get
1044          * consistent results.
1045          */
1046         if (ip->flags & HAMMER_INODE_RO) {
1047                 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
1048                 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
1049         } else {
1050                 hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
1051                 hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
1052         }
1053         hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
1054         vap->va_flags = ip->ino_data.uflags;
1055         vap->va_gen = 1;        /* hammer inums are unique for all time */
1056         vap->va_blocksize = HAMMER_BUFSIZE;
1057         if (ip->ino_data.size >= HAMMER_XDEMARC) {
1058                 vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
1059                                 ~HAMMER_XBUFMASK64;
1060         } else if (ip->ino_data.size > HAMMER_HBUFSIZE) {
1061                 vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
1062                                 ~HAMMER_BUFMASK64;
1063         } else {
1064                 vap->va_bytes = (ip->ino_data.size + 15) & ~15;
1065         }
1066
1067         vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
1068         vap->va_filerev = 0;    /* XXX */
1069         vap->va_uid_uuid = ip->ino_data.uid;
1070         vap->va_gid_uuid = ip->ino_data.gid;
1071         vap->va_fsid_uuid = ip->hmp->fsid;
1072         vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
1073                           VA_FSID_UUID_VALID;
1074
1075         switch (ip->ino_data.obj_type) {
1076         case HAMMER_OBJTYPE_CDEV:
1077         case HAMMER_OBJTYPE_BDEV:
1078                 vap->va_rmajor = ip->ino_data.rmajor;
1079                 vap->va_rminor = ip->ino_data.rminor;
1080                 break;
1081         default:
1082                 break;
1083         }
1084         hammer_unlock(&ip->lock);
1085         return(0);
1086 }
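/*
 * Editor's illustration (not part of the original source): the va_bytes
 * rounding rules above, isolated.  Tiny files are charged in 16-byte
 * units, mid-sized files in regular HAMMER buffers, and files at or
 * beyond the large-block demarc in extended buffers.  The helper name is
 * hypothetical.
 */
#if 0
static int64_t
example_va_bytes(int64_t size)
{
	if (size >= HAMMER_XDEMARC)		/* large-block zone */
		return ((size + HAMMER_XBUFMASK64) & ~HAMMER_XBUFMASK64);
	if (size > HAMMER_HBUFSIZE)		/* regular buffers */
		return ((size + HAMMER_BUFMASK64) & ~HAMMER_BUFMASK64);
	return ((size + 15) & ~(int64_t)15);	/* 16-byte in-band units */
}
#endif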
1087
1088 /*
1089  * hammer_vop_nresolve { nch, dvp, cred }
1090  *
1091  * Locate the requested directory entry.
1092  */
1093 static
1094 int
1095 hammer_vop_nresolve(struct vop_nresolve_args *ap)
1096 {
1097         struct hammer_transaction trans;
1098         struct namecache *ncp;
1099         hammer_mount_t hmp;
1100         hammer_inode_t dip;
1101         hammer_inode_t ip;
1102         hammer_tid_t asof;
1103         struct hammer_cursor cursor;
1104         struct vnode *vp;
1105         int64_t namekey;
1106         int error;
1107         int i;
1108         int nlen;
1109         int flags;
1110         int ispfs;
1111         int64_t obj_id;
1112         uint32_t localization;
1113         uint32_t max_iterations;
1114
1115         /*
1116          * Misc initialization, plus handle as-of name extensions.  Look for
1117          * the '@@' extension.  Note that as-of files and directories cannot
1118          * be modified.
1119          */
1120         dip = VTOI(ap->a_dvp);
1121         ncp = ap->a_nch->ncp;
1122         asof = dip->obj_asof;
1123         localization = dip->obj_localization;   /* for code consistency */
1124         nlen = ncp->nc_nlen;
1125         flags = dip->flags & HAMMER_INODE_RO;
1126         ispfs = 0;
1127         hmp = dip->hmp;
1128
1129         lwkt_gettoken(&hmp->fs_token);
1130         hammer_simple_transaction(&trans, hmp);
1131         ++hammer_stats_file_iopsr;
1132
1133         for (i = 0; i < nlen; ++i) {
1134                 if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
1135                         error = hammer_str_to_tid(ncp->nc_name + i + 2,
1136                                                   &ispfs, &asof, &localization);
1137                         if (error != 0) {
1138                                 i = nlen;
1139                                 break;
1140                         }
1141                         if (asof != HAMMER_MAX_TID)
1142                                 flags |= HAMMER_INODE_RO;
1143                         break;
1144                 }
1145         }
1146         nlen = i;
1147
1148         /*
1149          * If this is a PFS softlink we dive into the PFS
1150          */
1151         if (ispfs && nlen == 0) {
1152                 ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
1153                                       asof, localization,
1154                                       flags, &error);
1155                 if (error == 0) {
1156                         error = hammer_get_vnode(ip, &vp);
1157                         hammer_rel_inode(ip, 0);
1158                 } else {
1159                         vp = NULL;
1160                 }
1161                 if (error == 0) {
1162                         vn_unlock(vp);
1163                         cache_setvp(ap->a_nch, vp);
1164                         vrele(vp);
1165                 }
1166                 goto done;
1167         }
1168
1169         /*
1170          * If there is no path component the time extension is relative to dip.
1171          * e.g. "fubar/@@<snapshot>"
1172          *
1173          * "." is handled by the kernel, but ".@@<snapshot>" is not.
1174          * e.g. "fubar/.@@<snapshot>"
1175          *
1176          * ".." is handled by the kernel.  We do not currently handle
1177          * "..@<snapshot>".
1178          */
1179         if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
1180                 ip = hammer_get_inode(&trans, dip, dip->obj_id,
1181                                       asof, dip->obj_localization,
1182                                       flags, &error);
1183                 if (error == 0) {
1184                         error = hammer_get_vnode(ip, &vp);
1185                         hammer_rel_inode(ip, 0);
1186                 } else {
1187                         vp = NULL;
1188                 }
1189                 if (error == 0) {
1190                         vn_unlock(vp);
1191                         cache_setvp(ap->a_nch, vp);
1192                         vrele(vp);
1193                 }
1194                 goto done;
1195         }
1196
1197         /*
1198          * Calculate the namekey and setup the key range for the scan.  This
1199          * works kinda like a chained hash table where the lower 32 bits
1200          * of the namekey synthesize the chain.
1201          *
1202          * The key range is inclusive of both key_beg and key_end.
1203          */
1204         namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
1205                                            &max_iterations);
1206
1207         error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
1208         cursor.key_beg.localization = dip->obj_localization |
1209                                       hammer_dir_localization(dip);
1210         cursor.key_beg.obj_id = dip->obj_id;
1211         cursor.key_beg.key = namekey;
1212         cursor.key_beg.create_tid = 0;
1213         cursor.key_beg.delete_tid = 0;
1214         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1215         cursor.key_beg.obj_type = 0;
1216
1217         cursor.key_end = cursor.key_beg;
1218         cursor.key_end.key += max_iterations;
1219         cursor.asof = asof;
1220         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1221
1222         /*
1223          * Scan all matching records (the chain), locate the one matching
1224          * the requested path component.
1225          *
1226          * The hammer_ip_*() functions merge in-memory records with on-disk
1227          * records for the purposes of the search.
1228          */
1229         obj_id = 0;
1230         localization = HAMMER_DEF_LOCALIZATION;
1231
1232         if (error == 0) {
1233                 error = hammer_ip_first(&cursor);
1234                 while (error == 0) {
1235                         error = hammer_ip_resolve_data(&cursor);
1236                         if (error)
1237                                 break;
1238                         if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
1239                             bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
1240                                 obj_id = cursor.data->entry.obj_id;
1241                                 localization = cursor.data->entry.localization;
1242                                 break;
1243                         }
1244                         error = hammer_ip_next(&cursor);
1245                 }
1246         }
1247         hammer_done_cursor(&cursor);
1248
1249         /*
1250          * Lookup the obj_id.  This should always succeed.  If it does not
1251          * the filesystem may be damaged and we return a dummy inode.
1252          */
1253         if (error == 0) {
1254                 ip = hammer_get_inode(&trans, dip, obj_id,
1255                                       asof, localization,
1256                                       flags, &error);
1257                 if (error == ENOENT) {
1258                         hkprintf("WARNING: Missing inode for dirent \"%s\"\n"
1259                                 "\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
1260                                 ncp->nc_name,
1261                                 (long long)obj_id, (long long)asof,
1262                                 localization);
1263                         error = 0;
1264                         ip = hammer_get_dummy_inode(&trans, dip, obj_id,
1265                                                     asof, localization,
1266                                                     flags, &error);
1267                 }
1268                 if (error == 0) {
1269                         error = hammer_get_vnode(ip, &vp);
1270                         hammer_rel_inode(ip, 0);
1271                 } else {
1272                         vp = NULL;
1273                 }
1274                 if (error == 0) {
1275                         vn_unlock(vp);
1276                         cache_setvp(ap->a_nch, vp);
1277                         vrele(vp);
1278                 }
1279         } else if (error == ENOENT) {
1280                 cache_setvp(ap->a_nch, NULL);
1281         }
1282 done:
1283         hammer_done_transaction(&trans);
1284         lwkt_reltoken(&hmp->fs_token);
1285         return (error);
1286 }
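/*
 * Editor's illustration (not part of the original source): the lookup
 * above is a chained-hash scan.  The component name hashes to a 64-bit
 * namekey; colliding entries occupy the inclusive key range
 * [namekey, namekey + max_iterations] and the cursor walks that range
 * comparing the actual names.  Hypothetical sketch of the comparison step:
 */
#if 0
static int
example_dirent_match(struct hammer_cursor *cursor, const char *name, int nlen)
{
	/* entry name length = record data length minus the fixed header */
	if (nlen != cursor->leaf->data_len - HAMMER_ENTRY_NAME_OFF)
		return (0);
	return (bcmp(name, cursor->data->entry.name, nlen) == 0);
}
#endif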
1287
1288 /*
1289  * hammer_vop_nlookupdotdot { dvp, vpp, cred }
1290  *
1291  * Locate the parent directory of a directory vnode.
1292  *
1293  * dvp is referenced but not locked.  *vpp must be returned referenced and
1294  * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
1295  * at the root; instead it could indicate that the directory we were in was
1296  * removed.
1297  *
1298  * NOTE: as-of sequences are not linked into the directory structure.  If
1299  * we are at the root with a different asof than the mount point, reload
1300  * the same directory with the mount point's asof.   I'm not sure what this
1301  * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
1302  * get confused, but it hasn't been tested.
1303  */
1304 static
1305 int
1306 hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1307 {
1308         struct hammer_transaction trans;
1309         struct hammer_inode *dip;
1310         struct hammer_inode *ip;
1311         hammer_mount_t hmp;
1312         int64_t parent_obj_id;
1313         uint32_t parent_obj_localization;
1314         hammer_tid_t asof;
1315         int error;
1316
1317         dip = VTOI(ap->a_dvp);
1318         asof = dip->obj_asof;
1319         hmp = dip->hmp;
1320
1321         /*
1322          * Who is our parent?  This could be the root of a pseudo-filesystem
1323          * whose parent is in another localization domain.
1324          */
1325         lwkt_gettoken(&hmp->fs_token);
1326         parent_obj_id = dip->ino_data.parent_obj_id;
1327         if (dip->obj_id == HAMMER_OBJID_ROOT)
1328                 parent_obj_localization = HAMMER_DEF_LOCALIZATION;
1329         else
1330                 parent_obj_localization = dip->obj_localization;
1331
1332         /*
1333          * It's probably a PFS root when dip->ino_data.parent_obj_id is 0.
1334          */
1335         if (parent_obj_id == 0) {
1336                 if (dip->obj_id == HAMMER_OBJID_ROOT &&
1337                    asof != hmp->asof) {
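                             /*
                              * Historical (as-of) access at the root:
                              * redirect ".." to the same directory using
                              * the mount's asof and supply a fake
                              * "0x%016llx" name component for the lookup.
                              */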
1338                         parent_obj_id = dip->obj_id;
1339                         asof = hmp->asof;
1340                         *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
1341                         ksnprintf(*ap->a_fakename, 19, "0x%016llx",
1342                                   (long long)dip->obj_asof);
1343                 } else {
1344                         *ap->a_vpp = NULL;
1345                         lwkt_reltoken(&hmp->fs_token);
1346                         return ENOENT;
1347                 }
1348         }
1349
1350         hammer_simple_transaction(&trans, hmp);
1351         ++hammer_stats_file_iopsr;
1352
1353         ip = hammer_get_inode(&trans, dip, parent_obj_id,
1354                               asof, parent_obj_localization,
1355                               dip->flags, &error);
1356         if (ip) {
1357                 error = hammer_get_vnode(ip, ap->a_vpp);
1358                 hammer_rel_inode(ip, 0);
1359         } else {
1360                 *ap->a_vpp = NULL;
1361         }
1362         hammer_done_transaction(&trans);
1363         lwkt_reltoken(&hmp->fs_token);
1364         return (error);
1365 }
1366
1367 /*
1368  * hammer_vop_nlink { nch, dvp, vp, cred }
1369  */
1370 static
1371 int
1372 hammer_vop_nlink(struct vop_nlink_args *ap)
1373 {
1374         struct hammer_transaction trans;
1375         struct hammer_inode *dip;
1376         struct hammer_inode *ip;
1377         struct nchandle *nch;
1378         hammer_mount_t hmp;
1379         int error;
1380
1381         if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1382                 return(EXDEV);
1383
1384         nch = ap->a_nch;
1385         dip = VTOI(ap->a_dvp);
1386         ip = VTOI(ap->a_vp);
1387         hmp = dip->hmp;
1388
1389         if (dip->obj_localization != ip->obj_localization)
1390                 return(EXDEV);
1391
1392         if (dip->flags & HAMMER_INODE_RO)
1393                 return (EROFS);
1394         if (ip->flags & HAMMER_INODE_RO)
1395                 return (EROFS);
1396         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1397                 return (error);
1398
1399         /*
1400          * Create a transaction to cover the operations we perform.
1401          */
1402         lwkt_gettoken(&hmp->fs_token);
1403         hammer_start_transaction(&trans, hmp);
1404         ++hammer_stats_file_iopsw;
1405
1406         /*
1407          * Add the filesystem object to the directory.  Note that neither
1408          * dip nor ip is referenced or locked, but their vnodes are
1409          * referenced.  This function will bump the inode's link count.
1410          */
1411         error = hammer_ip_add_directory(&trans, dip,
1412                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
1413                                         ip);
1414
1415         /*
1416          * Finish up.
1417          */
1418         if (error == 0) {
1419                 cache_setunresolved(nch);
1420                 cache_setvp(nch, ap->a_vp);
1421         }
1422         hammer_done_transaction(&trans);
1423         hammer_knote(ap->a_vp, NOTE_LINK);
1424         hammer_knote(ap->a_dvp, NOTE_WRITE);
1425         lwkt_reltoken(&hmp->fs_token);
1426         return (error);
1427 }
1428
1429 /*
1430  * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1431  *
1432  * The operating system has already ensured that the directory entry
1433  * does not exist and done all appropriate namespace locking.
1434  */
1435 static
1436 int
1437 hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
1438 {
1439         struct hammer_transaction trans;
1440         struct hammer_inode *dip;
1441         struct hammer_inode *nip;
1442         struct nchandle *nch;
1443         hammer_mount_t hmp;
1444         int error;
1445
1446         nch = ap->a_nch;
1447         dip = VTOI(ap->a_dvp);
1448         hmp = dip->hmp;
1449
1450         if (dip->flags & HAMMER_INODE_RO)
1451                 return (EROFS);
1452         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1453                 return (error);
1454
1455         /*
1456          * Create a transaction to cover the operations we perform.
1457          */
1458         lwkt_gettoken(&hmp->fs_token);
1459         hammer_start_transaction(&trans, hmp);
1460         ++hammer_stats_file_iopsw;
1461
1462         /*
1463          * Create a new filesystem object of the requested type.  The
1464          * returned inode will be referenced but not locked.
1465          */
1466         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1467                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1468                                     NULL, &nip);
1469         if (error) {
1470                 hammer_done_transaction(&trans);
1471                 *ap->a_vpp = NULL;
1472                 lwkt_reltoken(&hmp->fs_token);
1473                 return (error);
1474         }
1475         /*
1476          * Add the new filesystem object to the directory.  This will also
1477          * bump the inode's link count.
1478          */
1479         error = hammer_ip_add_directory(&trans, dip,
1480                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
1481                                         nip);
1482         if (error)
1483                 hkprintf("hammer_mkdir (add) error %d\n", error);
1484
1485         /*
1486          * Finish up.
1487          */
1488         if (error) {
1489                 hammer_rel_inode(nip, 0);
1490                 *ap->a_vpp = NULL;
1491         } else {
1492                 error = hammer_get_vnode(nip, ap->a_vpp);
1493                 hammer_rel_inode(nip, 0);
1494                 if (error == 0) {
1495                         cache_setunresolved(ap->a_nch);
1496                         cache_setvp(ap->a_nch, *ap->a_vpp);
1497                 }
1498         }
1499         hammer_done_transaction(&trans);
1500         if (error == 0)
1501                 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1502         lwkt_reltoken(&hmp->fs_token);
1503         return (error);
1504 }
1505
1506 /*
1507  * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1508  *
1509  * The operating system has already ensured that the directory entry
1510  * does not exist and done all appropriate namespace locking.
1511  */
1512 static
1513 int
1514 hammer_vop_nmknod(struct vop_nmknod_args *ap)
1515 {
1516         struct hammer_transaction trans;
1517         struct hammer_inode *dip;
1518         struct hammer_inode *nip;
1519         struct nchandle *nch;
1520         hammer_mount_t hmp;
1521         int error;
1522
1523         nch = ap->a_nch;
1524         dip = VTOI(ap->a_dvp);
1525         hmp = dip->hmp;
1526
1527         if (dip->flags & HAMMER_INODE_RO)
1528                 return (EROFS);
1529         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1530                 return (error);
1531
1532         /*
1533          * Create a transaction to cover the operations we perform.
1534          */
1535         lwkt_gettoken(&hmp->fs_token);
1536         hammer_start_transaction(&trans, hmp);
1537         ++hammer_stats_file_iopsw;
1538
1539         /*
1540          * Create a new filesystem object of the requested type.  The
1541          * returned inode will be referenced but not locked.
1542          *
1543          * If mknod specifies a directory, a pseudo-fs is created.
1544          */
1545         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1546                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1547                                     NULL, &nip);
1548         if (error) {
1549                 hammer_done_transaction(&trans);
1550                 *ap->a_vpp = NULL;
1551                 lwkt_reltoken(&hmp->fs_token);
1552                 return (error);
1553         }
1554
1555         /*
1556          * Add the new filesystem object to the directory.  This will also
1557          * bump the inode's link count.
1558          */
1559         error = hammer_ip_add_directory(&trans, dip,
1560                                         nch->ncp->nc_name, nch->ncp->nc_nlen,
1561                                         nip);
1562
1563         /*
1564          * Finish up.
1565          */
1566         if (error) {
1567                 hammer_rel_inode(nip, 0);
1568                 *ap->a_vpp = NULL;
1569         } else {
1570                 error = hammer_get_vnode(nip, ap->a_vpp);
1571                 hammer_rel_inode(nip, 0);
1572                 if (error == 0) {
1573                         cache_setunresolved(ap->a_nch);
1574                         cache_setvp(ap->a_nch, *ap->a_vpp);
1575                 }
1576         }
1577         hammer_done_transaction(&trans);
1578         if (error == 0)
1579                 hammer_knote(ap->a_dvp, NOTE_WRITE);
1580         lwkt_reltoken(&hmp->fs_token);
1581         return (error);
1582 }
1583
1584 /*
1585  * hammer_vop_open { vp, mode, cred, fp }
1586  *
1587  * MPSAFE (does not require fs_token)
1588  */
1589 static
1590 int
1591 hammer_vop_open(struct vop_open_args *ap)
1592 {
1593         hammer_inode_t ip;
1594
1595         ++hammer_stats_file_iopsr;
1596         ip = VTOI(ap->a_vp);
1597
1598         if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
1599                 return (EROFS);
1600         return(vop_stdopen(ap));
1601 }
1602
1603 /*
1604  * hammer_vop_print { vp }
1605  */
1606 static
1607 int
1608 hammer_vop_print(struct vop_print_args *ap)
1609 {
1610         return EOPNOTSUPP;
1611 }
1612
1613 /*
1614  * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
1615  */
1616 static
1617 int
1618 hammer_vop_readdir(struct vop_readdir_args *ap)
1619 {
1620         struct hammer_transaction trans;
1621         struct hammer_cursor cursor;
1622         struct hammer_inode *ip;
1623         hammer_mount_t hmp;
1624         struct uio *uio;
1625         hammer_base_elm_t base;
1626         int error;
1627         int cookie_index;
1628         int ncookies;
1629         off_t *cookies;
1630         off_t saveoff;
1631         int r;
1632         int dtype;
1633
1634         ++hammer_stats_file_iopsr;
1635         ip = VTOI(ap->a_vp);
1636         uio = ap->a_uio;
1637         saveoff = uio->uio_offset;
1638         hmp = ip->hmp;
1639
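             /*
              * If the caller wants cookies, size the cookie array from
              * the residual uio length (roughly one entry per 16 bytes),
              * capped at 1024 cookies per call.
              */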
1640         if (ap->a_ncookies) {
1641                 ncookies = uio->uio_resid / 16 + 1;
1642                 if (ncookies > 1024)
1643                         ncookies = 1024;
1644                 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1645                 cookie_index = 0;
1646         } else {
1647                 ncookies = -1;
1648                 cookies = NULL;
1649                 cookie_index = 0;
1650         }
1651
1652         lwkt_gettoken(&hmp->fs_token);
1653         hammer_simple_transaction(&trans, hmp);
1654
1655         /*
1656          * Handle artificial entries
1657          *
1658          * It should be noted that the minimum value for a directory
1659          * hash key on-media is 0x0000000100000000, so we can use anything
1660          * less than that to represent our 'special' key space.
1661          */
1662         error = 0;
1663         if (saveoff == 0) {
1664                 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1665                 if (r)
1666                         goto done;
1667                 if (cookies)
1668                         cookies[cookie_index] = saveoff;
1669                 ++saveoff;
1670                 ++cookie_index;
1671                 if (cookie_index == ncookies)
1672                         goto done;
1673         }
1674         if (saveoff == 1) {
1675                 if (ip->ino_data.parent_obj_id) {
1676                         r = vop_write_dirent(&error, uio,
1677                                              ip->ino_data.parent_obj_id,
1678                                              DT_DIR, 2, "..");
1679                 } else {
1680                         r = vop_write_dirent(&error, uio,
1681                                              ip->obj_id, DT_DIR, 2, "..");
1682                 }
1683                 if (r)
1684                         goto done;
1685                 if (cookies)
1686                         cookies[cookie_index] = saveoff;
1687                 ++saveoff;
1688                 ++cookie_index;
1689                 if (cookie_index == ncookies)
1690                         goto done;
1691         }
1692
1693         /*
1694          * Key range (begin and end inclusive) to scan.  Directory keys
1695          * directly translate to a 64 bit 'seek' position.
1696          */
1697         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1698         cursor.key_beg.localization = ip->obj_localization |
1699                                       hammer_dir_localization(ip);
1700         cursor.key_beg.obj_id = ip->obj_id;
1701         cursor.key_beg.create_tid = 0;
1702         cursor.key_beg.delete_tid = 0;
1703         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1704         cursor.key_beg.obj_type = 0;
1705         cursor.key_beg.key = saveoff;
1706
1707         cursor.key_end = cursor.key_beg;
1708         cursor.key_end.key = HAMMER_MAX_KEY;
1709         cursor.asof = ip->obj_asof;
1710         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1711
1712         error = hammer_ip_first(&cursor);
1713
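             /*
              * Iterate the on-media directory entries.  saveoff tracks
              * the directory hash key of the current entry so uio_offset
              * can be used to resume the scan on a later call.
              */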
1714         while (error == 0) {
1715                 error = hammer_ip_resolve_data(&cursor);
1716                 if (error)
1717                         break;
1718                 base = &cursor.leaf->base;
1719                 saveoff = base->key;
1720                 KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
1721
1722                 if (base->obj_id != ip->obj_id)
1723                         hpanic("bad record at %p", cursor.node);
1724
1725                 /*
1726                  * Convert pseudo-filesystems into softlinks
1727                  */
1728                 dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
1729                 r = vop_write_dirent(
1730                              &error, uio, cursor.data->entry.obj_id,
1731                              dtype,
1732                              cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
1733                              (void *)cursor.data->entry.name);
1734                 if (r)
1735                         break;
1736                 ++saveoff;
1737                 if (cookies)
1738                         cookies[cookie_index] = base->key;
1739                 ++cookie_index;
1740                 if (cookie_index == ncookies)
1741                         break;
1742                 error = hammer_ip_next(&cursor);
1743         }
1744         hammer_done_cursor(&cursor);
1745
1746 done:
1747         hammer_done_transaction(&trans);
1748
1749         if (ap->a_eofflag)
1750                 *ap->a_eofflag = (error == ENOENT);
1751         uio->uio_offset = saveoff;
1752         if (error && cookie_index == 0) {
1753                 if (error == ENOENT)
1754                         error = 0;
1755                 if (cookies) {
1756                         kfree(cookies, M_TEMP);
1757                         *ap->a_ncookies = 0;
1758                         *ap->a_cookies = NULL;
1759                 }
1760         } else {
1761                 if (error == ENOENT)
1762                         error = 0;
1763                 if (cookies) {
1764                         *ap->a_ncookies = cookie_index;
1765                         *ap->a_cookies = cookies;
1766                 }
1767         }
1768         lwkt_reltoken(&hmp->fs_token);
1769         return(error);
1770 }
1771
1772 /*
1773  * hammer_vop_readlink { vp, uio, cred }
1774  */
1775 static
1776 int
1777 hammer_vop_readlink(struct vop_readlink_args *ap)
1778 {
1779         struct hammer_transaction trans;
1780         struct hammer_cursor cursor;
1781         struct hammer_inode *ip;
1782         hammer_mount_t hmp;
1783         char buf[32];
1784         uint32_t localization;
1785         hammer_pseudofs_inmem_t pfsm;
1786         int error;
1787
1788         ip = VTOI(ap->a_vp);
1789         hmp = ip->hmp;
1790
1791         lwkt_gettoken(&hmp->fs_token);
1792
1793         /*
1794          * Shortcut if the symlink data was stuffed into ino_data.
1795          *
1796          * Also expand special "@@PFS%05d" softlinks (expansion only
1797          * occurs for non-historical (current) accesses made from the
1798          * primary filesystem).
1799          *
1800          * Note that the userspace hammer command does not allow users to
1801          * create a @@PFS softlink under another existing PFS (id!=0),
1802          * so the ip localization here for a @@PFS softlink is always 0.
1803          */
1804         if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
1805                 char *ptr;
1806                 int bytes;
1807
1808                 ptr = ip->ino_data.ext.symlink;
1809                 bytes = (int)ip->ino_data.size;
1810                 if (bytes == 10 &&
1811                     ip->obj_asof == HAMMER_MAX_TID &&
1812                     ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
1813                     strncmp(ptr, "@@PFS", 5) == 0) {
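                             /*
                              * Parse the 5-digit PFS id that follows
                              * "@@PFS" and convert it to a localization
                              * so the PFS configuration can be loaded.
                              */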
1814                         hammer_simple_transaction(&trans, hmp);
1815                         bcopy(ptr + 5, buf, 5);
1816                         buf[5] = 0;
1817                         localization = pfs_to_lo(strtoul(buf, NULL, 10));
1818                         pfsm = hammer_load_pseudofs(&trans, localization,
1819                                                     &error);
1820                         if (error == 0) {
1821                                 if (hammer_is_pfs_slave(&pfsm->pfsd)) {
1822                                         /* vap->va_size == 26 */
1823                                         ksnprintf(buf, sizeof(buf),
1824                                                   "@@0x%016llx:%05d",
1825                                                   (long long)pfsm->pfsd.sync_end_tid,
1826                                                   lo_to_pfs(localization));
1827                                 } else {
1828                                         /* vap->va_size == 10 */
1829                                         ksnprintf(buf, sizeof(buf),
1830                                                   "@@-1:%05d",
1831                                                   lo_to_pfs(localization));
1832                                 }
1833                                 ptr = buf;
1834                                 bytes = strlen(buf);
1835                         }
1836                         if (pfsm)
1837                                 hammer_rel_pseudofs(hmp, pfsm);
1838                         hammer_done_transaction(&trans);
1839                 }
1840                 error = uiomove(ptr, bytes, ap->a_uio);
1841                 lwkt_reltoken(&hmp->fs_token);
1842                 return(error);
1843         }
1844
1845         /*
1846          * Long version
1847          */
1848         hammer_simple_transaction(&trans, hmp);
1849         ++hammer_stats_file_iopsr;
1850         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1851
1852         /*
1853          * Key range (begin and end inclusive) to scan.  Directory keys
1854          * directly translate to a 64 bit 'seek' position.
1855          */
1856         cursor.key_beg.localization = ip->obj_localization |
1857                                       HAMMER_LOCALIZE_MISC;
1858         cursor.key_beg.obj_id = ip->obj_id;
1859         cursor.key_beg.create_tid = 0;
1860         cursor.key_beg.delete_tid = 0;
1861         cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1862         cursor.key_beg.obj_type = 0;
1863         cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
1864         cursor.asof = ip->obj_asof;
1865         cursor.flags |= HAMMER_CURSOR_ASOF;
1866
1867         error = hammer_ip_lookup(&cursor);
1868         if (error == 0) {
1869                 error = hammer_ip_resolve_data(&cursor);
1870                 if (error == 0) {
1871                         KKASSERT(cursor.leaf->data_len >=
1872                                  HAMMER_SYMLINK_NAME_OFF);
1873                         error = uiomove(cursor.data->symlink.name,
1874                                         cursor.leaf->data_len -
1875                                                 HAMMER_SYMLINK_NAME_OFF,
1876                                         ap->a_uio);
1877                 }
1878         }
1879         hammer_done_cursor(&cursor);
1880         hammer_done_transaction(&trans);
1881         lwkt_reltoken(&hmp->fs_token);
1882         return(error);
1883 }
1884
1885 /*
1886  * hammer_vop_nremove { nch, dvp, cred }
1887  */
1888 static
1889 int
1890 hammer_vop_nremove(struct vop_nremove_args *ap)
1891 {
1892         struct hammer_transaction trans;
1893         struct hammer_inode *dip;
1894         hammer_mount_t hmp;
1895         int error;
1896
1897         dip = VTOI(ap->a_dvp);
1898         hmp = dip->hmp;
1899
1900         if (hammer_nohistory(dip) == 0 &&
1901             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
1902                 return (error);
1903         }
1904
1905         lwkt_gettoken(&hmp->fs_token);
1906         hammer_start_transaction(&trans, hmp);
1907         ++hammer_stats_file_iopsw;
1908         error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
1909         hammer_done_transaction(&trans);
1910         if (error == 0)
1911                 hammer_knote(ap->a_dvp, NOTE_WRITE);
1912         lwkt_reltoken(&hmp->fs_token);
1913         return (error);
1914 }
1915
1916 /*
1917  * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1918  */
1919 static
1920 int
1921 hammer_vop_nrename(struct vop_nrename_args *ap)
1922 {
1923         struct hammer_transaction trans;
1924         struct namecache *fncp;
1925         struct namecache *tncp;
1926         struct hammer_inode *fdip;
1927         struct hammer_inode *tdip;
1928         struct hammer_inode *ip;
1929         hammer_mount_t hmp;
1930         struct hammer_cursor cursor;
1931         int64_t namekey;
1932         uint32_t max_iterations;
1933         int nlen, error;
1934
1935         if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1936                 return(EXDEV);
1937         if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1938                 return(EXDEV);
1939
1940         fdip = VTOI(ap->a_fdvp);
1941         tdip = VTOI(ap->a_tdvp);
1942         fncp = ap->a_fnch->ncp;
1943         tncp = ap->a_tnch->ncp;
1944         ip = VTOI(fncp->nc_vp);
1945         KKASSERT(ip != NULL);
1946
1947         hmp = ip->hmp;
1948
1949         if (fdip->obj_localization != tdip->obj_localization)
1950                 return(EXDEV);
1951         if (fdip->obj_localization != ip->obj_localization)
1952                 return(EXDEV);
1953
1954         if (fdip->flags & HAMMER_INODE_RO)
1955                 return (EROFS);
1956         if (tdip->flags & HAMMER_INODE_RO)
1957                 return (EROFS);
1958         if (ip->flags & HAMMER_INODE_RO)
1959                 return (EROFS);
1960         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1961                 return (error);
1962
1963         lwkt_gettoken(&hmp->fs_token);
1964         hammer_start_transaction(&trans, hmp);
1965         ++hammer_stats_file_iopsw;
1966
1967         /*
1968          * Remove tncp from the target directory and then link ip as
1969          * tncp. XXX pass trans to dounlink
1970          *
1971          * Force the inode sync-time to match the transaction so it is
1972          * in-sync with the creation of the target directory entry.
1973          */
1974         error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
1975                                 ap->a_cred, 0, -1);
1976         if (error == 0 || error == ENOENT) {
1977                 error = hammer_ip_add_directory(&trans, tdip,
1978                                                 tncp->nc_name, tncp->nc_nlen,
1979                                                 ip);
1980                 if (error == 0) {
1981                         ip->ino_data.parent_obj_id = tdip->obj_id;
1982                         ip->ino_data.ctime = trans.time;
1983                         hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
1984                 }
1985         }
1986         if (error)
1987                 goto failed; /* XXX */
1988
1989         /*
1990          * Locate the record in the originating directory and remove it.
1991          *
1992          * Calculate the namekey and setup the key range for the scan.  This
1993          * works kinda like a chained hash table where the lower 32 bits
1994          * of the namekey synthesize the chain.
1995          *
1996          * The key range is inclusive of both key_beg and key_end.
1997          */
1998         namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
1999                                            &max_iterations);
2000 retry:
2001         hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
2002         cursor.key_beg.localization = fdip->obj_localization |
2003                                       hammer_dir_localization(fdip);
2004         cursor.key_beg.obj_id = fdip->obj_id;
2005         cursor.key_beg.key = namekey;
2006         cursor.key_beg.create_tid = 0;
2007         cursor.key_beg.delete_tid = 0;
2008         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2009         cursor.key_beg.obj_type = 0;
2010
2011         cursor.key_end = cursor.key_beg;
2012         cursor.key_end.key += max_iterations;
2013         cursor.asof = fdip->obj_asof;
2014         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2015
2016         /*
2017          * Scan all matching records (the chain), locate the one matching
2018          * the requested path component.
2019          *
2020          * The hammer_ip_*() functions merge in-memory records with on-disk
2021          * records for the purposes of the search.
2022          */
2023         error = hammer_ip_first(&cursor);
2024         while (error == 0) {
2025                 if (hammer_ip_resolve_data(&cursor) != 0)
2026                         break;
2027                 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2028                 KKASSERT(nlen > 0);
2029                 if (fncp->nc_nlen == nlen &&
2030                     bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
2031                         break;
2032                 }
2033                 error = hammer_ip_next(&cursor);
2034         }
2035
2036         /*
2037          * If all is ok we have to get the inode so we can adjust nlinks.
2038          *
2039          * WARNING: hammer_ip_del_directory() may have to terminate the
2040          * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
2041          * twice.
2042          */
2043         if (error == 0)
2044                 error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
2045
2046         /*
2047          * XXX A deadlock here will break rename's atomicity for the purposes
2048          * of crash recovery.
2049          */
2050         if (error == EDEADLK) {
2051                 hammer_done_cursor(&cursor);
2052                 goto retry;
2053         }
2054
2055         /*
2056          * Cleanup and tell the kernel that the rename succeeded.
2057          *
2058          * NOTE: ip->vp, if non-NULL, cannot be directly referenced
2059          *       without formally acquiring the vp since the vp might
2060          *       have zero refs on it, or in the middle of a reclaim,
2061          *       etc.
2062          */
2063         hammer_done_cursor(&cursor);
2064         if (error == 0) {
2065                 cache_rename(ap->a_fnch, ap->a_tnch);
2066                 hammer_knote(ap->a_fdvp, NOTE_WRITE);
2067                 hammer_knote(ap->a_tdvp, NOTE_WRITE);
2068                 while (ip->vp) {
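                             /*
                              * Formally acquire the vnode so NOTE_RENAME
                              * can be posted on it; loop if the
                              * acquisition races a reclaim.
                              */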
2069                         struct vnode *vp;
2070
2071                         error = hammer_get_vnode(ip, &vp);
2072                         if (error == 0 && vp) {
2073                                 vn_unlock(vp);
2074                                 hammer_knote(ip->vp, NOTE_RENAME);
2075                                 vrele(vp);
2076                                 break;
2077                         }
2078                         hdkprintf("ip/vp race2 avoided\n");
2079                 }
2080         }
2081
2082 failed:
2083         hammer_done_transaction(&trans);
2084         lwkt_reltoken(&hmp->fs_token);
2085         return (error);
2086 }
2087
2088 /*
2089  * hammer_vop_nrmdir { nch, dvp, cred }
2090  */
2091 static
2092 int
2093 hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
2094 {
2095         struct hammer_transaction trans;
2096         struct hammer_inode *dip;
2097         hammer_mount_t hmp;
2098         int error;
2099
2100         dip = VTOI(ap->a_dvp);
2101         hmp = dip->hmp;
2102
2103         if (hammer_nohistory(dip) == 0 &&
2104             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2105                 return (error);
2106         }
2107
2108         lwkt_gettoken(&hmp->fs_token);
2109         hammer_start_transaction(&trans, hmp);
2110         ++hammer_stats_file_iopsw;
2111         error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
2112         hammer_done_transaction(&trans);
2113         if (error == 0)
2114                 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
2115         lwkt_reltoken(&hmp->fs_token);
2116         return (error);
2117 }
2118
2119 /*
2120  * hammer_vop_markatime { vp, cred }
2121  */
2122 static
2123 int
2124 hammer_vop_markatime(struct vop_markatime_args *ap)
2125 {
2126         struct hammer_transaction trans;
2127         struct hammer_inode *ip;
2128         hammer_mount_t hmp;
2129
2130         ip = VTOI(ap->a_vp);
2131         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2132                 return (EROFS);
2133         if (ip->flags & HAMMER_INODE_RO)
2134                 return (EROFS);
2135         hmp = ip->hmp;
2136         if (hmp->mp->mnt_flag & MNT_NOATIME)
2137                 return (0);
2138         lwkt_gettoken(&hmp->fs_token);
2139         hammer_start_transaction(&trans, hmp);
2140         ++hammer_stats_file_iopsw;
2141
2142         ip->ino_data.atime = trans.time;
2143         hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
2144         hammer_done_transaction(&trans);
2145         hammer_knote(ap->a_vp, NOTE_ATTRIB);
2146         lwkt_reltoken(&hmp->fs_token);
2147         return (0);
2148 }
2149
2150 /*
2151  * hammer_vop_setattr { vp, vap, cred }
2152  */
2153 static
2154 int
2155 hammer_vop_setattr(struct vop_setattr_args *ap)
2156 {
2157         struct hammer_transaction trans;
2158         struct hammer_inode *ip;
2159         struct vattr *vap;
2160         hammer_mount_t hmp;
2161         int modflags;
2162         int error;
2163         int truncating;
2164         int blksize;
2165         int kflags;
2166 #if 0
2167         int64_t aligned_size;
2168 #endif
2169         uint32_t flags;
2170
2171         vap = ap->a_vap;
2172         ip = ap->a_vp->v_data;
2173         modflags = 0;
2174         kflags = 0;
2175         hmp = ip->hmp;
2176
2177         if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2178                 return(EROFS);
2179         if (ip->flags & HAMMER_INODE_RO)
2180                 return (EROFS);
2181         if (hammer_nohistory(ip) == 0 &&
2182             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2183                 return (error);
2184         }
2185
2186         lwkt_gettoken(&hmp->fs_token);
2187         hammer_start_transaction(&trans, hmp);
2188         ++hammer_stats_file_iopsw;
2189         error = 0;
2190
2191         if (vap->va_flags != VNOVAL) {
2192                 flags = ip->ino_data.uflags;
2193                 error = vop_helper_setattr_flags(&flags, vap->va_flags,
2194                                          hammer_to_unix_xid(&ip->ino_data.uid),
2195                                          ap->a_cred);
2196                 if (error == 0) {
2197                         if (ip->ino_data.uflags != flags) {
2198                                 ip->ino_data.uflags = flags;
2199                                 ip->ino_data.ctime = trans.time;
2200                                 modflags |= HAMMER_INODE_DDIRTY;
2201                                 kflags |= NOTE_ATTRIB;
2202                         }
2203                         if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2204                                 error = 0;
2205                                 goto done;
2206                         }
2207                 }
2208                 goto done;
2209         }
2210         if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2211                 error = EPERM;
2212                 goto done;
2213         }
2214         if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
2215                 mode_t cur_mode = ip->ino_data.mode;
2216                 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2217                 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2218                 uuid_t uuid_uid;
2219                 uuid_t uuid_gid;
2220
2221                 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
2222                                          ap->a_cred,
2223                                          &cur_uid, &cur_gid, &cur_mode);
2224                 if (error == 0) {
2225                         hammer_guid_to_uuid(&uuid_uid, cur_uid);
2226                         hammer_guid_to_uuid(&uuid_gid, cur_gid);
2227                         if (bcmp(&uuid_uid, &ip->ino_data.uid,
2228                                  sizeof(uuid_uid)) ||
2229                             bcmp(&uuid_gid, &ip->ino_data.gid,
2230                                  sizeof(uuid_gid)) ||
2231                             ip->ino_data.mode != cur_mode) {
2232                                 ip->ino_data.uid = uuid_uid;
2233                                 ip->ino_data.gid = uuid_gid;
2234                                 ip->ino_data.mode = cur_mode;
2235                                 ip->ino_data.ctime = trans.time;
2236                                 modflags |= HAMMER_INODE_DDIRTY;
2237                         }
2238                         kflags |= NOTE_ATTRIB;
2239                 }
2240         }
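             /*
              * Handle a requested size change.  The while loop runs at
              * most once (note the unconditional break at the bottom)
              * and is effectively an if.
              */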
2241         while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
2242                 switch(ap->a_vp->v_type) {
2243                 case VREG:
2244                         if (vap->va_size == ip->ino_data.size)
2245                                 break;
2246
2247                         /*
2248                          * Log the operation if in fast-fsync mode or if
2249                          * there are unterminated redo write records present.
2250                          *
2251                          * The second check is needed so the recovery code
2252                          * properly truncates write redos even if nominal
2253                          * REDO operations are turned off due to excessive
2254                          * writes, because the related records might be
2255                          * destroyed and never lay down a TERM_WRITE.
2256                          */
2257                         if ((ip->flags & HAMMER_INODE_REDO) ||
2258                             (ip->flags & HAMMER_INODE_RDIRTY)) {
2259                                 error = hammer_generate_redo(&trans, ip,
2260                                                              vap->va_size,
2261                                                              HAMMER_REDO_TRUNC,
2262                                                              NULL, 0);
2263                         }
2264                         blksize = hammer_blocksize(vap->va_size);
2265
2266                         /*
2267                          * XXX break atomicity, we can deadlock the backend
2268                          * if we do not release the lock.  Probably not a
2269                          * big deal here.
2270                          */
2271                         if (vap->va_size < ip->ino_data.size) {
2272                                 nvtruncbuf(ap->a_vp, vap->va_size,
2273                                            blksize,
2274                                            hammer_blockoff(vap->va_size),
2275                                            0);
2276                                 truncating = 1;
2277                                 kflags |= NOTE_WRITE;
2278                         } else {
2279                                 nvextendbuf(ap->a_vp,
2280                                             ip->ino_data.size,
2281                                             vap->va_size,
2282                                             hammer_blocksize(ip->ino_data.size),
2283                                             hammer_blocksize(vap->va_size),
2284                                             hammer_blockoff(ip->ino_data.size),
2285                                             hammer_blockoff(vap->va_size),
2286                                             0);
2287                                 truncating = 0;
2288                                 kflags |= NOTE_WRITE | NOTE_EXTEND;
2289                         }
2290                         ip->ino_data.size = vap->va_size;
2291                         ip->ino_data.mtime = trans.time;
2292                         /* XXX safe to use SDIRTY instead of DDIRTY here? */
2293                         modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2294
2295                         /*
2296                          * On-media truncation is cached in the inode until
2297                          * the inode is synchronized.  We must immediately
2298                          * handle any frontend records.
2299                          */
2300                         if (truncating) {
2301                                 hammer_ip_frontend_trunc(ip, vap->va_size);
2302                                 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2303                                         ip->flags |= HAMMER_INODE_TRUNCATED;
2304                                         ip->trunc_off = vap->va_size;
2305                                         hammer_inode_dirty(ip);
2306                                 } else if (ip->trunc_off > vap->va_size) {
2307                                         ip->trunc_off = vap->va_size;
2308                                 }
2309                         }
2310
2311 #if 0
2312                         /*
2313                          * When truncating, nvtruncbuf() may have cleaned out
2314                          * a portion of the last block on-disk in the buffer
2315                          * cache.  We must clean out any frontend records
2316                          * for blocks beyond the new last block.
2317                          */
2318                         aligned_size = (vap->va_size + (blksize - 1)) &
2319                                        ~(int64_t)(blksize - 1);
2320                         if (truncating && vap->va_size < aligned_size) {
2321                                 aligned_size -= blksize;
2322                                 hammer_ip_frontend_trunc(ip, aligned_size);
2323                         }
2324 #endif
2325                         break;
2326                 case VDATABASE:
2327                         if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2328                                 ip->flags |= HAMMER_INODE_TRUNCATED;
2329                                 ip->trunc_off = vap->va_size;
2330                                 hammer_inode_dirty(ip);
2331                         } else if (ip->trunc_off > vap->va_size) {
2332                                 ip->trunc_off = vap->va_size;
2333                         }
2334                         hammer_ip_frontend_trunc(ip, vap->va_size);
2335                         ip->ino_data.size = vap->va_size;
2336                         ip->ino_data.mtime = trans.time;
2337                         modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2338                         kflags |= NOTE_ATTRIB;
2339                         break;
2340                 default:
2341                         error = EINVAL;
2342                         goto done;
2343                 }
2344                 break;
2345         }
2346         if (vap->va_atime.tv_sec != VNOVAL) {
2347                 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
2348                 modflags |= HAMMER_INODE_ATIME;
2349                 kflags |= NOTE_ATTRIB;
2350         }
2351         if (vap->va_mtime.tv_sec != VNOVAL) {
2352                 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
2353                 modflags |= HAMMER_INODE_MTIME;
2354                 kflags |= NOTE_ATTRIB;
2355         }
2356         if (vap->va_mode != (mode_t)VNOVAL) {
2357                 mode_t   cur_mode = ip->ino_data.mode;
2358                 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2359                 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2360
2361                 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2362                                          cur_uid, cur_gid, &cur_mode);
2363                 if (error == 0 && ip->ino_data.mode != cur_mode) {
2364                         ip->ino_data.mode = cur_mode;
2365                         ip->ino_data.ctime = trans.time;
2366                         modflags |= HAMMER_INODE_DDIRTY;
2367                         kflags |= NOTE_ATTRIB;
2368                 }
2369         }
2370 done:
2371         if (error == 0)
2372                 hammer_modify_inode(&trans, ip, modflags);
2373         hammer_done_transaction(&trans);
2374         hammer_knote(ap->a_vp, kflags);
2375         lwkt_reltoken(&hmp->fs_token);
2376         return (error);
2377 }
2378
2379 /*
2380  * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2381  */
2382 static
2383 int
2384 hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
2385 {
2386         struct hammer_transaction trans;
2387         struct hammer_inode *dip;
2388         struct hammer_inode *nip;
2389         hammer_record_t record;
2390         struct nchandle *nch;
2391         hammer_mount_t hmp;
2392         int error;
2393         int bytes;
2394
2395         ap->a_vap->va_type = VLNK;
2396
2397         nch = ap->a_nch;
2398         dip = VTOI(ap->a_dvp);
2399         hmp = dip->hmp;
2400
2401         if (dip->flags & HAMMER_INODE_RO)
2402                 return (EROFS);
2403         if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
2404                 return (error);
2405
2406         /*
2407          * Create a transaction to cover the operations we perform.
2408          */
2409         lwkt_gettoken(&hmp->fs_token);
2410         hammer_start_transaction(&trans, hmp);
2411         ++hammer_stats_file_iopsw;
2412
2413         /*
2414          * Create a new filesystem object of the requested type.  The
2415          * returned inode will be referenced but not locked.
2416          */
2417
2418         error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
2419                                     dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2420                                     NULL, &nip);
2421         if (error) {
2422                 hammer_done_transaction(&trans);
2423                 *ap->a_vpp = NULL;
2424                 lwkt_reltoken(&hmp->fs_token);
2425                 return (error);
2426         }
2427
2428         /*
2429          * Add a record representing the symlink.  symlink stores the link
2430          * as pure data, not a string, and is not \0 terminated.
2431          */
2432         if (error == 0) {
2433                 bytes = strlen(ap->a_target);
2434
2435                 if (bytes <= HAMMER_INODE_BASESYMLEN) {
2436                         bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2437                 } else {
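                             /*
                              * The link target is too long to embed in
                              * the inode, so store it in a separate
                              * HAMMER_RECTYPE_FIX record keyed by
                              * HAMMER_FIXKEY_SYMLINK.
                              */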
2438                         record = hammer_alloc_mem_record(nip, bytes);
2439                         record->type = HAMMER_MEM_RECORD_GENERAL;
2440
2441                         record->leaf.base.localization = nip->obj_localization |
2442                                                          HAMMER_LOCALIZE_MISC;
2443                         record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2444                         record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2445                         record->leaf.data_len = bytes;
2446                         KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2447                         bcopy(ap->a_target, record->data->symlink.name, bytes);
2448                         error = hammer_ip_add_record(&trans, record);
2449                 }
2450
2451                 /*
2452                  * Set the file size to the length of the link.
2453                  */
2454                 if (error == 0) {
2455                         nip->ino_data.size = bytes;
2456                         hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
2457                 }
2458         }
2459         if (error == 0)
2460                 error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
2461                                                 nch->ncp->nc_nlen, nip);
2462
2463         /*
2464          * Finish up.
2465          */
2466         if (error) {
2467                 hammer_rel_inode(nip, 0);
2468                 *ap->a_vpp = NULL;
2469         } else {
2470                 error = hammer_get_vnode(nip, ap->a_vpp);
2471                 hammer_rel_inode(nip, 0);
2472                 if (error == 0) {
2473                         cache_setunresolved(ap->a_nch);
2474                         cache_setvp(ap->a_nch, *ap->a_vpp);
2475                         hammer_knote(ap->a_dvp, NOTE_WRITE);
2476                 }
2477         }
2478         hammer_done_transaction(&trans);
2479         lwkt_reltoken(&hmp->fs_token);
2480         return (error);
2481 }
2482
2483 /*
2484  * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2485  */
2486 static
2487 int
2488 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
2489 {
2490         struct hammer_transaction trans;
2491         struct hammer_inode *dip;
2492         hammer_mount_t hmp;
2493         int error;
2494
2495         dip = VTOI(ap->a_dvp);
2496         hmp = dip->hmp;
2497
2498         if (hammer_nohistory(dip) == 0 &&
2499             (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
2500                 return (error);
2501         }
2502
2503         lwkt_gettoken(&hmp->fs_token);
2504         hammer_start_transaction(&trans, hmp);
2505         ++hammer_stats_file_iopsw;
2506         error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
2507                                 ap->a_cred, ap->a_flags, -1);
2508         hammer_done_transaction(&trans);
2509         lwkt_reltoken(&hmp->fs_token);
2510
2511         return (error);
2512 }
2513
2514 /*
2515  * hammer_vop_ioctl { vp, command, data, fflag, cred }
2516  */
2517 static
2518 int
2519 hammer_vop_ioctl(struct vop_ioctl_args *ap)
2520 {
2521         struct hammer_inode *ip = ap->a_vp->v_data;
2522         hammer_mount_t hmp = ip->hmp;
2523         int error;
2524
2525         ++hammer_stats_file_iopsr;
2526         lwkt_gettoken(&hmp->fs_token);
2527         error = hammer_ioctl(ip, ap->a_command, ap->a_data,
2528                              ap->a_fflag, ap->a_cred);
2529         lwkt_reltoken(&hmp->fs_token);
2530         return (error);
2531 }
2532
2533 static
2534 int
2535 hammer_vop_mountctl(struct vop_mountctl_args *ap)
2536 {
2537         static const struct mountctl_opt extraopt[] = {
2538                 { HMNT_NOHISTORY,       "nohistory" },
2539                 { HMNT_MASTERID,        "master" },
2540                 { HMNT_NOMIRROR,        "nomirror" },
2541                 { 0, NULL}
2542
2543         };
2544         struct hammer_mount *hmp;
2545         struct mount *mp;
2546         int usedbytes;
2547         int error;
2548
2549         error = 0;
2550         usedbytes = 0;
2551         mp = ap->a_head.a_ops->head.vv_mount;
2552         KKASSERT(mp->mnt_data != NULL);
2553         hmp = (struct hammer_mount *)mp->mnt_data;
2554
2555         lwkt_gettoken(&hmp->fs_token);
2556
2557         switch(ap->a_op) {
2558         case MOUNTCTL_SET_EXPORT:
2559                 if (ap->a_ctllen != sizeof(struct export_args))
2560                         error = EINVAL;
2561                 else
2562                         error = hammer_vfs_export(mp, ap->a_op,
2563                                       (const struct export_args *)ap->a_ctl);
2564                 break;
2565         case MOUNTCTL_MOUNTFLAGS:
2566                 /*
2567                  * Call standard mountctl VOP function
2568                  * so we get user mount flags.
2569                  */
2570                 error = vop_stdmountctl(ap);
2571                 if (error)
2572                         break;
2573
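                     /*
                      * Append HAMMER-specific mount flag strings
                      * (nohistory, master, nomirror) after the standard
                      * flags filled in by vop_stdmountctl().
                      */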
2574                 usedbytes = *ap->a_res;
2575
2576                 if (usedbytes > 0 && usedbytes < ap->a_buflen) {
2577                         usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
2578                                                     ap->a_buf,
2579                                                     ap->a_buflen - usedbytes,
2580                                                     &error);
2581                 }
2582
2583                 *ap->a_res += usedbytes;
2584                 break;
2585         default:
2586                 error = vop_stdmountctl(ap);
2587                 break;
2588         }
2589         lwkt_reltoken(&hmp->fs_token);
2590         return(error);
2591 }
2592
2593 /*
2594  * hammer_vop_strategy { vp, bio }
2595  *
2596  * Strategy call, used for regular file read & write only.  Note that the
2597  * bp may represent a cluster.
2598  *
2599  * To simplify operation and allow better optimizations in the future,
2600  * this code does not make any assumptions with regards to buffer alignment
2601  * or size.
2602  */
2603 static
2604 int
2605 hammer_vop_strategy(struct vop_strategy_args *ap)
2606 {
2607         struct buf *bp;
2608         int error;
2609
2610         bp = ap->a_bio->bio_buf;
2611
2612         switch(bp->b_cmd) {
2613         case BUF_CMD_READ:
2614                 error = hammer_vop_strategy_read(ap);
2615                 break;
2616         case BUF_CMD_WRITE:
2617                 error = hammer_vop_strategy_write(ap);
2618                 break;
2619         default:
2620                 bp->b_error = error = EINVAL;
2621                 bp->b_flags |= B_ERROR;
2622                 biodone(ap->a_bio);
2623                 break;
2624         }
2625
2626         /* hammer_dump_dedup_cache(((hammer_inode_t)ap->a_vp->v_data)->hmp); */
2627
2628         return (error);
2629 }
2630
2631 /*
2632  * Read from a regular file.  Iterate the related records and fill in the
2633  * BIO/BUF.  Gaps are zero-filled.
2634  *
2635  * The support code in hammer_object.c should be used to deal with mixed
2636  * in-memory and on-disk records.
2637  *
2638  * NOTE: Can be called from the cluster code with an oversized buf.
2639  *
2640  * XXX atime update
2641  */
2642 static
2643 int
2644 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2645 {
2646         struct hammer_transaction trans;
2647         struct hammer_inode *ip;
2648         struct hammer_inode *dip;
2649         hammer_mount_t hmp;
2650         struct hammer_cursor cursor;
2651         hammer_base_elm_t base;
2652         hammer_off_t disk_offset;
2653         struct bio *bio;
2654         struct bio *nbio;
2655         struct buf *bp;
2656         int64_t rec_offset;
2657         int64_t ran_end;
2658         int64_t tmp64;
2659         int error;
2660         int boff;
2661         int roff;
2662         int n;
2663         int isdedupable;
2664
2665         bio = ap->a_bio;
2666         bp = bio->bio_buf;
2667         ip = ap->a_vp->v_data;
2668         hmp = ip->hmp;
2669
2670         /*
2671          * The zone-2 disk offset may have been set by the cluster code via
2672          * a BMAP operation, or else should be NOOFFSET.
2673          *
2674          * Checking the high bits for a match against zone-2 should suffice.
2675          *
2676          * In cases where a lot of data duplication is present it may be
2677          * more beneficial to drop through and double-buffer through the
2678          * device.
2679          */
2680         nbio = push_bio(bio);
2681         if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
2682             HAMMER_ZONE_LARGE_DATA) {
2683                 if (hammer_double_buffer == 0) {
2684                         lwkt_gettoken(&hmp->fs_token);
2685                         error = hammer_io_direct_read(hmp, nbio, NULL);
2686                         lwkt_reltoken(&hmp->fs_token);
2687                         return (error);
2688                 }
2689
2690                 /*
2691                  * Try to shortcut requests for double_buffer mode too.
2692                  * Since this mode runs through the device buffer cache
2693                  * only compatible buffer sizes (meaning those generated
2694                  * by normal filesystem buffers) are legal.
2695                  */
2696                 if (hammer_live_dedup == 0 && (bp->b_flags & B_PAGING) == 0) {
2697                         lwkt_gettoken(&hmp->fs_token);
2698                         error = hammer_io_indirect_read(hmp, nbio, NULL);
2699                         lwkt_reltoken(&hmp->fs_token);
2700                         return (error);
2701                 }
2702         }
2703
2704         /*
2705          * Well, that sucked.  Do it the hard way.  If all the stars are
2706          * aligned we may still be able to issue a direct-read.
2707          */
2708         lwkt_gettoken(&hmp->fs_token);
2709         hammer_simple_transaction(&trans, hmp);
2710         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2711
2712         /*
2713          * Key range (begin and end inclusive) to scan.  Note that the keys
2714          * stored in the actual records represent BASE+LEN, not BASE.  The
2715          * first record containing bio_offset will have a key > bio_offset.
2716          */
2717         cursor.key_beg.localization = ip->obj_localization |
2718                                       HAMMER_LOCALIZE_MISC;
2719         cursor.key_beg.obj_id = ip->obj_id;
2720         cursor.key_beg.create_tid = 0;
2721         cursor.key_beg.delete_tid = 0;
2722         cursor.key_beg.obj_type = 0;
2723         cursor.key_beg.key = bio->bio_offset + 1;
2724         cursor.asof = ip->obj_asof;
2725         cursor.flags |= HAMMER_CURSOR_ASOF;
2726
2727         cursor.key_end = cursor.key_beg;
2728         KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2729 #if 0
2730         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
2731                 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2732                 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2733                 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2734         } else
2735 #endif
2736         {
2737                 ran_end = bio->bio_offset + bp->b_bufsize;
2738                 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2739                 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2740                 tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
2741                 if (tmp64 < ran_end)
2742                         cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2743                 else
2744                         cursor.key_end.key = ran_end + MAXPHYS + 1;
2745         }
2746         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2747
2748         /*
2749          * Set NOSWAPCACHE for cursor data extraction if double buffering
2750          * is disabled, or if the file is not marked cacheable via chflags
2751          * and vm.swapcache_use_chflags is enabled.
2752          */
2753         if (hammer_double_buffer == 0 ||
2754             ((ap->a_vp->v_flag & VSWAPCACHE) == 0 &&
2755              vm_swapcache_use_chflags)) {
2756                 cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;
2757         }
2758
2759         error = hammer_ip_first(&cursor);
2760         boff = 0;
2761
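             /*
              * Assemble the buffer from the matching data records: zero-fill
              * any gap before each record, then either direct-read,
              * indirect-read, or copy the record data.  boff tracks how much
              * of bp->b_data has been filled so far.
              */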
2762         while (error == 0) {
2763                 /*
2764                  * Get the base file offset of the record.  The key for
2765                  * data records is (base + bytes) rather than (base).
2766                  */
2767                 base = &cursor.leaf->base;
2768                 rec_offset = base->key - cursor.leaf->data_len;
2769
2770                 /*
2771                  * Calculate the gap, if any, and zero-fill it.
2772                  *
2773                  * n is the offset of the start of the record versus our
2774                  * current seek offset in the bio.
2775                  */
2776                 n = (int)(rec_offset - (bio->bio_offset + boff));
2777                 if (n > 0) {
2778                         if (n > bp->b_bufsize - boff)
2779                                 n = bp->b_bufsize - boff;
2780                         bzero((char *)bp->b_data + boff, n);
2781                         boff += n;
2782                         n = 0;
2783                 }
2784
2785                 /*
2786                  * Calculate the data offset in the record and the number
2787                  * of bytes we can copy.
2788                  *
2789                  * There are two degenerate cases.  First, boff may already
2790                  * be at bp->b_bufsize.  Second, the data offset within
2791                  * the record may exceed the record's size.
2792                  */
2793                 roff = -n;
2794                 rec_offset += roff;
2795                 n = cursor.leaf->data_len - roff;
2796                 if (n <= 0) {
2797                         hdkprintf("bad n=%d roff=%d\n", n, roff);
2798                         n = 0;
2799                 } else if (n > bp->b_bufsize - boff) {
2800                         n = bp->b_bufsize - boff;
2801                 }
2802
2803                 /*
2804                  * Deal with cached truncations.  This cool bit of code
2805                  * allows truncate()/ftruncate() to avoid having to sync
2806                  * the file.
2807                  *
2808                  * If the frontend is truncated then all backend records are
2809                  * subject to the frontend's truncation.
2810                  *
2811                  * If the backend is truncated then backend records on-disk
2812                  * (but not in-memory) are subject to the backend's
2813                  * truncation.  In-memory records owned by the backend
2814                  * represent data written after the truncation point on the
2815                  * backend and must not be truncated.
2816                  *
2817                  * Truncate operations deal with frontend buffer cache
2818                  * buffers and frontend-owned in-memory records synchronously.
2819                  */
2820                 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2821                         if (hammer_cursor_ondisk(&cursor)/* ||
2822                             cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
2823                                 if (ip->trunc_off <= rec_offset)
2824                                         n = 0;
2825                                 else if (ip->trunc_off < rec_offset + n)
2826                                         n = (int)(ip->trunc_off - rec_offset);
2827                         }
2828                 }
2829                 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2830                         if (hammer_cursor_ondisk(&cursor)) {
2831                                 if (ip->sync_trunc_off <= rec_offset)
2832                                         n = 0;
2833                                 else if (ip->sync_trunc_off < rec_offset + n)
2834                                         n = (int)(ip->sync_trunc_off - rec_offset);
2835                         }
2836                 }
2837
2838                 /*
2839                  * Try to issue a direct read into our bio if possible,
2840                  * otherwise resolve the element data into a hammer_buffer
2841                  * and copy.
2842                  *
2843                  * The buffer on-disk should be zeroed past any real
2844                  * truncation point, but may not be for any synthesized
2845                  * truncation point from above.
2846                  *
2847                  * NOTE: disk_offset is only valid if the cursor data is
2848                  *       on-disk.
2849                  */
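                     /*
                      * The read can be satisfied straight from the media when
                      * it spans the entire buffer from offset 0, the record is
                      * on-disk, and the media offset is buffer-aligned.
                      */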
2850                 disk_offset = cursor.leaf->data_offset + roff;
2851                 isdedupable = (boff == 0 && n == bp->b_bufsize &&
2852                                hammer_cursor_ondisk(&cursor) &&
2853                                ((int)disk_offset & HAMMER_BUFMASK) == 0);
2854
2855                 if (isdedupable && hammer_double_buffer == 0) {
2856                         /*
2857                          * Direct read case
2858                          */
2859                         KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2860                                  HAMMER_ZONE_LARGE_DATA);
2861                         nbio->bio_offset = disk_offset;
2862                         error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
2863                         if (hammer_live_dedup && error == 0)
2864                                 hammer_dedup_cache_add(ip, cursor.leaf);
2865                         goto done;
2866                 } else if (isdedupable) {
2867                         /*
2868                          * Async I/O case for reading from backing store
2869                          * and copying the data to the filesystem buffer.
2870                          * live-dedup has to verify the data anyway if it
2871                          * gets a hit later so we can just add the entry
2872                          * now.
2873                          */
2874                         KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2875                                  HAMMER_ZONE_LARGE_DATA);
2876                         nbio->bio_offset = disk_offset;
2877                         if (hammer_live_dedup)
2878                                 hammer_dedup_cache_add(ip, cursor.leaf);
2879                         error = hammer_io_indirect_read(hmp, nbio, cursor.leaf);
2880                         goto done;
2881                 } else if (n) {
2882                         error = hammer_ip_resolve_data(&cursor);
2883                         if (error == 0) {
2884                                 if (hammer_live_dedup && isdedupable)
2885                                         hammer_dedup_cache_add(ip, cursor.leaf);
2886                                 bcopy((char *)cursor.data + roff,
2887                                       (char *)bp->b_data + boff, n);
2888                         }
2889                 }
2890                 if (error)
2891                         break;
2892
2893                 /*
2894                  * We have to be sure that the only elements added to the
2895                  * dedup cache are those which are already on-media.
2896                  */
2897                 if (hammer_live_dedup && hammer_cursor_ondisk(&cursor))
2898                         hammer_dedup_cache_add(ip, cursor.leaf);
2899
2900                 /*
2901                  * Iterate until we have filled the request.
2902                  */
2903                 boff += n;
2904                 if (boff == bp->b_bufsize)
2905                         break;
2906                 error = hammer_ip_next(&cursor);
2907         }
2908
2909         /*
2910          * There may have been a gap after the last record
2911          */
2912         if (error == ENOENT)
2913                 error = 0;
2914         if (error == 0 && boff != bp->b_bufsize) {
2915                 KKASSERT(boff < bp->b_bufsize);
2916                 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2917                 /* boff = bp->b_bufsize; */
2918         }
2919
2920         /*
2921          * Disallow swapcache operation on the vnode buffer if double
2922          * buffering is enabled; the swapcache will get the data via
2923          * the block device buffer.
2924          */
2925         if (hammer_double_buffer)
2926                 bp->b_flags |= B_NOTMETA;
2927
2928         /*
2929          * Cleanup
2930          */
2931         bp->b_resid = 0;
2932         bp->b_error = error;
2933         if (error)
2934                 bp->b_flags |= B_ERROR;
2935         biodone(ap->a_bio);
2936
2937 done:
2938         /*
2939          * Cache the b-tree node for the last data read in cache[1].
2940          *
2941          * If we hit the file EOF then also cache the node in the
2942          * governing directory's cache[3], it will be used to initialize
2943          * the new inode's cache[1] for any inodes looked up via the directory.
2944          *
2945          * This doesn't reduce disk accesses since the B-Tree chain is
2946          * likely cached, but it does reduce cpu overhead when looking
2947          * up file offsets for cpdup/tar/cpio style iterations.
2948          */
2949         if (cursor.node)
2950                 hammer_cache_node(&ip->cache[1], cursor.node);
2951         if (ran_end >= ip->ino_data.size) {
2952                 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
2953                                         ip->obj_asof, ip->obj_localization);
2954                 if (dip) {
2955                         hammer_cache_node(&dip->cache[3], cursor.node);
2956                         hammer_rel_inode(dip, 0);
2957                 }
2958         }
2959         hammer_done_cursor(&cursor);
2960         hammer_done_transaction(&trans);
2961         lwkt_reltoken(&hmp->fs_token);
2962         return(error);
2963 }
2964
2965 /*
2966  * BMAP operation - used to support cluster_read() only.
2967  *
2968  * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
2969  *
2970  * This routine may return EOPNOTSUPP if the operation is not supported for
2971  * the specified offset.  The contents of the pointer arguments do not
2972  * need to be initialized in that case.
2973  *
2974  * If a disk address is available and properly aligned return 0 with
2975  * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
2976  * to the run-length relative to that offset.  Callers may assume that
2977  * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
2978  * large, so return EOPNOTSUPP if it is not sufficiently large.
2979  */
2980 static
2981 int
2982 hammer_vop_bmap(struct vop_bmap_args *ap)
2983 {
2984         struct hammer_transaction trans;
2985         struct hammer_inode *ip;
2986         hammer_mount_t hmp;
2987         struct hammer_cursor cursor;
2988         hammer_base_elm_t base;
2989         int64_t rec_offset;
2990         int64_t ran_end;
2991         int64_t tmp64;
2992         int64_t base_offset;
2993         int64_t base_disk_offset;
2994         int64_t last_offset;
2995         hammer_off_t last_disk_offset;
2996         hammer_off_t disk_offset;
2997         int     rec_len;
2998         int     error;
2999         int     blksize;
3000
3001         ++hammer_stats_file_iopsr;
3002         ip = ap->a_vp->v_data;
3003         hmp = ip->hmp;
3004
3005         /*
3006          * We can only BMAP regular files.  We can't BMAP database files,
3007          * directories, etc.
3008          */
3009         if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
3010                 return(EOPNOTSUPP);
3011
3012         /*
3013          * bmap is typically called with runp/runb both NULL when used
3014          * for writing.  We do not support BMAP for writing atm.
3015          * for writing.  We do not support BMAP for writing at the moment.
3016         if (ap->a_cmd != BUF_CMD_READ)
3017                 return(EOPNOTSUPP);
3018
3019         /*
3020          * Scan the B-Tree to acquire blockmap addresses, then translate
3021          * to raw addresses.
3022          */
3023         lwkt_gettoken(&hmp->fs_token);
3024         hammer_simple_transaction(&trans, hmp);
3025
3026         hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
3027
3028         /*
3029          * Key range (begin and end inclusive) to scan.  Note that the keys
3030          * stored in the actual records represent BASE+LEN, not BASE.  The
3031          * first record containing bio_offset will have a key > bio_offset.
3032          */
3033         cursor.key_beg.localization = ip->obj_localization |
3034                                       HAMMER_LOCALIZE_MISC;
3035         cursor.key_beg.obj_id = ip->obj_id;
3036         cursor.key_beg.create_tid = 0;
3037         cursor.key_beg.delete_tid = 0;
3038         cursor.key_beg.obj_type = 0;
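             /*
              * When a backwards run (*runb) is requested, start the scan up
              * to MAXPHYS before the requested offset so records preceding
              * loffset can contribute to the run; otherwise start just past
              * loffset.
              */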
3039         if (ap->a_runb)
3040                 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
3041         else
3042                 cursor.key_beg.key = ap->a_loffset + 1;
3043         if (cursor.key_beg.key < 0)
3044                 cursor.key_beg.key = 0;
3045         cursor.asof = ip->obj_asof;
3046         cursor.flags |= HAMMER_CURSOR_ASOF;
3047
3048         cursor.key_end = cursor.key_beg;
3049         KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
3050
3051         ran_end = ap->a_loffset + MAXPHYS;
3052         cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
3053         cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
3054         tmp64 = ran_end + MAXPHYS + 1;  /* work-around GCC-4 bug */
3055         if (tmp64 < ran_end)
3056                 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
3057         else
3058                 cursor.key_end.key = ran_end + MAXPHYS + 1;
3059
3060         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
3061
3062         error = hammer_ip_first(&cursor);
3063         base_offset = last_offset = 0;
3064         base_disk_offset = last_disk_offset = 0;
3065
3066         while (error == 0) {
3067                 /*
3068                  * Get the base file offset of the record.  The key for
3069                  * data records is (base + bytes) rather than (base).
3070                  *
3071                  * NOTE: rec_offset + rec_len may exceed the end-of-file.
3072                  * The extra bytes should be zero on-disk and the BMAP op
3073                  * should still be ok.
3074                  */
3075                 base = &cursor.leaf->base;
3076                 rec_offset = base->key - cursor.leaf->data_len;
3077                 rec_len    = cursor.leaf->data_len;
3078
3079                 /*
3080                  * Incorporate any cached truncation.
3081                  *
3082                  * NOTE: Modifications to rec_len based on synthesized
3083                  * truncation points remove the guarantee that any extended
3084                  * data on disk is zero (since the truncations may not have
3085                  * taken place on-media yet).
3086                  */
3087                 if (ip->flags & HAMMER_INODE_TRUNCATED) {
3088                         if (hammer_cursor_ondisk(&cursor) ||
3089                             cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
3090                                 if (ip->trunc_off <= rec_offset)
3091                                         rec_len = 0;
3092                                 else if (ip->trunc_off < rec_offset + rec_len)
3093                                         rec_len = (int)(ip->trunc_off - rec_offset);
3094                         }
3095                 }
3096                 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
3097                         if (hammer_cursor_ondisk(&cursor)) {
3098                                 if (ip->sync_trunc_off <= rec_offset)
3099                                         rec_len = 0;
3100                                 else if (ip->sync_trunc_off < rec_offset + rec_len)
3101                                         rec_len = (int)(ip->sync_trunc_off - rec_offset);
3102                         }
3103                 }
3104
3105                 /*
3106                  * Accumulate information.  If we have hit a discontiguous
3107                  * block, reset base_offset unless we are already beyond the
3108                  * requested offset.  If we are, that's it, we stop.
3109                  */
3110                 if (error)
3111                         break;
3112                 if (hammer_cursor_ondisk(&cursor)) {
3113                         disk_offset = cursor.leaf->data_offset;
3114                         if (rec_offset != last_offset ||
3115                             disk_offset != last_disk_offset) {
3116                                 if (rec_offset > ap->a_loffset)
3117                                         break;
3118                                 base_offset = rec_offset;
3119                                 base_disk_offset = disk_offset;
3120                         }
3121                         last_offset = rec_offset + rec_len;
3122                         last_disk_offset = disk_offset + rec_len;
3123
3124                         if (hammer_live_dedup)
3125                                 hammer_dedup_cache_add(ip, cursor.leaf);
3126                 }
3127
3128                 error = hammer_ip_next(&cursor);
3129         }
3130
3131         if (cursor.node)
3132                 hammer_cache_node(&ip->cache[1], cursor.node);
3133
3134         hammer_done_cursor(&cursor);
3135         hammer_done_transaction(&trans);
3136         lwkt_reltoken(&hmp->fs_token);
3137
3138         /*
3139          * If we couldn't find any records or the records we did find were
3140          * all behind the requested offset, return failure.  A forward
3141          * truncation can leave a hole with no on-disk records.
3142          */
3143         if (last_offset == 0 || last_offset < ap->a_loffset)
3144                 return (EOPNOTSUPP);
3145
3146         /*
3147          * Figure out the block size at the requested offset and adjust
3148          * our limits so the cluster_read() does not create inappropriately
3149          * sized buffer cache buffers.
3150          */
3151         blksize = hammer_blocksize(ap->a_loffset);
3152         if (hammer_blocksize(base_offset) != blksize) {
3153                 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
3154         }
3155         if (last_offset != ap->a_loffset &&
3156             hammer_blocksize(last_offset - 1) != blksize) {
3157                 last_offset = hammer_blockdemarc(ap->a_loffset,
3158                                                  last_offset - 1);
3159         }
3160
3161         /*
3162          * Returning EOPNOTSUPP simply prevents the direct-IO optimization
3163          * from occurring.
3164          */
3165         disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
3166
3167         if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
3168                 /*
3169                  * Only large-data zones can be direct-IOd
3170                  */
3171                 error = EOPNOTSUPP;
3172         } else if ((disk_offset & HAMMER_BUFMASK) ||
3173                    (last_offset - ap->a_loffset) < blksize) {
3174                 /*
3175                  * doffsetp is not aligned or the forward run size does
3176                  * not cover a whole buffer; disallow the direct I/O.
3177                  */
3178                 error = EOPNOTSUPP;
3179         } else {
3180                 /*
3181                  * We're good.
3182                  */
3183                 *ap->a_doffsetp = disk_offset;
3184                 if (ap->a_runb) {
3185                         *ap->a_runb = ap->a_loffset - base_offset;
3186                         KKASSERT(*ap->a_runb >= 0);
3187                 }
3188                 if (ap->a_runp) {
3189                         *ap->a_runp = last_offset - ap->a_loffset;
3190                         KKASSERT(*ap->a_runp >= 0);
3191                 }
3192                 error = 0;
3193         }
3194         return(error);
3195 }
3196
3197 /*
3198  * Write to a regular file.  Because this is a strategy call, the OS is
3199  * trying to actually get data onto the media.
3200  */
3201 static
3202 int
3203 hammer_vop_strategy_write(struct vop_strategy_args *ap)
3204 {
3205         hammer_record_t record;
3206         hammer_mount_t hmp;
3207         hammer_inode_t ip;
3208         struct bio *bio;
3209         struct buf *bp;
3210         int blksize __debugvar;
3211         int bytes;
3212         int error;
3213
3214         bio = ap->a_bio;
3215         bp = bio->bio_buf;
3216         ip = ap->a_vp->v_data;
3217         hmp = ip->hmp;
3218
3219         blksize = hammer_blocksize(bio->bio_offset);
3220         KKASSERT(bp->b_bufsize == blksize);
3221
3222         if (ip->flags & HAMMER_INODE_RO) {
3223                 bp->b_error = EROFS;
3224                 bp->b_flags |= B_ERROR;
3225                 biodone(ap->a_bio);
3226                 return(EROFS);
3227         }
3228
3229         lwkt_gettoken(&hmp->fs_token);
3230
3231         /*
3232          * Disallow swapcache operation on the vnode buffer if double
3233          * buffering is enabled; the swapcache will get the data via
3234          * the block device buffer.
3235          */
3236         if (hammer_double_buffer)
3237                 bp->b_flags |= B_NOTMETA;
3238
3239         /*
3240          * Interlock with inode destruction (no in-kernel or directory
3241          * topology visibility).  If we queue new IO while trying to
3242          * destroy the inode we can deadlock the vtrunc call in
3243          * hammer_inode_unloadable_check().
3244          *
3245          * Besides, there's no point flushing a bp associated with an
3246          * inode that is being destroyed on-media and has no kernel
3247          * references.
3248          */
3249         if ((ip->flags | ip->sync_flags) &
3250             (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
3251                 bp->b_resid = 0;
3252                 biodone(ap->a_bio);
3253                 lwkt_reltoken(&hmp->fs_token);
3254                 return(0);
3255         }
3256
3257         /*
3258          * Reserve space and issue a direct-write from the front-end.
3259          * NOTE: The direct_io code will hammer_bread/bcopy smaller
3260          * allocations.
3261          *
3262          * An in-memory record will be installed to reference the storage
3263          * until the flusher can get to it.
3264          *
3265          * Since we own the high level bio the front-end will not try to
3266          * do a direct-read until the write completes.
3267          *
3268          * NOTE: The only time we do not reserve a full-sized buffer's
3269          * worth of data is if the file is small.  We do not try to
3270          * allocate a fragment (from the small-data zone) at the end of
3271          * an otherwise large file as this can lead to wildly separated
3272          * data.
3273          */
3274         KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
3275         KKASSERT(bio->bio_offset < ip->ino_data.size);
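             /*
              * A small file (offset 0 and size <= HAMMER_HBUFSIZE) only
              * reserves its actual size rounded up to a 16-byte boundary;
              * everything else reserves the full buffer.
              */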
3276         if (bio->bio_offset || ip->ino_data.size > HAMMER_HBUFSIZE)
3277                 bytes = bp->b_bufsize;
3278         else
3279                 bytes = ((int)ip->ino_data.size + 15) & ~15;
3280
3281         record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
3282                                     bytes, &error);
3283
3284         /*
3285          * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
3286          * in hammer_vop_write().  We must flag the record so the proper
3287          * REDO_TERM_WRITE entry is generated during the flush.
3288          */
3289         if (record) {
3290                 if (bp->b_flags & B_VFSFLAG1) {
3291                         record->flags |= HAMMER_RECF_REDO;
3292                         bp->b_flags &= ~B_VFSFLAG1;
3293                 }
3294                 if (record->flags & HAMMER_RECF_DEDUPED) {
3295                         bp->b_resid = 0;
3296                         hammer_ip_replace_bulk(hmp, record);
3297                         biodone(ap->a_bio);
3298                 } else {
3299                         hammer_io_direct_write(hmp, bio, record);
3300                 }
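                     /*
                      * If reserved in-memory records are piling up past the
                      * configured limit, kick off a flush of this inode now.
                      */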
3301                 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
3302                         hammer_flush_inode(ip, 0);
3303         } else {
3304                 bp->b_bio2.bio_offset = NOOFFSET;
3305                 bp->b_error = error;
3306                 bp->b_flags |= B_ERROR;
3307                 biodone(ap->a_bio);
3308         }
3309         lwkt_reltoken(&hmp->fs_token);
3310         return(error);
3311 }
3312
3313 /*
3314  * dounlink - disconnect a directory entry
3315  *
3316  * XXX whiteout support not really in yet
3317  */
3318 static int
3319 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
3320                 struct vnode *dvp, struct ucred *cred,
3321                 int flags, int isdir)
3322 {
3323         struct namecache *ncp;
3324         hammer_inode_t dip;
3325         hammer_inode_t ip;
3326         hammer_mount_t hmp;
3327         struct hammer_cursor cursor;
3328         int64_t namekey;
3329         uint32_t max_iterations;
3330         int nlen, error;
3331
3332         /*
3333          * Calculate the namekey and setup the key range for the scan.  This
3334          * works kinda like a chained hash table where the lower 32 bits
3335          * of the namekey synthesize the chain.
3336          *
3337          * The key range is inclusive of both key_beg and key_end.
3338          */
3339         dip = VTOI(dvp);
3340         ncp = nch->ncp;
3341         hmp = dip->hmp;
3342
3343         if (dip->flags & HAMMER_INODE_RO)
3344                 return (EROFS);
3345
3346         namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
3347                                            &max_iterations);
3348 retry:
3349         hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
3350         cursor.key_beg.localization = dip->obj_localization |
3351                                       hammer_dir_localization(dip);
3352         cursor.key_beg.obj_id = dip->obj_id;
3353         cursor.key_beg.key = namekey;
3354         cursor.key_beg.create_tid = 0;
3355         cursor.key_beg.delete_tid = 0;
3356         cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
3357         cursor.key_beg.obj_type = 0;
3358
3359         cursor.key_end = cursor.key_beg;
3360         cursor.key_end.key += max_iterations;
3361         cursor.asof = dip->obj_asof;
3362         cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
3363
3364         /*
3365          * Scan all matching records (the chain), locate the one matching
3366          * the requested path component.  info->last_error contains the
3367          * error code on search termination and could be 0, ENOENT, or
3368          * something else.
3369          *
3370          * The hammer_ip_*() functions merge in-memory records with on-disk
3371          * records for the purposes of the search.
3372          */
3373         error = hammer_ip_first(&cursor);
3374
3375         while (error == 0) {
3376                 error = hammer_ip_resolve_data(&cursor);
3377                 if (error)
3378                         break;
3379                 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
3380                 KKASSERT(nlen > 0);
3381                 if (ncp->nc_nlen == nlen &&
3382                     bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
3383                         break;
3384                 }
3385                 error = hammer_ip_next(&cursor);
3386         }
3387
3388         /*
3389          * If all is ok we have to get the inode so we can adjust nlinks.
3390          * To avoid a deadlock with the flusher we must release the inode
3391          * lock on the directory when acquiring the inode for the entry.
3392          *
3393          * If the target is a directory, it must be empty.
3394          */
3395         if (error == 0) {
3396                 hammer_unlock(&cursor.ip->lock);
3397                 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
3398                                       hmp->asof,
3399                                       cursor.data->entry.localization,
3400                                       0, &error);
3401                 hammer_lock_sh(&cursor.ip->lock);
3402                 if (error == ENOENT) {
3403                         hkprintf("WARNING: Removing dirent w/missing inode "
3404                                 "\"%s\"\n"
3405                                 "\tobj_id = %016llx\n",
3406                                 ncp->nc_name,
3407                                 (long long)cursor.data->entry.obj_id);
3408                         error = 0;
3409                 }
3410
3411                 /*
3412                  * If isdir >= 0 we validate that the entry is or is not a
3413                  * directory.  If isdir < 0 we don't care.
3414                  */
3415                 if (error == 0 && isdir >= 0 && ip) {
3416                         if (isdir &&
3417                             ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3418                                 error = ENOTDIR;
3419                         } else if (isdir == 0 &&
3420                             ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3421                                 error = EISDIR;
3422                         }
3423                 }
3424
3425                 /*
3426                  * If we are trying to remove a directory the directory must
3427                  * be empty.
3428                  *
3429                  * The check directory code can loop and deadlock/retry.  Our
3430                  * own cursor's node locks must be released to avoid a 3-way
3431                  * deadlock with the flusher if the check directory code
3432                  * blocks.
3433                  *
3434                  * If any changes whatsoever have been made to the cursor
3435                  * set EDEADLK and retry.
3436                  *
3437                  * WARNING: See warnings in hammer_unlock_cursor()
3438                  *          function.
3439                  */
3440                 if (error == 0 && ip && ip->ino_data.obj_type ==
3441                                         HAMMER_OBJTYPE_DIRECTORY) {
3442                         hammer_unlock_cursor(&cursor);
3443                         error = hammer_ip_check_directory_empty(trans, ip);
3444                         hammer_lock_cursor(&cursor);
3445                         if (cursor.flags & HAMMER_CURSOR_RETEST) {
3446                                 hkprintf("Warning: avoided deadlock "
3447                                         "on rmdir '%s'\n",
3448                                         ncp->nc_name);
3449                                 error = EDEADLK;
3450                         }
3451                 }
3452
3453                 /*
3454                  * Delete the directory entry.
3455                  *
3456                  * WARNING: hammer_ip_del_directory() may have to terminate
3457                  * the cursor to avoid a deadlock.  It is ok to call
3458                  * hammer_done_cursor() twice.
3459                  */
3460                 if (error == 0) {
3461                         error = hammer_ip_del_directory(trans, &cursor,
3462                                                         dip, ip);
3463                 }
3464                 hammer_done_cursor(&cursor);
3465                 if (error == 0) {
3466                         /*
3467                          * Tell the namecache that we are now unlinked.
3468                          */
3469                         cache_unlink(nch);
3470
3471                         /*
3472                          * NOTE: ip->vp, if non-NULL, cannot be directly
3473                          *       referenced without formally acquiring the
3474                          *       vp since the vp might have zero refs on it,
3475                          *       or in the middle of a reclaim, etc.
3476                          *
3477                          * NOTE: The cache_setunresolved() can rip the vp
3478                          *       out from under us since the vp may not have
3479                          *       any refs, in which case ip->vp will be NULL
3480                          *       from the outset.
3481                          */
3482                         while (ip && ip->vp) {
3483                                 struct vnode *vp;
3484
3485                                 error = hammer_get_vnode(ip, &vp);
3486                                 if (error == 0 && vp) {
3487                                         vn_unlock(vp);
3488                                         hammer_knote(ip->vp, NOTE_DELETE);
3489 #if 0
3490                                         /*
3491                                          * Don't do this, it can deadlock
3492                                          * on concurrent rm's of hardlinks.
3493                                          * Shouldn't be needed any more.
3494                                          */
3495                                         cache_inval_vp(ip->vp, CINV_DESTROY);
3496 #endif
3497                                         vrele(vp);
3498                                         break;
3499                                 }
3500                                 hdkprintf("ip/vp race1 avoided\n");
3501                         }
3502                 }
3503                 if (ip)
3504                         hammer_rel_inode(ip, 0);
3505         } else {
3506                 hammer_done_cursor(&cursor);
3507         }
3508         if (error == EDEADLK)
3509                 goto retry;
3510
3511         return (error);
3512 }
3513
3514 /************************************************************************
3515  *                          FIFO AND SPECFS OPS                         *
3516  ************************************************************************
3517  *
3518  */
3519 static int
3520 hammer_vop_fifoclose (struct vop_close_args *ap)
3521 {
3522         /* XXX update itimes */
3523         return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3524 }
3525
3526 static int
3527 hammer_vop_fiforead (struct vop_read_args *ap)
3528 {
3529         int error;
3530
3531         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3532         /* XXX update access time */
3533         return (error);
3534 }
3535
3536 static int
3537 hammer_vop_fifowrite (struct vop_write_args *ap)
3538 {
3539         int error;
3540
3541         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3542         /* XXX update access time */
3543         return (error);
3544 }
3545
3546 static
3547 int
3548 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3549 {
3550         int error;
3551
3552         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3553         if (error)
3554                 error = hammer_vop_kqfilter(ap);
3555         return(error);
3556 }
3557
3558 /************************************************************************
3559  *                          KQFILTER OPS                                *
3560  ************************************************************************
3561  *
3562  */
3563 static void filt_hammerdetach(struct knote *kn);
3564 static int filt_hammerread(struct knote *kn, long hint);
3565 static int filt_hammerwrite(struct knote *kn, long hint);
3566 static int filt_hammervnode(struct knote *kn, long hint);
3567
3568 static struct filterops hammerread_filtops =
3569         { FILTEROP_ISFD | FILTEROP_MPSAFE,
3570           NULL, filt_hammerdetach, filt_hammerread };
3571 static struct filterops hammerwrite_filtops =
3572         { FILTEROP_ISFD | FILTEROP_MPSAFE,
3573           NULL, filt_hammerdetach, filt_hammerwrite };
3574 static struct filterops hammervnode_filtops =
3575         { FILTEROP_ISFD | FILTEROP_MPSAFE,
3576           NULL, filt_hammerdetach, filt_hammervnode };
3577
3578 static
3579 int
3580 hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3581 {
3582         struct vnode *vp = ap->a_vp;
3583         struct knote *kn = ap->a_kn;
3584
3585         switch (kn->kn_filter) {
3586         case EVFILT_READ:
3587                 kn->kn_fop = &hammerread_filtops;
3588                 break;
3589         case EVFILT_WRITE:
3590                 kn->kn_fop = &hammerwrite_filtops;
3591                 break;
3592         case EVFILT_VNODE:
3593                 kn->kn_fop = &hammervnode_filtops;
3594                 break;
3595         default:
3596                 return (EOPNOTSUPP);
3597         }
3598
3599         kn->kn_hook = (caddr_t)vp;
3600
3601         knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3602
3603         return(0);
3604 }
3605
3606 static void
3607 filt_hammerdetach(struct knote *kn)
3608 {
3609         struct vnode *vp = (void *)kn->kn_hook;
3610
3611         knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3612 }
3613
3614 static int
3615 filt_hammerread(struct knote *kn, long hint)
3616 {
3617         struct vnode *vp = (void *)kn->kn_hook;
3618         hammer_inode_t ip = VTOI(vp);
3619         hammer_mount_t hmp = ip->hmp;
3620         off_t off;
3621
3622         if (hint == NOTE_REVOKE) {
3623                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3624                 return(1);
3625         }
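             /*
              * Report the number of bytes readable past the current file
              * offset, clamped so it fits in kn_data (an intptr_t).
              */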
3626         lwkt_gettoken(&hmp->fs_token);  /* XXX use per-ip-token */
3627         off = ip->ino_data.size - kn->kn_fp->f_offset;
3628         kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
3629         lwkt_reltoken(&hmp->fs_token);
3630         if (kn->kn_sfflags & NOTE_OLDAPI)
3631                 return(1);
3632         return (kn->kn_data != 0);
3633 }
3634
3635 static int
3636 filt_hammerwrite(struct knote *kn, long hint)
3637 {
3638         if (hint == NOTE_REVOKE)
3639                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3640         kn->kn_data = 0;
3641         return (1);
3642 }
3643
3644 static int
3645 filt_hammervnode(struct knote *kn, long hint)
3646 {
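             /*
              * Latch any vnode events the caller registered interest in.
              * NOTE_REVOKE forces EOF/NODATA and always triggers.
              */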
3647         if (kn->kn_sfflags & hint)
3648                 kn->kn_fflags |= hint;
3649         if (hint == NOTE_REVOKE) {
3650                 kn->kn_flags |= (EV_EOF | EV_NODATA);
3651                 return (1);
3652         }
3653         return (kn->kn_fflags != 0);
3654 }
3655