/*
 * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *       to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

#define ZFOFFSET	(-2LL)

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;

/*
 * Callback used in the read path when a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if (bio->bio_caller_info2.index &&
	    bio->bio_caller_info1.uvalue32 !=
	     crc32(bp->b_data, bp->b_bufsize)) {
		/* return a CRC error */
	}
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
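	/*
	 * An LZ4-compressed device block begins with a native int giving
	 * the compressed payload size, followed by the payload itself.
	 */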
	compressed_size = *(const int *)data;
	KKASSERT(compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
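	/*
	 * LZ4_decompress_safe() returns the number of bytes produced on
	 * success or a negative value if the input is malformed.
	 */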
	if (result < 0) {
		kprintf("READ PATH: Error during decompression. "
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

/*
 * Callback used in the read path when a block is compressed with ZLIB.
 * It is almost identical to the LZ4 callback, so in theory they could be
 * unified, but we didn't want to make changes in the bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

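	/*
	 * Single-shot decompression: with Z_FINISH the entire logical
	 * buffer is expected from this one inflate() call, so anything
	 * other than Z_STREAM_END indicates a corrupt or truncated block.
	 */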
	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_cluster_t *cluster;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		LOCKSTOP;
		return (0);
	}

	/*
	 * Detect updates to the embedded data which may be synchronized by
	 * the strategy code.  Simply mark the inode modified so it gets
	 * picked up by our normal flush.
	 */
	cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_NEVER |
					 HAMMER2_RESOLVE_RDONLY);
	KKASSERT(cluster);

	/*
	 * Check for deleted inodes and recycle immediately.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *          lock held due to the way our write thread works.
	 */
	if (hammer2_cluster_isunlinked(cluster)) {
		hammer2_key_t lbase;
		int nblksize;

		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		hammer2_inode_unlock(ip, cluster);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	} else {
		hammer2_inode_unlock(ip, cluster);
	}
	LOCKSTOP;
	return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_cluster_t *cluster;
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL) {
		LOCKSTOP;
		return(0);
	}

	/*
	 * Inode must be locked for reclaim.
	 */
	pmp = ip->pmp;
	cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_NEVER |
					 HAMMER2_RESOLVE_RDONLY);

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *       really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list and
	 * handled later on.
	 */
	if (hammer2_cluster_isunlinked(cluster)) {
		hammer2_inode_unlink_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
		ipul->ip = ip;

		hammer2_spin_ex(&pmp->list_spin);
		TAILQ_INSERT_TAIL(&pmp->unlinkq, ipul, entry);
		hammer2_spin_unex(&pmp->list_spin);
		hammer2_inode_unlock(ip, cluster);	/* unlock */
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_unlock(ip, cluster);	/* unlock */
		hammer2_inode_drop(ip);			/* vp ref */
	}
	/* cluster no longer referenced */
	/* cluster = NULL; not needed */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	LOCKSTOP;
	return (0);
}

static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_trans_t trans;
	hammer2_cluster_t *cluster;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(&trans, ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	vclrisdirty(vp);
	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MTIME))
		hammer2_inode_fsync(&trans, ip, cluster);

#if 0
	/*
	 * XXX creates discontinuity w/modify_tid
	 */
	if (ap->a_flags & VOP_FSYNC_SYSCALL) {
		hammer2_flush(&trans, cluster);
	}
#endif
	hammer2_inode_unlock(ip, cluster);
	hammer2_trans_done(&trans);

	LOCKSTOP;
	return (0);
}

static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	const hammer2_inode_data_t *ripdata;
	hammer2_cluster_t *cluster;
	uid_t uid;
	gid_t gid;
	int error;

	LOCKSTART;
	cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
					 HAMMER2_RESOLVE_SHARED);
	ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
	uid = hammer2_to_unix_xid(&ripdata->uid);
	gid = hammer2_to_unix_xid(&ripdata->gid);
	error = vop_helper_access(ap, uid, gid, ripdata->mode, ripdata->uflags);
	hammer2_inode_unlock(ip, cluster);

	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_cluster_t *cluster;
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

	cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
					 HAMMER2_RESOLVE_SHARED);
	ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
	KKASSERT(hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ripdata->inum;
	vap->va_mode = ripdata->mode;
	vap->va_nlink = ripdata->nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ripdata->uid);
	vap->va_gid = hammer2_to_unix_xid(&ripdata->gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ripdata->uflags;
	hammer2_time_to_timespec(ripdata->ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ripdata->mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ripdata->mtime, &vap->va_atime);
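	/*
	 * NOTE: atime is not stored on-media (see the disabled atime
	 *	 handling in hammer2_vop_setattr()), so mtime is
	 *	 reported in its place.
	 */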
	vap->va_gen = 1;
	vap->va_bytes = vap->va_size;	/* XXX */
	vap->va_type = hammer2_get_vtype(ripdata);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ripdata->uid;
	vap->va_gid_uuid = ripdata->gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip, cluster);

	LOCKSTOP;
	return (0);
}

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_inode_data_t *wipdata;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cluster;
	hammer2_trans_t trans;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	int domtime = 0;
	int dosync = 0;
	uint64_t ctime;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(&trans, ip->pmp, 0);
	cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
	error = 0;

	if (vap->va_flags != VNOVAL) {
		u_int32_t flags;

		flags = ripdata->uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					 hammer2_to_unix_xid(&ripdata->uid),
					 ap->a_cred);
		if (error == 0) {
			if (ripdata->uflags != flags) {
				wipdata = hammer2_cluster_modify_ip(&trans, ip,
								    cluster, 0);
				wipdata->uflags = flags;
				wipdata->ctime = ctime;
				kflags |= NOTE_ATTRIB;
				dosync = 1;
				ripdata = wipdata;
			}
			if (ripdata->uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ripdata->uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ripdata->mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ripdata->uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ripdata->gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ripdata->uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ripdata->gid, sizeof(uuid_gid)) ||
			    ripdata->mode != cur_mode
			) {
				wipdata = hammer2_cluster_modify_ip(&trans, ip,
								    cluster, 0);
				wipdata->uid = uuid_uid;
				wipdata->gid = uuid_gid;
				wipdata->mode = cur_mode;
				wipdata->ctime = ctime;
				dosync = 1;
				ripdata = wipdata;
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->size)
				break;
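			/*
			 * The inode lock is cycled around the resize
			 * because the truncate/extend helpers call
			 * nvtruncbuf()/nvextendbuf(), which cannot be
			 * safely called with the inode lock held.
			 */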
			hammer2_inode_unlock(ip, cluster);
			if (vap->va_size < ip->size) {
				hammer2_truncate_file(ip, vap->va_size);
			} else {
				hammer2_extend_file(ip, vap->va_size);
			}
			cluster = hammer2_inode_lock(ip,
						     HAMMER2_RESOLVE_ALWAYS);
			/* RELOAD */
			ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
			domtime = 1;
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
		wipdata->atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
		dosync = 1;
		ripdata = wipdata;
	}
#endif
	if (vap->va_mtime.tv_sec != VNOVAL) {
		wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
		wipdata->mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
		domtime = 0;
		dosync = 1;
		ripdata = wipdata;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ripdata->mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ripdata->uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ripdata->gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ripdata->mode != cur_mode) {
			wipdata = hammer2_cluster_modify_ip(&trans, ip,
							    cluster, 0);
			wipdata->mode = cur_mode;
			wipdata->ctime = ctime;
			kflags |= NOTE_ATTRIB;
			dosync = 1;
			ripdata = wipdata;
		}
	}

	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 */
	if (dosync) {
		hammer2_cluster_modsync(cluster);
		dosync = 0;
	}
	hammer2_inode_fsync(&trans, ip, cluster);

	/*
	 * Cleanup.  If domtime is set an additional inode modification
	 * must be flagged.  All other modifications will have already
	 * set INODE_MODIFIED and called vsetisdirty().
	 */
done:
	if (domtime) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
					   HAMMER2_INODE_MTIME);
		vsetisdirty(ip->vp);
	}
	if (dosync)
		hammer2_cluster_modsync(cluster);
	hammer2_inode_unlock(ip, cluster);
	hammer2_trans_done(&trans);
	hammer2_knote(ip->vp, kflags);

	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_inode_t *ip;
	hammer2_inode_t *xip;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *xcluster;
	hammer2_blockref_t bref;
	hammer2_tid_t inum;
	hammer2_key_t key_next;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int dtype;
	int r;

	LOCKSTART;
	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
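		/*
		 * Rough estimate: one cookie per 16 bytes of uio space,
		 * capped at 1024 cookies per call.
		 */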
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	cparent = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
					 HAMMER2_RESOLVE_SHARED);

	ripdata = &hammer2_cluster_rdata(cparent)->ipdata;

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	error = 0;
	cluster = (void *)(intptr_t)-1;	/* non-NULL for early goto done case */

	if (saveoff == 0) {
		inum = ripdata->inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ripdata->inum & HAMMER2_DIRHASH_USERMSK;
		while (ip->pip != NULL && ip != ip->pmp->iroot) {
			xip = ip->pip;
			hammer2_inode_ref(xip);
			hammer2_inode_unlock(ip, cparent);
			xcluster = hammer2_inode_lock(xip,
						      HAMMER2_RESOLVE_ALWAYS |
						      HAMMER2_RESOLVE_SHARED);

			cparent = hammer2_inode_lock(ip,
						      HAMMER2_RESOLVE_ALWAYS |
						      HAMMER2_RESOLVE_SHARED);
			hammer2_inode_drop(xip);
			ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
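			/*
			 * If the parent did not change while the locks
			 * were cycled the inum read below is stable,
			 * otherwise loop and retry against the new parent.
			 */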
			if (xip == ip->pip) {
				inum = hammer2_cluster_rdata(xcluster)->
					ipdata.inum & HAMMER2_DIRHASH_USERMSK;
				hammer2_inode_unlock(xip, xcluster);
				break;
			}
			hammer2_inode_unlock(xip, xcluster);
		}
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
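	/*
	 * HAMMER2_DIRHASH_VISIBLE is bit 63; it is set on real directory
	 * entry keys and stripped from the offsets returned to userland
	 * (see the uio_offset fixup at done:).
	 */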

	/*
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	if (error) {
		goto done;
	}
	cluster = hammer2_cluster_lookup(cparent, &key_next, lkey, lkey,
				     HAMMER2_LOOKUP_SHARED);
	if (cluster == NULL) {
		cluster = hammer2_cluster_lookup(cparent, &key_next,
					     lkey, (hammer2_key_t)-1,
					     HAMMER2_LOOKUP_SHARED);
	}
	if (cluster)
		hammer2_cluster_bref(cluster, &bref);
	while (cluster) {
		if (hammer2_debug & 0x0020)
			kprintf("readdir: p=%p chain=%p %016jx (next %016jx)\n",
				cparent->focus, cluster->focus,
				bref.key, key_next);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
			dtype = hammer2_get_dtype(ripdata);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}

		/*
		 * Keys may not be returned in order so once we have a
		 * placemarker (cluster) the scan must allow the full range
		 * or some entries will be missed.
		 */
		cluster = hammer2_cluster_next(cparent, cluster, &key_next,
					       key_next, (hammer2_key_t)-1,
					       HAMMER2_LOOKUP_SHARED);
		if (cluster) {
			hammer2_cluster_bref(cluster, &bref);
			saveoff = (bref.key & HAMMER2_DIRHASH_USERMSK) + 1;
		} else {
			saveoff = (hammer2_key_t)-1;
		}
		if (cookie_index == ncookies)
			break;
	}
	if (cluster)
		hammer2_cluster_unlock(cluster);
done:
	hammer2_inode_unlock(ip, cparent);
	if (ap->a_eofflag)
		*ap->a_eofflag = (cluster == NULL);
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_trans_t trans;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int bigwrite;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly) {
		return (EROFS);
	}

	seqcount = ap->a_ioflag >> 16;
	bigwrite = (uio->uio_resid > 100 * 1024 * 1024);

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 */
	hammer2_trans_init(&trans, ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
	hammer2_trans_done(&trans);

	return (error);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *          vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	size = ip->size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

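		/*
		 * cluster_read() may read ahead up to the logical EOF
		 * (leof), with seqcount scaling the read-ahead window.
		 */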
		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * BKVASIZE,
				     &bp);

		if (error)
			break;
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomove((char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	return (error);
}

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip,
		   struct uio *uio, int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *          vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->size;
	old_eof = ip->size;
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
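		/*
		 * trivial  - the copy (plus EOF) fully determines the
		 *	      buffer contents, no read-in is required.
		 * endofblk - the copy extends to the end of the block.
		 */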

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		error = uiomove(bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *          with IO_SYNC or IO_ASYNC set.  These writes
		 *          must be handled as the pageout daemon expects.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else {
			bdwrite(bp);
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through,
	 * the entire write is a failure and we have to back up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_truncate_file(ip, old_eof);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_update_time(&ip->mtime);
		atomic_set_int(&ip->flags, HAMMER2_INODE_MTIME);
		hammer2_mtx_unlock(&ip->lock);
	}
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	hammer2_knote(ip->vp, kflags);
	vsetisdirty(ip->vp);

	return error;
}

/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * NOTE:    Caller handles setting HAMMER2_INODE_MODIFIED
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *          held due to the way our write thread works.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *          vnode level.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	LOCKSTART;
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	ip->size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_mtx_unlock(&ip->lock);
	LOCKSTOP;
}

/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *          vnode level.
 *
 * NOTE: Caller handles setting HAMMER2_INODE_MODIFIED
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	LOCKSTART;
	hammer2_mtx_ex(&ip->lock);
	osize = ip->size;
	ip->size = nsize;
	hammer2_mtx_unlock(&ip->lock);

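	/*
	 * Inform the buffer cache of the old/new EOF and the blocksize
	 * transition so the buffer straddling the old EOF can be adjusted.
	 */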
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	LOCKSTOP;
}

static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	const hammer2_inode_data_t *ripdata;
	hammer2_key_t key_next;
	hammer2_key_t lhc;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error = 0;
	struct vnode *vp;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	cparent = hammer2_inode_lock(dip, HAMMER2_RESOLVE_ALWAYS |
					  HAMMER2_RESOLVE_SHARED);

	cluster = hammer2_cluster_lookup(cparent, &key_next,
					 lhc, lhc + HAMMER2_DIRHASH_LOMASK,
					 HAMMER2_LOOKUP_SHARED);
	while (cluster) {
		if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE) {
			ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
			if (ripdata->name_len == name_len &&
			    bcmp(ripdata->filename, name, name_len) == 0) {
				break;
			}
		}
		cluster = hammer2_cluster_next(cparent, cluster, &key_next,
					       key_next,
					       lhc + HAMMER2_DIRHASH_LOMASK,
					       HAMMER2_LOOKUP_SHARED);
	}
	hammer2_inode_unlock(dip, cparent);

	/*
	 * Resolve hardlink entries before acquiring the inode.
	 */
	if (cluster) {
		ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
		if (ripdata->type == HAMMER2_OBJTYPE_HARDLINK) {
			hammer2_tid_t inum = ripdata->inum;
			error = hammer2_hardlink_find(dip, NULL, &cluster);
			if (error) {
				kprintf("hammer2: unable to find hardlink "
					"0x%016jx\n", inum);
				LOCKSTOP;

				return error;
			}
		}
	}

	/*
	 * nresolve needs to resolve hardlinks, the original cluster is not
	 * sufficient.
	 */
	if (cluster) {
		ip = hammer2_inode_get(dip->pmp, dip, cluster);
		ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
		if (ripdata->type == HAMMER2_OBJTYPE_HARDLINK) {
			kprintf("nresolve: fixup hardlink\n");
			hammer2_inode_ref(ip);
			hammer2_inode_unlock(ip, NULL);
			hammer2_cluster_unlock(cluster);
			cluster = hammer2_inode_lock(ip,
						     HAMMER2_RESOLVE_ALWAYS);
			ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
			hammer2_inode_drop(ip);
			kprintf("nresolve: fixup to type %02x\n",
				ripdata->type);
		}
	} else {
		ip = NULL;
	}

#if 0
	/*
	 * Deconsolidate any hardlink whose nlinks == 1.  Ignore errors.
	 * If an error occurs chain and ip are left alone.
	 *
	 * XXX upgrade shared lock?
	 */
	if (ochain && chain &&
	    chain->data->ipdata.nlinks == 1 && !dip->pmp->ronly) {
		kprintf("hammer2: need to unconsolidate hardlink for %s\n",
			chain->data->ipdata.filename);
		/* XXX retain shared lock on dip? (currently not held) */
		hammer2_trans_init(&trans, dip->pmp, 0);
		hammer2_hardlink_deconsolidate(&trans, dip, &chain, &ochain);
		hammer2_trans_done(&trans);
	}
#endif

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *       entry to NULL, otherwise we just return the error and
	 *       leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *       same chain element, for example for hardlinks.  This
	 *       use case does not 'reattach' inode associations that
	 *       might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *          but chain was locked shared.  inode_unlock()
	 *          will handle it properly.
	 */
	if (cluster) {
		vp = hammer2_igetv(ip, cluster, &error);
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip, cluster);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	LOCKSTOP;
	return error;
}

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cparent;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);

	if ((ip = dip->pip) == NULL) {
		*ap->a_vpp = NULL;
		LOCKSTOP;
		return ENOENT;
	}
	cparent = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	*ap->a_vpp = hammer2_igetv(ip, cparent, &error);
	hammer2_inode_unlock(ip, cparent);

	LOCKSTOP;
	return error;
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_trans_t trans;
	hammer2_cluster_t *cluster;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	cluster = NULL;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len,
				   &cluster, 0, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, cluster, &error);
		hammer2_inode_unlock(nip, cluster);
	}
	hammer2_trans_done(&trans);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.
 */
static
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}
1492
1493 /*
1494  * hammer2_vop_advlock { vp, id, op, fl, flags }
1495  */
1496 static
1497 int
1498 hammer2_vop_advlock(struct vop_advlock_args *ap)
1499 {
1500         hammer2_inode_t *ip = VTOI(ap->a_vp);
1501         const hammer2_inode_data_t *ripdata;
1502         hammer2_cluster_t *cparent;
1503         hammer2_off_t size;
1504
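	/*
	 * Sample the file size under a shared inode lock; lf_advlock()
	 * needs it to resolve SEEK_END-relative lock ranges.
	 */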
	cparent = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
					 HAMMER2_RESOLVE_SHARED);
	ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
	size = ripdata->size;
	hammer2_inode_unlock(ip, cparent);
	return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *fdip;	/* source directory (ip->pip) */
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *cdip;	/* common parent directory */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *fdcluster;
	hammer2_cluster_t *tdcluster;
	hammer2_cluster_t *cdcluster;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * If ip is a hardlinked target then ip->pip represents the location
	 * of the hardlinked target, NOT the location of the hardlink pointer.
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster and ip->pip.  The
	 * returned cluster is locked.
	 */
	ip = VTOI(ap->a_vp);
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_NEWINODE);

	/*
	 * The common parent directory must be locked first to avoid deadlocks.
	 * Also note that fdip and/or tdip might match cdip.
	 */
	fdip = ip->pip;
	cdip = hammer2_inode_common_parent(fdip, tdip);
	cdcluster = hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
	fdcluster = hammer2_inode_lock(fdip, HAMMER2_RESOLVE_ALWAYS);
	tdcluster = hammer2_inode_lock(tdip, HAMMER2_RESOLVE_ALWAYS);
	cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
					     cdip, cdcluster, 1);
	if (error)
		goto done;

	/*
	 * Create a directory entry connected to the specified cluster.
	 *
	 * WARNING! chain can get moved by the connect (indirectly due to
	 *          potential indirect block creation).
	 */
	error = hammer2_inode_connect(&trans, &cluster, 1,
				      tdip, tdcluster,
				      name, name_len, 0);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
done:
	hammer2_inode_unlock(ip, cluster);
	hammer2_inode_unlock(tdip, tdcluster);
	hammer2_inode_unlock(fdip, fdcluster);
	hammer2_inode_unlock(cdip, cdcluster);
	hammer2_inode_drop(cdip);
	hammer2_trans_done(&trans);

	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_trans_t trans;
	hammer2_cluster_t *ncluster;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
	ncluster = NULL;

	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len,
				   &ncluster, 0, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
		hammer2_inode_unlock(nip, ncluster);
	}
	hammer2_trans_done(&trans);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * Make a device node (typically a fifo).
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_trans_t trans;
	hammer2_cluster_t *ncluster;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
	ncluster = NULL;

	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len,
				   &ncluster, 0, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
		hammer2_inode_unlock(nip, ncluster);
	}
	hammer2_trans_done(&trans);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	hammer2_cluster_t *ncparent;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
	ncparent = NULL;

	ap->a_vap->va_type = VLNK;	/* enforce type */

	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
				   name, name_len,
				   &ncparent, 0, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		hammer2_trans_done(&trans);
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, ncparent, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;
		hammer2_inode_data_t *nipdata;

		nipdata = &hammer2_cluster_wdata(ncparent)->ipdata;
		/* nipdata = &nip->chain->data->ipdata;XXX */
		bytes = strlen(ap->a_target);

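		/*
		 * Short targets fit entirely in the inode's embedded data
		 * area (HAMMER2_EMBEDDED_BYTES); longer targets are written
		 * out as ordinary file data below.
		 */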
		if (bytes <= HAMMER2_EMBEDDED_BYTES) {
			KKASSERT(nipdata->op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(ap->a_target, nipdata->u.data, bytes);
			nipdata->size = bytes;
			nip->size = bytes;
			hammer2_cluster_modsync(ncparent);
			hammer2_inode_unlock(nip, ncparent);
			/* nipdata = NULL; not needed */
		} else {
			hammer2_inode_unlock(nip, ncparent);
			/* nipdata = NULL; not needed */
			bzero(&auio, sizeof(auio));
			bzero(&aiov, sizeof(aiov));
			auio.uio_iov = &aiov;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_WRITE;
			auio.uio_resid = bytes;
			auio.uio_iovcnt = 1;
			auio.uio_td = curthread;
			aiov.iov_base = ap->a_target;
			aiov.iov_len = bytes;
			error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
			/* XXX handle error */
			error = 0;
		}
	} else {
		hammer2_inode_unlock(nip, ncparent);
	}
	hammer2_trans_done(&trans);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
	}
	return error;
}

/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, 0);
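	/*
	 * isdir 0 requires the target to be a non-directory; nlinks -1
	 * drops one link from the target (the normal remove case).
	 */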
	error = hammer2_unlink_file(&trans, dip, name, name_len,
				    0, NULL, ap->a_nch, -1);
	hammer2_run_unlinkq(&trans, dip->pmp);
	hammer2_trans_done(&trans);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_trans_t trans;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(&trans, dip->pmp, 0);
	hammer2_run_unlinkq(&trans, dip->pmp);
	error = hammer2_unlink_file(&trans, dip, name, name_len,
				    1, NULL, ap->a_nch, -1);
	hammer2_trans_done(&trans);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}

1881
1882 /*
1883  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1884  */
1885 static
1886 int
1887 hammer2_vop_nrename(struct vop_nrename_args *ap)
1888 {
1889         struct namecache *fncp;
1890         struct namecache *tncp;
1891         hammer2_inode_t *cdip;
1892         hammer2_inode_t *fdip;
1893         hammer2_inode_t *tdip;
1894         hammer2_inode_t *ip;
1895         hammer2_cluster_t *cluster;
1896         hammer2_cluster_t *fdcluster;
1897         hammer2_cluster_t *tdcluster;
1898         hammer2_cluster_t *cdcluster;
1899         hammer2_trans_t trans;
1900         const uint8_t *fname;
1901         size_t fname_len;
1902         const uint8_t *tname;
1903         size_t tname_len;
1904         int error;
1905         int tnch_error;
1906         int hlink;
1907
1908         if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1909                 return(EXDEV);
1910         if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1911                 return(EXDEV);
1912
1913         fdip = VTOI(ap->a_fdvp);        /* source directory */
1914         tdip = VTOI(ap->a_tdvp);        /* target directory */
1915
1916         if (fdip->pmp->ronly)
1917                 return(EROFS);
1918
1919         LOCKSTART;
1920         fncp = ap->a_fnch->ncp;         /* entry name in source */
1921         fname = fncp->nc_name;
1922         fname_len = fncp->nc_nlen;
1923
1924         tncp = ap->a_tnch->ncp;         /* entry name in target */
1925         tname = tncp->nc_name;
1926         tname_len = tncp->nc_nlen;
1927
1928         hammer2_pfs_memory_wait(tdip->pmp);
1929         hammer2_trans_init(&trans, tdip->pmp, 0);
1930
1931         /*
1932          * ip is the inode being renamed.  If this is a hardlink then
1933          * ip represents the actual file and not the hardlink marker.
1934          */
1935         ip = VTOI(fncp->nc_vp);
1936         cluster = NULL;
1937
1938
1939         /*
1940          * The common parent directory must be locked first to avoid deadlocks.
1941          * Also note that fdip and/or tdip might match cdip.
1942          *
1943          * WARNING! fdip may not match ip->pip.  That is, if the source file
1944          *          is already a hardlink then what we are renaming is the
1945          *          hardlink pointer, not the hardlink itself.  The hardlink
1946          *          directory (ip->pip) will already be at a common parent
1947          *          of fdrip.
1948          *
1949          *          Be sure to use ip->pip when finding the common parent
1950          *          against tdip or we might accidently move the hardlink
1951          *          target into a subdirectory that makes it inaccessible to
1952          *          other pointers.
1953          */
	cdip = hammer2_inode_common_parent(ip->pip, tdip);
	cdcluster = hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
	fdcluster = hammer2_inode_lock(fdip, HAMMER2_RESOLVE_ALWAYS);
	tdcluster = hammer2_inode_lock(tdip, HAMMER2_RESOLVE_ALWAYS);

	/*
	 * Keep a tight grip on the inode so the temporary unlinking from
	 * the source location prior to linking to the target location
	 * does not cause the cluster to be destroyed.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *       unlinking elements from their directories.  Locking
	 *       the nlinks field does not lock the whole inode.
	 */
	hammer2_inode_ref(ip);

	/*
	 * Remove target if it exists.
	 */
	error = hammer2_unlink_file(&trans, tdip, tname, tname_len,
				    -1, NULL, ap->a_tnch, -1);
	tnch_error = error;
	if (error && error != ENOENT)
		goto done;

	/*
	 * When renaming a hardlinked file we may have to re-consolidate
	 * the location of the hardlink target.
	 *
	 * If ip represents a regular file the consolidation code essentially
	 * does nothing other than return the same locked cluster that was
	 * passed in.
	 *
	 * The returned cluster will be locked.
	 *
	 * WARNING!  We do not currently have a local copy of ipdata, but
	 *           if we do use one later, remember that it must be
	 *           reloaded on any modification to the inode, including
	 *           connects.
	 */
	cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
					     cdip, cdcluster, 0);
	if (error)
		goto done;

	/*
	 * Disconnect (fdip, fname) from the source directory.  This will
	 * disconnect (ip) if it represents a direct file.  If (ip) represents
	 * a hardlink the HARDLINK pointer object will be removed but the
	 * hardlink will stay intact.
	 *
	 * Always pass nch as NULL because we intend to reconnect the inode,
	 * so we don't want hammer2_unlink_file() to rename it to the hidden
	 * open-but-unlinked directory.
	 *
	 * The target cluster may be marked DELETED but will not be destroyed
	 * since we retain our hold on ip and cluster.
	 *
	 * NOTE: We pass nlinks as 0 (not -1) in order to retain the file's
	 *       link count.
	 */
	error = hammer2_unlink_file(&trans, fdip, fname, fname_len,
				    -1, &hlink, NULL, 0);
	KKASSERT(error != EAGAIN);
	if (error)
		goto done;

	/*
	 * Reconnect ip to target directory using cluster.  Chains cannot
	 * actually be moved, so this will duplicate the cluster in the new
	 * spot and assign it to the ip, replacing the old cluster.
	 *
	 * WARNING: Because recursive locks are allowed and we unlinked the
	 *          file that we have a cluster-in-hand for just above, the
	 *          cluster might have been delete-duplicated.  We must
	 *          refactor the cluster.
	 *
	 * WARNING: Chain locks can lock buffer cache buffers, to avoid
	 *          deadlocks we want to unlock before issuing a cache_*()
	 *          op (that might have to lock a vnode).
	 *
	 * NOTE:    Pass nlinks as 0 because we retained the link count from
	 *          the unlink, so we do not have to modify it.
	 */
	error = hammer2_inode_connect(&trans, &cluster, hlink,
				      tdip, tdcluster,
				      tname, tname_len, 0);
	if (error == 0) {
		KKASSERT(cluster != NULL);
		hammer2_inode_repoint(ip, (hlink ? ip->pip : tdip), cluster);
	}
done:
	hammer2_inode_unlock(ip, cluster);
	hammer2_inode_unlock(tdip, tdcluster);
	hammer2_inode_unlock(fdip, fdcluster);
	hammer2_inode_unlock(cdip, cdcluster);
	hammer2_inode_drop(ip);
	hammer2_inode_drop(cdip);
	hammer2_run_unlinkq(&trans, fdip->pmp);
	hammer2_trans_done(&trans);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer structures, otherwise we might deadlock.
	 */
	if (tnch_error == 0) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

	LOCKSTOP;
	return (error);
}

/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * WARNING: The strategy code cannot safely use hammer2 transactions
 *          as this can deadlock against vfs_sync's vfsync() call
 *          if multiple flushes are queued.  All H2 structures must
 *          already be present and ready for the DIO.
 *
 *          Reads can be initiated asynchronously, writes have to be
 *          spooled to a separate thread for action to avoid deadlocks.
 */
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_callback(hammer2_iocb_t *iocb);

static
int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}
	return (error);
}

/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cparent;
	hammer2_cluster_t *cluster;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	uint8_t btype;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	nbio = push_bio(bio);

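	/*
	 * Logical buffer offsets are always aligned to the logical buffer
	 * size (HAMMER2_PBUFSIZE), which HAMMER2_PBUFMASK asserts below.
	 */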
	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	/*
	 * Lookup the file offset.
	 */
	cparent = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
					 HAMMER2_RESOLVE_SHARED);
	cluster = hammer2_cluster_lookup(cparent, &key_dummy,
				       lbase, lbase,
				       HAMMER2_LOOKUP_NODATA |
				       HAMMER2_LOOKUP_SHARED);
	hammer2_inode_unlock(ip, cparent);

	/*
	 * Data is zero-fill if no cluster could be found
	 * (XXX or EIO on a cluster failure).
	 */
	if (cluster == NULL) {
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(nbio);
		return(0);
	}

	/*
	 * Cluster elements must be type INODE or type DATA, but the
	 * compression mode (or not) for DATA chains can be different for
	 * each chain.  This will be handled by the callback.
	 *
	 * If the cluster already has valid data the callback will be made
	 * immediately/synchronously.
	 */
	btype = hammer2_cluster_type(cluster);
	if (btype != HAMMER2_BREF_TYPE_INODE &&
	    btype != HAMMER2_BREF_TYPE_DATA) {
		panic("READ PATH: hammer2_strategy_read: unknown bref type");
	}
	hammer2_cluster_load_async(cluster, hammer2_strategy_read_callback,
				   nbio);
	return(0);
}

/*
 * Read callback for hammer2_cluster_load_async().  The load function may
 * start several actual I/Os but will only make one callback, typically
 * with the first valid I/O.  XXX
 */
static
void
hammer2_strategy_read_callback(hammer2_iocb_t *iocb)
{
	struct bio *bio = iocb->ptr;	/* original logical buffer */
	struct buf *bp = bio->bio_buf;	/* original logical buffer */
	hammer2_chain_t *chain;
	hammer2_cluster_t *cluster;
	hammer2_io_t *dio;
	char *data;
	int i;

	/*
	 * Extract data and handle iteration on I/O failure.  iocb->lbase
	 * is the cluster index for iteration.
	 */
	cluster = iocb->cluster;
	dio = iocb->dio;	/* can be NULL if iocb not in progress */

	/*
	 * Work to do if INPROG set, else dio is already good or dio is
	 * NULL (which is the shortcut case if chain->data is already good).
	 */
	if (iocb->flags & HAMMER2_IOCB_INPROG) {
		/*
		 * Read attempt not yet made.  Issue an asynchronous read
		 * if necessary and return, operation will chain back to
		 * this function.
		 */
		if ((iocb->flags & HAMMER2_IOCB_READ) == 0) {
			if (dio->bp == NULL ||
			    (dio->bp->b_flags & B_CACHE) == 0) {
				if (dio->bp) {
					bqrelse(dio->bp);
					dio->bp = NULL;
				}
				iocb->flags |= HAMMER2_IOCB_READ;
				breadcb(dio->hmp->devvp,
					dio->pbase, dio->psize,
					hammer2_io_callback, iocb);
				return;
			}
		}
	}

	/*
	 * If we have a DIO it is now done, check for an error and
	 * calculate the data.
	 *
	 * If there is no DIO it is an optimization by
	 * hammer2_cluster_load_async(), the data is available in
	 * chain->data.
	 */
	if (dio) {
		if (dio->bp->b_flags & B_ERROR) {
			i = (int)iocb->lbase + 1;
			if (i >= cluster->nchains) {
				bp->b_flags |= B_ERROR;
				bp->b_error = dio->bp->b_error;
				hammer2_io_complete(iocb);
				biodone(bio);
				hammer2_cluster_unlock(cluster);
			} else {
				hammer2_io_complete(iocb); /* XXX */
				chain = cluster->array[i].chain;
				kprintf("hammer2: IO CHAIN-%d %p\n", i, chain);
				hammer2_adjreadcounter(&chain->bref,
						       chain->bytes);
				iocb->chain = chain;
				iocb->lbase = (off_t)i;
				iocb->flags = 0;
				iocb->error = 0;
				hammer2_io_getblk(chain->hmp,
						  chain->bref.data_off,
						  chain->bytes,
						  iocb);
			}
			return;
		}
		chain = iocb->chain;
		data = hammer2_io_data(dio, chain->bref.data_off);
	} else {
		/*
		 * Special synchronous case, data present in chain->data.
		 */
		chain = iocb->chain;
		data = (void *)chain->data;
	}

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Data is embedded in the inode (copy from inode).
		 */
		bcopy(((hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, issue device I/O and copy.
		 *
		 * XXX direct-IO shortcut could go here XXX.
		 */
		switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, chain->bytes,
							bio);
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, chain->bytes,
							 bio);
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(chain->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, chain->bytes);
			if (chain->bytes < bp->b_bcount) {
				bzero(bp->b_data + chain->bytes,
				      bp->b_bcount - chain->bytes);
			}
			bp->b_flags |= B_NOTMETA;
			bp->b_resid = 0;
			bp->b_error = 0;
			break;
		default:
			panic("hammer2_strategy_read: "
			      "unknown compression type");
		}
	} else {
		/* bqrelse the dio to help stabilize the call to panic() */
		if (dio)
			hammer2_io_bqrelse(&dio);
		panic("hammer2_strategy_read: unknown bref type");
	}

	/*
	 * Once the iocb is cleaned up the DIO (if any) will no longer be
	 * in-progress but will still have a ref.  Be sure to release
	 * the ref.
	 */
	hammer2_io_complete(iocb);		/* physical management */
	if (dio)				/* physical dio & buffer */
		hammer2_io_bqrelse(&dio);
	hammer2_cluster_unlock(cluster);	/* cluster management */
	biodone(bio);				/* logical buffer */
}

static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_pfs_t *pmp;
	struct bio *bio;
	struct buf *bp;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

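	/*
	 * Spool the logical buffer to the write thread.  A wakeup is only
	 * needed when the queue transitions from empty to non-empty; if
	 * bios are already queued the write thread is active and will
	 * drain them.
	 */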
	hammer2_lwinprog_ref(pmp);
	hammer2_mtx_ex(&pmp->wthread_mtx);
	if (TAILQ_EMPTY(&pmp->wthread_bioq.queue)) {
		bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
		hammer2_mtx_unlock(&pmp->wthread_mtx);
		wakeup(&pmp->wthread_bioq);
	} else {
		bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
		hammer2_mtx_unlock(&pmp->wthread_mtx);
	}
	hammer2_lwinprog_wait(pmp);

	return(0);
}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	LOCKSTART;
	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	LOCKSTART;
	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	LOCKSTOP;
	return (rc);
}

/*
 * This handles unlinked open files after the vnode is finally dereferenced.
 * To avoid deadlocks it cannot be called from the normal vnode recycling
 * path, so we call it (1) after an unlink, rmdir, or rename, (2) on every
 * flush, and (3) on umount.
 */
void
hammer2_run_unlinkq(hammer2_trans_t *trans, hammer2_pfs_t *pmp)
{
	const hammer2_inode_data_t *ripdata;
	hammer2_inode_unlink_t *ipul;
	hammer2_inode_t *ip;
	hammer2_cluster_t *cluster;
	hammer2_cluster_t *cparent;

	if (TAILQ_EMPTY(&pmp->unlinkq))
		return;

	LOCKSTART;
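	/*
	 * The list spinlock cannot be held across the blocking inode lock
	 * below, so it is dropped while each entry is processed and
	 * reacquired afterwards.
	 */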
	hammer2_spin_ex(&pmp->list_spin);
	while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
		TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
		hammer2_spin_unex(&pmp->list_spin);
		ip = ipul->ip;
		kfree(ipul, pmp->minode);

		cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
		ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
		if (hammer2_debug & 0x400) {
			kprintf("hammer2: unlink on reclaim: %s refs=%d\n",
				ripdata->filename, ip->refs);
		}
		KKASSERT(ripdata->nlinks == 0);

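		/*
		 * Permanently delete the inode's chain; nlinks is zero and
		 * the last vnode reference is gone, so nothing references
		 * the file any longer.
		 */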
		cparent = hammer2_cluster_parent(cluster);
		hammer2_cluster_delete(trans, cparent, cluster,
				       HAMMER2_DELETE_PERMANENT);
		hammer2_cluster_unlock(cparent);
		hammer2_inode_unlock(ip, cluster);	/* inode lock */
		hammer2_inode_drop(ip);			/* ipul ref */

		hammer2_spin_ex(&pmp->list_spin);
	}
	hammer2_spin_unex(&pmp->list_spin);
	LOCKSTOP;
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}

static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
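	/*
	 * Report the number of bytes readable past the current file
	 * position, clamped to the range representable by kn_data.
	 */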
	off = ip->size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return(EROFS);
	return(0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
}

/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default =		vop_defaultop,
	.vop_fsync =		hammer2_vop_fsync,
	.vop_read =		vop_stdnoread,
	.vop_write =		vop_stdnowrite,
	.vop_access =		hammer2_vop_access,
	.vop_close =		hammer2_vop_close,
	.vop_markatime =	hammer2_vop_markatime,
	.vop_getattr =		hammer2_vop_getattr,
	.vop_inactive =		hammer2_vop_inactive,
	.vop_reclaim =		hammer2_vop_reclaim,
	.vop_setattr =		hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default =		fifo_vnoperate,
	.vop_fsync =		hammer2_vop_fsync,
#if 0
	.vop_read =		hammer2_vop_fiforead,
	.vop_write =		hammer2_vop_fifowrite,
#endif
	.vop_access =		hammer2_vop_access,
#if 0
	.vop_close =		hammer2_vop_fifoclose,
#endif
	.vop_markatime =	hammer2_vop_markatime,
	.vop_getattr =		hammer2_vop_getattr,
	.vop_inactive =		hammer2_vop_inactive,
	.vop_reclaim =		hammer2_vop_reclaim,
	.vop_setattr =		hammer2_vop_setattr,
	.vop_kqfilter =		hammer2_vop_fifokqfilter
};