dragonfly.git: sys/vfs/hammer2/hammer2_vnops.c
1 /*
2  * Copyright (c) 2011-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression) 
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * Kernel Filesystem interface
38  *
39  * NOTE! local ipdata pointers must be reloaded on any modifying operation
40  *       to the inode as its underlying chain may have changed.
41  */
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
59
60 #include "hammer2.h"
61 #include "hammer2_lz4.h"
62
63 #include "zlib/hammer2_zlib.h"
64
65 #define ZFOFFSET        (-2LL)
66
67 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
68                                 int seqcount);
69 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
70                                 int ioflag, int seqcount);
71 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
72 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
73
74 struct objcache *cache_buffer_read;
75 struct objcache *cache_buffer_write;
76
77 /*
78  * Callback used in the read path when a block is compressed with LZ4.
79  */
80 static
81 void
82 hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
83 {
84         struct buf *bp;
85         char *compressed_buffer;
86         int compressed_size;
87         int result;
88
89         bp = bio->bio_buf;
90
91 #if 0
92         if (bio->bio_caller_info2.index &&
93             bio->bio_caller_info1.uvalue32 !=
94             crc32(bp->b_data, bp->b_bufsize)) { /* return error */ }
95 #endif
96
97         KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
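            /*
             * The device buffer starts with the compressed length stored
             * as an int; the LZ4 payload follows immediately after it.
             */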
98         compressed_size = *(const int *)data;
99         KKASSERT(compressed_size <= bytes - sizeof(int));
100
101         compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
102         result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
103                                      compressed_buffer,
104                                      compressed_size,
105                                      bp->b_bufsize);
106         if (result < 0) {
107                 kprintf("READ PATH: Error during decompression. "
108                         "bio %016jx/%d\n",
109                         (intmax_t)bio->bio_offset, bytes);
110                 /* make sure it isn't random garbage */
111                 bzero(compressed_buffer, bp->b_bufsize);
112         }
113         KKASSERT(result <= bp->b_bufsize);
114         bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
115         if (result < bp->b_bufsize)
116                 bzero(bp->b_data + result, bp->b_bufsize - result);
117         objcache_put(cache_buffer_read, compressed_buffer);
118         bp->b_resid = 0;
119         bp->b_flags |= B_AGE;
120 }
121
122 /*
123  * Callback used in the read path when a block is compressed with ZLIB.
124  * It is almost identical to the LZ4 callback, so in theory the two could
125  * be unified, but we did not want to change the bio structure for that.
126  */
127 static
128 void
129 hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
130 {
131         struct buf *bp;
132         char *compressed_buffer;
133         z_stream strm_decompress;
134         int result;
135         int ret;
136
137         bp = bio->bio_buf;
138
139         KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
140         strm_decompress.avail_in = 0;
141         strm_decompress.next_in = Z_NULL;
142
143         ret = inflateInit(&strm_decompress);
144
145         if (ret != Z_OK)
146                 kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");
147
148         compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
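            /*
             * Note: despite its name, compressed_buffer receives the
             * decompressed output (it is wired to next_out below).
             */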
149         strm_decompress.next_in = __DECONST(char *, data);
150
151         /* XXX supply proper size, subset of device bp */
152         strm_decompress.avail_in = bytes;
153         strm_decompress.next_out = compressed_buffer;
154         strm_decompress.avail_out = bp->b_bufsize;
155
156         ret = inflate(&strm_decompress, Z_FINISH);
157         if (ret != Z_STREAM_END) {
158                 kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
159                 bzero(compressed_buffer, bp->b_bufsize);
160         }
161         bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
162         result = bp->b_bufsize - strm_decompress.avail_out;
163         if (result < bp->b_bufsize)
164                 bzero(bp->b_data + result, strm_decompress.avail_out);
165         objcache_put(cache_buffer_read, compressed_buffer);
166         ret = inflateEnd(&strm_decompress);
167
168         bp->b_resid = 0;
169         bp->b_flags |= B_AGE;
170 }
171
172 static __inline
173 void
174 hammer2_knote(struct vnode *vp, int flags)
175 {
176         if (flags)
177                 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
178 }
179
180 /*
181  * Last reference to a vnode is going away but it is still cached.
182  */
183 static
184 int
185 hammer2_vop_inactive(struct vop_inactive_args *ap)
186 {
187         hammer2_inode_t *ip;
188         hammer2_cluster_t *cluster;
189         struct vnode *vp;
190
191         LOCKSTART;
192         vp = ap->a_vp;
193         ip = VTOI(vp);
194
195         /*
196          * Degenerate case
197          */
198         if (ip == NULL) {
199                 vrecycle(vp);
200                 LOCKSTOP;
201                 return (0);
202         }
203
204         /*
205          * Detect updates to the embedded data which may be synchronized by
206          * the strategy code.  Simply mark the inode modified so it gets
207          * picked up by our normal flush.
208          */
209         cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_NEVER |
210                                          HAMMER2_RESOLVE_RDONLY);
211         KKASSERT(cluster);
212
213         /*
214          * Check for deleted inodes and recycle immediately.
215          *
216          * WARNING: nvtruncbuf() can only be safely called without the inode
217          *          lock held due to the way our write thread works.
218          */
219         if (hammer2_cluster_isunlinked(cluster)) {
220                 hammer2_key_t lbase;
221                 int nblksize;
222
223                 nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
224                 hammer2_inode_unlock(ip, cluster);
225                 nvtruncbuf(vp, 0, nblksize, 0, 0);
226                 vrecycle(vp);
227         } else {
228                 hammer2_inode_unlock(ip, cluster);
229         }
230         LOCKSTOP;
231         return (0);
232 }
233
234 /*
235  * Reclaim a vnode so that it can be reused; after the inode is
236  * disassociated, the filesystem must manage it alone.
237  */
238 static
239 int
240 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
241 {
242         hammer2_cluster_t *cluster;
243         hammer2_inode_t *ip;
244         hammer2_pfs_t *pmp;
245         struct vnode *vp;
246
247         LOCKSTART;
248         vp = ap->a_vp;
249         ip = VTOI(vp);
250         if (ip == NULL) {
251                 LOCKSTOP;
252                 return(0);
253         }
254
255         /*
256          * Inode must be locked for reclaim.
257          */
258         pmp = ip->pmp;
259         cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_NEVER |
260                                          HAMMER2_RESOLVE_RDONLY);
261
262         /*
263          * The final close of a deleted file or directory marks it for
264          * destruction.  The DELETED flag allows the flusher to shortcut
265          * any modified blocks still unflushed (that is, just ignore them).
266          *
267          * HAMMER2 usually does not try to optimize the freemap by returning
268          * deleted blocks to it as it does not usually know how many snapshots
269          * might be referencing portions of the file/dir.
270          */
271         vp->v_data = NULL;
272         ip->vp = NULL;
273
274         /*
275          * NOTE! We do not attempt to flush chains here, flushing is
276          *       really fragile and could also deadlock.
277          */
278         vclrisdirty(vp);
279
280         /*
281          * A reclaim can occur at any time so we cannot safely start a
282          * transaction to handle reclamation of unlinked files.  Instead,
283          * the ip is left with a reference and placed on a linked list and
284          * handled later on.
285          */
286         if (hammer2_cluster_isunlinked(cluster)) {
287                 hammer2_inode_unlink_t *ipul;
288
289                 ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
290                 ipul->ip = ip;
291
292                 hammer2_spin_ex(&pmp->list_spin);
293                 TAILQ_INSERT_TAIL(&pmp->unlinkq, ipul, entry);
294                 hammer2_spin_unex(&pmp->list_spin);
295                 hammer2_inode_unlock(ip, cluster);      /* unlock */
296                 /* retain ref from vp for ipul */
297         } else {
298                 hammer2_inode_unlock(ip, cluster);      /* unlock */
299                 hammer2_inode_drop(ip);                 /* vp ref */
300         }
301         /* cluster no longer referenced */
302         /* cluster = NULL; not needed */
303
304         /*
305          * XXX handle background sync when ip dirty, kernel will no longer
306          * notify us regarding this inode because there is no longer a
307          * vnode attached to it.
308          */
309
310         LOCKSTOP;
311         return (0);
312 }
313
314 static
315 int
316 hammer2_vop_fsync(struct vop_fsync_args *ap)
317 {
318         hammer2_inode_t *ip;
319         hammer2_trans_t trans;
320         hammer2_cluster_t *cluster;
321         struct vnode *vp;
322
323         LOCKSTART;
324         vp = ap->a_vp;
325         ip = VTOI(vp);
326
327 #if 0
328         /* XXX can't do this yet */
329         hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_ISFLUSH);
330         vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
331 #endif
332         hammer2_trans_init(&trans, ip->pmp, 0);
333         vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
334
335         /*
336          * Calling chain_flush here creates a lot of duplicative
337          * COW operations due to non-optimal vnode ordering.
338          *
339          * Only do it for an actual fsync() syscall.  The other forms
340          * which call this function will eventually call chain_flush
341          * on the volume root as a catch-all, which is far more optimal.
342          */
343         cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
344         atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
345         /*vclrisdirty(vp);*/
346         if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MTIME))
347                 hammer2_inode_fsync(&trans, ip, cluster);
348
349         hammer2_inode_unlock(ip, cluster);
350         hammer2_trans_done(&trans);
351
352         LOCKSTOP;
353         return (0);
354 }
355
356 static
357 int
358 hammer2_vop_access(struct vop_access_args *ap)
359 {
360         hammer2_inode_t *ip = VTOI(ap->a_vp);
361         const hammer2_inode_data_t *ripdata;
362         hammer2_cluster_t *cluster;
363         uid_t uid;
364         gid_t gid;
365         int error;
366
367         LOCKSTART;
368         cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
369                                          HAMMER2_RESOLVE_SHARED);
370         ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
371         uid = hammer2_to_unix_xid(&ripdata->uid);
372         gid = hammer2_to_unix_xid(&ripdata->gid);
373         error = vop_helper_access(ap, uid, gid, ripdata->mode, ripdata->uflags);
374         hammer2_inode_unlock(ip, cluster);
375
376         LOCKSTOP;
377         return (error);
378 }
379
380 static
381 int
382 hammer2_vop_getattr(struct vop_getattr_args *ap)
383 {
384         const hammer2_inode_data_t *ripdata;
385         hammer2_cluster_t *cluster;
386         hammer2_pfs_t *pmp;
387         hammer2_inode_t *ip;
388         struct vnode *vp;
389         struct vattr *vap;
390
391         LOCKSTART;
392         vp = ap->a_vp;
393         vap = ap->a_vap;
394
395         ip = VTOI(vp);
396         pmp = ip->pmp;
397
398         cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
399                                          HAMMER2_RESOLVE_SHARED);
400         ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
401         KKASSERT(hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE);
402
403         vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
404         vap->va_fileid = ripdata->inum;
405         vap->va_mode = ripdata->mode;
406         vap->va_nlink = ripdata->nlinks;
407         vap->va_uid = hammer2_to_unix_xid(&ripdata->uid);
408         vap->va_gid = hammer2_to_unix_xid(&ripdata->gid);
409         vap->va_rmajor = 0;
410         vap->va_rminor = 0;
411         vap->va_size = ip->size;        /* protected by shared lock */
412         vap->va_blocksize = HAMMER2_PBUFSIZE;
413         vap->va_flags = ripdata->uflags;
414         hammer2_time_to_timespec(ripdata->ctime, &vap->va_ctime);
415         hammer2_time_to_timespec(ripdata->mtime, &vap->va_mtime);
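            /* atime is not maintained; report mtime in its place */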
416         hammer2_time_to_timespec(ripdata->mtime, &vap->va_atime);
417         vap->va_gen = 1;
418         vap->va_bytes = vap->va_size;   /* XXX */
419         vap->va_type = hammer2_get_vtype(ripdata);
420         vap->va_filerev = 0;
421         vap->va_uid_uuid = ripdata->uid;
422         vap->va_gid_uuid = ripdata->gid;
423         vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
424                           VA_FSID_UUID_VALID;
425
426         hammer2_inode_unlock(ip, cluster);
427
428         LOCKSTOP;
429         return (0);
430 }
431
432 static
433 int
434 hammer2_vop_setattr(struct vop_setattr_args *ap)
435 {
436         const hammer2_inode_data_t *ripdata;
437         hammer2_inode_data_t *wipdata;
438         hammer2_inode_t *ip;
439         hammer2_cluster_t *cluster;
440         hammer2_trans_t trans;
441         struct vnode *vp;
442         struct vattr *vap;
443         int error;
444         int kflags = 0;
445         int domtime = 0;
446         int dosync = 0;
447         uint64_t ctime;
448
449         LOCKSTART;
450         vp = ap->a_vp;
451         vap = ap->a_vap;
452         hammer2_update_time(&ctime);
453
454         ip = VTOI(vp);
455
456         if (ip->pmp->ronly) {
457                 LOCKSTOP;
458                 return(EROFS);
459         }
460
461         hammer2_pfs_memory_wait(ip->pmp);
462         hammer2_trans_init(&trans, ip->pmp, 0);
463         cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
464         ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
465         error = 0;
466
467         if (vap->va_flags != VNOVAL) {
468                 u_int32_t flags;
469
470                 flags = ripdata->uflags;
471                 error = vop_helper_setattr_flags(&flags, vap->va_flags,
472                                          hammer2_to_unix_xid(&ripdata->uid),
473                                          ap->a_cred);
474                 if (error == 0) {
475                         if (ripdata->uflags != flags) {
476                                 wipdata = hammer2_cluster_modify_ip(&trans, ip,
477                                                                     cluster, 0);
478                                 wipdata->uflags = flags;
479                                 wipdata->ctime = ctime;
480                                 kflags |= NOTE_ATTRIB;
481                                 dosync = 1;
482                                 ripdata = wipdata;
483                         }
484                         if (ripdata->uflags & (IMMUTABLE | APPEND)) {
485                                 error = 0;
486                                 goto done;
487                         }
488                 }
489                 goto done;
490         }
491         if (ripdata->uflags & (IMMUTABLE | APPEND)) {
492                 error = EPERM;
493                 goto done;
494         }
495         if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
496                 mode_t cur_mode = ripdata->mode;
497                 uid_t cur_uid = hammer2_to_unix_xid(&ripdata->uid);
498                 gid_t cur_gid = hammer2_to_unix_xid(&ripdata->gid);
499                 uuid_t uuid_uid;
500                 uuid_t uuid_gid;
501
502                 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
503                                          ap->a_cred,
504                                          &cur_uid, &cur_gid, &cur_mode);
505                 if (error == 0) {
506                         hammer2_guid_to_uuid(&uuid_uid, cur_uid);
507                         hammer2_guid_to_uuid(&uuid_gid, cur_gid);
508                         if (bcmp(&uuid_uid, &ripdata->uid, sizeof(uuid_uid)) ||
509                             bcmp(&uuid_gid, &ripdata->gid, sizeof(uuid_gid)) ||
510                             ripdata->mode != cur_mode
511                         ) {
512                                 wipdata = hammer2_cluster_modify_ip(&trans, ip,
513                                                                     cluster, 0);
514                                 wipdata->uid = uuid_uid;
515                                 wipdata->gid = uuid_gid;
516                                 wipdata->mode = cur_mode;
517                                 wipdata->ctime = ctime;
518                                 dosync = 1;
519                                 ripdata = wipdata;
520                         }
521                         kflags |= NOTE_ATTRIB;
522                 }
523         }
524
525         /*
526          * Resize the file
527          */
528         if (vap->va_size != VNOVAL && ip->size != vap->va_size) {
529                 switch(vp->v_type) {
530                 case VREG:
531                         if (vap->va_size == ip->size)
532                                 break;
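                            /*
                             * The truncate/extend helpers must be called
                             * with the inode unlocked (see the nvtruncbuf
                             * warnings below), so drop and re-acquire the
                             * inode lock around them.
                             */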
533                         hammer2_inode_unlock(ip, cluster);
534                         if (vap->va_size < ip->size) {
535                                 hammer2_truncate_file(ip, vap->va_size);
536                         } else {
537                                 hammer2_extend_file(ip, vap->va_size);
538                         }
539                         cluster = hammer2_inode_lock(ip,
540                                                      HAMMER2_RESOLVE_ALWAYS);
541                         /* RELOAD */
542                         ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
543                         domtime = 1;
544                         break;
545                 default:
546                         error = EINVAL;
547                         goto done;
548                 }
549         }
550 #if 0
551         /* atime not supported */
552         if (vap->va_atime.tv_sec != VNOVAL) {
553                 wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
554                 wipdata->atime = hammer2_timespec_to_time(&vap->va_atime);
555                 kflags |= NOTE_ATTRIB;
556                 dosync = 1;
557                 ripdata = wipdata;
558         }
559 #endif
560         if (vap->va_mtime.tv_sec != VNOVAL) {
561                 wipdata = hammer2_cluster_modify_ip(&trans, ip, cluster, 0);
562                 wipdata->mtime = hammer2_timespec_to_time(&vap->va_mtime);
563                 kflags |= NOTE_ATTRIB;
564                 domtime = 0;
565                 dosync = 1;
566                 ripdata = wipdata;
567         }
568         if (vap->va_mode != (mode_t)VNOVAL) {
569                 mode_t cur_mode = ripdata->mode;
570                 uid_t cur_uid = hammer2_to_unix_xid(&ripdata->uid);
571                 gid_t cur_gid = hammer2_to_unix_xid(&ripdata->gid);
572
573                 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
574                                          cur_uid, cur_gid, &cur_mode);
575                 if (error == 0 && ripdata->mode != cur_mode) {
576                         wipdata = hammer2_cluster_modify_ip(&trans, ip,
577                                                             cluster, 0);
578                         wipdata->mode = cur_mode;
579                         wipdata->ctime = ctime;
580                         kflags |= NOTE_ATTRIB;
581                         dosync = 1;
582                         ripdata = wipdata;
583                 }
584         }
585
586         /*
587          * If a truncation occurred we must call inode_fsync() now in order
588          * to trim the related data chains, otherwise a later expansion can
589          * cause havoc.
590          */
591         if (dosync) {
592                 hammer2_cluster_modsync(cluster);
593                 dosync = 0;
594         }
595         hammer2_inode_fsync(&trans, ip, cluster);
596
597         /*
598          * Cleanup.  If domtime is set an additional inode modification
599          * must be flagged.  All other modifications will have already
600          * set INODE_MODIFIED and called vsetisdirty().
601          */
602 done:
603         if (domtime) {
604                 atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
605                                            HAMMER2_INODE_MTIME);
606                 vsetisdirty(ip->vp);
607         }
608         if (dosync)
609                 hammer2_cluster_modsync(cluster);
610         hammer2_inode_unlock(ip, cluster);
611         hammer2_trans_done(&trans);
612         hammer2_knote(ip->vp, kflags);
613
614         LOCKSTOP;
615         return (error);
616 }
617
618 static
619 int
620 hammer2_vop_readdir(struct vop_readdir_args *ap)
621 {
622         const hammer2_inode_data_t *ripdata;
623         hammer2_inode_t *ip;
624         hammer2_inode_t *xip;
625         hammer2_cluster_t *cparent;
626         hammer2_cluster_t *cluster;
627         hammer2_cluster_t *xcluster;
628         hammer2_blockref_t bref;
629         hammer2_tid_t inum;
630         hammer2_key_t key_next;
631         hammer2_key_t lkey;
632         struct uio *uio;
633         off_t *cookies;
634         off_t saveoff;
635         int cookie_index;
636         int ncookies;
637         int error;
638         int dtype;
639         int r;
640
641         LOCKSTART;
642         ip = VTOI(ap->a_vp);
643         uio = ap->a_uio;
644         saveoff = uio->uio_offset;
645
646         /*
647          * Set up directory entry cookies if requested
648          */
649         if (ap->a_ncookies) {
650                 ncookies = uio->uio_resid / 16 + 1;
651                 if (ncookies > 1024)
652                         ncookies = 1024;
653                 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
654         } else {
655                 ncookies = -1;
656                 cookies = NULL;
657         }
658         cookie_index = 0;
659
660         cparent = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
661                                          HAMMER2_RESOLVE_SHARED);
662
663         ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
664
665         /*
666          * Handle artificial entries.  To ensure that only positive 64 bit
667          * quantities are returned to userland we always strip off bit 63.
668          * The hash code is designed such that codes 0x0000-0x7FFF are not
669          * used, allowing us to use these codes for artificial entries.
670          *
671          * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
672          * allow '..' to cross the mount point into (e.g.) the super-root.
673          */
674         error = 0;
675         cluster = (void *)(intptr_t)-1; /* non-NULL for early goto done case */
676
677         if (saveoff == 0) {
678                 inum = ripdata->inum & HAMMER2_DIRHASH_USERMSK;
679                 r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
680                 if (r)
681                         goto done;
682                 if (cookies)
683                         cookies[cookie_index] = saveoff;
684                 ++saveoff;
685                 ++cookie_index;
686                 if (cookie_index == ncookies)
687                         goto done;
688         }
689
690         if (saveoff == 1) {
691                 /*
692                  * Be careful with lockorder when accessing ".."
693                  *
694                  * (ip is the current dir. xip is the parent dir).
695                  */
696                 inum = ripdata->inum & HAMMER2_DIRHASH_USERMSK;
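                    /*
                     * ip must be unlocked before its parent can be locked
                     * to keep the lock order consistent; loop and retry if
                     * ip->pip changed while ip was unlocked.
                     */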
697                 while (ip->pip != NULL && ip != ip->pmp->iroot) {
698                         xip = ip->pip;
699                         hammer2_inode_ref(xip);
700                         hammer2_inode_unlock(ip, cparent);
701                         xcluster = hammer2_inode_lock(xip,
702                                                       HAMMER2_RESOLVE_ALWAYS |
703                                                       HAMMER2_RESOLVE_SHARED);
704
705                         cparent = hammer2_inode_lock(ip,
706                                                       HAMMER2_RESOLVE_ALWAYS |
707                                                       HAMMER2_RESOLVE_SHARED);
708                         hammer2_inode_drop(xip);
709                         ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
710                         if (xip == ip->pip) {
711                                 inum = hammer2_cluster_rdata(xcluster)->
712                                         ipdata.inum & HAMMER2_DIRHASH_USERMSK;
713                                 hammer2_inode_unlock(xip, xcluster);
714                                 break;
715                         }
716                         hammer2_inode_unlock(xip, xcluster);
717                 }
718                 r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
719                 if (r)
720                         goto done;
721                 if (cookies)
722                         cookies[cookie_index] = saveoff;
723                 ++saveoff;
724                 ++cookie_index;
725                 if (cookie_index == ncookies)
726                         goto done;
727         }
728
729         lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
730         if (hammer2_debug & 0x0020)
731                 kprintf("readdir: lkey %016jx\n", lkey);
732
733         /*
734          * cparent is the inode cluster, already locked for us.  Don't
735          * double lock shared locks as this will screw up upgrades.
736          */
737         if (error) {
738                 goto done;
739         }
740         cluster = hammer2_cluster_lookup(cparent, &key_next, lkey, lkey,
741                                      HAMMER2_LOOKUP_SHARED);
742         if (cluster == NULL) {
743                 cluster = hammer2_cluster_lookup(cparent, &key_next,
744                                              lkey, (hammer2_key_t)-1,
745                                              HAMMER2_LOOKUP_SHARED);
746         }
747         if (cluster)
748                 hammer2_cluster_bref(cluster, &bref);
749         while (cluster) {
750                 if (hammer2_debug & 0x0020)
751                         kprintf("readdir: p=%p chain=%p %016jx (next %016jx)\n",
752                                 cparent->focus, cluster->focus,
753                                 bref.key, key_next);
754
755                 if (bref.type == HAMMER2_BREF_TYPE_INODE) {
756                         ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
757                         dtype = hammer2_get_dtype(ripdata);
758                         saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
759                         r = vop_write_dirent(&error, uio,
760                                              ripdata->inum &
761                                               HAMMER2_DIRHASH_USERMSK,
762                                              dtype,
763                                              ripdata->name_len,
764                                              ripdata->filename);
765                         if (r)
766                                 break;
767                         if (cookies)
768                                 cookies[cookie_index] = saveoff;
769                         ++cookie_index;
770                 } else {
771                         /* XXX chain error */
772                         kprintf("bad chain type readdir %d\n", bref.type);
773                 }
774
775                 /*
776                  * Keys may not be returned in order so once we have a
777                  * placemarker (cluster) the scan must allow the full range
778                  * or some entries will be missed.
779                  */
780                 cluster = hammer2_cluster_next(cparent, cluster, &key_next,
781                                                key_next, (hammer2_key_t)-1,
782                                                HAMMER2_LOOKUP_SHARED);
783                 if (cluster) {
784                         hammer2_cluster_bref(cluster, &bref);
785                         saveoff = (bref.key & HAMMER2_DIRHASH_USERMSK) + 1;
786                 } else {
787                         saveoff = (hammer2_key_t)-1;
788                 }
789                 if (cookie_index == ncookies)
790                         break;
791         }
792         if (cluster) {
793                 hammer2_cluster_unlock(cluster);
794                 hammer2_cluster_drop(cluster);
795         }
796 done:
797         hammer2_inode_unlock(ip, cparent);
798         if (ap->a_eofflag)
799                 *ap->a_eofflag = (cluster == NULL);
800         if (hammer2_debug & 0x0020)
801                 kprintf("readdir: done at %016jx\n", saveoff);
802         uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
803         if (error && cookie_index == 0) {
804                 if (cookies) {
805                         kfree(cookies, M_TEMP);
806                         *ap->a_ncookies = 0;
807                         *ap->a_cookies = NULL;
808                 }
809         } else {
810                 if (cookies) {
811                         *ap->a_ncookies = cookie_index;
812                         *ap->a_cookies = cookies;
813                 }
814         }
815         LOCKSTOP;
816         return (error);
817 }
818
819 /*
820  * hammer2_vop_readlink { vp, uio, cred }
821  */
822 static
823 int
824 hammer2_vop_readlink(struct vop_readlink_args *ap)
825 {
826         struct vnode *vp;
827         hammer2_inode_t *ip;
828         int error;
829
830         vp = ap->a_vp;
831         if (vp->v_type != VLNK)
832                 return (EINVAL);
833         ip = VTOI(vp);
834
835         error = hammer2_read_file(ip, ap->a_uio, 0);
836         return (error);
837 }
838
839 static
840 int
841 hammer2_vop_read(struct vop_read_args *ap)
842 {
843         struct vnode *vp;
844         hammer2_inode_t *ip;
845         struct uio *uio;
846         int error;
847         int seqcount;
848         int bigread;
849
850         /*
851          * Read operations supported on this vnode?
852          */
853         vp = ap->a_vp;
854         if (vp->v_type != VREG)
855                 return (EINVAL);
856
857         /*
858          * Misc
859          */
860         ip = VTOI(vp);
861         uio = ap->a_uio;
862         error = 0;
863
864         seqcount = ap->a_ioflag >> 16;
865         bigread = (uio->uio_resid > 100 * 1024 * 1024);
866
867         error = hammer2_read_file(ip, uio, seqcount);
868         return (error);
869 }
870
871 static
872 int
873 hammer2_vop_write(struct vop_write_args *ap)
874 {
875         hammer2_inode_t *ip;
876         hammer2_trans_t trans;
877         thread_t td;
878         struct vnode *vp;
879         struct uio *uio;
880         int error;
881         int seqcount;
882         int bigwrite;
883
884         /*
885          * Write operations supported on this vnode?
886          */
887         vp = ap->a_vp;
888         if (vp->v_type != VREG)
889                 return (EINVAL);
890
891         /*
892          * Misc
893          */
894         ip = VTOI(vp);
895         uio = ap->a_uio;
896         error = 0;
897         if (ip->pmp->ronly) {
898                 return (EROFS);
899         }
900
901         seqcount = ap->a_ioflag >> 16;
902         bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
903
904         /*
905          * Check resource limit
906          */
907         if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
908             uio->uio_offset + uio->uio_resid >
909              td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
910                 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
911                 return (EFBIG);
912         }
913
916         /*
917          * The transaction interlocks against flush initiations
918          * (note: it will run concurrently with the actual flush).
919          */
920         hammer2_trans_init(&trans, ip->pmp, 0);
921         error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
922         hammer2_trans_done(&trans);
923
924         return (error);
925 }
926
927 /*
928  * Perform read operations on a file or symlink given an UNLOCKED
929  * inode and uio.
930  *
931  * The passed ip is not locked.
932  */
933 static
934 int
935 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
936 {
937         hammer2_off_t size;
938         struct buf *bp;
939         int error;
940
941         error = 0;
942
943         /*
944          * UIO read loop.
945          *
946          * WARNING! Assumes that the kernel interlocks size changes at the
947          *          vnode level.
948          */
949         hammer2_mtx_sh(&ip->lock);
950         size = ip->size;
951         hammer2_mtx_unlock(&ip->lock);
952
953         while (uio->uio_resid > 0 && uio->uio_offset < size) {
954                 hammer2_key_t lbase;
955                 hammer2_key_t leof;
956                 int lblksize;
957                 int loff;
958                 int n;
959
960                 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
961                                                 &lbase, &leof);
962
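                    /*
                     * Clustered read of the logical block.  leof bounds any
                     * read-ahead and seqcount scales the read-ahead window.
                     */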
963                 error = cluster_read(ip->vp, leof, lbase, lblksize,
964                                      uio->uio_resid, seqcount * BKVASIZE,
965                                      &bp);
966
967                 if (error)
968                         break;
969                 loff = (int)(uio->uio_offset - lbase);
970                 n = lblksize - loff;
971                 if (n > uio->uio_resid)
972                         n = uio->uio_resid;
973                 if (n > size - uio->uio_offset)
974                         n = (int)(size - uio->uio_offset);
975                 bp->b_flags |= B_AGE;
976                 uiomove((char *)bp->b_data + loff, n, uio);
977                 bqrelse(bp);
978         }
979         return (error);
980 }
981
982 /*
983  * Write to the file represented by the inode via the logical buffer cache.
984  * The inode may represent a regular file or a symlink.
985  *
986  * The inode must not be locked.
987  */
988 static
989 int
990 hammer2_write_file(hammer2_inode_t *ip,
991                    struct uio *uio, int ioflag, int seqcount)
992 {
993         hammer2_key_t old_eof;
994         hammer2_key_t new_eof;
995         struct buf *bp;
996         int kflags;
997         int error;
998         int modified;
999
1000         /*
1001          * Setup if append
1002          *
1003          * WARNING! Assumes that the kernel interlocks size changes at the
1004          *          vnode level.
1005          */
1006         hammer2_mtx_ex(&ip->lock);
1007         if (ioflag & IO_APPEND)
1008                 uio->uio_offset = ip->size;
1009         old_eof = ip->size;
1010         hammer2_mtx_unlock(&ip->lock);
1011
1012         /*
1013          * Extend the file if necessary.  If the write fails at some point
1014          * we will truncate it back down to cover as much as we were able
1015          * to write.
1016          *
1017          * Doing this now makes it easier to calculate buffer sizes in
1018          * the loop.
1019          */
1020         kflags = 0;
1021         error = 0;
1022         modified = 0;
1023
1024         if (uio->uio_offset + uio->uio_resid > old_eof) {
1025                 new_eof = uio->uio_offset + uio->uio_resid;
1026                 modified = 1;
1027                 hammer2_extend_file(ip, new_eof);
1028                 kflags |= NOTE_EXTEND;
1029         } else {
1030                 new_eof = old_eof;
1031         }
1032         
1033         /*
1034          * UIO write loop
1035          */
1036         while (uio->uio_resid > 0) {
1037                 hammer2_key_t lbase;
1038                 int trivial;
1039                 int endofblk;
1040                 int lblksize;
1041                 int loff;
1042                 int n;
1043
1044                 /*
1045                  * Don't allow the buffer build to blow out the buffer
1046                  * cache.
1047                  */
1048                 if ((ioflag & IO_RECURSE) == 0)
1049                         bwillwrite(HAMMER2_PBUFSIZE);
1050
1051                 /*
1052                  * This nominally tells us how much we can cluster and
1053                  * what the logical buffer size needs to be.  Currently
1054                  * we don't try to cluster the write and just handle one
1055                  * block at a time.
1056                  */
1057                 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
1058                                                 &lbase, NULL);
1059                 loff = (int)(uio->uio_offset - lbase);
1060                 
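                    /* logical buffers are at most 64KB (HAMMER2_PBUFSIZE) */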
1061                 KKASSERT(lblksize <= 65536);
1062
1063                 /*
1064                  * Calculate bytes to copy this transfer and whether the
1065                  * copy completely covers the buffer or not.
1066                  */
1067                 trivial = 0;
1068                 n = lblksize - loff;
1069                 if (n > uio->uio_resid) {
1070                         n = uio->uio_resid;
1071                         if (loff == 0 && uio->uio_offset + n == new_eof)
1072                                 trivial = 1;
1073                         endofblk = 0;
1074                 } else {
1075                         if (loff == 0)
1076                                 trivial = 1;
1077                         endofblk = 1;
1078                 }
1079
1080                 /*
1081                  * Get the buffer
1082                  */
1083                 if (uio->uio_segflg == UIO_NOCOPY) {
1084                         /*
1085                          * Issuing a write with the same data backing the
1086                          * buffer.  Instantiate the buffer to collect the
1087                          * backing vm pages, then read-in any missing bits.
1088                          *
1089                          * This case is used by vop_stdputpages().
1090                          */
1091                         bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
1092                         if ((bp->b_flags & B_CACHE) == 0) {
1093                                 bqrelse(bp);
1094                                 error = bread(ip->vp, lbase, lblksize, &bp);
1095                         }
1096                 } else if (trivial) {
1097                         /*
1098                          * Even though we are entirely overwriting the buffer
1099                          * we may still have to zero it out to avoid a
1100                          * mmap/write visibility issue.
1101                          */
1102                         bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
1103                         if ((bp->b_flags & B_CACHE) == 0)
1104                                 vfs_bio_clrbuf(bp);
1105                 } else {
1106                         /*
1107                          * Partial overwrite, read in any missing bits then
1108                          * replace the portion being written.
1109                          *
1110                          * (The strategy code will detect zero-fill physical
1111                          * blocks for this case).
1112                          */
1113                         error = bread(ip->vp, lbase, lblksize, &bp);
1114                         if (error == 0)
1115                                 bheavy(bp);
1116                 }
1117
1118                 if (error) {
1119                         brelse(bp);
1120                         break;
1121                 }
1122
1123                 /*
1124                  * Ok, copy the data in
1125                  */
1126                 error = uiomove(bp->b_data + loff, n, uio);
1127                 kflags |= NOTE_WRITE;
1128                 modified = 1;
1129                 if (error) {
1130                         brelse(bp);
1131                         break;
1132                 }
1133
1134                 /*
1135                  * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1136                  *          with IO_SYNC or IO_ASYNC set.  These writes
1137                  *          must be handled as the pageout daemon expects.
1138                  */
1139                 if (ioflag & IO_SYNC) {
1140                         bwrite(bp);
1141                 } else if ((ioflag & IO_DIRECT) && endofblk) {
1142                         bawrite(bp);
1143                 } else if (ioflag & IO_ASYNC) {
1144                         bawrite(bp);
1145                 } else {
1146                         bdwrite(bp);
1147                 }
1148         }
1149
1150         /*
1151          * Cleanup.  If we extended the file EOF but failed to write it all
1152          * the way through, the entire write is a failure and we back up.
1153          */
1154         if (error && new_eof != old_eof) {
1155                 hammer2_truncate_file(ip, old_eof);
1156         } else if (modified) {
1157                 hammer2_mtx_ex(&ip->lock);
1158                 hammer2_update_time(&ip->mtime);
1159                 atomic_set_int(&ip->flags, HAMMER2_INODE_MTIME);
1160                 hammer2_mtx_unlock(&ip->lock);
1161         }
1162         atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1163         hammer2_knote(ip->vp, kflags);
1164         vsetisdirty(ip->vp);
1165         hammer2_trans_assert_strategy(ip->pmp);
1166
1167         return error;
1168 }
1169
1170 /*
1171  * Truncate the size of a file.  The inode must not be locked.
1172  *
1173  * NOTE:    Caller handles setting HAMMER2_INODE_MODIFIED
1174  *
1175  * WARNING: nvtruncbuf() can only be safely called without the inode lock
1176  *          held due to the way our write thread works.
1177  *
1178  * WARNING! Assumes that the kernel interlocks size changes at the
1179  *          vnode level.
1180  */
1181 static
1182 void
1183 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1184 {
1185         hammer2_key_t lbase;
1186         int nblksize;
1187
1188         LOCKSTART;
1189         if (ip->vp) {
1190                 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1191                 nvtruncbuf(ip->vp, nsize,
1192                            nblksize, (int)nsize & (nblksize - 1),
1193                            0);
1194         }
1195         hammer2_mtx_ex(&ip->lock);
1196         ip->size = nsize;
1197         atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1198         hammer2_mtx_unlock(&ip->lock);
1199         LOCKSTOP;
1200 }
1201
1202 /*
1203  * Extend the size of a file.  The inode must not be locked.
1204  *
1205  * WARNING! Assumes that the kernel interlocks size changes at the
1206  *          vnode level.
1207  *
1208  * NOTE: Caller handles setting HAMMER2_INODE_MODIFIED
1209  */
1210 static
1211 void
1212 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1213 {
1214         hammer2_key_t lbase;
1215         hammer2_key_t osize;
1216         int oblksize;
1217         int nblksize;
1218
1219         LOCKSTART;
1220         hammer2_mtx_ex(&ip->lock);
1221         osize = ip->size;
1222         ip->size = nsize;
1223         hammer2_mtx_unlock(&ip->lock);
1224
1225         if (ip->vp) {
1226                 oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1227                 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1228                 nvextendbuf(ip->vp,
1229                             osize, nsize,
1230                             oblksize, nblksize,
1231                             -1, -1, 0);
1232         }
1233         atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1234         LOCKSTOP;
1235 }
1236
1237 static
1238 int
1239 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1240 {
1241         hammer2_inode_t *ip;
1242         hammer2_inode_t *dip;
1243         hammer2_cluster_t *cparent;
1244         hammer2_cluster_t *cluster;
1245         const hammer2_inode_data_t *ripdata;
1246         hammer2_key_t key_next;
1247         hammer2_key_t lhc;
1248         struct namecache *ncp;
1249         const uint8_t *name;
1250         size_t name_len;
1251         int error = 0;
1252         struct vnode *vp;
1253
1254         LOCKSTART;
1255         dip = VTOI(ap->a_dvp);
1256         ncp = ap->a_nch->ncp;
1257         name = ncp->nc_name;
1258         name_len = ncp->nc_nlen;
1259         lhc = hammer2_dirhash(name, name_len);
1260
1261         /*
1262          * Note: In DragonFly the kernel handles '.' and '..'.
1263          */
1264         cparent = hammer2_inode_lock(dip, HAMMER2_RESOLVE_ALWAYS |
1265                                           HAMMER2_RESOLVE_SHARED);
1266
1267         cluster = hammer2_cluster_lookup(cparent, &key_next,
1268                                          lhc, lhc + HAMMER2_DIRHASH_LOMASK,
1269                                          HAMMER2_LOOKUP_SHARED);
1270         while (cluster) {
1271                 if (hammer2_cluster_type(cluster) == HAMMER2_BREF_TYPE_INODE) {
1272                         ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
1273                         if (ripdata->name_len == name_len &&
1274                             bcmp(ripdata->filename, name, name_len) == 0) {
1275                                 break;
1276                         }
1277                 }
1278                 cluster = hammer2_cluster_next(cparent, cluster, &key_next,
1279                                                key_next,
1280                                                lhc + HAMMER2_DIRHASH_LOMASK,
1281                                                HAMMER2_LOOKUP_SHARED);
1282         }
1283         hammer2_inode_unlock(dip, cparent);
1284
1285         /*
1286          * Resolve hardlink entries before acquiring the inode.
1287          */
1288         if (cluster) {
1289                 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
1290                 if (ripdata->type == HAMMER2_OBJTYPE_HARDLINK) {
1291                         hammer2_tid_t inum = ripdata->inum;
1292                         error = hammer2_hardlink_find(dip, NULL, &cluster);
1293                         if (error) {
1294                                 kprintf("hammer2: unable to find hardlink "
1295                                         "0x%016jx\n", inum);
1296                                 LOCKSTOP;
1297
1298                                 return error;
1299                         }
1300                 }
1301         }
1302
1303         /*
1304          * nresolve needs to resolve hardlinks, the original cluster is not
1305          * sufficient.
1306          */
1307         if (cluster) {
1308                 ip = hammer2_inode_get(dip->pmp, dip, cluster);
1309                 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
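                    /*
                     * If the entry is still a hardlink pointer, re-acquire
                     * the cluster through the inode so it reflects the
                     * actual hardlink target.
                     */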
1310                 if (ripdata->type == HAMMER2_OBJTYPE_HARDLINK) {
1311                         kprintf("nresolve: fixup hardlink\n");
1312                         hammer2_inode_ref(ip);
1313                         hammer2_inode_unlock(ip, NULL);
1314                         hammer2_cluster_unlock(cluster);
1315                         hammer2_cluster_drop(cluster);
1316                         cluster = hammer2_inode_lock(ip,
1317                                                      HAMMER2_RESOLVE_ALWAYS);
1318                         ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
1319                         hammer2_inode_drop(ip);
1320                         kprintf("nresolve: fixup to type %02x\n",
1321                                 ripdata->type);
1322                 }
1323         } else {
1324                 ip = NULL;
1325         }
1326
1327 #if 0
1328         /*
1329          * Deconsolidate any hardlink whose nlinks == 1.  Ignore errors.
1330          * If an error occurs chain and ip are left alone.
1331          *
1332          * XXX upgrade shared lock?
1333          */
1334         if (ochain && chain &&
1335             chain->data->ipdata.nlinks == 1 && !dip->pmp->ronly) {
1336                 kprintf("hammer2: need to unconsolidate hardlink for %s\n",
1337                         chain->data->ipdata.filename);
1338                 /* XXX retain shared lock on dip? (currently not held) */
1339                 hammer2_trans_init(&trans, dip->pmp, 0);
1340                 hammer2_hardlink_deconsolidate(&trans, dip, &chain, &ochain);
1341                 hammer2_trans_done(&trans);
1342         }
1343 #endif
1344
1345         /*
1346          * Acquire the related vnode
1347          *
1348          * NOTE: For error processing, only ENOENT resolves the namecache
1349          *       entry to NULL, otherwise we just return the error and
1350          *       leave the namecache unresolved.
1351          *
1352          * NOTE: multiple hammer2_inode structures can be aliased to the
1353          *       same chain element, for example for hardlinks.  This
1354          *       use case does not 'reattach' inode associations that
1355          *       might already exist, but always allocates a new one.
1356          *
1357          * WARNING: inode structure is locked exclusively via inode_get
1358          *          but chain was locked shared.  inode_unlock()
1359          *          will handle it properly.
1360          */
1361         if (cluster) {
1362                 vp = hammer2_igetv(ip, cluster, &error);
1363                 if (error == 0) {
1364                         vn_unlock(vp);
1365                         cache_setvp(ap->a_nch, vp);
1366                 } else if (error == ENOENT) {
1367                         cache_setvp(ap->a_nch, NULL);
1368                 }
1369                 hammer2_inode_unlock(ip, cluster);
1370
1371                 /*
1372                  * The vp should not be released until after we've disposed
1373                  * of our locks, because it might cause vop_inactive() to
1374                  * be called.
1375                  */
1376                 if (vp)
1377                         vrele(vp);
1378         } else {
1379                 error = ENOENT;
1380                 cache_setvp(ap->a_nch, NULL);
1381         }
1382         KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1383                 ("resolve error %d/%p ap %p\n",
1384                  error, ap->a_nch->ncp->nc_vp, ap));
1385         LOCKSTOP;
1386         return error;
1387 }
1388
1389 static
1390 int
1391 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1392 {
1393         hammer2_inode_t *dip;
1394         hammer2_inode_t *ip;
1395         hammer2_cluster_t *cparent;
1396         int error;
1397
1398         LOCKSTART;
1399         dip = VTOI(ap->a_dvp);
1400
1401         if ((ip = dip->pip) == NULL) {
1402                 *ap->a_vpp = NULL;
1403                 LOCKSTOP;
1404                 return ENOENT;
1405         }
1406         cparent = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
1407         *ap->a_vpp = hammer2_igetv(ip, cparent, &error);
1408         hammer2_inode_unlock(ip, cparent);
1409
1410         LOCKSTOP;
1411         return error;
1412 }
1413
1414 static
1415 int
1416 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1417 {
1418         hammer2_inode_t *dip;
1419         hammer2_inode_t *nip;
1420         hammer2_trans_t trans;
1421         hammer2_cluster_t *cluster;
1422         struct namecache *ncp;
1423         const uint8_t *name;
1424         size_t name_len;
1425         int error;
1426
1427         LOCKSTART;
1428         dip = VTOI(ap->a_dvp);
1429         if (dip->pmp->ronly) {
1430                 LOCKSTOP;
1431                 return (EROFS);
1432         }
1433
1434         ncp = ap->a_nch->ncp;
1435         name = ncp->nc_name;
1436         name_len = ncp->nc_nlen;
1437         cluster = NULL;
1438
1439         hammer2_pfs_memory_wait(dip->pmp);
1440         hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
1441         nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1442                                    name, name_len,
1443                                    &cluster, 0, &error);
1444         if (error) {
1445                 KKASSERT(nip == NULL);
1446                 *ap->a_vpp = NULL;
1447         } else {
1448                 *ap->a_vpp = hammer2_igetv(nip, cluster, &error);
1449                 hammer2_inode_unlock(nip, cluster);
1450         }
1451         hammer2_trans_done(&trans);
1452
1453         if (error == 0) {
1454                 cache_setunresolved(ap->a_nch);
1455                 cache_setvp(ap->a_nch, *ap->a_vpp);
1456         }
1457         LOCKSTOP;
1458         return error;
1459 }
1460
1461 /*
1462  * Return the largest contiguous physical disk range for the logical
1463  * request, in bytes.
1464  *
1465  * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
1466  *
1467  * Basically disabled, the logical buffer write thread has to deal with
1468  * buffers one-at-a-time.
1469  */
1470 static
1471 int
1472 hammer2_vop_bmap(struct vop_bmap_args *ap)
1473 {
1474         *ap->a_doffsetp = NOOFFSET;
1475         if (ap->a_runp)
1476                 *ap->a_runp = 0;
1477         if (ap->a_runb)
1478                 *ap->a_runb = 0;
1479         return (EOPNOTSUPP);
1480 }
1481
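/*
 * hammer2_vop_open { vp, mode, cred, fp }
 *
 * Nothing hammer2-specific to do; use the standard open support code.
 */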
1482 static
1483 int
1484 hammer2_vop_open(struct vop_open_args *ap)
1485 {
1486         return vop_stdopen(ap);
1487 }
1488
1489 /*
1490  * hammer2_vop_advlock { vp, id, op, fl, flags }
1491  */
1492 static
1493 int
1494 hammer2_vop_advlock(struct vop_advlock_args *ap)
1495 {
1496         hammer2_inode_t *ip = VTOI(ap->a_vp);
1497         const hammer2_inode_data_t *ripdata;
1498         hammer2_cluster_t *cparent;
1499         hammer2_off_t size;
1500
1501         cparent = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
1502                                          HAMMER2_RESOLVE_SHARED);
1503         ripdata = &hammer2_cluster_rdata(cparent)->ipdata;
1504         size = ripdata->size;
1505         hammer2_inode_unlock(ip, cparent);
1506         return (lf_advlock(ap, &ip->advlock, size));
1507 }
1508
1509
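/*
 * Nothing hammer2-specific to do on close; use the standard close support
 * code.
 */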
1510 static
1511 int
1512 hammer2_vop_close(struct vop_close_args *ap)
1513 {
1514         return vop_stdclose(ap);
1515 }
1516
1517 /*
1518  * hammer2_vop_nlink { nch, dvp, vp, cred }
1519  *
1520  * Create a hardlink from (vp) to {dvp, nch}.
1521  */
1522 static
1523 int
1524 hammer2_vop_nlink(struct vop_nlink_args *ap)
1525 {
1526         hammer2_inode_t *fdip;  /* directory containing ip (ip->pip) */
1527         hammer2_inode_t *tdip;  /* target directory to create link in */
1528         hammer2_inode_t *cdip;  /* common parent directory */
1529         hammer2_inode_t *ip;    /* inode we are hardlinking to */
1530         hammer2_cluster_t *cluster;
1531         hammer2_cluster_t *fdcluster;
1532         hammer2_cluster_t *tdcluster;
1533         hammer2_cluster_t *cdcluster;
1534         hammer2_trans_t trans;
1535         struct namecache *ncp;
1536         const uint8_t *name;
1537         size_t name_len;
1538         int error;
1539
1540         LOCKSTART;
1541         tdip = VTOI(ap->a_dvp);
1542         if (tdip->pmp->ronly) {
1543                 LOCKSTOP;
1544                 return (EROFS);
1545         }
1546
1547         ncp = ap->a_nch->ncp;
1548         name = ncp->nc_name;
1549         name_len = ncp->nc_nlen;
1550
1551         /*
1552          * ip represents the file being hardlinked.  The file could be a
1553          * normal file or a hardlink target if it has already been hardlinked.
1554          * If ip is a hardlinked target then ip->pip represents the location
1555          * of the hardlinked target, NOT the location of the hardlink pointer.
1556          *
1557          * Bump nlinks and potentially also create or move the hardlink
1558          * target in the parent directory common to (ip) and (tdip).  The
1559          * consolidation code can modify ip->cluster and ip->pip.  The
1560          * returned cluster is locked.
1561          */
1562         ip = VTOI(ap->a_vp);
1563         hammer2_pfs_memory_wait(ip->pmp);
1564         hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_NEWINODE);
1565
1566         /*
1567          * The common parent directory must be locked first to avoid deadlocks.
1568          * Also note that fdip and/or tdip might match cdip.
1569          */
1570         fdip = ip->pip;
1571         cdip = hammer2_inode_common_parent(fdip, tdip);
1572         cdcluster = hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
1573         fdcluster = hammer2_inode_lock(fdip, HAMMER2_RESOLVE_ALWAYS);
1574         tdcluster = hammer2_inode_lock(tdip, HAMMER2_RESOLVE_ALWAYS);
1575         cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
1576         error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
1577                                              cdip, cdcluster, 1);
1578         if (error)
1579                 goto done;
1580
1581         /*
1582          * Create a directory entry connected to the specified cluster.
1583          *
1584          * WARNING! chain can get moved by the connect (indirectly due to
1585          *          potential indirect block creation).
1586          */
1587         error = hammer2_inode_connect(&trans, &cluster, 1,
1588                                       tdip, tdcluster,
1589                                       name, name_len, 0);
1590         if (error == 0) {
1591                 cache_setunresolved(ap->a_nch);
1592                 cache_setvp(ap->a_nch, ap->a_vp);
1593         }
1594 done:
1595         hammer2_inode_unlock(ip, cluster);
1596         hammer2_inode_unlock(tdip, tdcluster);
1597         hammer2_inode_unlock(fdip, fdcluster);
1598         hammer2_inode_unlock(cdip, cdcluster);
1599         hammer2_inode_drop(cdip);
1600         hammer2_trans_done(&trans);
1601
1602         LOCKSTOP;
1603         return error;
1604 }
1605
1606 /*
1607  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1608  *
1609  * The operating system has already ensured that the directory entry
1610  * does not exist and done all appropriate namespace locking.
1611  */
1612 static
1613 int
1614 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1615 {
1616         hammer2_inode_t *dip;
1617         hammer2_inode_t *nip;
1618         hammer2_trans_t trans;
1619         hammer2_cluster_t *ncluster;
1620         struct namecache *ncp;
1621         const uint8_t *name;
1622         size_t name_len;
1623         int error;
1624
1625         LOCKSTART;
1626         dip = VTOI(ap->a_dvp);
1627         if (dip->pmp->ronly) {
1628                 LOCKSTOP;
1629                 return (EROFS);
1630         }
1631
1632         ncp = ap->a_nch->ncp;
1633         name = ncp->nc_name;
1634         name_len = ncp->nc_nlen;
1635         hammer2_pfs_memory_wait(dip->pmp);
1636         hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
1637         ncluster = NULL;
1638
1639         nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1640                                    name, name_len,
1641                                    &ncluster, 0, &error);
1642         if (error) {
1643                 KKASSERT(nip == NULL);
1644                 *ap->a_vpp = NULL;
1645         } else {
1646                 *ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
1647                 hammer2_inode_unlock(nip, ncluster);
1648         }
1649         hammer2_trans_done(&trans);
1650
1651         if (error == 0) {
1652                 cache_setunresolved(ap->a_nch);
1653                 cache_setvp(ap->a_nch, *ap->a_vpp);
1654         }
1655         LOCKSTOP;
1656         return error;
1657 }
1658
1659 /*
1660  * Make a device node (typically a fifo)
1661  */
1662 static
1663 int
1664 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1665 {
1666         hammer2_inode_t *dip;
1667         hammer2_inode_t *nip;
1668         hammer2_trans_t trans;
1669         hammer2_cluster_t *ncluster;
1670         struct namecache *ncp;
1671         const uint8_t *name;
1672         size_t name_len;
1673         int error;
1674
1675         LOCKSTART;
1676         dip = VTOI(ap->a_dvp);
1677         if (dip->pmp->ronly) {
1678                 LOCKSTOP;
1679                 return (EROFS);
1680         }
1681
1682         ncp = ap->a_nch->ncp;
1683         name = ncp->nc_name;
1684         name_len = ncp->nc_nlen;
1685         hammer2_pfs_memory_wait(dip->pmp);
1686         hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
1687         ncluster = NULL;
1688
1689         nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1690                                    name, name_len,
1691                                    &ncluster, 0, &error);
1692         if (error) {
1693                 KKASSERT(nip == NULL);
1694                 *ap->a_vpp = NULL;
1695         } else {
1696                 *ap->a_vpp = hammer2_igetv(nip, ncluster, &error);
1697                 hammer2_inode_unlock(nip, ncluster);
1698         }
1699         hammer2_trans_done(&trans);
1700
1701         if (error == 0) {
1702                 cache_setunresolved(ap->a_nch);
1703                 cache_setvp(ap->a_nch, *ap->a_vpp);
1704         }
1705         LOCKSTOP;
1706         return error;
1707 }
1708
1709 /*
1710  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1711  */
1712 static
1713 int
1714 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1715 {
1716         hammer2_inode_t *dip;
1717         hammer2_inode_t *nip;
1718         hammer2_cluster_t *ncparent;
1719         hammer2_trans_t trans;
1720         struct namecache *ncp;
1721         const uint8_t *name;
1722         size_t name_len;
1723         int error;
1724         
1725         dip = VTOI(ap->a_dvp);
1726         if (dip->pmp->ronly)
1727                 return (EROFS);
1728
1729         ncp = ap->a_nch->ncp;
1730         name = ncp->nc_name;
1731         name_len = ncp->nc_nlen;
1732         hammer2_pfs_memory_wait(dip->pmp);
1733         hammer2_trans_init(&trans, dip->pmp, HAMMER2_TRANS_NEWINODE);
1734         ncparent = NULL;
1735
1736         ap->a_vap->va_type = VLNK;      /* enforce type */
1737
1738         nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1739                                    name, name_len,
1740                                    &ncparent, 0, &error);
1741         if (error) {
1742                 KKASSERT(nip == NULL);
1743                 *ap->a_vpp = NULL;
1744                 hammer2_trans_done(&trans);
1745                 return error;
1746         }
1747         *ap->a_vpp = hammer2_igetv(nip, ncparent, &error);
1748
1749         /*
1750          * Build the softlink (~like file data) and finalize the namecache.
1751          */
1752         if (error == 0) {
1753                 size_t bytes;
1754                 struct uio auio;
1755                 struct iovec aiov;
1756                 hammer2_inode_data_t *nipdata;
1757
1758                 nipdata = &hammer2_cluster_wdata(ncparent)->ipdata;
1759                 /* nipdata = &nip->chain->data->ipdata;XXX */
1760                 bytes = strlen(ap->a_target);
1761
1762                 if (bytes <= HAMMER2_EMBEDDED_BYTES) {
1763                         KKASSERT(nipdata->op_flags &
1764                                  HAMMER2_OPFLAG_DIRECTDATA);
1765                         bcopy(ap->a_target, nipdata->u.data, bytes);
1766                         nipdata->size = bytes;
1767                         nip->size = bytes;
1768                         hammer2_cluster_modsync(ncparent);
1769                         hammer2_inode_unlock(nip, ncparent);
1770                         /* nipdata = NULL; not needed */
1771                 } else {
1772                         hammer2_inode_unlock(nip, ncparent);
1773                         /* nipdata = NULL; not needed */
1774                         bzero(&auio, sizeof(auio));
1775                         bzero(&aiov, sizeof(aiov));
1776                         auio.uio_iov = &aiov;
1777                         auio.uio_segflg = UIO_SYSSPACE;
1778                         auio.uio_rw = UIO_WRITE;
1779                         auio.uio_resid = bytes;
1780                         auio.uio_iovcnt = 1;
1781                         auio.uio_td = curthread;
1782                         aiov.iov_base = ap->a_target;
1783                         aiov.iov_len = bytes;
1784                         error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1785                         /* XXX handle error */
1786                         error = 0;
1787                 }
1788         } else {
1789                 hammer2_inode_unlock(nip, ncparent);
1790         }
1791         hammer2_trans_done(&trans);
1792
1793         /*
1794          * Finalize namecache
1795          */
1796         if (error == 0) {
1797                 cache_setunresolved(ap->a_nch);
1798                 cache_setvp(ap->a_nch, *ap->a_vpp);
1799                 /* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
1800         }
1801         return error;
1802 }
1803
1804 /*
1805  * hammer2_vop_nremove { nch, dvp, cred }
1806  */
1807 static
1808 int
1809 hammer2_vop_nremove(struct vop_nremove_args *ap)
1810 {
1811         hammer2_inode_t *dip;
1812         hammer2_trans_t trans;
1813         struct namecache *ncp;
1814         const uint8_t *name;
1815         size_t name_len;
1816         int error;
1817
1818         LOCKSTART;
1819         dip = VTOI(ap->a_dvp);
1820         if (dip->pmp->ronly) {
1821                 LOCKSTOP;
1822                 return(EROFS);
1823         }
1824
1825         ncp = ap->a_nch->ncp;
1826         name = ncp->nc_name;
1827         name_len = ncp->nc_nlen;
1828
1829         hammer2_pfs_memory_wait(dip->pmp);
1830         hammer2_trans_init(&trans, dip->pmp, 0);
1831         error = hammer2_unlink_file(&trans, dip, name, name_len,
1832                                     0, NULL, ap->a_nch, -1);
1833         hammer2_run_unlinkq(&trans, dip->pmp);
1834         hammer2_trans_done(&trans);
1835         if (error == 0)
1836                 cache_unlink(ap->a_nch);
1837         LOCKSTOP;
1838         return (error);
1839 }
1840
1841 /*
1842  * hammer2_vop_nrmdir { nch, dvp, cred }
1843  */
1844 static
1845 int
1846 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1847 {
1848         hammer2_inode_t *dip;
1849         hammer2_trans_t trans;
1850         struct namecache *ncp;
1851         const uint8_t *name;
1852         size_t name_len;
1853         int error;
1854
1855         LOCKSTART;
1856         dip = VTOI(ap->a_dvp);
1857         if (dip->pmp->ronly) {
1858                 LOCKSTOP;
1859                 return(EROFS);
1860         }
1861
1862         ncp = ap->a_nch->ncp;
1863         name = ncp->nc_name;
1864         name_len = ncp->nc_nlen;
1865
1866         hammer2_pfs_memory_wait(dip->pmp);
1867         hammer2_trans_init(&trans, dip->pmp, 0);
1868         hammer2_run_unlinkq(&trans, dip->pmp);
1869         error = hammer2_unlink_file(&trans, dip, name, name_len,
1870                                     1, NULL, ap->a_nch, -1);
1871         hammer2_trans_done(&trans);
1872         if (error == 0)
1873                 cache_unlink(ap->a_nch);
1874         LOCKSTOP;
1875         return (error);
1876 }
1877
1878 /*
1879  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1880  */
1881 static
1882 int
1883 hammer2_vop_nrename(struct vop_nrename_args *ap)
1884 {
1885         struct namecache *fncp;
1886         struct namecache *tncp;
1887         hammer2_inode_t *cdip;
1888         hammer2_inode_t *fdip;
1889         hammer2_inode_t *tdip;
1890         hammer2_inode_t *ip;
1891         hammer2_cluster_t *cluster;
1892         hammer2_cluster_t *fdcluster;
1893         hammer2_cluster_t *tdcluster;
1894         hammer2_cluster_t *cdcluster;
1895         hammer2_trans_t trans;
1896         const uint8_t *fname;
1897         size_t fname_len;
1898         const uint8_t *tname;
1899         size_t tname_len;
1900         int error;
1901         int tnch_error;
1902         int hlink;
1903
1904         if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1905                 return(EXDEV);
1906         if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1907                 return(EXDEV);
1908
1909         fdip = VTOI(ap->a_fdvp);        /* source directory */
1910         tdip = VTOI(ap->a_tdvp);        /* target directory */
1911
1912         if (fdip->pmp->ronly)
1913                 return(EROFS);
1914
1915         LOCKSTART;
1916         fncp = ap->a_fnch->ncp;         /* entry name in source */
1917         fname = fncp->nc_name;
1918         fname_len = fncp->nc_nlen;
1919
1920         tncp = ap->a_tnch->ncp;         /* entry name in target */
1921         tname = tncp->nc_name;
1922         tname_len = tncp->nc_nlen;
1923
1924         hammer2_pfs_memory_wait(tdip->pmp);
1925         hammer2_trans_init(&trans, tdip->pmp, 0);
1926
1927         /*
1928          * ip is the inode being renamed.  If this is a hardlink then
1929          * ip represents the actual file and not the hardlink marker.
1930          */
1931         ip = VTOI(fncp->nc_vp);
1932         cluster = NULL;
1933
1934
1935         /*
1936          * The common parent directory must be locked first to avoid deadlocks.
1937          * Also note that fdip and/or tdip might match cdip.
1938          *
1939          * WARNING! fdip may not match ip->pip.  That is, if the source file
1940          *          is already a hardlink then what we are renaming is the
1941          *          hardlink pointer, not the hardlink itself.  The hardlink
1942          *          directory (ip->pip) will already be at a common parent
1943          *          of fdip.
1944          *
1945          *          Be sure to use ip->pip when finding the common parent
1946          *          against tdip or we might accidentally move the hardlink
1947          *          target into a subdirectory that makes it inaccessible to
1948          *          other pointers.
1949          */
1950         cdip = hammer2_inode_common_parent(ip->pip, tdip);
1951         cdcluster = hammer2_inode_lock(cdip, HAMMER2_RESOLVE_ALWAYS);
1952         fdcluster = hammer2_inode_lock(fdip, HAMMER2_RESOLVE_ALWAYS);
1953         tdcluster = hammer2_inode_lock(tdip, HAMMER2_RESOLVE_ALWAYS);
1954
1955         /*
1956          * Keep a tight grip on the inode so the temporary unlinking from
1957          * the source location prior to linking to the target location
1958          * does not cause the cluster to be destroyed.
1959          *
1960          * NOTE: To avoid deadlocks we cannot lock (ip) while we are
1961          *       unlinking elements from their directories.  Locking
1962          *       the nlinks field does not lock the whole inode.
1963          */
1964         hammer2_inode_ref(ip);
1965
1966         /*
1967          * Remove target if it exists.
1968          */
1969         error = hammer2_unlink_file(&trans, tdip, tname, tname_len,
1970                                     -1, NULL, ap->a_tnch, -1);
1971         tnch_error = error;
1972         if (error && error != ENOENT)
1973                 goto done;
1974
1975         /*
1976          * When renaming a hardlinked file we may have to re-consolidate
1977          * the location of the hardlink target.
1978          *
1979          * If ip represents a regular file the consolidation code essentially
1980          * does nothing other than return the same locked cluster that was
1981          * passed in.
1982          *
1983          * The returned cluster will be locked.
1984          *
1985          * WARNING!  We do not currently have a local copy of ipdata but
1986          *           we do use one later; remember that it must be reloaded
1987          *           on any modification to the inode, including connects.
1988          */
1989         cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
1990         error = hammer2_hardlink_consolidate(&trans, ip, &cluster,
1991                                              cdip, cdcluster, 0);
1992         if (error)
1993                 goto done;
1994
1995         /*
1996          * Disconnect (fdip, fname) from the source directory.  This will
1997          * disconnect (ip) if it represents a direct file.  If (ip) represents
1998          * a hardlink the HARDLINK pointer object will be removed but the
1999          * hardlink will stay intact.
2000          *
2001          * Always pass nch as NULL because we intend to reconnect the inode,
2002          * so we don't want hammer2_unlink_file() to rename it to the hidden
2003          * open-but-unlinked directory.
2004          *
2005          * The target cluster may be marked DELETED but will not be destroyed
2006          * since we retain our hold on ip and cluster.
2007          *
2008          * NOTE: We pass nlinks as 0 (not -1) in order to retain the file's
2009          *       link count.
2010          */
2011         error = hammer2_unlink_file(&trans, fdip, fname, fname_len,
2012                                     -1, &hlink, NULL, 0);
2013         KKASSERT(error != EAGAIN);
2014         if (error)
2015                 goto done;
2016
2017         /*
2018          * Reconnect ip to target directory using cluster.  Chains cannot
2019          * actually be moved, so this will duplicate the cluster in the new
2020          * spot and assign it to the ip, replacing the old cluster.
2021          *
2022          * WARNING: Because recursive locks are allowed and we unlinked the
2023          *          file that we have a cluster-in-hand for just above, the
2024          *          cluster might have been delete-duplicated.  We must
2025          *          refactor the cluster.
2026          *
2027          * WARNING: Chain locks can lock buffer cache buffers; to avoid
2028          *          deadlocks we want to unlock before issuing a cache_*()
2029          *          op (that might have to lock a vnode).
2030          *
2031          * NOTE:    Pass nlinks as 0 because we retained the link count from
2032          *          the unlink, so we do not have to modify it.
2033          */
2034         error = hammer2_inode_connect(&trans, &cluster, hlink,
2035                                       tdip, tdcluster,
2036                                       tname, tname_len, 0);
2037         if (error == 0) {
2038                 KKASSERT(cluster != NULL);
2039                 hammer2_inode_repoint(ip, (hlink ? ip->pip : tdip), cluster);
2040         }
2041 done:
2042         hammer2_inode_unlock(ip, cluster);
2043         hammer2_inode_unlock(tdip, tdcluster);
2044         hammer2_inode_unlock(fdip, fdcluster);
2045         hammer2_inode_unlock(cdip, cdcluster);
2046         hammer2_inode_drop(ip);
2047         hammer2_inode_drop(cdip);
2048         hammer2_run_unlinkq(&trans, fdip->pmp);
2049         hammer2_trans_done(&trans);
2050
2051         /*
2052          * Issue the namecache update after unlocking all the internal
2053          * hammer structures, otherwise we might deadlock.
2054          */
2055         if (tnch_error == 0) {
2056                 cache_unlink(ap->a_tnch);
2057                 cache_setunresolved(ap->a_tnch);
2058         }
2059         if (error == 0)
2060                 cache_rename(ap->a_fnch, ap->a_tnch);
2061
2062         LOCKSTOP;
2063         return (error);
2064 }
2065
2066 /*
2067  * Strategy code (async logical file buffer I/O from system)
2068  *
2069  * WARNING: The strategy code cannot safely use hammer2 transactions
2070  *          as this can deadlock against vfs_sync's vfsync() call
2071  *          if multiple flushes are queued.  All H2 structures must
2072  *          already be present and ready for the DIO.
2073  *
2074  *          Reads can be initiated asynchronously; writes have to be
2075  *          spooled to a separate thread for action to avoid deadlocks.
2076  */
2077 static int hammer2_strategy_read(struct vop_strategy_args *ap);
2078 static int hammer2_strategy_write(struct vop_strategy_args *ap);
2079 static void hammer2_strategy_read_callback(hammer2_iocb_t *iocb);
2080
2081 static
2082 int
2083 hammer2_vop_strategy(struct vop_strategy_args *ap)
2084 {
2085         struct bio *biop;
2086         struct buf *bp;
2087         int error;
2088
2089         biop = ap->a_bio;
2090         bp = biop->bio_buf;
2091
2092         switch(bp->b_cmd) {
2093         case BUF_CMD_READ:
2094                 error = hammer2_strategy_read(ap);
2095                 ++hammer2_iod_file_read;
2096                 break;
2097         case BUF_CMD_WRITE:
2098                 error = hammer2_strategy_write(ap);
2099                 ++hammer2_iod_file_write;
2100                 break;
2101         default:
2102                 bp->b_error = error = EINVAL;
2103                 bp->b_flags |= B_ERROR;
2104                 biodone(biop);
2105                 break;
2106         }
2107         return (error);
2108 }
2109
2110 /*
2111  * Logical buffer I/O, async read.
2112  */
2113 static
2114 int
2115 hammer2_strategy_read(struct vop_strategy_args *ap)
2116 {
2117         struct buf *bp;
2118         struct bio *bio;
2119         struct bio *nbio;
2120         hammer2_inode_t *ip;
2121         hammer2_cluster_t *cparent;
2122         hammer2_cluster_t *cluster;
2123         hammer2_key_t key_dummy;
2124         hammer2_key_t lbase;
2125         uint8_t btype;
2126
2127         bio = ap->a_bio;
2128         bp = bio->bio_buf;
2129         ip = VTOI(ap->a_vp);
2130         nbio = push_bio(bio);
2131
2132         lbase = bio->bio_offset;
2133         KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);
2134
2135         /*
2136          * Lookup the file offset.
2137          */
2138         cparent = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS |
2139                                          HAMMER2_RESOLVE_SHARED);
2140         cluster = hammer2_cluster_lookup(cparent, &key_dummy,
2141                                        lbase, lbase,
2142                                        HAMMER2_LOOKUP_NODATA |
2143                                        HAMMER2_LOOKUP_SHARED);
2144         hammer2_inode_unlock(ip, cparent);
2145
2146         /*
2147          * Data is zero-fill if no cluster could be found
2148          * (XXX or EIO on a cluster failure).
2149          */
2150         if (cluster == NULL) {
2151                 bp->b_resid = 0;
2152                 bp->b_error = 0;
2153                 bzero(bp->b_data, bp->b_bcount);
2154                 biodone(nbio);
2155                 return(0);
2156         }
2157
2158         /*
2159          * Cluster elements must be type INODE or type DATA, but the
2160          * compression mode (or not) for DATA chains can be different for
2161          * each chain.  This will be handled by the callback.
2162          *
2163          * If the cluster already has valid data the callback will be made
2164          * immediately/synchronously.
2165          */
2166         btype = hammer2_cluster_type(cluster);
2167         if (btype != HAMMER2_BREF_TYPE_INODE &&
2168             btype != HAMMER2_BREF_TYPE_DATA) {
2169                 panic("READ PATH: hammer2_strategy_read: unknown bref type");
2170         }
2171         hammer2_cluster_load_async(cluster, hammer2_strategy_read_callback,
2172                                    nbio);
2173         return(0);
2174 }
2175
2176 /*
2177  * Read callback for hammer2_cluster_load_async().  The load function may
2178  * start several actual I/Os but will only make one callback, typically with
2179  * the first valid I/O XXX
2180  */
2181 static
2182 void
2183 hammer2_strategy_read_callback(hammer2_iocb_t *iocb)
2184 {
2185         struct bio *bio = iocb->ptr;    /* original logical buffer */
2186         struct buf *bp = bio->bio_buf;  /* original logical buffer */
2187         hammer2_chain_t *chain;
2188         hammer2_cluster_t *cluster;
2189         hammer2_io_t *dio;
2190         char *data;
2191         int i;
2192
2193         /*
2194          * Extract data and handle iteration on I/O failure.  iocb->lbase
2195          * is the cluster index for iteration.
2196          */
2197         cluster = iocb->cluster;
2198         dio = iocb->dio;        /* can be NULL if iocb not in progress */
2199
2200         /*
2201          * Work to do if INPROG set, else dio is already good or dio is
2202          * NULL (which is the shortcut case if chain->data is already good).
2203          */
2204         if (iocb->flags & HAMMER2_IOCB_INPROG) {
2205                 /*
2206                  * Read attempt not yet made.  Issue an asynchronous read
2207                  * if necessary and return, operation will chain back to
2208                  * this function.
2209                  */
2210                 if ((iocb->flags & HAMMER2_IOCB_READ) == 0) {
2211                         if (dio->bp == NULL ||
2212                             (dio->bp->b_flags & B_CACHE) == 0) {
2213                                 if (dio->bp) {
2214                                         bqrelse(dio->bp);
2215                                         dio->bp = NULL;
2216                                 }
2217                                 iocb->flags |= HAMMER2_IOCB_READ;
2218                                 breadcb(dio->hmp->devvp,
2219                                         dio->pbase, dio->psize,
2220                                         hammer2_io_callback, iocb);
2221                                 return;
2222                         }
2223                 }
2224         }
2225
2226         /*
2227          * If we have a DIO it is now done, check for an error and
2228          * calculate the data.
2229          *
2230          * If there is no DIO it is an optimization by
2231          * hammer2_cluster_load_async(); the data is available in
2232          * chain->data.
2233          */
2234         if (dio) {
2235                 if (dio->bp->b_flags & B_ERROR) {
2236                         i = (int)iocb->lbase + 1;
2237                         if (i >= cluster->nchains) {
2238                                 bp->b_flags |= B_ERROR;
2239                                 bp->b_error = dio->bp->b_error;
2240                                 hammer2_io_complete(iocb);
2241                                 biodone(bio);
2242                                 hammer2_cluster_unlock(cluster);
2243                                 hammer2_cluster_drop(cluster);
2244                         } else {
2245                                 hammer2_io_complete(iocb); /* XXX */
2246                                 chain = cluster->array[i].chain;
2247                                 kprintf("hammer2: IO CHAIN-%d %p\n", i, chain);
2248                                 hammer2_adjreadcounter(&chain->bref,
2249                                                        chain->bytes);
2250                                 iocb->chain = chain;
2251                                 iocb->lbase = (off_t)i;
2252                                 iocb->flags = 0;
2253                                 iocb->error = 0;
2254                                 hammer2_io_getblk(chain->hmp,
2255                                                   chain->bref.data_off,
2256                                                   chain->bytes,
2257                                                   iocb);
2258                         }
2259                         return;
2260                 }
2261                 chain = iocb->chain;
2262                 data = hammer2_io_data(dio, chain->bref.data_off);
2263         } else {
2264                 /*
2265                  * Special synchronous case, data present in chain->data.
2266                  */
2267                 chain = iocb->chain;
2268                 data = (void *)chain->data;
2269         }
2270
2271         if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2272                 /*
2273                  * Data is embedded in the inode (copy from inode).
2274                  */
2275                 bcopy(((hammer2_inode_data_t *)data)->u.data,
2276                       bp->b_data, HAMMER2_EMBEDDED_BYTES);
2277                 bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
2278                       bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
2279                 bp->b_resid = 0;
2280                 bp->b_error = 0;
2281         } else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2282                 /*
2283          * Data is on-media; issue device I/O and copy.
2284                  *
2285                  * XXX direct-IO shortcut could go here XXX.
2286                  */
2287                 switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
2288                 case HAMMER2_COMP_LZ4:
2289                         hammer2_decompress_LZ4_callback(data, chain->bytes,
2290                                                         bio);
2291                         break;
2292                 case HAMMER2_COMP_ZLIB:
2293                         hammer2_decompress_ZLIB_callback(data, chain->bytes,
2294                                                          bio);
2295                         break;
2296                 case HAMMER2_COMP_NONE:
2297                         KKASSERT(chain->bytes <= bp->b_bcount);
2298                         bcopy(data, bp->b_data, chain->bytes);
2299                         if (chain->bytes < bp->b_bcount) {
2300                                 bzero(bp->b_data + chain->bytes,
2301                                       bp->b_bcount - chain->bytes);
2302                         }
2303                         bp->b_flags |= B_NOTMETA;
2304                         bp->b_resid = 0;
2305                         bp->b_error = 0;
2306                         break;
2307                 default:
2308                         panic("hammer2_strategy_read: "
2309                               "unknown compression type");
2310                 }
2311         } else {
2312                 /* bqrelse the dio to help stabilize the call to panic() */
2313                 if (dio)
2314                         hammer2_io_bqrelse(&dio);
2315                 panic("hammer2_strategy_read: unknown bref type");
2316         }
2317
2318         /*
2319          * Once the iocb is cleaned up the DIO (if any) will no longer be
2320          * in-progress but will still have a ref.  Be sure to release
2321          * the ref.
2322          */
2323         hammer2_io_complete(iocb);              /* physical management */
2324         if (dio)                                /* physical dio & buffer */
2325                 hammer2_io_bqrelse(&dio);
2326         hammer2_cluster_unlock(cluster);        /* cluster management */
2327         hammer2_cluster_drop(cluster);          /* cluster management */
2328         biodone(bio);                           /* logical buffer */
2329 }
2330
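/*
 * Logical buffer I/O, async write.
 *
 * The write cannot be processed in this context (see the strategy WARNING
 * above), so the bio is queued to the per-PFS write thread and the caller
 * throttles itself via hammer2_lwinprog_wait().
 */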
2331 static
2332 int
2333 hammer2_strategy_write(struct vop_strategy_args *ap)
2334 {       
2335         hammer2_pfs_t *pmp;
2336         struct bio *bio;
2337         struct buf *bp;
2338         hammer2_inode_t *ip;
2339         
2340         bio = ap->a_bio;
2341         bp = bio->bio_buf;
2342         ip = VTOI(ap->a_vp);
2343         pmp = ip->pmp;
2344         
2345         hammer2_lwinprog_ref(pmp);
2346         hammer2_trans_assert_strategy(pmp);
2347         hammer2_mtx_ex(&pmp->wthread_mtx);
2348         if (TAILQ_EMPTY(&pmp->wthread_bioq.queue)) {
2349                 bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
2350                 hammer2_mtx_unlock(&pmp->wthread_mtx);
2351                 wakeup(&pmp->wthread_bioq);
2352         } else {
2353                 bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
2354                 hammer2_mtx_unlock(&pmp->wthread_mtx);
2355         }
2356         hammer2_lwinprog_wait(pmp);
2357
2358         return(0);
2359 }
2360
2361 /*
2362  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2363  */
2364 static
2365 int
2366 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2367 {
2368         hammer2_inode_t *ip;
2369         int error;
2370
2371         LOCKSTART;
2372         ip = VTOI(ap->a_vp);
2373
2374         error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2375                               ap->a_fflag, ap->a_cred);
2376         LOCKSTOP;
2377         return (error);
2378 }
2379
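/*
 * hammer2_vop_mountctl
 *
 * Only MOUNTCTL_SET_EXPORT (NFS export configuration) is handled here;
 * all other operations fall through to vop_stdmountctl().
 */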
2380 static
2381 int 
2382 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2383 {
2384         struct mount *mp;
2385         hammer2_pfs_t *pmp;
2386         int rc;
2387
2388         LOCKSTART;
2389         switch (ap->a_op) {
2390         case (MOUNTCTL_SET_EXPORT):
2391                 mp = ap->a_head.a_ops->head.vv_mount;
2392                 pmp = MPTOPMP(mp);
2393
2394                 if (ap->a_ctllen != sizeof(struct export_args))
2395                         rc = (EINVAL);
2396                 else
2397                         rc = vfs_export(mp, &pmp->export,
2398                                         (const struct export_args *)ap->a_ctl);
2399                 break;
2400         default:
2401                 rc = vop_stdmountctl(ap);
2402                 break;
2403         }
2404         LOCKSTOP;
2405         return (rc);
2406 }
2407
2408 /*
2409  * This handles unlinked open files after the vnode is finally dereferenced.
2410  * To avoid deadlocks it cannot be called from the normal vnode recycling
2411  * path, so we call it (1) after a unlink, rmdir, or rename, (2) on every
2412  * path, so we call it (1) after an unlink, rmdir, or rename, (2) on every
2413  */
2414 void
2415 hammer2_run_unlinkq(hammer2_trans_t *trans, hammer2_pfs_t *pmp)
2416 {
2417         const hammer2_inode_data_t *ripdata;
2418         hammer2_inode_unlink_t *ipul;
2419         hammer2_inode_t *ip;
2420         hammer2_cluster_t *cluster;
2421         hammer2_cluster_t *cparent;
2422
2423         if (TAILQ_EMPTY(&pmp->unlinkq))
2424                 return;
2425
2426         LOCKSTART;
2427         hammer2_spin_ex(&pmp->list_spin);
2428         while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
2429                 TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
2430                 hammer2_spin_unex(&pmp->list_spin);
2431                 ip = ipul->ip;
2432                 kfree(ipul, pmp->minode);
2433
2434                 cluster = hammer2_inode_lock(ip, HAMMER2_RESOLVE_ALWAYS);
2435                 ripdata = &hammer2_cluster_rdata(cluster)->ipdata;
2436                 if (hammer2_debug & 0x400) {
2437                         kprintf("hammer2: unlink on reclaim: %s refs=%d\n",
2438                                 ripdata->filename, ip->refs);
2439                 }
2440                 KKASSERT(ripdata->nlinks == 0);
2441
2442                 cparent = hammer2_cluster_parent(cluster);
2443                 hammer2_cluster_delete(trans, cparent, cluster,
2444                                        HAMMER2_DELETE_PERMANENT);
2445                 hammer2_cluster_unlock(cparent);
2446                 hammer2_cluster_drop(cparent);
2447                 hammer2_inode_unlock(ip, cluster);      /* inode lock */
2448                 hammer2_inode_drop(ip);                 /* ipul ref */
2449
2450                 hammer2_spin_ex(&pmp->list_spin);
2451         }
2452         hammer2_spin_unex(&pmp->list_spin);
2453         LOCKSTOP;
2454 }
2455
2456
2457 /*
2458  * KQFILTER
2459  */
2460 static void filt_hammer2detach(struct knote *kn);
2461 static int filt_hammer2read(struct knote *kn, long hint);
2462 static int filt_hammer2write(struct knote *kn, long hint);
2463 static int filt_hammer2vnode(struct knote *kn, long hint);
2464
2465 static struct filterops hammer2read_filtops =
2466         { FILTEROP_ISFD | FILTEROP_MPSAFE,
2467           NULL, filt_hammer2detach, filt_hammer2read };
2468 static struct filterops hammer2write_filtops =
2469         { FILTEROP_ISFD | FILTEROP_MPSAFE,
2470           NULL, filt_hammer2detach, filt_hammer2write };
2471 static struct filterops hammer2vnode_filtops =
2472         { FILTEROP_ISFD | FILTEROP_MPSAFE,
2473           NULL, filt_hammer2detach, filt_hammer2vnode };
2474
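/*
 * hammer2_vop_kqfilter { vp, kn }
 *
 * Attach a knote to the vnode, selecting the filterops based on the
 * requested filter type.
 */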
2475 static
2476 int
2477 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2478 {
2479         struct vnode *vp = ap->a_vp;
2480         struct knote *kn = ap->a_kn;
2481
2482         switch (kn->kn_filter) {
2483         case EVFILT_READ:
2484                 kn->kn_fop = &hammer2read_filtops;
2485                 break;
2486         case EVFILT_WRITE:
2487                 kn->kn_fop = &hammer2write_filtops;
2488                 break;
2489         case EVFILT_VNODE:
2490                 kn->kn_fop = &hammer2vnode_filtops;
2491                 break;
2492         default:
2493                 return (EOPNOTSUPP);
2494         }
2495
2496         kn->kn_hook = (caddr_t)vp;
2497
2498         knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2499
2500         return(0);
2501 }
2502
2503 static void
2504 filt_hammer2detach(struct knote *kn)
2505 {
2506         struct vnode *vp = (void *)kn->kn_hook;
2507
2508         knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2509 }
2510
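/*
 * EVFILT_READ: data is considered available between the current file
 * offset and the inode's size.  NOTE_REVOKE forces EOF.
 */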
2511 static int
2512 filt_hammer2read(struct knote *kn, long hint)
2513 {
2514         struct vnode *vp = (void *)kn->kn_hook;
2515         hammer2_inode_t *ip = VTOI(vp);
2516         off_t off;
2517
2518         if (hint == NOTE_REVOKE) {
2519                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2520                 return(1);
2521         }
2522         off = ip->size - kn->kn_fp->f_offset;
2523         kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2524         if (kn->kn_sfflags & NOTE_OLDAPI)
2525                 return(1);
2526         return (kn->kn_data != 0);
2527 }
2528
2529
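/*
 * EVFILT_WRITE: the file is always considered writable.  NOTE_REVOKE
 * forces EOF.
 */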
2530 static int
2531 filt_hammer2write(struct knote *kn, long hint)
2532 {
2533         if (hint == NOTE_REVOKE)
2534                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2535         kn->kn_data = 0;
2536         return (1);
2537 }
2538
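/*
 * EVFILT_VNODE: accumulate the vnode events the caller registered for.
 */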
2539 static int
2540 filt_hammer2vnode(struct knote *kn, long hint)
2541 {
2542         if (kn->kn_sfflags & hint)
2543                 kn->kn_fflags |= hint;
2544         if (hint == NOTE_REVOKE) {
2545                 kn->kn_flags |= (EV_EOF | EV_NODATA);
2546                 return (1);
2547         }
2548         return (kn->kn_fflags != 0);
2549 }
2550
2551 /*
2552  * FIFO VOPS
2553  */
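/*
 * Mark access time.  Aside from the read-only check this is currently
 * a no-op.
 */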
2554 static
2555 int
2556 hammer2_vop_markatime(struct vop_markatime_args *ap)
2557 {
2558         hammer2_inode_t *ip;
2559         struct vnode *vp;
2560
2561         vp = ap->a_vp;
2562         ip = VTOI(vp);
2563
2564         if (ip->pmp->ronly)
2565                 return(EROFS);
2566         return(0);
2567 }
2568
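/*
 * Let the fifo filterops handle the knote first; fall back to the
 * regular hammer2 kqfilter if the fifo code returns an error.
 */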
2569 static
2570 int
2571 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2572 {
2573         int error;
2574
2575         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2576         if (error)
2577                 error = hammer2_vop_kqfilter(ap);
2578         return(error);
2579 }
2580
2581 /*
2582  * VOPS vector
2583  */
2584 struct vop_ops hammer2_vnode_vops = {
2585         .vop_default    = vop_defaultop,
2586         .vop_fsync      = hammer2_vop_fsync,
2587         .vop_getpages   = vop_stdgetpages,
2588         .vop_putpages   = vop_stdputpages,
2589         .vop_access     = hammer2_vop_access,
2590         .vop_advlock    = hammer2_vop_advlock,
2591         .vop_close      = hammer2_vop_close,
2592         .vop_nlink      = hammer2_vop_nlink,
2593         .vop_ncreate    = hammer2_vop_ncreate,
2594         .vop_nsymlink   = hammer2_vop_nsymlink,
2595         .vop_nremove    = hammer2_vop_nremove,
2596         .vop_nrmdir     = hammer2_vop_nrmdir,
2597         .vop_nrename    = hammer2_vop_nrename,
2598         .vop_getattr    = hammer2_vop_getattr,
2599         .vop_setattr    = hammer2_vop_setattr,
2600         .vop_readdir    = hammer2_vop_readdir,
2601         .vop_readlink   = hammer2_vop_readlink,
2604         .vop_read       = hammer2_vop_read,
2605         .vop_write      = hammer2_vop_write,
2606         .vop_open       = hammer2_vop_open,
2607         .vop_inactive   = hammer2_vop_inactive,
2608         .vop_reclaim    = hammer2_vop_reclaim,
2609         .vop_nresolve   = hammer2_vop_nresolve,
2610         .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2611         .vop_nmkdir     = hammer2_vop_nmkdir,
2612         .vop_nmknod     = hammer2_vop_nmknod,
2613         .vop_ioctl      = hammer2_vop_ioctl,
2614         .vop_mountctl   = hammer2_vop_mountctl,
2615         .vop_bmap       = hammer2_vop_bmap,
2616         .vop_strategy   = hammer2_vop_strategy,
2617         .vop_kqfilter   = hammer2_vop_kqfilter
2618 };
2619
2620 struct vop_ops hammer2_spec_vops = {
2621         .vop_default =          vop_defaultop,
2622         .vop_fsync =            hammer2_vop_fsync,
2623         .vop_read =             vop_stdnoread,
2624         .vop_write =            vop_stdnowrite,
2625         .vop_access =           hammer2_vop_access,
2626         .vop_close =            hammer2_vop_close,
2627         .vop_markatime =        hammer2_vop_markatime,
2628         .vop_getattr =          hammer2_vop_getattr,
2629         .vop_inactive =         hammer2_vop_inactive,
2630         .vop_reclaim =          hammer2_vop_reclaim,
2631         .vop_setattr =          hammer2_vop_setattr
2632 };
2633
2634 struct vop_ops hammer2_fifo_vops = {
2635         .vop_default =          fifo_vnoperate,
2636         .vop_fsync =            hammer2_vop_fsync,
2637 #if 0
2638         .vop_read =             hammer2_vop_fiforead,
2639         .vop_write =            hammer2_vop_fifowrite,
2640 #endif
2641         .vop_access =           hammer2_vop_access,
2642 #if 0
2643         .vop_close =            hammer2_vop_fifoclose,
2644 #endif
2645         .vop_markatime =        hammer2_vop_markatime,
2646         .vop_getattr =          hammer2_vop_getattr,
2647         .vop_inactive =         hammer2_vop_inactive,
2648         .vop_reclaim =          hammer2_vop_reclaim,
2649         .vop_setattr =          hammer2_vop_setattr,
2650         .vop_kqfilter =         hammer2_vop_fifokqfilter
2651 };
2652