hammer2 - Move write thread from hmp to pmp
sys/vfs/hammer2/hammer2_vnops.c (dragonfly.git)
/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *       to the inode as its underlying chain may have changed.
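 *
 *       For example, after a modifying call such as
 *       hammer2_chain_modify_ip() any previously cached ipdata pointer
 *       is stale and must be re-fetched from the return value:
 *
 *           ipdata = hammer2_chain_modify_ip(&trans, ip, &chain, 0);
 *
 *       (see hammer2_vop_setattr() below for this pattern in action).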
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

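/*
 * Sentinel pseudo-offset; presumably tags zero-fill blocks (holes with
 * no backing storage) in the logical/physical translation paths.
 */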
#define ZFOFFSET        (-2LL)

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
                                int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
                                int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_decompress_LZ4_callback(struct bio *bio);
static void hammer2_decompress_ZLIB_callback(struct bio *bio);

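/*
 * Shared 64KB scratch buffers for the compressed read/write paths; see
 * the KKASSERT(obp->b_bufsize <= 65536) checks below.
 */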
struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;

/*
 * Callback used in the read path when a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        struct buf *obp;
        struct bio *obio;
        int loff;

        /*
         * If BIO_DONE is already set the device buffer was already
         * fully valid (B_CACHE).  If it is not set then I/O was issued
         * and we have to run I/O completion as the last bio.
         *
         * Nobody is waiting for our device I/O to complete, we are
         * responsible for bqrelse()ing it which means we also have to do
         * the equivalent of biowait() and clear BIO_DONE (which breadcb()
         * may have set).
         *
         * Any preexisting device buffer should match the requested size,
         * but due to bigblock recycling and other factors there is some
         * fragility there, so we assert that the device buffer covers
         * the request.
         */
        if ((bio->bio_flags & BIO_DONE) == 0)
                bpdone(bp, 0);
        bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);

        obio = bio->bio_caller_info1.ptr;
        obp = obio->bio_buf;
        loff = obio->bio_caller_info3.value;

        if (bp->b_flags & B_ERROR) {
                obp->b_flags |= B_ERROR;
                obp->b_error = bp->b_error;
        } else if (obio->bio_caller_info2.index &&
                   obio->bio_caller_info1.uvalue32 !=
                    crc32(bp->b_data, bp->b_bufsize)) {
                obp->b_flags |= B_ERROR;
                obp->b_error = EIO;
        } else {
                KKASSERT(obp->b_bufsize <= 65536);
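
                /*
                 * Media layout as consumed here: a leading int holding
                 * the compressed payload size, followed by the LZ4
                 * payload itself.  Decompress into a scratch buffer,
                 * copy out, and zero-fill any tail the payload does not
                 * cover.
                 */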
                char *buffer;
                char *compressed_buffer;
                int *compressed_size;

                buffer = bp->b_data + loff;
                compressed_size = (int*)buffer;
                compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
                KKASSERT((unsigned int)*compressed_size <= 65536);
                int result = LZ4_decompress_safe(&buffer[sizeof(int)],
                        compressed_buffer, *compressed_size, obp->b_bufsize);
                if (result < 0) {
                        kprintf("READ PATH: Error during decompression. "
                                "bio %016jx/%d loff=%d\n",
                                (intmax_t)bio->bio_offset, bio->bio_buf->b_bufsize, loff);
                        /* make sure it isn't random garbage */
                        bzero(compressed_buffer, obp->b_bufsize);
                        result = 0;     /* treat the block as all-zeros */
                }
                KKASSERT(result <= obp->b_bufsize);
                bcopy(compressed_buffer, obp->b_data, obp->b_bufsize);
                if (result < obp->b_bufsize)
                        bzero(obp->b_data + result, obp->b_bufsize - result);
                objcache_put(cache_buffer_read, compressed_buffer);
                obp->b_resid = 0;
                obp->b_flags |= B_AGE;
        }
        biodone(obio);
        bqrelse(bp);
}

/*
 * Callback used in the read path when a block is compressed with ZLIB.
 * It is almost identical to the LZ4 callback, so in theory the two could
 * be unified, but we didn't want to change the bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        struct buf *obp;
        struct bio *obio;
        int loff;

        /*
         * If BIO_DONE is already set the device buffer was already
         * fully valid (B_CACHE).  If it is not set then I/O was issued
         * and we have to run I/O completion as the last bio.
         *
         * Nobody is waiting for our device I/O to complete, we are
         * responsible for bqrelse()ing it which means we also have to do
         * the equivalent of biowait() and clear BIO_DONE (which breadcb()
         * may have set).
         *
         * Any preexisting device buffer should match the requested size,
         * but due to bigblock recycling and other factors there is some
         * fragility there, so we assert that the device buffer covers
         * the request.
         */
        if ((bio->bio_flags & BIO_DONE) == 0)
                bpdone(bp, 0);
        bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);

        obio = bio->bio_caller_info1.ptr;
        obp = obio->bio_buf;
        loff = obio->bio_caller_info3.value;

        if (bp->b_flags & B_ERROR) {
                obp->b_flags |= B_ERROR;
                obp->b_error = bp->b_error;
        } else if (obio->bio_caller_info2.index &&
                   obio->bio_caller_info1.uvalue32 !=
                    crc32(bp->b_data, bp->b_bufsize)) {
                obp->b_flags |= B_ERROR;
                obp->b_error = EIO;
        } else {
                KKASSERT(obp->b_bufsize <= 65536);

                char *buffer;
                char *compressed_buffer;
                int ret;

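                /*
                 * Each compressed block is an independent zlib stream;
                 * a fresh z_stream is initialized and torn down per
                 * block.
                 */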
                z_stream strm_decompress;

                strm_decompress.avail_in = 0;
                strm_decompress.next_in = Z_NULL;

                ret = inflateInit(&strm_decompress);

                if (ret != Z_OK)
                        kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

                buffer = bp->b_data + loff;
                compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
                strm_decompress.next_in = buffer;

                /* XXX supply proper size, subset of device bp */
                strm_decompress.avail_in = bp->b_bufsize - loff;
                strm_decompress.next_out = compressed_buffer;
                strm_decompress.avail_out = obp->b_bufsize;

                ret = inflate(&strm_decompress, Z_FINISH);
                if (ret != Z_STREAM_END) {
                        kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
                        bzero(compressed_buffer, obp->b_bufsize);
                }
                bcopy(compressed_buffer, obp->b_data, obp->b_bufsize);
                int result = obp->b_bufsize - strm_decompress.avail_out;
                if (result < obp->b_bufsize)
                        bzero(obp->b_data + result, strm_decompress.avail_out);
                objcache_put(cache_buffer_read, compressed_buffer);
                obp->b_resid = 0;
                obp->b_flags |= B_AGE;
                ret = inflateEnd(&strm_decompress);
        }
        biodone(obio);
        bqrelse(bp);
}

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_chain_t *parent;
        struct vnode *vp;

        vp = ap->a_vp;
        ip = VTOI(vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(vp);
                return (0);
        }

        /*
         * Detect updates to the embedded data which may be synchronized by
         * the strategy code.  Simply mark the inode modified so it gets
         * picked up by our normal flush.
         */
        parent = hammer2_inode_lock_ex(ip);
        KKASSERT(parent);

        /*
         * Check for deleted inodes and recycle immediately.
         */
        if (parent->flags & HAMMER2_CHAIN_DELETED) {
                hammer2_inode_unlock_ex(ip, parent);
                vrecycle(vp);
        } else {
                hammer2_inode_unlock_ex(ip, parent);
        }
        return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
        hammer2_chain_t *chain;
        hammer2_inode_t *ip;
#if 0
        hammer2_trans_t trans;
#endif
        struct vnode *vp;

        vp = ap->a_vp;
        ip = VTOI(vp);
        if (ip == NULL)
                return(0);

        /*
         * Set SUBMODIFIED so we can detect and propagate the DESTROYED
         * bit in the flush code.
         *
         * ip->chain might be stale, correct it before checking as older
         * versions of the chain are likely marked deleted even if the
         * file hasn't been.  XXX ip->chain should never be stale on
         * reclaim.
         */
        chain = hammer2_inode_lock_ex(ip);
#if 0
        if (chain->next_parent)
                kprintf("RECLAIM DUPLINKED IP: %p ip->ch=%p ch=%p np=%p\n",
                        ip, ip->chain, chain, chain->next_parent);
#endif

        /*
         * The final close of a deleted file or directory marks it for
         * destruction.  The DESTROYED flag allows the flusher to shortcut
         * any modified blocks still unflushed (that is, just ignore them).
         *
         * HAMMER2 usually does not try to optimize the freemap by returning
         * deleted blocks to it as it does not usually know how many snapshots
         * might be referencing portions of the file/dir.  XXX TODO.
         *
         * XXX TODO - However, any modified file as-of when a snapshot is made
         *            cannot use this optimization as some of the modifications
         *            may wind up being part of the snapshot.
         */
        vp->v_data = NULL;
        ip->vp = NULL;
        if (chain->flags & HAMMER2_CHAIN_DELETED) {
                atomic_set_int(&chain->flags, HAMMER2_CHAIN_DESTROYED |
                                              HAMMER2_CHAIN_SUBMODIFIED);
        }
#if 0
        /*
         * XXX chains will be flushed on sync, no need to do it here.
         */
        if (chain->flags & (HAMMER2_CHAIN_MODIFIED |
                            HAMMER2_CHAIN_DELETED |
                            HAMMER2_CHAIN_SUBMODIFIED)) {
                hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_ISFLUSH);
                hammer2_chain_flush(&trans, chain);
                hammer2_trans_done(&trans);
        }
#endif
        hammer2_inode_unlock_ex(ip, chain);             /* unlock */
        hammer2_inode_drop(ip);                         /* vp ref */
        /* chain no longer referenced */
        /* chain = NULL; not needed */

        /*
         * XXX handle background sync when ip dirty, kernel will no longer
         * notify us regarding this inode because there is no longer a
         * vnode attached to it.
         */

        return (0);
}

static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_trans_t trans;
        hammer2_chain_t *chain;
        struct vnode *vp;

        vp = ap->a_vp;
        ip = VTOI(vp);

        /*
         * WARNING: The vfsync interacts with the buffer cache and might
         *          block; we can't hold the inode lock and we can't
         *          have a flush transaction pending.
         */
        hammer2_trans_init(&trans, ip->pmp, HAMMER2_TRANS_ISFLUSH);
        vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

        /*
         * Calling chain_flush here creates a lot of duplicative
         * COW operations due to non-optimal vnode ordering.
         *
         * Only do it for an actual fsync() syscall.  The other forms
         * which call this function will eventually call chain_flush
         * on the volume root as a catch-all, which is far more optimal.
         */
        chain = hammer2_inode_lock_ex(ip);
        atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MTIME))
                hammer2_inode_fsync(&trans, ip, &chain);

        if (ap->a_flags & VOP_FSYNC_SYSCALL) {
                hammer2_chain_flush(&trans, chain);
        }
        hammer2_inode_unlock_ex(ip, chain);
        hammer2_trans_done(&trans);

        return (0);
}

static
int
hammer2_vop_access(struct vop_access_args *ap)
{
        hammer2_inode_t *ip = VTOI(ap->a_vp);
        hammer2_inode_data_t *ipdata;
        hammer2_chain_t *chain;
        uid_t uid;
        gid_t gid;
        int error;

        chain = hammer2_inode_lock_sh(ip);
        ipdata = &chain->data->ipdata;
        uid = hammer2_to_unix_xid(&ipdata->uid);
        gid = hammer2_to_unix_xid(&ipdata->gid);
        error = vop_helper_access(ap, uid, gid, ipdata->mode, ipdata->uflags);
        hammer2_inode_unlock_sh(ip, chain);

        return (error);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
        hammer2_inode_data_t *ipdata;
        hammer2_chain_t *chain;
        hammer2_pfsmount_t *pmp;
        hammer2_inode_t *ip;
        struct vnode *vp;
        struct vattr *vap;

        vp = ap->a_vp;
        vap = ap->a_vap;

        ip = VTOI(vp);
        pmp = ip->pmp;

        chain = hammer2_inode_lock_sh(ip);
        ipdata = &chain->data->ipdata;

        vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
        vap->va_fileid = ipdata->inum;
        vap->va_mode = ipdata->mode;
        vap->va_nlink = ipdata->nlinks;
        vap->va_uid = hammer2_to_unix_xid(&ipdata->uid);
        vap->va_gid = hammer2_to_unix_xid(&ipdata->gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->size;        /* protected by shared lock */
        vap->va_blocksize = HAMMER2_PBUFSIZE;
        vap->va_flags = ipdata->uflags;
        hammer2_time_to_timespec(ipdata->ctime, &vap->va_ctime);
        hammer2_time_to_timespec(ipdata->mtime, &vap->va_mtime);
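        /* atime is not maintained on-media; report mtime instead */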
        hammer2_time_to_timespec(ipdata->mtime, &vap->va_atime);
        vap->va_gen = 1;
        vap->va_bytes = vap->va_size;   /* XXX */
        vap->va_type = hammer2_get_vtype(chain);
        vap->va_filerev = 0;
        vap->va_uid_uuid = ipdata->uid;
        vap->va_gid_uuid = ipdata->gid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        hammer2_inode_unlock_sh(ip, chain);

        return (0);
}

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
        hammer2_inode_data_t *ipdata;
        hammer2_inode_t *ip;
        hammer2_chain_t *chain;
        hammer2_trans_t trans;
        struct vnode *vp;
        struct vattr *vap;
        int error;
        int kflags = 0;
        int domtime = 0;
        uint64_t ctime;

        vp = ap->a_vp;
        vap = ap->a_vap;
        hammer2_update_time(&ctime);

        ip = VTOI(vp);

        if (ip->pmp->ronly)
                return(EROFS);

        hammer2_chain_memory_wait(ip->pmp);
        hammer2_trans_init(&trans, ip->pmp, 0);
        chain = hammer2_inode_lock_ex(ip);
        ipdata = &chain->data->ipdata;
        error = 0;

        if (vap->va_flags != VNOVAL) {
                u_int32_t flags;

                flags = ipdata->uflags;
                error = vop_helper_setattr_flags(&flags, vap->va_flags,
                                         hammer2_to_unix_xid(&ipdata->uid),
                                         ap->a_cred);
                if (error == 0) {
                        if (ipdata->uflags != flags) {
                                ipdata = hammer2_chain_modify_ip(&trans, ip,
                                                                 &chain, 0);
                                ipdata->uflags = flags;
                                ipdata->ctime = ctime;
                                kflags |= NOTE_ATTRIB;
                        }
                        if (ipdata->uflags & (IMMUTABLE | APPEND)) {
                                error = 0;
                                goto done;
                        }
                }
                goto done;
        }
        if (ipdata->uflags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto done;
        }
        if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
                mode_t cur_mode = ipdata->mode;
                uid_t cur_uid = hammer2_to_unix_xid(&ipdata->uid);
                gid_t cur_gid = hammer2_to_unix_xid(&ipdata->gid);
                uuid_t uuid_uid;
                uuid_t uuid_gid;

                error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
                                         ap->a_cred,
                                         &cur_uid, &cur_gid, &cur_mode);
                if (error == 0) {
                        hammer2_guid_to_uuid(&uuid_uid, cur_uid);
                        hammer2_guid_to_uuid(&uuid_gid, cur_gid);
                        if (bcmp(&uuid_uid, &ipdata->uid, sizeof(uuid_uid)) ||
                            bcmp(&uuid_gid, &ipdata->gid, sizeof(uuid_gid)) ||
                            ipdata->mode != cur_mode
                        ) {
                                ipdata = hammer2_chain_modify_ip(&trans, ip,
                                                                 &chain, 0);
                                ipdata->uid = uuid_uid;
                                ipdata->gid = uuid_gid;
                                ipdata->mode = cur_mode;
                                ipdata->ctime = ctime;
                        }
                        kflags |= NOTE_ATTRIB;
                }
        }

        /*
         * Resize the file
         */
        if (vap->va_size != VNOVAL && ip->size != vap->va_size) {
                switch(vp->v_type) {
                case VREG:
                        if (vap->va_size == ip->size)
                                break;
                        hammer2_inode_unlock_ex(ip, chain);
                        if (vap->va_size < ip->size) {
                                hammer2_truncate_file(ip, vap->va_size);
                        } else {
                                hammer2_extend_file(ip, vap->va_size);
                        }
                        chain = hammer2_inode_lock_ex(ip);
                        ipdata = &chain->data->ipdata; /* RELOAD */
                        domtime = 1;
                        break;
                default:
                        error = EINVAL;
                        goto done;
                }
        }
#if 0
        /* atime not supported */
        if (vap->va_atime.tv_sec != VNOVAL) {
                ipdata = hammer2_chain_modify_ip(&trans, ip, &chain, 0);
                ipdata->atime = hammer2_timespec_to_time(&vap->va_atime);
                kflags |= NOTE_ATTRIB;
        }
#endif
        if (vap->va_mtime.tv_sec != VNOVAL) {
                ipdata = hammer2_chain_modify_ip(&trans, ip, &chain, 0);
                ipdata->mtime = hammer2_timespec_to_time(&vap->va_mtime);
                kflags |= NOTE_ATTRIB;
        }
        if (vap->va_mode != (mode_t)VNOVAL) {
                mode_t cur_mode = ipdata->mode;
                uid_t cur_uid = hammer2_to_unix_xid(&ipdata->uid);
                gid_t cur_gid = hammer2_to_unix_xid(&ipdata->gid);

                error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
                                         cur_uid, cur_gid, &cur_mode);
                if (error == 0 && ipdata->mode != cur_mode) {
                        ipdata = hammer2_chain_modify_ip(&trans, ip, &chain, 0);
                        ipdata->mode = cur_mode;
                        ipdata->ctime = ctime;
                        kflags |= NOTE_ATTRIB;
                }
        }

        /*
         * If a truncation occurred we must call inode_fsync() now in order
         * to trim the related data chains; otherwise a later expansion can
         * cause havoc.
         */
        hammer2_inode_fsync(&trans, ip, &chain);
done:
        hammer2_inode_unlock_ex(ip, chain);
        hammer2_trans_done(&trans);
        return (error);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
        hammer2_inode_data_t *ipdata;
        hammer2_inode_t *ip;
        hammer2_inode_t *xip;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_chain_t *xchain;
        hammer2_tid_t inum;
        hammer2_key_t key_next;
        hammer2_key_t lkey;
        struct uio *uio;
        off_t *cookies;
        off_t saveoff;
        int cookie_index;
        int cache_index = -1;
        int ncookies;
        int error;
        int dtype;
        int r;

        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;

        /*
         * Set up directory entry cookies if requested.
         */
        if (ap->a_ncookies) {
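                /* rough estimate: one entry per 16 uio bytes, capped at 1024 */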
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
        } else {
                ncookies = -1;
                cookies = NULL;
        }
        cookie_index = 0;

        parent = hammer2_inode_lock_sh(ip);
        ipdata = &parent->data->ipdata;

        /*
         * Handle artificial entries.  To ensure that only positive 64 bit
         * quantities are returned to userland we always strip off bit 63.
         * The hash code is designed such that codes 0x0000-0x7FFF are not
         * used, allowing us to use these codes for artificial entries.
         *
         * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
         * allow '..' to cross the mount point into (e.g.) the super-root.
         */
        error = 0;
        chain = (void *)(intptr_t)-1;   /* non-NULL for early goto done case */

        if (saveoff == 0) {
                inum = ipdata->inum & HAMMER2_DIRHASH_USERMSK;
                r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        if (saveoff == 1) {
                /*
                 * Be careful with lockorder when accessing ".."
                 *
                 * (ip is the current dir. xip is the parent dir).
                 */
                inum = ipdata->inum & HAMMER2_DIRHASH_USERMSK;
                while (ip->pip != NULL && ip != ip->pmp->iroot) {
                        xip = ip->pip;
                        hammer2_inode_ref(xip);
                        hammer2_inode_unlock_sh(ip, parent);
                        xchain = hammer2_inode_lock_sh(xip);
                        parent = hammer2_inode_lock_sh(ip);
                        hammer2_inode_drop(xip);
                        if (xip == ip->pip) {
                                inum = xchain->data->ipdata.inum &
                                       HAMMER2_DIRHASH_USERMSK;
                                hammer2_inode_unlock_sh(xip, xchain);
                                break;
                        }
                        hammer2_inode_unlock_sh(xip, xchain);
                }
                r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;

        /*
         * parent is the inode chain, already locked for us.  Don't
         * double lock shared locks as this will screw up upgrades.
         */
        if (error) {
                goto done;
        }
        chain = hammer2_chain_lookup(&parent, &key_next, lkey, lkey,
                                     &cache_index, HAMMER2_LOOKUP_SHARED);
        if (chain == NULL) {
                chain = hammer2_chain_lookup(&parent, &key_next,
                                             lkey, (hammer2_key_t)-1,
                                             &cache_index,
                                             HAMMER2_LOOKUP_SHARED);
        }
        while (chain) {
                if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
                        dtype = hammer2_get_dtype(chain);
                        saveoff = chain->bref.key & HAMMER2_DIRHASH_USERMSK;
                        r = vop_write_dirent(&error, uio,
                                             chain->data->ipdata.inum &
                                              HAMMER2_DIRHASH_USERMSK,
                                             dtype,
                                             chain->data->ipdata.name_len,
                                             chain->data->ipdata.filename);
                        if (r)
                                break;
                        if (cookies)
                                cookies[cookie_index] = saveoff;
                        ++cookie_index;
                } else {
                        /* XXX chain error */
                        kprintf("bad chain type readdir %d\n",
                                chain->bref.type);
                }

                /*
                 * Keys may not be returned in order so once we have a
                 * placemarker (chain) the scan must allow the full range
                 * or some entries will be missed.
                 */
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next, (hammer2_key_t)-1,
                                           &cache_index, HAMMER2_LOOKUP_SHARED);
                if (chain) {
                        saveoff = (chain->bref.key &
                                   HAMMER2_DIRHASH_USERMSK) + 1;
                } else {
                        saveoff = (hammer2_key_t)-1;
                }
                if (cookie_index == ncookies)
                        break;
        }
        if (chain)
                hammer2_chain_unlock(chain);
done:
        hammer2_inode_unlock_sh(ip, parent);
        if (ap->a_eofflag)
                *ap->a_eofflag = (chain == NULL);
        uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
        if (error && cookie_index == 0) {
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
        struct vnode *vp;
        hammer2_inode_t *ip;
        int error;

        vp = ap->a_vp;
        if (vp->v_type != VLNK)
                return (EINVAL);
        ip = VTOI(vp);

        error = hammer2_read_file(ip, ap->a_uio, 0);
        return (error);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
        struct vnode *vp;
        hammer2_inode_t *ip;
        struct uio *uio;
        int error;
        int seqcount;
        int bigread;

        /*
         * Read operations supported on this vnode?
         */
        vp = ap->a_vp;
        if (vp->v_type != VREG)
                return (EINVAL);

        /*
         * Misc
         */
        ip = VTOI(vp);
        uio = ap->a_uio;
        error = 0;

        seqcount = ap->a_ioflag >> 16;
        bigread = (uio->uio_resid > 100 * 1024 * 1024);

        error = hammer2_read_file(ip, uio, seqcount);
        return (error);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_trans_t trans;
        thread_t td;
        struct vnode *vp;
        struct uio *uio;
        int error;
        int seqcount;
        int bigwrite;

        /*
         * Write operations supported on this vnode?
         */
        vp = ap->a_vp;
        if (vp->v_type != VREG)
                return (EINVAL);

        /*
         * Misc
         */
        ip = VTOI(vp);
        uio = ap->a_uio;
        error = 0;
        if (ip->pmp->ronly)
                return (EROFS);

        seqcount = ap->a_ioflag >> 16;
        bigwrite = (uio->uio_resid > 100 * 1024 * 1024);

        /*
         * Check resource limit
         */
        if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
            uio->uio_offset + uio->uio_resid >
             td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
                return (EFBIG);
        }

        /*
         * The transaction interlocks against flush initiations
         * (note: but will run concurrently with the actual flush).
         */
        hammer2_trans_init(&trans, ip->pmp, 0);
        error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
        hammer2_trans_done(&trans);

        return (error);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
        hammer2_off_t size;
        struct buf *bp;
        int error;

        error = 0;

        /*
         * UIO read loop.
         */
        ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
        size = ip->size;
        ccms_thread_unlock(&ip->topo_cst);

        while (uio->uio_resid > 0 && uio->uio_offset < size) {
                hammer2_key_t lbase;
                hammer2_key_t leof;
                int lblksize;
                int loff;
                int n;

                lblksize = hammer2_calc_logical(ip, uio->uio_offset,
                                                &lbase, &leof);

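                /*
                 * cluster_read() may issue read-ahead; the window scales
                 * with the sequential-access heuristic (seqcount) and is
                 * bounded by the logical EOF (leof).
                 */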
                error = cluster_read(ip->vp, leof, lbase, lblksize,
                                     uio->uio_resid, seqcount * BKVASIZE,
                                     &bp);

                if (error)
                        break;
                loff = (int)(uio->uio_offset - lbase);
                n = lblksize - loff;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > size - uio->uio_offset)
                        n = (int)(size - uio->uio_offset);
                bp->b_flags |= B_AGE;
                uiomove((char *)bp->b_data + loff, n, uio);
                bqrelse(bp);
        }
        return (error);
}

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip,
                   struct uio *uio, int ioflag, int seqcount)
{
        hammer2_key_t old_eof;
        hammer2_key_t new_eof;
        struct buf *bp;
        int kflags;
        int error;
        int modified;

        /*
         * Setup if append
         */
        ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
        if (ioflag & IO_APPEND)
                uio->uio_offset = ip->size;
        old_eof = ip->size;
        ccms_thread_unlock(&ip->topo_cst);

        /*
         * Extend the file if necessary.  If the write fails at some point
         * we will truncate it back down to cover as much as we were able
         * to write.
         *
         * Doing this now makes it easier to calculate buffer sizes in
         * the loop.
         */
        kflags = 0;
        error = 0;
        modified = 0;

        if (uio->uio_offset + uio->uio_resid > old_eof) {
                new_eof = uio->uio_offset + uio->uio_resid;
                modified = 1;
                hammer2_extend_file(ip, new_eof);
                kflags |= NOTE_EXTEND;
        } else {
                new_eof = old_eof;
        }

        /*
         * UIO write loop
         */
        while (uio->uio_resid > 0) {
                hammer2_key_t lbase;
                int trivial;
                int endofblk;
                int lblksize;
                int loff;
                int n;

                /*
                 * Don't allow the buffer build to blow out the buffer
                 * cache.
                 */
                if ((ioflag & IO_RECURSE) == 0)
                        bwillwrite(HAMMER2_PBUFSIZE);

                /*
                 * This nominally tells us how much we can cluster and
                 * what the logical buffer size needs to be.  Currently
                 * we don't try to cluster the write and just handle one
                 * block at a time.
                 */
                lblksize = hammer2_calc_logical(ip, uio->uio_offset,
                                                &lbase, NULL);
                loff = (int)(uio->uio_offset - lbase);

                KKASSERT(lblksize <= 65536);

                /*
                 * Calculate bytes to copy this transfer and whether the
                 * copy completely covers the buffer or not.
                 */
                trivial = 0;
                n = lblksize - loff;
                if (n > uio->uio_resid) {
                        n = uio->uio_resid;
                        if (loff == lbase && uio->uio_offset + n == new_eof)
                                trivial = 1;
                        endofblk = 0;
                } else {
                        if (loff == 0)
                                trivial = 1;
                        endofblk = 1;
                }
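                /*
                 * endofblk means the copy reaches the end of the buffer;
                 * it is used below to push IO_DIRECT writes out early via
                 * bawrite().
                 */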

                /*
                 * Get the buffer
                 */
                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ip->vp, lbase, lblksize, &bp);
                        }
                } else if (trivial) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         *
                         * (The strategy code will detect zero-fill physical
                         * blocks for this case).
                         */
                        error = bread(ip->vp, lbase, lblksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }

                if (error) {
                        brelse(bp);
                        break;
                }

                /*
                 * Ok, copy the data in
                 */
                error = uiomove(bp->b_data + loff, n, uio);
                kflags |= NOTE_WRITE;
                modified = 1;
                if (error) {
                        brelse(bp);
                        break;
                }

                /*
                 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
                 *          with IO_SYNC or IO_ASYNC set.  These writes
                 *          must be handled as the pageout daemon expects.
                 */
                if (ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if ((ioflag & IO_DIRECT) && endofblk) {
                        bawrite(bp);
                } else if (ioflag & IO_ASYNC) {
                        bawrite(bp);
                } else {
                        bdwrite(bp);
                }
        }

        /*
         * Cleanup.  If we extended the file EOF but failed to write it all
         * the way through, the entire write is a failure and we have to
         * back up.
         */
        if (error && new_eof != old_eof) {
                hammer2_truncate_file(ip, old_eof);
        } else if (modified) {
                ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
                hammer2_update_time(&ip->mtime);
                atomic_set_int(&ip->flags, HAMMER2_INODE_MTIME);
                ccms_thread_unlock(&ip->topo_cst);
        }
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
        hammer2_knote(ip->vp, kflags);

        return error;
}

/*
 * Truncate the size of a file.  The inode must not be locked.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
        hammer2_key_t lbase;
        int nblksize;

        if (ip->vp) {
                nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
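                /*
                 * Trim the buffer cache to the new EOF; the boff argument
                 * is the byte offset of the new EOF within its logical
                 * block.
                 */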
                nvtruncbuf(ip->vp, nsize,
                           nblksize, (int)nsize & (nblksize - 1),
                           0);
        }
        ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
        ip->size = nsize;
        atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
        ccms_thread_unlock(&ip->topo_cst);
}

/*
 * Extend the size of a file.  The inode must not be locked.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
        hammer2_key_t lbase;
        hammer2_key_t osize;
        int oblksize;
        int nblksize;

        ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
        osize = ip->size;
        ip->size = nsize;
        ccms_thread_unlock(&ip->topo_cst);

        if (ip->vp) {
                oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
                nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
                nvextendbuf(ip->vp,
                            osize, nsize,
                            oblksize, nblksize,
                            -1, -1, 0);
        }
        atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
}

static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_inode_t *dip;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_chain_t *ochain;
        hammer2_trans_t trans;
        hammer2_key_t key_next;
        hammer2_key_t lhc;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error = 0;
        int cache_index = -1;
        struct vnode *vp;

        dip = VTOI(ap->a_dvp);
        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;
        lhc = hammer2_dirhash(name, name_len);

        /*
         * Note: In DragonFly the kernel handles '.' and '..'.
         */
        parent = hammer2_inode_lock_sh(dip);
        chain = hammer2_chain_lookup(&parent, &key_next,
                                     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
                                     &cache_index, HAMMER2_LOOKUP_SHARED);
        while (chain) {
                if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
                    name_len == chain->data->ipdata.name_len &&
                    bcmp(name, chain->data->ipdata.filename, name_len) == 0) {
                        break;
                }
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next,
                                           lhc + HAMMER2_DIRHASH_LOMASK,
                                           &cache_index, HAMMER2_LOOKUP_SHARED);
        }
        hammer2_inode_unlock_sh(dip, parent);

        /*
         * If the inode represents a forwarding entry for a hardlink we have
         * to locate the actual inode.  The original ip is saved for possible
         * deconsolidation.  (ip) will only be set to non-NULL when we have
         * to locate the real file via a hardlink.  ip will be referenced but
         * not locked in that situation.  chain is passed in locked and
         * returned locked.
         *
         * XXX what kind of chain lock?
         */
        ochain = NULL;
        if (chain && chain->data->ipdata.type == HAMMER2_OBJTYPE_HARDLINK) {
                error = hammer2_hardlink_find(dip, &chain, &ochain);
                if (error) {
                        kprintf("hammer2: unable to find hardlink\n");
                        if (chain) {
                                hammer2_chain_unlock(chain);
                                chain = NULL;
                        }
                        goto failed;
                }
        }

        /*
         * Deconsolidate any hardlink whose nlinks == 1.  Ignore errors.
         * If an error occurs chain and ip are left alone.
         *
         * XXX upgrade shared lock?
         */
        if (ochain && chain &&
            chain->data->ipdata.nlinks == 1 && !dip->pmp->ronly) {
                kprintf("hammer2: need to unconsolidate hardlink for %s\n",
                        chain->data->ipdata.filename);
                /* XXX retain shared lock on dip? (currently not held) */
                hammer2_trans_init(&trans, dip->pmp, 0);
                hammer2_hardlink_deconsolidate(&trans, dip, &chain, &ochain);
                hammer2_trans_done(&trans);
        }

        /*
         * Acquire the related vnode
         *
         * NOTE: For error processing, only ENOENT resolves the namecache
         *       entry to NULL, otherwise we just return the error and
         *       leave the namecache unresolved.
         *
         * NOTE: multiple hammer2_inode structures can be aliased to the
         *       same chain element, for example for hardlinks.  This
         *       use case does not 'reattach' inode associations that
         *       might already exist, but always allocates a new one.
         *
         * WARNING: inode structure is locked exclusively via inode_get
         *          but chain was locked shared.  inode_unlock_ex()
         *          will handle it properly.
         */
        if (chain) {
                ip = hammer2_inode_get(dip->pmp, dip, chain);
                vp = hammer2_igetv(ip, &error);
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                } else if (error == ENOENT) {
                        cache_setvp(ap->a_nch, NULL);
                }
                hammer2_inode_unlock_ex(ip, chain);

                /*
                 * The vp should not be released until after we've disposed
                 * of our locks, because it might cause vop_inactive() to
                 * be called.
                 */
                if (vp)
                        vrele(vp);
        } else {
                error = ENOENT;
                cache_setvp(ap->a_nch, NULL);
        }
failed:
        KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
                ("resolve error %d/%p chain %p ap %p\n",
                 error, ap->a_nch->ncp->nc_vp, chain, ap));
        if (ochain)
                hammer2_chain_drop(ochain);
        return error;
}

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *ip;
        hammer2_chain_t *parent;
        int error;

        dip = VTOI(ap->a_dvp);

        if ((ip = dip->pip) == NULL) {
                *ap->a_vpp = NULL;
                return ENOENT;
        }
        parent = hammer2_inode_lock_ex(ip);
        *ap->a_vpp = hammer2_igetv(ip, &error);
        hammer2_inode_unlock_ex(ip, parent);

        return error;
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        hammer2_trans_t trans;
        hammer2_chain_t *chain;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly)
                return (EROFS);

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;

        hammer2_chain_memory_wait(dip->pmp);
        hammer2_trans_init(&trans, dip->pmp, 0);
        nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
                                   name, name_len, &chain, &error);
        if (error) {
                KKASSERT(nip == NULL);
                *ap->a_vpp = NULL;
        } else {
                *ap->a_vpp = hammer2_igetv(nip, &error);
                hammer2_inode_unlock_ex(nip, chain);
        }
        hammer2_trans_done(&trans);

        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
        }
        return error;
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled; the logical buffer write thread has to deal with
 * buffers one-at-a-time.
 */
static
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
        *ap->a_doffsetp = NOOFFSET;
        if (ap->a_runp)
                *ap->a_runp = 0;
        if (ap->a_runb)
                *ap->a_runb = 0;
        return (EOPNOTSUPP);
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
        return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
        hammer2_inode_t *ip = VTOI(ap->a_vp);
        hammer2_chain_t *parent;
        hammer2_off_t size;

        parent = hammer2_inode_lock_sh(ip);
        size = parent->data->ipdata.size;
        hammer2_inode_unlock_sh(ip, parent);
        return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
        return vop_stdclose(ap);
}

1448 /*
1449  * hammer2_vop_nlink { nch, dvp, vp, cred }
1450  *
1451  * Create a hardlink from (vp) to {dvp, nch}.
1452  */
1453 static
1454 int
1455 hammer2_vop_nlink(struct vop_nlink_args *ap)
1456 {
1457         hammer2_inode_t *dip;   /* target directory to create link in */
1458         hammer2_inode_t *ip;    /* inode we are hardlinking to */
1459         hammer2_chain_t *chain;
1460         hammer2_trans_t trans;
1461         struct namecache *ncp;
1462         const uint8_t *name;
1463         size_t name_len;
1464         int error;
1465
1466         dip = VTOI(ap->a_dvp);
1467         if (dip->pmp->ronly)
1468                 return (EROFS);
1469
1470         ncp = ap->a_nch->ncp;
1471         name = ncp->nc_name;
1472         name_len = ncp->nc_nlen;
1473
1474         /*
1475          * ip represents the file being hardlinked.  The file could be a
1476          * normal file or a hardlink target if it has already been hardlinked.
1477          * If ip is a hardlinked target then ip->pip represents the location
1478          * of the hardlinked target, NOT the location of the hardlink pointer.
1479          *
1480          * Bump nlinks and potentially also create or move the hardlink
1481          * target in the parent directory common to (ip) and (dip).  The
1482          * consolidation code can modify ip->chain and ip->pip.  The
1483          * returned chain is locked.
1484          */
1485         ip = VTOI(ap->a_vp);
1486         hammer2_chain_memory_wait(ip->pmp);
1487         hammer2_trans_init(&trans, ip->pmp, 0);
1488
1489         chain = hammer2_inode_lock_ex(ip);
1490         error = hammer2_hardlink_consolidate(&trans, ip, &chain, dip, 1);
1491         if (error)
1492                 goto done;
1493
1494         /*
1495          * Create a directory entry connected to the specified chain.
1496          * to the common parent directory containing the actual hardlink,
1497          * which may be different from dip, the directory in which we are
1498          * creating our new hardlink entry.
1499          *
1500          * ip->chain is always the hardlink itself, never a pointer to it.
1501          * any of the pointers to the actual hardlink).
1502          */
1503         error = hammer2_inode_connect(&trans, 1,
1504                                       dip, &chain,
1505                                       name, name_len);
1506         if (error == 0) {
1507                 cache_setunresolved(ap->a_nch);
1508                 cache_setvp(ap->a_nch, ap->a_vp);
1509         }
1510 done:
1511         hammer2_inode_unlock_ex(ip, chain);
1512         hammer2_trans_done(&trans);
1513
1514         return error;
1515 }
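
/*
 * Illustrative userland sketch (not part of the driver): link(2)
 * between two paths on the same HAMMER2 PFS drives the path above,
 * bumping nlinks and possibly moving the hardlink target to a parent
 * directory common to both names.
 */
#if 0
#include <unistd.h>

static int
example_make_hardlink(void)
{
        return (link("/mnt/h2/a/file", "/mnt/h2/b/file-link"));
}
#endif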
1516
1517 /*
1518  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1519  *
1520  * The operating system has already ensured that the directory entry
1521  * does not exist and done all appropriate namespace locking.
1522  */
1523 static
1524 int
1525 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1526 {
1527         hammer2_inode_t *dip;
1528         hammer2_inode_t *nip;
1529         hammer2_trans_t trans;
1530         hammer2_chain_t *nchain;
1531         struct namecache *ncp;
1532         const uint8_t *name;
1533         size_t name_len;
1534         int error;
1535
1536         dip = VTOI(ap->a_dvp);
1537         if (dip->pmp->ronly)
1538                 return (EROFS);
1539
1540         ncp = ap->a_nch->ncp;
1541         name = ncp->nc_name;
1542         name_len = ncp->nc_nlen;
1543         hammer2_chain_memory_wait(dip->pmp);
1544         hammer2_trans_init(&trans, dip->pmp, 0);
1545
1546         nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1547                                    name, name_len, &nchain, &error);
1548         if (error) {
1549                 KKASSERT(nip == NULL);
1550                 *ap->a_vpp = NULL;
1551         } else {
1552                 *ap->a_vpp = hammer2_igetv(nip, &error);
1553                 hammer2_inode_unlock_ex(nip, nchain);
1554         }
1555         hammer2_trans_done(&trans);
1556
1557         if (error == 0) {
1558                 cache_setunresolved(ap->a_nch);
1559                 cache_setvp(ap->a_nch, *ap->a_vpp);
1560         }
1561         return error;
1562 }
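
/*
 * Illustrative userland sketch (not part of the driver): an O_CREAT
 * open of a nonexistent name reaches hammer2_vop_ncreate() above with
 * the namespace already locked and the entry known not to exist.
 */
#if 0
#include <fcntl.h>

static int
example_create_file(void)
{
        return (open("/mnt/h2/newfile", O_CREAT | O_RDWR, 0644));
}
#endif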
1563
1564 /*
1565  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1566  */
1567 static
1568 int
1569 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1570 {
1571         hammer2_inode_t *dip;
1572         hammer2_inode_t *nip;
1573         hammer2_chain_t *nparent;
1574         hammer2_trans_t trans;
1575         struct namecache *ncp;
1576         const uint8_t *name;
1577         size_t name_len;
1578         int error;
1579         
1580         dip = VTOI(ap->a_dvp);
1581         if (dip->pmp->ronly)
1582                 return (EROFS);
1583
1584         ncp = ap->a_nch->ncp;
1585         name = ncp->nc_name;
1586         name_len = ncp->nc_nlen;
1587         hammer2_chain_memory_wait(dip->pmp);
1588         hammer2_trans_init(&trans, dip->pmp, 0);
1589
1590         ap->a_vap->va_type = VLNK;      /* enforce type */
1591
1592         nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1593                                    name, name_len, &nparent, &error);
1594         if (error) {
1595                 KKASSERT(nip == NULL);
1596                 *ap->a_vpp = NULL;
1597                 hammer2_trans_done(&trans);
1598                 return error;
1599         }
1600         *ap->a_vpp = hammer2_igetv(nip, &error);
1601
1602         /*
1603          * Build the softlink (stored like file data) and finalize the namecache.
1604          */
1605         if (error == 0) {
1606                 size_t bytes;
1607                 struct uio auio;
1608                 struct iovec aiov;
1609                 hammer2_inode_data_t *nipdata;
1610
1611                 nipdata = &nip->chain->data->ipdata;
1612                 bytes = strlen(ap->a_target);
1613
1614                 if (bytes <= HAMMER2_EMBEDDED_BYTES) {
1615                         KKASSERT(nipdata->op_flags &
1616                                  HAMMER2_OPFLAG_DIRECTDATA);
1617                         bcopy(ap->a_target, nipdata->u.data, bytes);
1618                         nipdata->size = bytes;
1619                         nip->size = bytes;
1620                         hammer2_inode_unlock_ex(nip, nparent);
1621                 } else {
1622                         hammer2_inode_unlock_ex(nip, nparent);
1623                         bzero(&auio, sizeof(auio));
1624                         bzero(&aiov, sizeof(aiov));
1625                         auio.uio_iov = &aiov;
1626                         auio.uio_segflg = UIO_SYSSPACE;
1627                         auio.uio_rw = UIO_WRITE;
1628                         auio.uio_resid = bytes;
1629                         auio.uio_iovcnt = 1;
1630                         auio.uio_td = curthread;
1631                         aiov.iov_base = ap->a_target;
1632                         aiov.iov_len = bytes;
1633                         error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1634                         nipdata = &nip->chain->data->ipdata; /* RELOAD */
1635                         /* XXX handle error */
1636                         error = 0;
1637                 }
1638         } else {
1639                 hammer2_inode_unlock_ex(nip, nparent);
1640         }
1641         hammer2_trans_done(&trans);
1642
1643         /*
1644          * Finalize namecache
1645          */
1646         if (error == 0) {
1647                 cache_setunresolved(ap->a_nch);
1648                 cache_setvp(ap->a_nch, *ap->a_vpp);
1649                 /* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
1650         }
1651         return error;
1652 }
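
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * the branch above stores a short symlink target directly in the
 * inode's embedded data area and routes anything longer through the
 * regular file write path.
 */
#if 0
static int
example_symlink_is_embedded(size_t target_len)
{
        /* nonzero: target fits in ipdata->u.data; zero: data blocks */
        return (target_len <= HAMMER2_EMBEDDED_BYTES);
}
#endif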
1653
1654 /*
1655  * hammer2_vop_nremove { nch, dvp, cred }
1656  */
1657 static
1658 int
1659 hammer2_vop_nremove(struct vop_nremove_args *ap)
1660 {
1661         hammer2_inode_t *dip;
1662         hammer2_trans_t trans;
1663         struct namecache *ncp;
1664         const uint8_t *name;
1665         size_t name_len;
1666         int error;
1667
1668         dip = VTOI(ap->a_dvp);
1669         if (dip->pmp->ronly)
1670                 return(EROFS);
1671
1672         ncp = ap->a_nch->ncp;
1673         name = ncp->nc_name;
1674         name_len = ncp->nc_nlen;
1675         hammer2_chain_memory_wait(dip->pmp);
1676         hammer2_trans_init(&trans, dip->pmp, 0);
1677         error = hammer2_unlink_file(&trans, dip, name, name_len, 0, NULL);
1678         hammer2_trans_done(&trans);
1679         if (error == 0) {
1680                 cache_unlink(ap->a_nch);
1681         }
1682         return (error);
1683 }
1684
1685 /*
1686  * hammer2_vop_nrmdir { nch, dvp, cred }
1687  */
1688 static
1689 int
1690 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1691 {
1692         hammer2_inode_t *dip;
1693         hammer2_trans_t trans;
1694         struct namecache *ncp;
1695         const uint8_t *name;
1696         size_t name_len;
1697         int error;
1698
1699         dip = VTOI(ap->a_dvp);
1700         if (dip->pmp->ronly)
1701                 return(EROFS);
1702
1703         ncp = ap->a_nch->ncp;
1704         name = ncp->nc_name;
1705         name_len = ncp->nc_nlen;
1706
1707         hammer2_chain_memory_wait(dip->pmp);
1708         hammer2_trans_init(&trans, dip->pmp, 0);
1709         error = hammer2_unlink_file(&trans, dip, name, name_len, 1, NULL);
1710         hammer2_trans_done(&trans);
1711         if (error == 0) {
1712                 cache_unlink(ap->a_nch);
1713         }
1714         return (error);
1715 }
1716
1717 /*
1718  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1719  */
1720 static
1721 int
1722 hammer2_vop_nrename(struct vop_nrename_args *ap)
1723 {
1724         struct namecache *fncp;
1725         struct namecache *tncp;
1726         hammer2_inode_t *fdip;
1727         hammer2_inode_t *tdip;
1728         hammer2_inode_t *ip;
1729         hammer2_chain_t *chain;
1730         hammer2_trans_t trans;
1731         const uint8_t *fname;
1732         size_t fname_len;
1733         const uint8_t *tname;
1734         size_t tname_len;
1735         int error;
1736         int hlink;
1737
1738         if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1739                 return(EXDEV);
1740         if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1741                 return(EXDEV);
1742
1743         fdip = VTOI(ap->a_fdvp);        /* source directory */
1744         tdip = VTOI(ap->a_tdvp);        /* target directory */
1745
1746         if (fdip->pmp->ronly)
1747                 return(EROFS);
1748
1749         fncp = ap->a_fnch->ncp;         /* entry name in source */
1750         fname = fncp->nc_name;
1751         fname_len = fncp->nc_nlen;
1752
1753         tncp = ap->a_tnch->ncp;         /* entry name in target */
1754         tname = tncp->nc_name;
1755         tname_len = tncp->nc_nlen;
1756
1757         hammer2_chain_memory_wait(tdip->pmp);
1758         hammer2_trans_init(&trans, tdip->pmp, 0);
1759
1760         /*
1761          * ip is the inode being renamed.  If this is a hardlink then
1762          * ip represents the actual file and not the hardlink marker.
1763          */
1764         ip = VTOI(fncp->nc_vp);
1765         chain = NULL;
1766
1767         /*
1768          * Keep a tight grip on the inode so the temporary unlinking from
1769          * the source location prior to linking to the target location
1770          * does not cause the chain to be destroyed.
1771          *
1772          * NOTE: To avoid deadlocks we cannot lock (ip) while we are
1773          *       unlinking elements from their directories.  Locking
1774          *       the nlinks field does not lock the whole inode.
1775          */
1776         hammer2_inode_ref(ip);
1777
1778         /*
1779          * Remove target if it exists
1780          */
1781         error = hammer2_unlink_file(&trans, tdip, tname, tname_len, -1, NULL);
1782         if (error && error != ENOENT)
1783                 goto done;
1784         cache_setunresolved(ap->a_tnch);
1785
1786         /*
1787          * When renaming a hardlinked file we may have to re-consolidate
1788          * the location of the hardlink target.  Since the element is simply
1789          * being moved, nlinks is not modified in this case.
1790          *
1791          * If ip represents a regular file the consolidation code essentially
1792          * does nothing other than return the same locked chain that was
1793          * passed in.
1794          *
1795          * The returned chain will be locked.
1796          *
1797          * WARNING!  We do not currently have a local copy of ipdata, but
1798          *           if we use one later, remember that it must be reloaded
1799          *           on any modification to the inode, including connects.
1800          */
1801         chain = hammer2_inode_lock_ex(ip);
1802         error = hammer2_hardlink_consolidate(&trans, ip, &chain, tdip, 0);
1803         if (error)
1804                 goto done;
1805
1806         /*
1807          * Disconnect (fdip, fname) from the source directory.  This will
1808          * disconnect (ip) if it represents a direct file.  If (ip) represents
1809          * a hardlink the HARDLINK pointer object will be removed but the
1810          * hardlink will stay intact.
1811          *
1812          * The target chain may be marked DELETED but will not be destroyed
1813          * since we retain our hold on ip and chain.
1814          */
1815         error = hammer2_unlink_file(&trans, fdip, fname, fname_len, -1, &hlink);
1816         KKASSERT(error != EAGAIN);
1817         if (error)
1818                 goto done;
1819
1820         /*
1821          * Reconnect ip to target directory using chain.  Chains cannot
1822          * actually be moved, so this will duplicate the chain in the new
1823          * spot and assign it to the ip, replacing the old chain.
1824          *
1825          * WARNING: chain locks can lock buffer cache buffers, to avoid
1826          *          deadlocks we want to unlock before issuing a cache_*()
1827          *          op (that might have to lock a vnode).
1828          */
1829         error = hammer2_inode_connect(&trans, hlink,
1830                                       tdip, &chain,
1831                                       tname, tname_len);
1832         if (error == 0) {
1833                 KKASSERT(chain != NULL);
1834                 hammer2_inode_repoint(ip, (hlink ? ip->pip : tdip), chain);
1835                 cache_rename(ap->a_fnch, ap->a_tnch);
1836         }
1837 done:
1838         hammer2_inode_unlock_ex(ip, chain);
1839         hammer2_inode_drop(ip);
1840         hammer2_trans_done(&trans);
1841
1842         return (error);
1843 }
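
/*
 * Recap of the rename sequence above, all within one transaction:
 * (1) remove any existing entry at the target name, (2) consolidate
 * the hardlink target if ip is hardlinked, (3) unlink the source
 * name, which may mark the chain DELETED but cannot destroy it while
 * we hold ip and chain, and (4) reconnect the chain under the target
 * name, repointing the inode at the duplicated chain.
 */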
1844
1845 /*
1846  * Strategy code
1847  *
1848  * WARNING: The strategy code cannot safely use hammer2 transactions
1849  *          as this can deadlock against vfs_sync's vfsync() call
1850  *          if multiple flushes are queued.
1851  */
1852 static int hammer2_strategy_read(struct vop_strategy_args *ap);
1853 static int hammer2_strategy_write(struct vop_strategy_args *ap);
1854 static void hammer2_strategy_read_callback(hammer2_chain_t *chain,
1855                                 struct buf *dbp, char *data, void *arg);
1856
1857 static
1858 int
1859 hammer2_vop_strategy(struct vop_strategy_args *ap)
1860 {
1861         struct bio *biop;
1862         struct buf *bp;
1863         int error;
1864
1865         biop = ap->a_bio;
1866         bp = biop->bio_buf;
1867
1868         switch (bp->b_cmd) {
1869         case BUF_CMD_READ:
1870                 error = hammer2_strategy_read(ap);
1871                 ++hammer2_iod_file_read;
1872                 break;
1873         case BUF_CMD_WRITE:
1874                 error = hammer2_strategy_write(ap);
1875                 ++hammer2_iod_file_write;
1876                 break;
1877         default:
1878                 bp->b_error = error = EINVAL;
1879                 bp->b_flags |= B_ERROR;
1880                 biodone(biop);
1881                 break;
1882         }
1883
1884         return (error);
1885 }
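
/*
 * Note on the dispatch above: reads are resolved inline (completing
 * asynchronously via callbacks when device I/O is needed), while
 * writes are queued to the per-PFS write thread; see
 * hammer2_strategy_write() below.
 */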
1886
1887 static
1888 int
1889 hammer2_strategy_read(struct vop_strategy_args *ap)
1890 {
1891         struct buf *bp;
1892         struct bio *bio;
1893         struct bio *nbio;
1894         hammer2_inode_t *ip;
1895         hammer2_chain_t *parent;
1896         hammer2_chain_t *chain;
1897         hammer2_key_t key_dummy;
1898         hammer2_key_t lbase;
1899         int loff;
1900         int cache_index = -1;
1901
1902         bio = ap->a_bio;
1903         bp = bio->bio_buf;
1904         ip = VTOI(ap->a_vp);
1905         nbio = push_bio(bio);
1906
1907         lbase = bio->bio_offset;
1908         chain = NULL;
1909         KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);
1910
1911         parent = hammer2_inode_lock_sh(ip);
1912         chain = hammer2_chain_lookup(&parent, &key_dummy,
1913                                      lbase, lbase,
1914                                      &cache_index,
1915                                      HAMMER2_LOOKUP_NODATA |
1916                                      HAMMER2_LOOKUP_SHARED);
1917
1918         if (chain == NULL) {
1919                 /*
1920                  * Data is zero-fill
1921                  */
1922                 bp->b_resid = 0;
1923                 bp->b_error = 0;
1924                 bzero(bp->b_data, bp->b_bcount);
1925                 biodone(nbio);
1926         } else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1927                 /*
1928                  * Data is embedded in the inode (copy from inode).
1929                  */
1930                 hammer2_chain_load_async(chain, hammer2_strategy_read_callback,
1931                                          nbio);
1932         } else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
1933                 /*
1934                  * Data is on-media, issue device I/O and copy.
1935                  *
1936                  * XXX direct-IO shortcut could go here XXX.
1937                  */
1938                 int comp = HAMMER2_DEC_COMP(chain->bref.methods);
1939
1940                 if (comp == HAMMER2_COMP_LZ4 || comp == HAMMER2_COMP_ZLIB) {
1941                         /*
1942                          * Block compression is determined by the
1943                          * bref.methods value.  The LZ4 and ZLIB cases
1944                          * differ only in the decompression callback
1945                          * handed to breadcb().
1946                          */
1947                         hammer2_blockref_t *bref;
1948                         hammer2_off_t pbase;
1949                         hammer2_off_t pmask;
1950                         size_t psize;
1951
1952                         bref = &chain->bref;
1953                         psize = hammer2_devblksize(chain->bytes);
1954                         pmask = (hammer2_off_t)psize - 1;
1955                         pbase = bref->data_off & ~pmask;
1956                         loff = (int)((bref->data_off &
1957                                       ~HAMMER2_OFF_MASK_RADIX) - pbase);
1958                         nbio->bio_caller_info3.value = loff;
1959                         breadcb(chain->hmp->devvp, pbase, psize,
1960                                 (comp == HAMMER2_COMP_LZ4) ?
1961                                     hammer2_decompress_LZ4_callback :
1962                                     hammer2_decompress_ZLIB_callback,
1963                                 nbio);
1964                         /* XXX async read dev blk not protected by chain lk */
1965                         hammer2_chain_unlock(chain);
1966                 } else {
1967                         hammer2_chain_load_async(chain,
1968                                                  hammer2_strategy_read_callback,
1969                                                  nbio);
1970                 }
1981         } else {
1982                 panic("READ PATH: hammer2_strategy_read: unknown bref type");
1983                 chain = NULL;
1984         }
1985         hammer2_inode_unlock_sh(ip, parent);
1986         return (0);
1987 }
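
/*
 * Worked example of the device-offset math above (a sketch; assumes
 * HAMMER2_OFF_MASK_RADIX covers the low 6 radix bits of data_off and
 * a 64KB device buffer):
 *
 *      bref->data_off = 0x120345acc   (low 6 bits encode size radix)
 *      psize = 0x10000, pmask = 0xffff
 *      pbase = data_off & ~pmask                            = 0x120340000
 *      loff  = (data_off & ~HAMMER2_OFF_MASK_RADIX) - pbase = 0x5ac0
 *
 * The decompression callback then locates the compressed block at
 * byte offset loff within the psize-byte device buffer read in by
 * breadcb().
 */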
1988
1989 /*
1990  * Read callback for block that is not compressed.
1991  */
1992 static
1993 void
1994 hammer2_strategy_read_callback(hammer2_chain_t *chain, struct buf *dbp,
1995                                char *data, void *arg)
1996 {
1997         struct bio *nbio = arg;
1998         struct buf *bp = nbio->bio_buf;
1999
2000         if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2001                 /*
2002                  * Data is embedded in the inode (copy from inode).
2003                  */
2004                 bcopy(((hammer2_inode_data_t *)data)->u.data,
2005                       bp->b_data, HAMMER2_EMBEDDED_BYTES);
2006                 bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
2007                       bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
2008                 bp->b_resid = 0;
2009                 bp->b_error = 0;
2010                 hammer2_chain_unlock(chain);
2011                 biodone(nbio);
2012         } else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2013                 /*
2014                  * Data is on-media, issue device I/O and copy.
2015                  *
2016                  * XXX direct-IO shortcut could go here XXX.
2017                  */
2018                 KKASSERT(chain->bytes <= bp->b_bcount);
2019                 bcopy(data, bp->b_data, chain->bytes);
2020                 if (chain->bytes < bp->b_bcount) {
2021                         bzero(bp->b_data + chain->bytes,
2022                               bp->b_bcount - chain->bytes);
2023                 }
2024                 bp->b_flags |= B_NOTMETA;
2025                 bp->b_resid = 0;
2026                 bp->b_error = 0;
2027                 hammer2_chain_unlock(chain);
2028                 biodone(nbio);
2029         } else {
2030                 if (dbp)
2031                         bqrelse(dbp);
2032                 panic("hammer2_strategy_read: unknown bref type");
2033                 /*hammer2_chain_unlock(chain);*/
2034                 /*chain = NULL;*/
2035         }
2036 }
2037
2038 static
2039 int
2040 hammer2_strategy_write(struct vop_strategy_args *ap)
2041 {       
2042         hammer2_pfsmount_t *pmp;
2043         struct bio *bio;
2044         struct buf *bp;
2045         hammer2_inode_t *ip;
2046         
2047         bio = ap->a_bio;
2048         bp = bio->bio_buf;
2049         ip = VTOI(ap->a_vp);
2050         pmp = ip->pmp;
2051         
2052         mtx_lock(&pmp->wthread_mtx);
2053         bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
2054         wakeup(&pmp->wthread_bioq);
2055         mtx_unlock(&pmp->wthread_mtx);
2056
2057         return(0);
2058 }
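
/*
 * Illustrative consumer-side sketch (an assumption: the actual write
 * thread lives elsewhere in hammer2, and example_write_thread() here
 * is hypothetical).  hammer2_strategy_write() above only queues the
 * bio and wakes the thread; a loop of roughly this shape dequeues
 * each bio and performs the block allocation, compression and media
 * write outside the strategy path.  Thread termination is omitted.
 */
#if 0
static void
example_write_thread(void *arg)
{
        hammer2_pfsmount_t *pmp = arg;
        struct bio *bio;

        mtx_lock(&pmp->wthread_mtx);
        for (;;) {
                bio = bioq_first(&pmp->wthread_bioq);
                if (bio == NULL) {
                        /* sleep until hammer2_strategy_write() wakes us */
                        mtxsleep(&pmp->wthread_bioq, &pmp->wthread_mtx,
                                 0, "h2bioq", 0);
                        continue;
                }
                bioq_remove(&pmp->wthread_bioq, bio);
                mtx_unlock(&pmp->wthread_mtx);
                /* ... perform the logical write, then ... */
                biodone(bio);
                mtx_lock(&pmp->wthread_mtx);
        }
}
#endif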
2059
2060 /*
2061  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2062  */
2063 static
2064 int
2065 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2066 {
2067         hammer2_inode_t *ip;
2068         int error;
2069
2070         ip = VTOI(ap->a_vp);
2071
2072         error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2073                               ap->a_fflag, ap->a_cred);
2074         return (error);
2075 }
2076
2077 static
2078 int 
2079 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2080 {
2081         struct mount *mp;
2082         hammer2_pfsmount_t *pmp;
2083         int rc;
2084
2085         switch (ap->a_op) {
2086         case (MOUNTCTL_SET_EXPORT):
2087                 mp = ap->a_head.a_ops->head.vv_mount;
2088                 pmp = MPTOPMP(mp);
2089
2090                 if (ap->a_ctllen != sizeof(struct export_args))
2091                         rc = (EINVAL);
2092                 else
2093                         rc = vfs_export(mp, &pmp->export,
2094                                         (const struct export_args *)ap->a_ctl);
2095                 break;
2096         default:
2097                 rc = vop_stdmountctl(ap);
2098                 break;
2099         }
2100         return (rc);
2101 }
2102
2103 struct vop_ops hammer2_vnode_vops = {
2104         .vop_default    = vop_defaultop,
2105         .vop_fsync      = hammer2_vop_fsync,
2106         .vop_getpages   = vop_stdgetpages,
2107         .vop_putpages   = vop_stdputpages,
2108         .vop_access     = hammer2_vop_access,
2109         .vop_advlock    = hammer2_vop_advlock,
2110         .vop_close      = hammer2_vop_close,
2111         .vop_nlink      = hammer2_vop_nlink,
2112         .vop_ncreate    = hammer2_vop_ncreate,
2113         .vop_nsymlink   = hammer2_vop_nsymlink,
2114         .vop_nremove    = hammer2_vop_nremove,
2115         .vop_nrmdir     = hammer2_vop_nrmdir,
2116         .vop_nrename    = hammer2_vop_nrename,
2117         .vop_getattr    = hammer2_vop_getattr,
2118         .vop_setattr    = hammer2_vop_setattr,
2119         .vop_readdir    = hammer2_vop_readdir,
2120         .vop_readlink   = hammer2_vop_readlink,
2123         .vop_read       = hammer2_vop_read,
2124         .vop_write      = hammer2_vop_write,
2125         .vop_open       = hammer2_vop_open,
2126         .vop_inactive   = hammer2_vop_inactive,
2127         .vop_reclaim    = hammer2_vop_reclaim,
2128         .vop_nresolve   = hammer2_vop_nresolve,
2129         .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2130         .vop_nmkdir     = hammer2_vop_nmkdir,
2131         .vop_ioctl      = hammer2_vop_ioctl,
2132         .vop_mountctl   = hammer2_vop_mountctl,
2133         .vop_bmap       = hammer2_vop_bmap,
2134         .vop_strategy   = hammer2_vop_strategy,
2135 };
2136
2137 struct vop_ops hammer2_spec_vops = {
2138
2139 };
2140
2141 struct vop_ops hammer2_fifo_vops = {
2142
2143 };
2144