sys/vfs/hammer2/hammer2_vnops.c
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * Kernel Filesystem interface
38  *
39  * NOTE! local ipdata pointers must be reloaded on any modifying operation
40  *       to the inode as its underlying chain may have changed.
41  */
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
59
60 #include "hammer2.h"
61
62 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
63                                 int seqcount);
64 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
65                                 int ioflag, int seqcount);
66 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
67 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
68
69 struct objcache *cache_xops;
70
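/*
 * Deliver a kqueue notification on the vnode if any note flags are set.
 */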
71 static __inline
72 void
73 hammer2_knote(struct vnode *vp, int flags)
74 {
75         if (flags)
76                 KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
77 }
78
79 /*
80  * Last reference to a vnode is going away but it is still cached.
81  */
82 static
83 int
84 hammer2_vop_inactive(struct vop_inactive_args *ap)
85 {
86         hammer2_inode_t *ip;
87         struct vnode *vp;
88
89         vp = ap->a_vp;
90         ip = VTOI(vp);
91
92         /*
93          * Degenerate case
94          */
95         if (ip == NULL) {
96                 vrecycle(vp);
97                 return (0);
98         }
99
100         /*
101          * Check for deleted inodes and recycle immediately on the last
102          * release.  Be sure to destroy any left-over buffer cache buffers
103          * so we do not waste time trying to flush them.
104          *
105          * Note that deleting the file block chains under the inode chain
106          * would just be a waste of energy, so don't do it.
107          *
108          * WARNING: nvtruncbuf() can only be safely called without the inode
109          *          lock held due to the way our write thread works.
110          */
111         if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
112                 hammer2_key_t lbase;
113                 int nblksize;
114
115                 /*
116                  * Throw away any left-over buffer cache buffers for the
117                  * unlinked file by truncating the buffer cache to zero,
118                  * then recycle the vnode so the inode can be reclaimed.
119                  */
120                 nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
121                 nvtruncbuf(vp, 0, nblksize, 0, 0);
122                 vrecycle(vp);
123         }
124         return (0);
125 }
126
127 /*
128  * Reclaim a vnode so that it can be reused; after the inode is
129  * disassociated, the filesystem must manage it alone.
130  */
131 static
132 int
133 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
134 {
135         hammer2_inode_t *ip;
136         hammer2_pfs_t *pmp;
137         struct vnode *vp;
138
139         vp = ap->a_vp;
140         ip = VTOI(vp);
141         if (ip == NULL) {
142                 return(0);
143         }
144         pmp = ip->pmp;
145
146         /*
147          * The final close of a deleted file or directory marks it for
148          * destruction.  The DELETED flag allows the flusher to shortcut
149          * any modified blocks still unflushed (that is, just ignore them).
150          *
151          * HAMMER2 usually does not try to optimize the freemap by returning
152          * deleted blocks to it as it does not usually know how many snapshots
153          * might be referencing portions of the file/dir.
154          */
155         vp->v_data = NULL;
156         ip->vp = NULL;
157
158         /*
159          * NOTE! We do not attempt to flush chains here, flushing is
160          *       really fragile and could also deadlock.
161          */
162         vclrisdirty(vp);
163
164         /*
165          * A modified inode may require chain synchronization.  This
166          * synchronization is usually handled by VOP_SYNC / VOP_FSYNC
167          * when vfsync() is called.  However, that requires a vnode.
168          *
169          * When the vnode is disassociated we must keep track of any modified
170          * inode to be flushed in a later filesystem sync.  We cannot safely
171          * synchronize the inode from inside the reclaim due to potentially
172          * deep locks held as-of when the reclaim occurs.
173          * Interactions and potential deadlocks abound.
174          *
175          * Place the inode on SIDEQ, unless it is already on the SIDEQ or
176          * SYNCQ.  It will be transferred to the SYNCQ in the next filesystem
177          * sync.  It is not safe to try to shoehorn it into the current fs
178          * sync.
179          */
180         if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
181                           HAMMER2_INODE_MODIFIED |
182                           HAMMER2_INODE_RESIZED |
183                           HAMMER2_INODE_DIRTYDATA)) &&
184             (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
185                 hammer2_spin_ex(&pmp->list_spin);
186                 if ((ip->flags & (HAMMER2_INODE_SYNCQ |
187                                   HAMMER2_INODE_SIDEQ)) == 0) {
188                         /* ref -> sideq */
189                         atomic_set_int(&ip->flags, HAMMER2_INODE_SIDEQ);
190                         TAILQ_INSERT_TAIL(&pmp->sideq, ip, entry);
191                         ++pmp->sideq_count;
192                         hammer2_spin_unex(&pmp->list_spin);
193                         /* retain ip ref for SIDEQ linkage */
194                 } else {
195                         hammer2_spin_unex(&pmp->list_spin);
196                         hammer2_inode_drop(ip);         /* vp ref */
197                 }
198         } else {
199                 hammer2_inode_drop(ip);                 /* vp ref */
200         }
201
202         /*
203          * XXX handle background sync when ip dirty, kernel will no longer
204          * notify us regarding this inode because there is no longer a
205          * vnode attached to it.
206          */
207
208         return (0);
209 }
210
211 /*
212  * Currently this function synchronizes the front-end inode state to the
213  * backend chain topology, then flushes the inode's chain and sub-topology
214  * to backend media.  This function does not flush the root topology down to
215  * the inode.
216  */
217 static
218 int
219 hammer2_vop_fsync(struct vop_fsync_args *ap)
220 {
221         hammer2_inode_t *ip;
222         struct vnode *vp;
223         int error1;
224         int error2;
225
226         vp = ap->a_vp;
227         ip = VTOI(vp);
228         error1 = 0;
229
230         hammer2_trans_init(ip->pmp, 0);
231
232         /*
233          * Flush dirty buffers in the file's logical buffer cache.
234          * It is best to wait for the strategy code to commit the
235          * buffers to the device's backing buffer cache before
236          * trying to flush the inode.
237          *
238          * This should be quick, but certain inode modifications cached
239          * entirely in the hammer2_inode structure may not trigger a
240          * buffer read until the flush so the fsync can wind up also
241          * doing scattered reads.
242          */
243         vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
244         bio_track_wait(&vp->v_track_write, 0, 0);
245
246         /*
247          * Flush any inode changes
248          */
249         hammer2_inode_lock(ip, 0);
250         if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
251                 error1 = hammer2_inode_chain_sync(ip);
252
253         /*
254          * Flush dirty chains related to the inode.
255          *
256          * NOTE! XXX We do not currently flush to the volume root, ultimately
257          *       we will want to have a shortcut for the flushed inode stored
258          *       in the volume root for recovery purposes.
259          */
260         error2 = hammer2_inode_chain_flush(ip);
261         if (error2)
262                 error1 = error2;
263
264         /*
265          * We may be able to clear the vnode dirty flag.  The
266          * hammer2_pfs_moderate() code depends on this usually working.
267          */
268         if ((ip->flags & (HAMMER2_INODE_MODIFIED |
269                           HAMMER2_INODE_RESIZED |
270                           HAMMER2_INODE_DIRTYDATA)) == 0 &&
271             RB_EMPTY(&vp->v_rbdirty_tree) &&
272             !bio_track_active(&vp->v_track_write)) {
273                 vclrisdirty(vp);
274         }
275         hammer2_inode_unlock(ip);
276         hammer2_trans_done(ip->pmp, 0);
277
278         return (error1);
279 }
280
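/*
 * Check access permissions.  A shared inode lock is used to obtain a
 * stable snapshot of the uid/gid/mode/uflags metadata for the check.
 */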
281 static
282 int
283 hammer2_vop_access(struct vop_access_args *ap)
284 {
285         hammer2_inode_t *ip = VTOI(ap->a_vp);
286         uid_t uid;
287         gid_t gid;
288         int error;
289
290         hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
291         uid = hammer2_to_unix_xid(&ip->meta.uid);
292         gid = hammer2_to_unix_xid(&ip->meta.gid);
293         error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
294         hammer2_inode_unlock(ip);
295
296         return (error);
297 }
298
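/*
 * Fill in the vattr from the inode's cached metadata under a shared
 * inode lock.  va_bytes is approximated: directories are charged one
 * inode block, regular files report the largest per-chain data_count.
 */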
299 static
300 int
301 hammer2_vop_getattr(struct vop_getattr_args *ap)
302 {
303         hammer2_pfs_t *pmp;
304         hammer2_inode_t *ip;
305         struct vnode *vp;
306         struct vattr *vap;
307         hammer2_chain_t *chain;
308         int i;
309
310         vp = ap->a_vp;
311         vap = ap->a_vap;
312
313         ip = VTOI(vp);
314         pmp = ip->pmp;
315
316         hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
317
318         vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
319         vap->va_fileid = ip->meta.inum;
320         vap->va_mode = ip->meta.mode;
321         vap->va_nlink = ip->meta.nlinks;
322         vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
323         vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
324         vap->va_rmajor = 0;
325         vap->va_rminor = 0;
326         vap->va_size = ip->meta.size;   /* protected by shared lock */
327         vap->va_blocksize = HAMMER2_PBUFSIZE;
328         vap->va_flags = ip->meta.uflags;
329         hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
330         hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
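        /* atime is not tracked by HAMMER2, report mtime in its place */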
331         hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
332         vap->va_gen = 1;
333         vap->va_bytes = 0;
334         if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
335                 /*
336                  * Can't really calculate directory use sans the files under
337                  * it, just assume one block for now.
338                  */
339                 vap->va_bytes += HAMMER2_INODE_BYTES;
340         } else {
341                 for (i = 0; i < ip->cluster.nchains; ++i) {
342                         if ((chain = ip->cluster.array[i].chain) != NULL) {
343                                 if (vap->va_bytes <
344                                     chain->bref.embed.stats.data_count) {
345                                         vap->va_bytes =
346                                             chain->bref.embed.stats.data_count;
347                                 }
348                         }
349                 }
350         }
351         vap->va_type = hammer2_get_vtype(ip->meta.type);
352         vap->va_filerev = 0;
353         vap->va_uid_uuid = ip->meta.uid;
354         vap->va_gid_uuid = ip->meta.gid;
355         vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
356                           VA_FSID_UUID_VALID;
357
358         hammer2_inode_unlock(ip);
359
360         return (0);
361 }
362
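/*
 * Change inode attributes (flags, ownership, size, mode, mtime) inside
 * a transaction with the inode locked exclusively.  Truncations and
 * DIRECTDATA-crossing extensions require an immediate chain_sync, which
 * is handled at the done: label below.
 */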
363 static
364 int
365 hammer2_vop_setattr(struct vop_setattr_args *ap)
366 {
367         hammer2_inode_t *ip;
368         struct vnode *vp;
369         struct vattr *vap;
370         int error;
371         int kflags = 0;
372         uint64_t ctime;
373
374         vp = ap->a_vp;
375         vap = ap->a_vap;
376         hammer2_update_time(&ctime);
377
378         ip = VTOI(vp);
379
380         if (ip->pmp->ronly)
381                 return (EROFS);
382         if (hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1)
383                 return (ENOSPC);
384
385         hammer2_pfs_memory_wait(ip, 0);
386         hammer2_trans_init(ip->pmp, 0);
387         hammer2_inode_lock(ip, 0);
388         error = 0;
389
390         if (vap->va_flags != VNOVAL) {
391                 uint32_t flags;
392
393                 flags = ip->meta.uflags;
394                 error = vop_helper_setattr_flags(&flags, vap->va_flags,
395                                      hammer2_to_unix_xid(&ip->meta.uid),
396                                      ap->a_cred);
397                 if (error == 0) {
398                         if (ip->meta.uflags != flags) {
399                                 hammer2_inode_modify(ip);
400                                 ip->meta.uflags = flags;
401                                 ip->meta.ctime = ctime;
402                                 kflags |= NOTE_ATTRIB;
403                         }
404                         if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
405                                 error = 0;
406                                 goto done;
407                         }
408                 }
409                 goto done;
410         }
411         if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
412                 error = EPERM;
413                 goto done;
414         }
415         if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
416                 mode_t cur_mode = ip->meta.mode;
417                 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
418                 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
419                 uuid_t uuid_uid;
420                 uuid_t uuid_gid;
421
422                 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
423                                          ap->a_cred,
424                                          &cur_uid, &cur_gid, &cur_mode);
425                 if (error == 0) {
426                         hammer2_guid_to_uuid(&uuid_uid, cur_uid);
427                         hammer2_guid_to_uuid(&uuid_gid, cur_gid);
428                         if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
429                             bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
430                             ip->meta.mode != cur_mode
431                         ) {
432                                 hammer2_inode_modify(ip);
433                                 ip->meta.uid = uuid_uid;
434                                 ip->meta.gid = uuid_gid;
435                                 ip->meta.mode = cur_mode;
436                                 ip->meta.ctime = ctime;
437                         }
438                         kflags |= NOTE_ATTRIB;
439                 }
440         }
441
442         /*
443          * Resize the file
444          */
445         if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
446                 switch(vp->v_type) {
447                 case VREG:
448                         if (vap->va_size == ip->meta.size)
449                                 break;
450                         if (vap->va_size < ip->meta.size) {
451                                 hammer2_mtx_ex(&ip->truncate_lock);
452                                 hammer2_truncate_file(ip, vap->va_size);
453                                 hammer2_mtx_unlock(&ip->truncate_lock);
454                                 kflags |= NOTE_WRITE;
455                         } else {
456                                 hammer2_extend_file(ip, vap->va_size);
457                                 kflags |= NOTE_WRITE | NOTE_EXTEND;
458                         }
459                         hammer2_inode_modify(ip);
460                         ip->meta.mtime = ctime;
461                         vclrflags(vp, VLASTWRITETS);
462                         break;
463                 default:
464                         error = EINVAL;
465                         goto done;
466                 }
467         }
468 #if 0
469         /* atime not supported */
470         if (vap->va_atime.tv_sec != VNOVAL) {
471                 hammer2_inode_modify(ip);
472                 ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
473                 kflags |= NOTE_ATTRIB;
474         }
475 #endif
476         if (vap->va_mode != (mode_t)VNOVAL) {
477                 mode_t cur_mode = ip->meta.mode;
478                 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
479                 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
480
481                 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
482                                          cur_uid, cur_gid, &cur_mode);
483                 if (error == 0 && ip->meta.mode != cur_mode) {
484                         hammer2_inode_modify(ip);
485                         ip->meta.mode = cur_mode;
486                         ip->meta.ctime = ctime;
487                         kflags |= NOTE_ATTRIB;
488                 }
489         }
490
491         if (vap->va_mtime.tv_sec != VNOVAL) {
492                 hammer2_inode_modify(ip);
493                 ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
494                 kflags |= NOTE_ATTRIB;
495                 vclrflags(vp, VLASTWRITETS);
496         }
497
498 done:
499         /*
500          * If a truncation occurred we must call chain_sync() now in order
501          * to trim the related data chains, otherwise a later expansion can
502          * cause havoc.
503          *
 504          * If an extend occurred that changed the DIRECTDATA state, we must
 505          * call chain_sync() now in order to prepare the inode's indirect
506          * block table.
507          *
508          * WARNING! This means we are making an adjustment to the inode's
509          * chain outside of sync/fsync, and not just to inode->meta, which
510          * may result in some consistency issues if a crash were to occur
511          * at just the wrong time.
512          */
513         if (ip->flags & HAMMER2_INODE_RESIZED)
514                 hammer2_inode_chain_sync(ip);
515
516         /*
517          * Cleanup.
518          */
519         hammer2_inode_unlock(ip);
520         hammer2_trans_done(ip->pmp, 1);
521         hammer2_knote(ip->vp, kflags);
522
523         return (error);
524 }
525
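/*
 * Read directory entries.  The artificial '.' and '..' entries are
 * emitted first, then an XOP cluster scan returns inodes and dirents
 * keyed by directory hash.
 */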
526 static
527 int
528 hammer2_vop_readdir(struct vop_readdir_args *ap)
529 {
530         hammer2_xop_readdir_t *xop;
531         hammer2_blockref_t bref;
532         hammer2_inode_t *ip;
533         hammer2_tid_t inum;
534         hammer2_key_t lkey;
535         struct uio *uio;
536         off_t *cookies;
537         off_t saveoff;
538         int cookie_index;
539         int ncookies;
540         int error;
541         int eofflag;
542         int r;
543
544         ip = VTOI(ap->a_vp);
545         uio = ap->a_uio;
546         saveoff = uio->uio_offset;
547         eofflag = 0;
548         error = 0;
549
550         /*
 551          * Set up directory entry cookies if requested
552          */
553         if (ap->a_ncookies) {
554                 ncookies = uio->uio_resid / 16 + 1;
555                 if (ncookies > 1024)
556                         ncookies = 1024;
557                 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
558         } else {
559                 ncookies = -1;
560                 cookies = NULL;
561         }
562         cookie_index = 0;
563
564         hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
565
566         /*
567          * Handle artificial entries.  To ensure that only positive 64 bit
568          * quantities are returned to userland we always strip off bit 63.
569          * The hash code is designed such that codes 0x0000-0x7FFF are not
 570          * used, allowing us to use these codes for artificial entries.
571          *
572          * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
573          * allow '..' to cross the mount point into (e.g.) the super-root.
574          */
575         if (saveoff == 0) {
576                 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
577                 r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
578                 if (r)
579                         goto done;
580                 if (cookies)
581                         cookies[cookie_index] = saveoff;
582                 ++saveoff;
583                 ++cookie_index;
584                 if (cookie_index == ncookies)
585                         goto done;
586         }
587
588         if (saveoff == 1) {
589                 /*
 590                  * Be careful with lock order when accessing ".."
591                  *
592                  * (ip is the current dir. xip is the parent dir).
593                  */
594                 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
595                 if (ip != ip->pmp->iroot)
596                         inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
597                 r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
598                 if (r)
599                         goto done;
600                 if (cookies)
601                         cookies[cookie_index] = saveoff;
602                 ++saveoff;
603                 ++cookie_index;
604                 if (cookie_index == ncookies)
605                         goto done;
606         }
607
608         lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
609         if (hammer2_debug & 0x0020)
610                 kprintf("readdir: lkey %016jx\n", lkey);
611         if (error)
612                 goto done;
613
614         /*
615          * Use XOP for cluster scan.
616          *
617          * parent is the inode cluster, already locked for us.  Don't
618          * double lock shared locks as this will screw up upgrades.
619          */
620         xop = hammer2_xop_alloc(ip, 0);
621         xop->lkey = lkey;
622         hammer2_xop_start(&xop->head, &hammer2_readdir_desc);
623
624         for (;;) {
625                 const hammer2_inode_data_t *ripdata;
626                 const char *dname;
627                 int dtype;
628
629                 error = hammer2_xop_collect(&xop->head, 0);
630                 error = hammer2_error_to_errno(error);
631                 if (error) {
632                         break;
633                 }
634                 if (cookie_index == ncookies)
635                         break;
636                 if (hammer2_debug & 0x0020)
 637                         kprintf("cluster chain %p %p\n",
 638                                 xop->head.cluster.focus,
 639                                 (xop->head.cluster.focus ?
 640                                  xop->head.cluster.focus->data : (void *)-1));
641                 hammer2_cluster_bref(&xop->head.cluster, &bref);
642
643                 if (bref.type == HAMMER2_BREF_TYPE_INODE) {
644                         ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
645                         dtype = hammer2_get_dtype(ripdata->meta.type);
646                         saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
647                         r = vop_write_dirent(&error, uio,
648                                              ripdata->meta.inum &
649                                               HAMMER2_DIRHASH_USERMSK,
650                                              dtype,
651                                              ripdata->meta.name_len,
652                                              ripdata->filename);
653                         hammer2_xop_pdata(&xop->head);
654                         if (r)
655                                 break;
656                         if (cookies)
657                                 cookies[cookie_index] = saveoff;
658                         ++cookie_index;
659                 } else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
660                         uint16_t namlen;
661
662                         dtype = hammer2_get_dtype(bref.embed.dirent.type);
663                         saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
664                         namlen = bref.embed.dirent.namlen;
665                         if (namlen <= sizeof(bref.check.buf)) {
666                                 dname = bref.check.buf;
667                         } else {
668                                 dname = hammer2_xop_gdata(&xop->head)->buf;
669                         }
670                         r = vop_write_dirent(&error, uio,
671                                              bref.embed.dirent.inum, dtype,
672                                              namlen, dname);
673                         if (namlen > sizeof(bref.check.buf))
674                                 hammer2_xop_pdata(&xop->head);
675                         if (r)
676                                 break;
677                         if (cookies)
678                                 cookies[cookie_index] = saveoff;
679                         ++cookie_index;
680                 } else {
681                         /* XXX chain error */
682                         kprintf("bad chain type readdir %d\n", bref.type);
683                 }
684         }
685         hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
686         if (error == ENOENT) {
687                 error = 0;
688                 eofflag = 1;
689                 saveoff = (hammer2_key_t)-1;
690         } else {
691                 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
692         }
693 done:
694         hammer2_inode_unlock(ip);
695         if (ap->a_eofflag)
696                 *ap->a_eofflag = eofflag;
697         if (hammer2_debug & 0x0020)
698                 kprintf("readdir: done at %016jx\n", saveoff);
699         uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
700         if (error && cookie_index == 0) {
701                 if (cookies) {
702                         kfree(cookies, M_TEMP);
703                         *ap->a_ncookies = 0;
704                         *ap->a_cookies = NULL;
705                 }
706         } else {
707                 if (cookies) {
708                         *ap->a_ncookies = cookie_index;
709                         *ap->a_cookies = cookies;
710                 }
711         }
712         return (error);
713 }
714
715 /*
716  * hammer2_vop_readlink { vp, uio, cred }
717  */
718 static
719 int
720 hammer2_vop_readlink(struct vop_readlink_args *ap)
721 {
722         struct vnode *vp;
723         hammer2_inode_t *ip;
724         int error;
725
726         vp = ap->a_vp;
727         if (vp->v_type != VLNK)
728                 return (EINVAL);
729         ip = VTOI(vp);
730
731         error = hammer2_read_file(ip, ap->a_uio, 0);
732         return (error);
733 }
734
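/*
 * Read from a regular file through the logical buffer cache (see
 * hammer2_read_file() below).
 */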
735 static
736 int
737 hammer2_vop_read(struct vop_read_args *ap)
738 {
739         struct vnode *vp;
740         hammer2_inode_t *ip;
741         struct uio *uio;
742         int error;
743         int seqcount;
744         int bigread;
745
746         /*
747          * Read operations supported on this vnode?
748          */
749         vp = ap->a_vp;
750         if (vp->v_type != VREG)
751                 return (EINVAL);
752
753         /*
754          * Misc
755          */
756         ip = VTOI(vp);
757         uio = ap->a_uio;
758         error = 0;
759
760         seqcount = ap->a_ioflag >> 16;
761         bigread = (uio->uio_resid > 100 * 1024 * 1024);
762
763         error = hammer2_read_file(ip, uio, seqcount);
764         return (error);
765 }
766
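/*
 * Write to a regular file.  The work runs inside a transaction, flagged
 * BUFCACHE for UIO_NOCOPY writes issued by the VM/pageout path, and the
 * actual copy loop is in hammer2_write_file().
 */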
767 static
768 int
769 hammer2_vop_write(struct vop_write_args *ap)
770 {
771         hammer2_inode_t *ip;
772         thread_t td;
773         struct vnode *vp;
774         struct uio *uio;
775         int error;
776         int seqcount;
777         int ioflag;
778
779         /*
 780          * Write operations supported on this vnode?
781          */
782         vp = ap->a_vp;
783         if (vp->v_type != VREG)
784                 return (EINVAL);
785
786         /*
787          * Misc
788          */
789         ip = VTOI(vp);
790         ioflag = ap->a_ioflag;
791         uio = ap->a_uio;
792         error = 0;
793         if (ip->pmp->ronly)
794                 return (EROFS);
795         switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
796         case 2:
797                 return (ENOSPC);
798         case 1:
799                 ioflag |= IO_DIRECT;    /* semi-synchronous */
800                 /* fall through */
801         default:
802                 break;
803         }
804
805         seqcount = ioflag >> 16;
806
807         /*
808          * Check resource limit
809          */
810         if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
811             uio->uio_offset + uio->uio_resid >
812              td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
813                 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
814                 return (EFBIG);
815         }
816
817         /*
818          * The transaction interlocks against flush initiations
819          * (note: but will run concurrently with the actual flush).
820          *
821          * To avoid deadlocking against the VM system, we must flag any
822          * transaction related to the buffer cache or other direct
823          * VM page manipulation.
824          */
825         if (uio->uio_segflg == UIO_NOCOPY) {
826                 hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
827         } else {
828                 hammer2_pfs_memory_wait(ip, 0);
829                 hammer2_trans_init(ip->pmp, 0);
830         }
831         error = hammer2_write_file(ip, uio, ioflag, seqcount);
832         hammer2_trans_done(ip->pmp, 1);
833
834         return (error);
835 }
836
837 /*
838  * Perform read operations on a file or symlink given an UNLOCKED
839  * inode and uio.
840  *
841  * The passed ip is not locked.
842  */
843 static
844 int
845 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
846 {
847         hammer2_off_t size;
848         struct buf *bp;
849         int error;
850
851         error = 0;
852
853         /*
854          * UIO read loop.
855          *
856          * WARNING! Assumes that the kernel interlocks size changes at the
857          *          vnode level.
858          */
859         hammer2_mtx_sh(&ip->lock);
860         hammer2_mtx_sh(&ip->truncate_lock);
861         size = ip->meta.size;
862         hammer2_mtx_unlock(&ip->lock);
863
864         while (uio->uio_resid > 0 && uio->uio_offset < size) {
865                 hammer2_key_t lbase;
866                 hammer2_key_t leof;
867                 int lblksize;
868                 int loff;
869                 int n;
870
871                 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
872                                                 &lbase, &leof);
873
874 #if 1
875                 bp = NULL;
876                 error = cluster_readx(ip->vp, leof, lbase, lblksize,
877                                       B_NOTMETA | B_KVABIO,
878                                       uio->uio_resid,
879                                       seqcount * MAXBSIZE,
880                                       &bp);
881 #else
882                 if (uio->uio_segflg == UIO_NOCOPY) {
883                         bp = getblk(ip->vp, lbase, lblksize,
884                                     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
885                         if (bp->b_flags & B_CACHE) {
886                                 int i;
887                                 int j = 0;
888                                 if (bp->b_xio.xio_npages != 16)
889                                         kprintf("NPAGES BAD\n");
890                                 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
891                                         vm_page_t m;
892                                         m = bp->b_xio.xio_pages[i];
893                                         if (m == NULL || m->valid == 0) {
894                                                 kprintf("bp %016jx %016jx pg %d inv",
895                                                         lbase, leof, i);
896                                                 if (m)
897                                                         kprintf("m->object %p/%p", m->object, ip->vp->v_object);
898                                                 kprintf("\n");
899                                                 j = 1;
900                                         }
901                                 }
902                                 if (j)
903                                         kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
904                         }
905                         bqrelse(bp);
906                 }
907                 error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
908 #endif
909                 if (error) {
910                         brelse(bp);
911                         break;
912                 }
913                 bkvasync(bp);
914                 loff = (int)(uio->uio_offset - lbase);
915                 n = lblksize - loff;
916                 if (n > uio->uio_resid)
917                         n = uio->uio_resid;
918                 if (n > size - uio->uio_offset)
919                         n = (int)(size - uio->uio_offset);
920                 bp->b_flags |= B_AGE;
921                 uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
922                 bqrelse(bp);
923         }
924         hammer2_mtx_unlock(&ip->truncate_lock);
925
926         return (error);
927 }
928
929 /*
930  * Write to the file represented by the inode via the logical buffer cache.
931  * The inode may represent a regular file or a symlink.
932  *
933  * The inode must not be locked.
934  */
935 static
936 int
937 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
938                    int ioflag, int seqcount)
939 {
940         hammer2_key_t old_eof;
941         hammer2_key_t new_eof;
942         struct buf *bp;
943         int kflags;
944         int error;
945         int modified;
946
947         /*
 948          * Set up the write offset for IO_APPEND
949          *
950          * WARNING! Assumes that the kernel interlocks size changes at the
951          *          vnode level.
952          */
953         hammer2_mtx_ex(&ip->lock);
954         hammer2_mtx_sh(&ip->truncate_lock);
955         if (ioflag & IO_APPEND)
956                 uio->uio_offset = ip->meta.size;
957         old_eof = ip->meta.size;
958
959         /*
960          * Extend the file if necessary.  If the write fails at some point
961          * we will truncate it back down to cover as much as we were able
962          * to write.
963          *
964          * Doing this now makes it easier to calculate buffer sizes in
965          * the loop.
966          */
967         kflags = 0;
968         error = 0;
969         modified = 0;
970
971         if (uio->uio_offset + uio->uio_resid > old_eof) {
972                 new_eof = uio->uio_offset + uio->uio_resid;
973                 modified = 1;
974                 hammer2_extend_file(ip, new_eof);
975                 kflags |= NOTE_EXTEND;
976         } else {
977                 new_eof = old_eof;
978         }
979         hammer2_mtx_unlock(&ip->lock);
980
981         /*
982          * UIO write loop
983          */
984         while (uio->uio_resid > 0) {
985                 hammer2_key_t lbase;
986                 int trivial;
987                 int endofblk;
988                 int lblksize;
989                 int loff;
990                 int n;
991
992                 /*
993                  * Don't allow the buffer build to blow out the buffer
994                  * cache.
995                  */
996                 if ((ioflag & IO_RECURSE) == 0)
997                         bwillwrite(HAMMER2_PBUFSIZE);
998
999                 /*
1000                  * This nominally tells us how much we can cluster and
1001                  * what the logical buffer size needs to be.  Currently
1002                  * we don't try to cluster the write and just handle one
1003                  * block at a time.
1004                  */
1005                 lblksize = hammer2_calc_logical(ip, uio->uio_offset,
1006                                                 &lbase, NULL);
1007                 loff = (int)(uio->uio_offset - lbase);
1008
1009                 KKASSERT(lblksize <= 65536);
1010
1011                 /*
1012                  * Calculate bytes to copy this transfer and whether the
1013                  * copy completely covers the buffer or not.
1014                  */
1015                 trivial = 0;
1016                 n = lblksize - loff;
1017                 if (n > uio->uio_resid) {
1018                         n = uio->uio_resid;
1019                         if (loff == lbase && uio->uio_offset + n == new_eof)
1020                                 trivial = 1;
1021                         endofblk = 0;
1022                 } else {
1023                         if (loff == 0)
1024                                 trivial = 1;
1025                         endofblk = 1;
1026                 }
1027                 if (lbase >= new_eof)
1028                         trivial = 1;
1029
1030                 /*
1031                  * Get the buffer
1032                  */
1033                 if (uio->uio_segflg == UIO_NOCOPY) {
1034                         /*
1035                          * Issuing a write with the same data backing the
1036                          * buffer.  Instantiate the buffer to collect the
1037                          * backing vm pages, then read-in any missing bits.
1038                          *
1039                          * This case is used by vop_stdputpages().
1040                          */
1041                         bp = getblk(ip->vp, lbase, lblksize,
1042                                     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1043                         if ((bp->b_flags & B_CACHE) == 0) {
1044                                 bqrelse(bp);
1045                                 error = bread_kvabio(ip->vp, lbase,
1046                                                      lblksize, &bp);
1047                         }
1048                 } else if (trivial) {
1049                         /*
1050                          * Even though we are entirely overwriting the buffer
1051                          * we may still have to zero it out to avoid a
1052                          * mmap/write visibility issue.
1053                          */
1054                         bp = getblk(ip->vp, lbase, lblksize,
1055                                     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1056                         if ((bp->b_flags & B_CACHE) == 0)
1057                                 vfs_bio_clrbuf(bp);
1058                 } else {
1059                         /*
1060                          * Partial overwrite, read in any missing bits then
1061                          * replace the portion being written.
1062                          *
1063                          * (The strategy code will detect zero-fill physical
1064                          * blocks for this case).
1065                          */
1066                         error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
1067                         if (error == 0)
1068                                 bheavy(bp);
1069                 }
1070
1071                 if (error) {
1072                         brelse(bp);
1073                         break;
1074                 }
1075
1076                 /*
1077                  * Ok, copy the data in
1078                  */
1079                 bkvasync(bp);
1080                 error = uiomovebp(bp, bp->b_data + loff, n, uio);
1081                 kflags |= NOTE_WRITE;
1082                 modified = 1;
1083                 if (error) {
1084                         brelse(bp);
1085                         break;
1086                 }
1087
1088                 /*
1089                  * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1090                  *          with IO_SYNC or IO_ASYNC set.  These writes
1091                  *          must be handled as the pageout daemon expects.
1092                  *
1093                  * NOTE!    H2 relies on cluster_write() here because it
1094                  *          cannot preallocate disk blocks at the logical
1095                  *          level due to not knowing what the compression
1096                  *          size will be at this time.
1097                  *
1098                  *          We must use cluster_write() here and we depend
1099                  *          on the write-behind feature to flush buffers
1100                  *          appropriately.  If we let the buffer daemons do
1101                  *          it the block allocations will be all over the
1102                  *          map.
1103                  */
1104                 if (ioflag & IO_SYNC) {
1105                         bwrite(bp);
1106                 } else if ((ioflag & IO_DIRECT) && endofblk) {
1107                         bawrite(bp);
1108                 } else if (ioflag & IO_ASYNC) {
1109                         bawrite(bp);
1110                 } else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
1111                         bdwrite(bp);
1112                 } else {
1113 #if 1
1114                         bp->b_flags |= B_CLUSTEROK;
1115                         cluster_write(bp, new_eof, lblksize, seqcount);
1116 #else
1117                         bp->b_flags |= B_CLUSTEROK;
1118                         bdwrite(bp);
1119 #endif
1120                 }
1121         }
1122
1123         /*
 1124          * Cleanup.  If we extended the file EOF but then failed to write it
 1125          * through, the entire write is a failure and we must back the EOF up.
1126          */
1127         if (error && new_eof != old_eof) {
1128                 hammer2_mtx_unlock(&ip->truncate_lock);
1129                 hammer2_mtx_ex(&ip->lock);
1130                 hammer2_mtx_ex(&ip->truncate_lock);
1131                 hammer2_truncate_file(ip, old_eof);
1132                 if (ip->flags & HAMMER2_INODE_MODIFIED)
1133                         hammer2_inode_chain_sync(ip);
1134                 hammer2_mtx_unlock(&ip->lock);
1135         } else if (modified) {
1136                 struct vnode *vp = ip->vp;
1137
1138                 hammer2_mtx_ex(&ip->lock);
1139                 hammer2_inode_modify(ip);
1140                 if (uio->uio_segflg == UIO_NOCOPY) {
1141                         if (vp->v_flag & VLASTWRITETS) {
1142                                 ip->meta.mtime =
1143                                     (unsigned long)vp->v_lastwrite_ts.tv_sec *
1144                                     1000000 +
1145                                     vp->v_lastwrite_ts.tv_nsec / 1000;
1146                         }
1147                 } else {
1148                         hammer2_update_time(&ip->meta.mtime);
1149                         vclrflags(vp, VLASTWRITETS);
1150                 }
1151
1152 #if 0
1153                 /*
1154                  * REMOVED - handled by hammer2_extend_file().  Do not issue
1155                  * a chain_sync() outside of a sync/fsync except for DIRECTDATA
1156                  * state changes.
1157                  *
1158                  * Under normal conditions we only issue a chain_sync if
1159                  * the inode's DIRECTDATA state changed.
1160                  */
1161                 if (ip->flags & HAMMER2_INODE_RESIZED)
1162                         hammer2_inode_chain_sync(ip);
1163 #endif
1164                 hammer2_mtx_unlock(&ip->lock);
1165                 hammer2_knote(ip->vp, kflags);
1166         }
1167         hammer2_trans_assert_strategy(ip->pmp);
1168         hammer2_mtx_unlock(&ip->truncate_lock);
1169
1170         return error;
1171 }
1172
1173 /*
1174  * Truncate the size of a file.  The inode must not be locked.
1175  *
1176  * We must unconditionally set HAMMER2_INODE_RESIZED to properly
1177  * ensure that any on-media data beyond the new file EOF has been destroyed.
1178  *
1179  * WARNING: nvtruncbuf() can only be safely called without the inode lock
1180  *          held due to the way our write thread works.  If the truncation
1181  *          occurs in the middle of a buffer, nvtruncbuf() is responsible
1182  *          for dirtying that buffer and zeroing out trailing bytes.
1183  *
1184  * WARNING! Assumes that the kernel interlocks size changes at the
1185  *          vnode level.
1186  *
1187  * WARNING! Caller assumes responsibility for removing dead blocks
1188  *          if INODE_RESIZED is set.
1189  */
1190 static
1191 void
1192 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1193 {
1194         hammer2_key_t lbase;
1195         int nblksize;
1196
1197         hammer2_mtx_unlock(&ip->lock);
1198         if (ip->vp) {
1199                 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1200                 nvtruncbuf(ip->vp, nsize,
1201                            nblksize, (int)nsize & (nblksize - 1),
1202                            0);
1203         }
1204         hammer2_mtx_ex(&ip->lock);
1205         KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1206         ip->osize = ip->meta.size;
1207         ip->meta.size = nsize;
1208         atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1209         hammer2_inode_modify(ip);
1210 }
1211
1212 /*
1213  * Extend the size of a file.  The inode must not be locked.
1214  *
1215  * Even though the file size is changing, we do not have to set the
1216  * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
1217  * boundary.  When this occurs a hammer2_inode_chain_sync() is required
1218  * to prepare the inode cluster's indirect block table, otherwise
1219  * async execution of the strategy code will implode on us.
1220  *
1221  * WARNING! Assumes that the kernel interlocks size changes at the
1222  *          vnode level.
1223  *
1224  * WARNING! Caller assumes responsibility for transitioning out
1225  *          of the inode DIRECTDATA mode if INODE_RESIZED is set.
1226  */
1227 static
1228 void
1229 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1230 {
1231         hammer2_key_t lbase;
1232         hammer2_key_t osize;
1233         int oblksize;
1234         int nblksize;
1235
1236         KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1237         hammer2_inode_modify(ip);
1238         osize = ip->meta.size;
1239         ip->osize = osize;
1240         ip->meta.size = nsize;
1241
1242         /*
1243          * We must issue a chain_sync() when the DIRECTDATA state changes
1244          * to prevent confusion between the flush code and the in-memory
1245          * state.  This is not perfect because we are doing it outside of
1246          * a sync/fsync operation, so it might not be fully synchronized
1247          * with the meta-data topology flush.
1248          */
1249         if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
1250                 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1251                 hammer2_inode_chain_sync(ip);
1252         }
1253
1254         hammer2_mtx_unlock(&ip->lock);
1255         if (ip->vp) {
1256                 oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1257                 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1258                 nvextendbuf(ip->vp,
1259                             osize, nsize,
1260                             oblksize, nblksize,
1261                             -1, -1, 0);
1262         }
1263         hammer2_mtx_ex(&ip->lock);
1264 }
1265
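/*
 * Resolve a name in the directory (dvp) via an XOP lookup and set up
 * the namecache entry for the result.
 */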
1266 static
1267 int
1268 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1269 {
1270         hammer2_xop_nresolve_t *xop;
1271         hammer2_inode_t *ip;
1272         hammer2_inode_t *dip;
1273         struct namecache *ncp;
1274         struct vnode *vp;
1275         int error;
1276
1277         dip = VTOI(ap->a_dvp);
1278         xop = hammer2_xop_alloc(dip, 0);
1279
1280         ncp = ap->a_nch->ncp;
1281         hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1282
1283         /*
1284          * Note: In DragonFly the kernel handles '.' and '..'.
1285          */
1286         hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1287         hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);
1288
1289         error = hammer2_xop_collect(&xop->head, 0);
1290         error = hammer2_error_to_errno(error);
1291         if (error) {
1292                 ip = NULL;
1293         } else {
1294                 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
1295         }
1296         hammer2_inode_unlock(dip);
1297
1298         /*
1299          * Acquire the related vnode
1300          *
1301          * NOTE: For error processing, only ENOENT resolves the namecache
1302          *       entry to NULL, otherwise we just return the error and
1303          *       leave the namecache unresolved.
1304          *
1305          * NOTE: multiple hammer2_inode structures can be aliased to the
1306          *       same chain element, for example for hardlinks.  This
1307          *       use case does not 'reattach' inode associations that
1308          *       might already exist, but always allocates a new one.
1309          *
1310          * WARNING: inode structure is locked exclusively via inode_get
1311          *          but chain was locked shared.  inode_unlock()
1312          *          will handle it properly.
1313          */
1314         if (ip) {
1315                 vp = hammer2_igetv(ip, &error); /* error set to UNIX error */
1316                 if (error == 0) {
1317                         vn_unlock(vp);
1318                         cache_setvp(ap->a_nch, vp);
1319                 } else if (error == ENOENT) {
1320                         cache_setvp(ap->a_nch, NULL);
1321                 }
1322                 hammer2_inode_unlock(ip);
1323
1324                 /*
1325                  * The vp should not be released until after we've disposed
1326                  * of our locks, because it might cause vop_inactive() to
1327                  * be called.
1328                  */
1329                 if (vp)
1330                         vrele(vp);
1331         } else {
1332                 error = ENOENT;
1333                 cache_setvp(ap->a_nch, NULL);
1334         }
1335         hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1336         KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1337                 ("resolve error %d/%p ap %p\n",
1338                  error, ap->a_nch->ncp->nc_vp, ap));
1339
1340         return error;
1341 }
1342
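/*
 * Resolve '..' using the iparent inode number stored in the directory
 * inode's metadata.
 */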
1343 static
1344 int
1345 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1346 {
1347         hammer2_inode_t *dip;
1348         hammer2_tid_t inum;
1349         int error;
1350
1351         dip = VTOI(ap->a_dvp);
1352         inum = dip->meta.iparent;
1353         *ap->a_vpp = NULL;
1354
1355         if (inum) {
1356                 error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
1357                                          inum, ap->a_vpp);
1358         } else {
1359                 error = ENOENT;
1360         }
1361         return error;
1362 }
1363
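/*
 * Create a new directory.  The inode is created with nlinks=1 and a
 * directory entry referencing it is then created in (dvp).
 */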
1364 static
1365 int
1366 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1367 {
1368         hammer2_inode_t *dip;
1369         hammer2_inode_t *nip;
1370         struct namecache *ncp;
1371         const uint8_t *name;
1372         size_t name_len;
1373         hammer2_tid_t inum;
1374         int error;
1375
1376         dip = VTOI(ap->a_dvp);
1377         if (dip->pmp->ronly)
1378                 return (EROFS);
1379         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1380                 return (ENOSPC);
1381
1382         ncp = ap->a_nch->ncp;
1383         name = ncp->nc_name;
1384         name_len = ncp->nc_nlen;
1385
1386         hammer2_pfs_memory_wait(dip, 1);
1387         hammer2_trans_init(dip->pmp, 0);
1388
1389         inum = hammer2_trans_newinum(dip->pmp);
1390
1391         /*
1392          * Create the actual inode as a hidden file in the iroot, then
1393          * create the directory entry.  The creation of the actual inode
1394          * sets its nlinks to 1 which is the value we desire.
1395          */
1396         nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1397                                           inum, &error);
1398         if (error) {
1399                 error = hammer2_error_to_errno(error);
1400         } else {
1401                 error = hammer2_dirent_create(dip, name, name_len,
1402                                               nip->meta.inum, nip->meta.type);
1403                 /* returns UNIX error code */
1404         }
1405         if (error) {
1406                 if (nip) {
1407                         hammer2_inode_unlink_finisher(nip, 0);
1408                         hammer2_inode_unlock(nip);
1409                         nip = NULL;
1410                 }
1411                 *ap->a_vpp = NULL;
1412         } else {
1413                 *ap->a_vpp = hammer2_igetv(nip, &error);
1414                 hammer2_inode_unlock(nip);
1415         }
1416
1417         /*
1418          * Update dip's mtime
1419          *
1420          * We can use a shared inode lock and allow the meta.mtime update
1421          * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
1422          */
1423         if (error == 0) {
1424                 uint64_t mtime;
1425
1426                 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1427                 hammer2_update_time(&mtime);
1428                 hammer2_inode_modify(dip);
1429                 dip->meta.mtime = mtime;
1430                 hammer2_inode_unlock(dip);
1431         }
1432
1433         hammer2_trans_done(dip->pmp, 1);
1434
1435         if (error == 0) {
1436                 cache_setunresolved(ap->a_nch);
1437                 cache_setvp(ap->a_nch, *ap->a_vpp);
1438                 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1439         }
1440         return error;
1441 }
1442
1443 static
1444 int
1445 hammer2_vop_open(struct vop_open_args *ap)
1446 {
1447         return vop_stdopen(ap);
1448 }
1449
1450 /*
1451  * hammer2_vop_advlock { vp, id, op, fl, flags }
1452  */
1453 static
1454 int
1455 hammer2_vop_advlock(struct vop_advlock_args *ap)
1456 {
1457         hammer2_inode_t *ip = VTOI(ap->a_vp);
1458         hammer2_off_t size;
1459
1460         size = ip->meta.size;
1461         return (lf_advlock(ap, &ip->advlock, size));
1462 }
1463
1464 static
1465 int
1466 hammer2_vop_close(struct vop_close_args *ap)
1467 {
1468         return vop_stdclose(ap);
1469 }
1470
1471 /*
1472  * hammer2_vop_nlink { nch, dvp, vp, cred }
1473  *
1474  * Create a hardlink from (vp) to {dvp, nch}.
1475  */
1476 static
1477 int
1478 hammer2_vop_nlink(struct vop_nlink_args *ap)
1479 {
1480         hammer2_inode_t *tdip;  /* target directory to create link in */
1481         hammer2_inode_t *ip;    /* inode we are hardlinking to */
1482         struct namecache *ncp;
1483         const uint8_t *name;
1484         size_t name_len;
1485         int error;
1486
1487         if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1488                 return(EXDEV);
1489
1490         tdip = VTOI(ap->a_dvp);
1491         if (tdip->pmp->ronly)
1492                 return (EROFS);
1493         if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
1494                 return (ENOSPC);
1495
1496         ncp = ap->a_nch->ncp;
1497         name = ncp->nc_name;
1498         name_len = ncp->nc_nlen;
1499
1500         /*
1501          * ip represents the file being hardlinked.  The file could be a
1502          * normal file or a hardlink target if it has already been hardlinked.
1503          * (With the new semantics, it will almost always be a hardlink
1504          * target.)
1505          *
1506          * Bump nlinks and potentially also create or move the hardlink
1507          * target in the parent directory common to (ip) and (tdip).  The
1508          * consolidation code can modify ip->cluster.  The returned cluster
1509          * is locked.
1510          */
1511         ip = VTOI(ap->a_vp);
1512         KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
1513         hammer2_pfs_memory_wait(ip, 0);
1514         hammer2_trans_init(ip->pmp, 0);
1515
1516         /*
1517          * Target should be an indexed inode or there's no way we will ever
1518          * be able to find it!
1519          */
1520         KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1521
1522         error = 0;
1523
1524         /*
1525          * Can return NULL and error == EXDEV if the common parent
1526          * crosses a directory with the xlink flag set.
1527          */
1528         hammer2_inode_lock(tdip, 0);
1529         hammer2_inode_lock(ip, 0);
1530
1531         /*
1532          * Create the directory entry and bump nlinks.
1533          */
1534         if (error == 0) {
1535                 error = hammer2_dirent_create(tdip, name, name_len,
1536                                               ip->meta.inum, ip->meta.type);
1537                 hammer2_inode_modify(ip);
1538                 ++ip->meta.nlinks;
1539         }
1540         if (error == 0) {
1541                 /*
1542                  * Update dip's mtime
1543                  */
1544                 uint64_t mtime;
1545
1546                 hammer2_update_time(&mtime);
1547                 hammer2_inode_modify(tdip);
1548                 tdip->meta.mtime = mtime;
1549
1550                 cache_setunresolved(ap->a_nch);
1551                 cache_setvp(ap->a_nch, ap->a_vp);
1552         }
1553         hammer2_inode_unlock(ip);
1554         hammer2_inode_unlock(tdip);
1555
1556         hammer2_trans_done(ip->pmp, 1);
1557         hammer2_knote(ap->a_vp, NOTE_LINK);
1558         hammer2_knote(ap->a_dvp, NOTE_WRITE);
1559
1560         return error;
1561 }
1562
1563 /*
1564  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1565  *
1566  * The operating system has already ensured that the directory entry
1567  * does not exist and done all appropriate namespace locking.
1568  */
1569 static
1570 int
1571 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1572 {
1573         hammer2_inode_t *dip;
1574         hammer2_inode_t *nip;
1575         struct namecache *ncp;
1576         const uint8_t *name;
1577         size_t name_len;
1578         hammer2_tid_t inum;
1579         int error;
1580
1581         dip = VTOI(ap->a_dvp);
1582         if (dip->pmp->ronly)
1583                 return (EROFS);
1584         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1585                 return (ENOSPC);
1586
1587         ncp = ap->a_nch->ncp;
1588         name = ncp->nc_name;
1589         name_len = ncp->nc_nlen;
1590         hammer2_pfs_memory_wait(dip, 1);
1591         hammer2_trans_init(dip->pmp, 0);
1592
1593         inum = hammer2_trans_newinum(dip->pmp);
1594
1595         /*
1596          * Create the actual inode as a hidden file in the iroot, then
1597          * create the directory entry.  The creation of the actual inode
1598          * sets its nlinks to 1, which is the value we desire.
1599          */
1600         nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1601                                           inum, &error);
1602
1603         if (error) {
1604                 error = hammer2_error_to_errno(error);
1605         } else {
1606                 error = hammer2_dirent_create(dip, name, name_len,
1607                                               nip->meta.inum, nip->meta.type);
1608         }
1609         if (error) {
1610                 if (nip) {
1611                         hammer2_inode_unlink_finisher(nip, 0);
1612                         hammer2_inode_unlock(nip);
1613                         nip = NULL;
1614                 }
1615                 *ap->a_vpp = NULL;
1616         } else {
1617                 *ap->a_vpp = hammer2_igetv(nip, &error);
1618                 hammer2_inode_unlock(nip);
1619         }
1620
1621         /*
1622          * Update dip's mtime
1623          */
1624         if (error == 0) {
1625                 uint64_t mtime;
1626
1627                 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1628                 hammer2_update_time(&mtime);
1629                 hammer2_inode_modify(dip);
1630                 dip->meta.mtime = mtime;
1631                 hammer2_inode_unlock(dip);
1632         }
1633
1634         hammer2_trans_done(dip->pmp, 1);
1635
1636         if (error == 0) {
1637                 cache_setunresolved(ap->a_nch);
1638                 cache_setvp(ap->a_nch, *ap->a_vpp);
1639                 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1640         }
1641         return error;
1642 }
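
/*
 * Illustrative sketch, not part of the original file: nmkdir, ncreate,
 * nmknod and nsymlink all allocate a new inode number, create the inode
 * as a hidden file in the iroot, then create the directory entry.  A
 * hypothetical consolidation of that sequence might look like this; the
 * helper name and signature are assumptions, the calls mirror the inline
 * code above.  On success the new inode is returned locked, otherwise
 * NULL is returned and *errorp holds a UNIX error code.
 */
#if 0
static
hammer2_inode_t *
hammer2_create_with_dirent(hammer2_inode_t *dip, struct vattr *vap,
                           struct ucred *cred, const uint8_t *name,
                           size_t name_len, int *errorp)
{
        hammer2_inode_t *nip;
        hammer2_tid_t inum;

        inum = hammer2_trans_newinum(dip->pmp);
        nip = hammer2_inode_create_normal(dip, vap, cred, inum, errorp);
        if (*errorp) {
                *errorp = hammer2_error_to_errno(*errorp);
        } else {
                *errorp = hammer2_dirent_create(dip, name, name_len,
                                                nip->meta.inum,
                                                nip->meta.type);
        }
        if (*errorp && nip) {
                hammer2_inode_unlink_finisher(nip, 0);
                hammer2_inode_unlock(nip);
                nip = NULL;
        }
        return nip;
}
#endif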
1643
1644 /*
1645  * Make a device node (typically a fifo)
1646  */
1647 static
1648 int
1649 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1650 {
1651         hammer2_inode_t *dip;
1652         hammer2_inode_t *nip;
1653         struct namecache *ncp;
1654         const uint8_t *name;
1655         size_t name_len;
1656         hammer2_tid_t inum;
1657         int error;
1658
1659         dip = VTOI(ap->a_dvp);
1660         if (dip->pmp->ronly)
1661                 return (EROFS);
1662         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1663                 return (ENOSPC);
1664
1665         ncp = ap->a_nch->ncp;
1666         name = ncp->nc_name;
1667         name_len = ncp->nc_nlen;
1668         hammer2_pfs_memory_wait(dip, 1);
1669         hammer2_trans_init(dip->pmp, 0);
1670
1671         /*
1672          * Create the device inode and then create the directory entry.
1673          */
1674         inum = hammer2_trans_newinum(dip->pmp);
1675         nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1676                                           inum, &error);
1677         if (error == 0) {
1678                 error = hammer2_dirent_create(dip, name, name_len,
1679                                               nip->meta.inum, nip->meta.type);
1680         }
1681         if (error) {
1682                 if (nip) {
1683                         hammer2_inode_unlink_finisher(nip, 0);
1684                         hammer2_inode_unlock(nip);
1685                         nip = NULL;
1686                 }
1687                 *ap->a_vpp = NULL;
1688         } else {
1689                 *ap->a_vpp = hammer2_igetv(nip, &error);
1690                 hammer2_inode_unlock(nip);
1691         }
1692
1693         /*
1694          * Update dip's mtime
1695          */
1696         if (error == 0) {
1697                 uint64_t mtime;
1698
1699                 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1700                 hammer2_update_time(&mtime);
1701                 hammer2_inode_modify(dip);
1702                 dip->meta.mtime = mtime;
1703                 hammer2_inode_unlock(dip);
1704         }
1705
1706         hammer2_trans_done(dip->pmp, 1);
1707
1708         if (error == 0) {
1709                 cache_setunresolved(ap->a_nch);
1710                 cache_setvp(ap->a_nch, *ap->a_vpp);
1711                 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1712         }
1713         return error;
1714 }
1715
1716 /*
1717  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1718  */
1719 static
1720 int
1721 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1722 {
1723         hammer2_inode_t *dip;
1724         hammer2_inode_t *nip;
1725         struct namecache *ncp;
1726         const uint8_t *name;
1727         size_t name_len;
1728         hammer2_tid_t inum;
1729         int error;
1730
1731         dip = VTOI(ap->a_dvp);
1732         if (dip->pmp->ronly)
1733                 return (EROFS);
1734         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1735                 return (ENOSPC);
1736
1737         ncp = ap->a_nch->ncp;
1738         name = ncp->nc_name;
1739         name_len = ncp->nc_nlen;
1740         hammer2_pfs_memory_wait(dip, 1);
1741         hammer2_trans_init(dip->pmp, 0);
1742
1743         ap->a_vap->va_type = VLNK;      /* enforce type */
1744
1745         /*
1746          * Create the softlink as an inode and then create the directory
1747          * entry.
1748          */
1749         inum = hammer2_trans_newinum(dip->pmp);
1750
1751         nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1752                                           inum, &error);
1753         if (error == 0) {
1754                 error = hammer2_dirent_create(dip, name, name_len,
1755                                               nip->meta.inum, nip->meta.type);
1756         }
1757         if (error) {
1758                 if (nip) {
1759                         hammer2_inode_unlink_finisher(nip, 0);
1760                         hammer2_inode_unlock(nip);
1761                         nip = NULL;
1762                 }
1763                 *ap->a_vpp = NULL;
1764                 hammer2_trans_done(dip->pmp, 1);
1765                 return error;
1766         }
1767         *ap->a_vpp = hammer2_igetv(nip, &error);
1768
1769         /*
1770          * Build the softlink (~like file data) and finalize the namecache.
1771          */
1772         if (error == 0) {
1773                 size_t bytes;
1774                 struct uio auio;
1775                 struct iovec aiov;
1776
1777                 bytes = strlen(ap->a_target);
1778
1779                 hammer2_inode_unlock(nip);
1780                 bzero(&auio, sizeof(auio));
1781                 bzero(&aiov, sizeof(aiov));
1782                 auio.uio_iov = &aiov;
1783                 auio.uio_segflg = UIO_SYSSPACE;
1784                 auio.uio_rw = UIO_WRITE;
1785                 auio.uio_resid = bytes;
1786                 auio.uio_iovcnt = 1;
1787                 auio.uio_td = curthread;
1788                 aiov.iov_base = ap->a_target;
1789                 aiov.iov_len = bytes;
1790                 error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1791                 /* XXX handle error */
1792                 error = 0;
1793         } else {
1794                 hammer2_inode_unlock(nip);
1795         }
1796
1797         /*
1798          * Update dip's mtime
1799          */
1800         if (error == 0) {
1801                 uint64_t mtime;
1802
1803                 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1804                 hammer2_update_time(&mtime);
1805                 hammer2_inode_modify(dip);
1806                 dip->meta.mtime = mtime;
1807                 hammer2_inode_unlock(dip);
1808         }
1809
1810         hammer2_trans_done(dip->pmp, 1);
1811
1812         /*
1813          * Finalize namecache
1814          */
1815         if (error == 0) {
1816                 cache_setunresolved(ap->a_nch);
1817                 cache_setvp(ap->a_nch, *ap->a_vpp);
1818                 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1819         }
1820         return error;
1821 }
1822
1823 /*
1824  * hammer2_vop_nremove { nch, dvp, cred }
1825  */
1826 static
1827 int
1828 hammer2_vop_nremove(struct vop_nremove_args *ap)
1829 {
1830         hammer2_xop_unlink_t *xop;
1831         hammer2_inode_t *dip;
1832         hammer2_inode_t *ip;
1833         struct namecache *ncp;
1834         int error;
1835         int isopen;
1836
1837         dip = VTOI(ap->a_dvp);
1838         if (dip->pmp->ronly)
1839                 return (EROFS);
1840 #if 0
1841         /* allow removals, but expect the user to also run bulkfree */
1842         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1843                 return (ENOSPC);
1844 #endif
1845
1846         ncp = ap->a_nch->ncp;
1847
1848         hammer2_pfs_memory_wait(dip, 1);
1849         hammer2_trans_init(dip->pmp, 0);
1850         hammer2_inode_lock(dip, 0);
1851
1852         /*
1853          * The unlink XOP unlinks the path from the directory and
1854          * The unlink XOP unlinks the path from the directory, then
1855          * locates and returns the cluster associated with the real inode.
1856          */
1857         xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1858         hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1859
1860         /*
1861          * The namecache entry is locked so nobody can use this namespace.
1862          * Calculate isopen to determine if this namespace has an open vp
1863          * associated with it and resolve the vp only if it does.
1864          *
1865          * We try to avoid resolving the vnode if nobody has it open, but
1866          * note that the test is via this namespace only.
1867          */
1868         isopen = cache_isopen(ap->a_nch);
1869         xop->isdir = 0;
1870         xop->dopermanent = 0;
1871         hammer2_xop_start(&xop->head, &hammer2_unlink_desc);
1872
1873         /*
1874          * Collect the real inode and adjust nlinks, destroy the real
1875          * inode if nlinks transitions to 0 and it was the real inode
1876          * (else it has already been removed).
1877          */
1878         error = hammer2_xop_collect(&xop->head, 0);
1879         error = hammer2_error_to_errno(error);
1880         hammer2_inode_unlock(dip);
1881
1882         if (error == 0) {
1883                 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
1884                 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1885                 if (ip) {
1886                         hammer2_inode_unlink_finisher(ip, isopen);
1887                         hammer2_inode_unlock(ip);
1888                 }
1889         } else {
1890                 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1891         }
1892
1893         /*
1894          * Update dip's mtime
1895          */
1896         if (error == 0) {
1897                 uint64_t mtime;
1898
1899                 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1900                 hammer2_update_time(&mtime);
1901                 hammer2_inode_modify(dip);
1902                 dip->meta.mtime = mtime;
1903                 hammer2_inode_unlock(dip);
1904         }
1905
1906         hammer2_trans_done(dip->pmp, 1);
1907         if (error == 0) {
1908                 cache_unlink(ap->a_nch);
1909                 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1910         }
1911         return (error);
1912 }
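
/*
 * Illustrative sketch, not part of the original file: nremove (above) and
 * nrmdir (below) differ mainly in xop->isdir and the knote flags, so the
 * backend unlink sequence could plausibly be shared.  The helper name and
 * signature are assumptions; the call sequence mirrors the two functions.
 */
#if 0
static
int
hammer2_unlink_backend(hammer2_inode_t *dip, struct nchandle *nch, int isdir)
{
        hammer2_xop_unlink_t *xop;
        hammer2_inode_t *ip;
        int isopen;
        int error;

        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        hammer2_xop_setname(&xop->head, nch->ncp->nc_name, nch->ncp->nc_nlen);
        isopen = cache_isopen(nch);
        xop->isdir = isdir;
        xop->dopermanent = 0;
        hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

        error = hammer2_error_to_errno(hammer2_xop_collect(&xop->head, 0));
        /* (the original functions unlock the directory inode here) */
        if (error == 0) {
                ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (ip) {
                        hammer2_inode_unlink_finisher(ip, isopen);
                        hammer2_inode_unlock(ip);
                }
        } else {
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }
        return error;
}
#endif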
1913
1914 /*
1915  * hammer2_vop_nrmdir { nch, dvp, cred }
1916  */
1917 static
1918 int
1919 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1920 {
1921         hammer2_xop_unlink_t *xop;
1922         hammer2_inode_t *dip;
1923         hammer2_inode_t *ip;
1924         struct namecache *ncp;
1925         int isopen;
1926         int error;
1927
1928         dip = VTOI(ap->a_dvp);
1929         if (dip->pmp->ronly)
1930                 return (EROFS);
1931 #if 0
1932         /* allow removals, but expect the user to also run bulkfree */
1933         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1934                 return (ENOSPC);
1935 #endif
1936
1937         hammer2_pfs_memory_wait(dip, 1);
1938         hammer2_trans_init(dip->pmp, 0);
1939         hammer2_inode_lock(dip, 0);
1940
1941         xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1942
1943         ncp = ap->a_nch->ncp;
1944         hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1945         isopen = cache_isopen(ap->a_nch);
1946         xop->isdir = 1;
1947         xop->dopermanent = 0;
1948         hammer2_xop_start(&xop->head, &hammer2_unlink_desc);
1949
1950         /*
1951          * Collect the real inode and adjust nlinks, destroy the real
1952          * inode if nlinks transitions to 0 and it was the real inode
1953          * (else it has already been removed).
1954          */
1955         error = hammer2_xop_collect(&xop->head, 0);
1956         error = hammer2_error_to_errno(error);
1957         hammer2_inode_unlock(dip);
1958
1959         if (error == 0) {
1960                 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
1961                 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1962                 if (ip) {
1963                         hammer2_inode_unlink_finisher(ip, isopen);
1964                         hammer2_inode_unlock(ip);
1965                 }
1966         } else {
1967                 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1968         }
1969
1970         /*
1971          * Update dip's mtime
1972          */
1973         if (error == 0) {
1974                 uint64_t mtime;
1975
1976                 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1977                 hammer2_update_time(&mtime);
1978                 hammer2_inode_modify(dip);
1979                 dip->meta.mtime = mtime;
1980                 hammer2_inode_unlock(dip);
1981         }
1982
1983         hammer2_trans_done(dip->pmp, 1);
1984         if (error == 0) {
1985                 cache_unlink(ap->a_nch);
1986                 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1987         }
1988         return (error);
1989 }
1990
1991 /*
1992  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1993  */
1994 static
1995 int
1996 hammer2_vop_nrename(struct vop_nrename_args *ap)
1997 {
1998         struct namecache *fncp;
1999         struct namecache *tncp;
2000         hammer2_inode_t *fdip;  /* source directory */
2001         hammer2_inode_t *tdip;  /* target directory */
2002         hammer2_inode_t *ip;    /* file being renamed */
2003         hammer2_inode_t *tip;   /* replaced target during rename or NULL */
2004         const uint8_t *fname;
2005         size_t fname_len;
2006         const uint8_t *tname;
2007         size_t tname_len;
2008         int error;
2009         int update_tdip;
2010         int update_fdip;
2011         hammer2_key_t tlhc;
2012
2013         if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
2014                 return(EXDEV);
2015         if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
2016                 return(EXDEV);
2017
2018         fdip = VTOI(ap->a_fdvp);        /* source directory */
2019         tdip = VTOI(ap->a_tdvp);        /* target directory */
2020
2021         if (fdip->pmp->ronly)
2022                 return (EROFS);
2023         if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
2024                 return (ENOSPC);
2025
2026         fncp = ap->a_fnch->ncp;         /* entry name in source */
2027         fname = fncp->nc_name;
2028         fname_len = fncp->nc_nlen;
2029
2030         tncp = ap->a_tnch->ncp;         /* entry name in target */
2031         tname = tncp->nc_name;
2032         tname_len = tncp->nc_nlen;
2033
2034         hammer2_pfs_memory_wait(tdip, 0);
2035         hammer2_trans_init(tdip->pmp, 0);
2036
2037         update_tdip = 0;
2038         update_fdip = 0;
2039
2040         ip = VTOI(fncp->nc_vp);
2041         hammer2_inode_ref(ip);          /* extra ref */
2042
2043         /*
2044          * Lookup the target name to determine if a directory entry
2045          * is being overwritten.  We only hold related inode locks
2046          * temporarily; the operating system is expected to protect
2047          * against rename races.
2048          */
2049         tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
2050         if (tip)
2051                 hammer2_inode_ref(tip); /* extra ref */
2052
2053         /*
2054          * Can return NULL and error == EXDEV if the common parent
2055          * crosses a directory with the xlink flag set.
2056          *
2057          * For now try to avoid deadlocks with a simple pointer address
2058          * test.  (tip) can be NULL.
2059          */
2060         error = 0;
2061         if (fdip <= tdip) {
2062                 hammer2_inode_lock(fdip, 0);
2063                 hammer2_inode_lock(tdip, 0);
2064         } else {
2065                 hammer2_inode_lock(tdip, 0);
2066                 hammer2_inode_lock(fdip, 0);
2067         }
2068         if (tip) {
2069                 if (ip <= tip) {
2070                         hammer2_inode_lock(ip, 0);
2071                         hammer2_inode_lock(tip, 0);
2072                 } else {
2073                         hammer2_inode_lock(tip, 0);
2074                         hammer2_inode_lock(ip, 0);
2075                 }
2076         } else {
2077                 hammer2_inode_lock(ip, 0);
2078         }
2079
2080         /*
2081          * Resolve the collision space for (tdip, tname, tname_len)
2082          *
2083          * tdip must be held exclusively locked to prevent races since
2084          * multiple filenames can end up in the same collision space.
2085          */
2086         {
2087                 hammer2_xop_scanlhc_t *sxop;
2088                 hammer2_tid_t lhcbase;
2089
2090                 tlhc = hammer2_dirhash(tname, tname_len);
2091                 lhcbase = tlhc;
2092                 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2093                 sxop->lhc = tlhc;
2094                 hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
2095                 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
2096                         if (tlhc != sxop->head.cluster.focus->bref.key)
2097                                 break;
2098                         ++tlhc;
2099                 }
2100                 error = hammer2_error_to_errno(error);
2101                 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
2102
2103                 if (error) {
2104                         if (error != ENOENT)
2105                                 goto done2;
2106                         ++tlhc;
2107                         error = 0;
2108                 }
2109                 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
2110                         error = ENOSPC;
2111                         goto done2;
2112                 }
2113         }
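
        /*
         * Illustrative note, not in the original file: the scan above
         * probes successive keys tlhc, tlhc+1, ... within the collision
         * space defined by HAMMER2_DIRHASH_LOMASK.  If incrementing tlhc
         * carries out of the low-order bits, (lhcbase ^ tlhc) has a bit
         * set outside HAMMER2_DIRHASH_LOMASK and the collision space is
         * treated as full, returning ENOSPC rather than silently landing
         * in a different hash bucket.
         */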
2114
2115         /*
2116          * Ready to go, issue the rename to the backend.  Note that meta-data
2117          * updates to the related inodes occur separately from the rename
2118          * operation.
2119          *
2120          * NOTE: While it is not necessary to update ip->meta.name*, doing
2121          *       so aids catastrophic recovery and debugging.
2122          */
2123         if (error == 0) {
2124                 hammer2_xop_nrename_t *xop4;
2125
2126                 xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
2127                 xop4->lhc = tlhc;
2128                 xop4->ip_key = ip->meta.name_key;
2129                 hammer2_xop_setip2(&xop4->head, ip);
2130                 hammer2_xop_setip3(&xop4->head, tdip);
2131                 hammer2_xop_setname(&xop4->head, fname, fname_len);
2132                 hammer2_xop_setname2(&xop4->head, tname, tname_len);
2133                 hammer2_xop_start(&xop4->head, &hammer2_nrename_desc);
2134
2135                 error = hammer2_xop_collect(&xop4->head, 0);
2136                 error = hammer2_error_to_errno(error);
2137                 hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);
2138
2139                 if (error == ENOENT)
2140                         error = 0;
2141
2142                 /*
2143                  * Update inode meta-data.
2144                  *
2145                  * WARNING!  The in-memory inode (ip) structure does not
2146                  *           maintain a copy of the inode's filename buffer.
2147                  */
2148                 if (error == 0 &&
2149                     (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
2150                         hammer2_inode_modify(ip);
2151                         ip->meta.name_len = tname_len;
2152                         ip->meta.name_key = tlhc;
2153                 }
2154                 if (error == 0) {
2155                         hammer2_inode_modify(ip);
2156                         ip->meta.iparent = tdip->meta.inum;
2157                 }
2158                 update_fdip = 1;
2159                 update_tdip = 1;
2160         }
2161
2162 done2:
2163         /*
2164          * If no error, the backend has replaced the target directory entry.
2165          * We must adjust nlinks on the replaced target, if it exists.
2166          */
2167         if (error == 0 && tip) {
2168                 int isopen;
2169
2170                 isopen = cache_isopen(ap->a_tnch);
2171                 hammer2_inode_unlink_finisher(tip, isopen);
2172         }
2173
2174         /*
2175          * Update directory mtimes to reflect that something changed.
2176          */
2177         if (update_fdip || update_tdip) {
2178                 uint64_t mtime;
2179
2180                 hammer2_update_time(&mtime);
2181                 if (update_fdip) {
2182                         hammer2_inode_modify(fdip);
2183                         fdip->meta.mtime = mtime;
2184                 }
2185                 if (update_tdip) {
2186                         hammer2_inode_modify(tdip);
2187                         tdip->meta.mtime = mtime;
2188                 }
2189         }
2190         if (tip) {
2191                 hammer2_inode_unlock(tip);
2192                 hammer2_inode_drop(tip);
2193         }
2194         hammer2_inode_unlock(ip);
2195         hammer2_inode_unlock(tdip);
2196         hammer2_inode_unlock(fdip);
2197         hammer2_inode_drop(ip);
2198         hammer2_trans_done(tdip->pmp, 1);
2199
2200         /*
2201          * Issue the namecache update after unlocking all the internal
2202          * hammer2 structures, otherwise we might deadlock.
2203          *
2204          * WARNING! The target namespace must be updated atomically,
2205          *          and we depend on cache_rename() to handle that for
2206          *          us.  Do not do a separate cache_unlink() because
2207          *          that leaves a small window of opportunity for other
2208          *          threads to allocate the target namespace before we
2209          *          manage to complete our rename.
2210          *
2211          * WARNING! cache_rename() (and cache_unlink()) will properly
2212          *          set VREF_FINALIZE on any attached vnode.  Do not
2213          *          call cache_setunresolved() manually before-hand as
2214          *          this will prevent the flag from being set later via
2215          *          cache_rename().  If VREF_FINALIZE is not properly set
2216          *          and the inode is no longer in the topology, related
2217          *          chains can remain dirty indefinitely.
2218          */
2219         if (error == 0 && tip) {
2220                 /*cache_unlink(ap->a_tnch); see above */
2221                 /*cache_setunresolved(ap->a_tnch); see above */
2222         }
2223         if (error == 0) {
2224                 cache_rename(ap->a_fnch, ap->a_tnch);
2225                 hammer2_knote(ap->a_fdvp, NOTE_WRITE);
2226                 hammer2_knote(ap->a_tdvp, NOTE_WRITE);
2227                 hammer2_knote(fncp->nc_vp, NOTE_RENAME);
2228         }
2229
2230         return (error);
2231 }
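
/*
 * Illustrative sketch, not part of the original file: the deadlock
 * avoidance in nrename above orders exclusive inode locks by ascending
 * pointer address.  A hypothetical pairwise helper doing the same thing
 * could look like this; the name is an assumption.
 */
#if 0
static
void
hammer2_inode_lock_pair(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
        if (ip1 <= ip2) {
                hammer2_inode_lock(ip1, 0);
                hammer2_inode_lock(ip2, 0);
        } else {
                hammer2_inode_lock(ip2, 0);
                hammer2_inode_lock(ip1, 0);
        }
}
#endif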
2232
2233 /*
2234  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2235  */
2236 static
2237 int
2238 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2239 {
2240         hammer2_inode_t *ip;
2241         int error;
2242
2243         ip = VTOI(ap->a_vp);
2244
2245         error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2246                               ap->a_fflag, ap->a_cred);
2247         return (error);
2248 }
2249
2250 static
2251 int
2252 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2253 {
2254         struct mount *mp;
2255         hammer2_pfs_t *pmp;
2256         int rc;
2257
2258         switch (ap->a_op) {
2259         case (MOUNTCTL_SET_EXPORT):
2260                 mp = ap->a_head.a_ops->head.vv_mount;
2261                 pmp = MPTOPMP(mp);
2262
2263                 if (ap->a_ctllen != sizeof(struct export_args))
2264                         rc = (EINVAL);
2265                 else
2266                         rc = vfs_export(mp, &pmp->export,
2267                                         (const struct export_args *)ap->a_ctl);
2268                 break;
2269         default:
2270                 rc = vop_stdmountctl(ap);
2271                 break;
2272         }
2273         return (rc);
2274 }
2275
2276 /*
2277  * KQFILTER
2278  */
2279 static void filt_hammer2detach(struct knote *kn);
2280 static int filt_hammer2read(struct knote *kn, long hint);
2281 static int filt_hammer2write(struct knote *kn, long hint);
2282 static int filt_hammer2vnode(struct knote *kn, long hint);
2283
2284 static struct filterops hammer2read_filtops =
2285         { FILTEROP_ISFD | FILTEROP_MPSAFE,
2286           NULL, filt_hammer2detach, filt_hammer2read };
2287 static struct filterops hammer2write_filtops =
2288         { FILTEROP_ISFD | FILTEROP_MPSAFE,
2289           NULL, filt_hammer2detach, filt_hammer2write };
2290 static struct filterops hammer2vnode_filtops =
2291         { FILTEROP_ISFD | FILTEROP_MPSAFE,
2292           NULL, filt_hammer2detach, filt_hammer2vnode };
2293
2294 static
2295 int
2296 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2297 {
2298         struct vnode *vp = ap->a_vp;
2299         struct knote *kn = ap->a_kn;
2300
2301         switch (kn->kn_filter) {
2302         case EVFILT_READ:
2303                 kn->kn_fop = &hammer2read_filtops;
2304                 break;
2305         case EVFILT_WRITE:
2306                 kn->kn_fop = &hammer2write_filtops;
2307                 break;
2308         case EVFILT_VNODE:
2309                 kn->kn_fop = &hammer2vnode_filtops;
2310                 break;
2311         default:
2312                 return (EOPNOTSUPP);
2313         }
2314
2315         kn->kn_hook = (caddr_t)vp;
2316
2317         knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2318
2319         return(0);
2320 }
2321
2322 static void
2323 filt_hammer2detach(struct knote *kn)
2324 {
2325         struct vnode *vp = (void *)kn->kn_hook;
2326
2327         knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2328 }
2329
2330 static int
2331 filt_hammer2read(struct knote *kn, long hint)
2332 {
2333         struct vnode *vp = (void *)kn->kn_hook;
2334         hammer2_inode_t *ip = VTOI(vp);
2335         off_t off;
2336
2337         if (hint == NOTE_REVOKE) {
2338                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2339                 return(1);
2340         }
2341         off = ip->meta.size - kn->kn_fp->f_offset;
2342         kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2343         if (kn->kn_sfflags & NOTE_OLDAPI)
2344                 return(1);
2345         return (kn->kn_data != 0);
2346 }
2347
2348
2349 static int
2350 filt_hammer2write(struct knote *kn, long hint)
2351 {
2352         if (hint == NOTE_REVOKE)
2353                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2354         kn->kn_data = 0;
2355         return (1);
2356 }
2357
2358 static int
2359 filt_hammer2vnode(struct knote *kn, long hint)
2360 {
2361         if (kn->kn_sfflags & hint)
2362                 kn->kn_fflags |= hint;
2363         if (hint == NOTE_REVOKE) {
2364                 kn->kn_flags |= (EV_EOF | EV_NODATA);
2365                 return (1);
2366         }
2367         return (kn->kn_fflags != 0);
2368 }
2369
2370 /*
2371  * FIFO VOPS
2372  */
2373 static
2374 int
2375 hammer2_vop_markatime(struct vop_markatime_args *ap)
2376 {
2377         hammer2_inode_t *ip;
2378         struct vnode *vp;
2379
2380         vp = ap->a_vp;
2381         ip = VTOI(vp);
2382
2383         if (ip->pmp->ronly)
2384                 return (EROFS);
2385         return(0);
2386 }
2387
2388 static
2389 int
2390 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2391 {
2392         int error;
2393
2394         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2395         if (error)
2396                 error = hammer2_vop_kqfilter(ap);
2397         return(error);
2398 }
2399
2400 /*
2401  * VOPS vector
2402  */
2403 struct vop_ops hammer2_vnode_vops = {
2404         .vop_default    = vop_defaultop,
2405         .vop_fsync      = hammer2_vop_fsync,
2406         .vop_getpages   = vop_stdgetpages,
2407         .vop_putpages   = vop_stdputpages,
2408         .vop_access     = hammer2_vop_access,
2409         .vop_advlock    = hammer2_vop_advlock,
2410         .vop_close      = hammer2_vop_close,
2411         .vop_nlink      = hammer2_vop_nlink,
2412         .vop_ncreate    = hammer2_vop_ncreate,
2413         .vop_nsymlink   = hammer2_vop_nsymlink,
2414         .vop_nremove    = hammer2_vop_nremove,
2415         .vop_nrmdir     = hammer2_vop_nrmdir,
2416         .vop_nrename    = hammer2_vop_nrename,
2417         .vop_getattr    = hammer2_vop_getattr,
2418         .vop_setattr    = hammer2_vop_setattr,
2419         .vop_readdir    = hammer2_vop_readdir,
2420         .vop_readlink   = hammer2_vop_readlink,
2421         .vop_read       = hammer2_vop_read,
2422         .vop_write      = hammer2_vop_write,
2423         .vop_open       = hammer2_vop_open,
2424         .vop_inactive   = hammer2_vop_inactive,
2425         .vop_reclaim    = hammer2_vop_reclaim,
2426         .vop_nresolve   = hammer2_vop_nresolve,
2427         .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2428         .vop_nmkdir     = hammer2_vop_nmkdir,
2429         .vop_nmknod     = hammer2_vop_nmknod,
2430         .vop_ioctl      = hammer2_vop_ioctl,
2431         .vop_mountctl   = hammer2_vop_mountctl,
2432         .vop_bmap       = hammer2_vop_bmap,
2433         .vop_strategy   = hammer2_vop_strategy,
2434         .vop_kqfilter   = hammer2_vop_kqfilter
2435 };
2436
2437 struct vop_ops hammer2_spec_vops = {
2438         .vop_default =          vop_defaultop,
2439         .vop_fsync =            hammer2_vop_fsync,
2440         .vop_read =             vop_stdnoread,
2441         .vop_write =            vop_stdnowrite,
2442         .vop_access =           hammer2_vop_access,
2443         .vop_close =            hammer2_vop_close,
2444         .vop_markatime =        hammer2_vop_markatime,
2445         .vop_getattr =          hammer2_vop_getattr,
2446         .vop_inactive =         hammer2_vop_inactive,
2447         .vop_reclaim =          hammer2_vop_reclaim,
2448         .vop_setattr =          hammer2_vop_setattr
2449 };
2450
2451 struct vop_ops hammer2_fifo_vops = {
2452         .vop_default =          fifo_vnoperate,
2453         .vop_fsync =            hammer2_vop_fsync,
2454 #if 0
2455         .vop_read =             hammer2_vop_fiforead,
2456         .vop_write =            hammer2_vop_fifowrite,
2457 #endif
2458         .vop_access =           hammer2_vop_access,
2459 #if 0
2460         .vop_close =            hammer2_vop_fifoclose,
2461 #endif
2462         .vop_markatime =        hammer2_vop_markatime,
2463         .vop_getattr =          hammer2_vop_getattr,
2464         .vop_inactive =         hammer2_vop_inactive,
2465         .vop_reclaim =          hammer2_vop_reclaim,
2466         .vop_setattr =          hammer2_vop_setattr,
2467         .vop_kqfilter =         hammer2_vop_fifokqfilter
2468 };
2469