/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *       to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
                                int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
                                int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;

        vp = ap->a_vp;
        ip = VTOI(vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(vp);
                return (0);
        }

        /*
         * Acquire the inode lock to interlock against vp updates via
         * the inode path and file deletions and such (which can be
         * namespace-only operations that might not hold the vnode).
         */
        hammer2_inode_lock(ip, 0);
        if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
                int nblksize;

                /*
                 * If the inode has been unlinked we can throw away all
                 * buffers (dirty or not) and clean the file out.
                 *
                 * Because vrecycle() calls are not guaranteed, try to
                 * dispose of the inode as much as possible right here.
                 */
                nblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
                nvtruncbuf(vp, 0, nblksize, 0, 0);

                /*
                 * Delete the file on-media.
                 */
                if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
                        atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
                        hammer2_inode_delayed_sideq(ip);
                }
                hammer2_inode_unlock(ip);

                /*
                 * Recycle immediately if possible
                 */
                vrecycle(vp);
        } else {
                hammer2_inode_unlock(ip);
        }
        return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;

        vp = ap->a_vp;
        ip = VTOI(vp);
        if (ip == NULL)
                return(0);

        /*
         * NOTE! We do not attempt to flush chains here, flushing is
         *       really fragile and could also deadlock.
         */
        vclrisdirty(vp);

        /*
         * The inode lock is required to disconnect it.
         */
        hammer2_inode_lock(ip, 0);
        vp->v_data = NULL;
        ip->vp = NULL;

        /*
         * Delete the file on-media.  This should have been handled by the
         * inactivation.  The operation is likely still queued on the inode
         * though so only complain if the stars don't align.
         */
        if ((ip->flags & (HAMMER2_INODE_ISUNLINKED | HAMMER2_INODE_DELETING)) ==
            HAMMER2_INODE_ISUNLINKED)
        {
                atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
                hammer2_inode_delayed_sideq(ip);
                kprintf("hammer2: vp=%p ip=%p unlinked but not disposed\n",
                        vp, ip);
        }
        hammer2_inode_unlock(ip);

        /*
         * Modified inodes will already be on SIDEQ or SYNCQ, no further
         * action is needed.
         *
         * We cannot safely synchronize the inode from inside the reclaim
         * due to potentially deep locks held as-of when the reclaim occurs.
         * Interactions and potential deadlocks abound.  We also can't do it
         * here without desynchronizing from the related directory entries.
         */
        hammer2_inode_drop(ip);                 /* vp ref */

        /*
         * XXX handle background sync when ip dirty, kernel will no longer
         * notify us regarding this inode because there is no longer a
         * vnode attached to it.
         */

        return (0);
}

/*
 * Currently this function synchronizes the front-end inode state to the
 * backend chain topology, then flushes the inode's chain and sub-topology
 * to backend media.  This function does not flush the root topology down to
 * the inode.
 */
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;
        int error1;
        int error2;

        vp = ap->a_vp;
        ip = VTOI(vp);
        error1 = 0;

        hammer2_trans_init(ip->pmp, 0);

        /*
         * Flush dirty buffers in the file's logical buffer cache.
         * It is best to wait for the strategy code to commit the
         * buffers to the device's backing buffer cache before
         * trying to flush the inode.
         *
         * This should be quick, but certain inode modifications cached
         * entirely in the hammer2_inode structure may not trigger a
         * buffer read until the flush so the fsync can wind up also
         * doing scattered reads.
         */
        vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
        bio_track_wait(&vp->v_track_write, 0, 0);

        /*
         * Flush any inode changes
         */
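        /*
         * NOTE: chain_sync() essentially copies the cached front-end
         *       meta-data state (size, mtime, etc) from the hammer2_inode
         *       into its backing chain, so the media-format inode matches
         *       what we have been telling the kernel.
         */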
        hammer2_inode_lock(ip, 0);
        if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
                error1 = hammer2_inode_chain_sync(ip);

        /*
         * Flush dirty chains related to the inode.
         *
         * NOTE! We are not in a flush transaction.  The inode remains on
         *       the sideq so the filesystem syncer can synchronize it to
         *       the volume root.
         */
        error2 = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
        if (error2)
                error1 = error2;

        /*
         * We may be able to clear the vnode dirty flag.
         */
        if ((ip->flags & (HAMMER2_INODE_MODIFIED |
                          HAMMER2_INODE_RESIZED |
                          HAMMER2_INODE_DIRTYDATA)) == 0 &&
            RB_EMPTY(&vp->v_rbdirty_tree) &&
            !bio_track_active(&vp->v_track_write)) {
                vclrisdirty(vp);
        }
        hammer2_inode_unlock(ip);
        hammer2_trans_done(ip->pmp, 0);

        return (error1);
}

/*
 * No lock needed, just handle ip->update
 */
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
        hammer2_inode_t *ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        mode_t mode;
        uint32_t uflags;
        int error;
        int update;

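        /*
         * Take a lock-free snapshot of the fields we need.  The
         * spin_access_start()/spin_access_end() pair works like a
         * sequence lock: if an update to ip->meta races the copy,
         * spin_access_end() returns non-zero and we retry.
         */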
retry:
        update = spin_access_start(&ip->cluster_spin);

        /*hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);*/
        uid = hammer2_to_unix_xid(&ip->meta.uid);
        gid = hammer2_to_unix_xid(&ip->meta.gid);
        mode = ip->meta.mode;
        uflags = ip->meta.uflags;
        /*hammer2_inode_unlock(ip);*/

        if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
                goto retry;

        error = vop_helper_access(ap, uid, gid, mode, uflags);

        return (error);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
        hammer2_pfs_t *pmp;
        hammer2_inode_t *ip;
        struct vnode *vp;
        struct vattr *vap;
        int update;

        vp = ap->a_vp;
        vap = ap->a_vap;

        ip = VTOI(vp);
        pmp = ip->pmp;

retry:
        update = spin_access_start(&ip->cluster_spin);

        vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
        vap->va_fileid = ip->meta.inum;
        vap->va_mode = ip->meta.mode;
        vap->va_nlink = ip->meta.nlinks;
        vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
        vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->meta.size;   /* protected by shared lock */
        vap->va_blocksize = HAMMER2_PBUFSIZE;
        vap->va_flags = ip->meta.uflags;
        hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
        hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
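        /* atime is not maintained by hammer2, report mtime in its place */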
        hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
        vap->va_gen = 1;
        vap->va_bytes = 0;
        if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
                /*
                 * Can't really calculate directory use sans the files under
                 * it, just assume one block for now.
                 */
                vap->va_bytes += HAMMER2_INODE_BYTES;
        } else {
                vap->va_bytes = hammer2_inode_data_count(ip);
        }
        vap->va_type = hammer2_get_vtype(ip->meta.type);
        vap->va_filerev = 0;
        vap->va_uid_uuid = ip->meta.uid;
        vap->va_gid_uuid = ip->meta.gid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
                goto retry;

        return (0);
}

static
int
hammer2_vop_getattr_lite(struct vop_getattr_lite_args *ap)
{
        hammer2_pfs_t *pmp;
        hammer2_inode_t *ip;
        struct vnode *vp;
        struct vattr_lite *lvap;
        int update;

        vp = ap->a_vp;
        lvap = ap->a_lvap;

        ip = VTOI(vp);
        pmp = ip->pmp;

retry:
        update = spin_access_start(&ip->cluster_spin);

#if 0
        vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
        vap->va_fileid = ip->meta.inum;
#endif
        lvap->va_mode = ip->meta.mode;
        lvap->va_nlink = ip->meta.nlinks;
        lvap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
        lvap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
#if 0
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
#endif
        lvap->va_size = ip->meta.size;
#if 0
        vap->va_blocksize = HAMMER2_PBUFSIZE;
#endif
        lvap->va_flags = ip->meta.uflags;
        lvap->va_type = hammer2_get_vtype(ip->meta.type);
#if 0
        vap->va_filerev = 0;
        vap->va_uid_uuid = ip->meta.uid;
        vap->va_gid_uuid = ip->meta.gid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;
#endif

        if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
                goto retry;

        return (0);
}

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;
        struct vattr *vap;
        int error;
        int kflags = 0;
        uint64_t ctime;

        vp = ap->a_vp;
        vap = ap->a_vap;
        hammer2_update_time(&ctime);

        ip = VTOI(vp);

        if (ip->pmp->ronly)
                return (EROFS);

        /*
         * Normally disallow setattr if there is no space, unless we
         * are in emergency mode (might be needed to chflags -R noschg
         * files prior to removal).
         */
        if ((ip->pmp->flags & HAMMER2_PMPF_EMERG) == 0 &&
            hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1) {
                return (ENOSPC);
        }

        hammer2_trans_init(ip->pmp, 0);
        hammer2_inode_lock(ip, 0);
        error = 0;

        if (vap->va_flags != VNOVAL) {
                uint32_t flags;

                flags = ip->meta.uflags;
                error = vop_helper_setattr_flags(&flags, vap->va_flags,
                                     hammer2_to_unix_xid(&ip->meta.uid),
                                     ap->a_cred);
                if (error == 0) {
                        if (ip->meta.uflags != flags) {
                                hammer2_inode_modify(ip);
                                hammer2_spin_lock_update(&ip->cluster_spin);
                                ip->meta.uflags = flags;
                                ip->meta.ctime = ctime;
                                hammer2_spin_unlock_update(&ip->cluster_spin);
                                kflags |= NOTE_ATTRIB;
                        }
                        if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
                                error = 0;
                                goto done;
                        }
                }
                goto done;
        }
        if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto done;
        }
        if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
                mode_t cur_mode = ip->meta.mode;
                uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
                gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
                uuid_t uuid_uid;
                uuid_t uuid_gid;

                error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
                                         ap->a_cred,
                                         &cur_uid, &cur_gid, &cur_mode);
                if (error == 0) {
                        hammer2_guid_to_uuid(&uuid_uid, cur_uid);
                        hammer2_guid_to_uuid(&uuid_gid, cur_gid);
                        if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
                            bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
                            ip->meta.mode != cur_mode
                        ) {
                                hammer2_inode_modify(ip);
                                hammer2_spin_lock_update(&ip->cluster_spin);
                                ip->meta.uid = uuid_uid;
                                ip->meta.gid = uuid_gid;
                                ip->meta.mode = cur_mode;
                                ip->meta.ctime = ctime;
                                hammer2_spin_unlock_update(&ip->cluster_spin);
                        }
                        kflags |= NOTE_ATTRIB;
                }
        }

        /*
         * Resize the file
         */
        if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
                switch(vp->v_type) {
                case VREG:
                        if (vap->va_size == ip->meta.size)
                                break;
                        if (vap->va_size < ip->meta.size) {
                                hammer2_mtx_ex(&ip->truncate_lock);
                                hammer2_truncate_file(ip, vap->va_size);
                                hammer2_mtx_unlock(&ip->truncate_lock);
                                kflags |= NOTE_WRITE;
                        } else {
                                hammer2_extend_file(ip, vap->va_size);
                                kflags |= NOTE_WRITE | NOTE_EXTEND;
                        }
                        hammer2_inode_modify(ip);
                        ip->meta.mtime = ctime;
                        vclrflags(vp, VLASTWRITETS);
                        break;
                default:
                        error = EINVAL;
                        goto done;
                }
        }
#if 0
        /* atime not supported */
        if (vap->va_atime.tv_sec != VNOVAL) {
                hammer2_inode_modify(ip);
                ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
                kflags |= NOTE_ATTRIB;
        }
#endif
        if (vap->va_mode != (mode_t)VNOVAL) {
                mode_t cur_mode = ip->meta.mode;
                uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
                gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

                error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
                                         cur_uid, cur_gid, &cur_mode);
                if (error == 0) {
                        hammer2_inode_modify(ip);
                        hammer2_spin_lock_update(&ip->cluster_spin);
                        ip->meta.mode = cur_mode;
                        ip->meta.ctime = ctime;
                        hammer2_spin_unlock_update(&ip->cluster_spin);
                        kflags |= NOTE_ATTRIB;
                }
        }

        if (vap->va_mtime.tv_sec != VNOVAL) {
                hammer2_inode_modify(ip);
                ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
                kflags |= NOTE_ATTRIB;
                vclrflags(vp, VLASTWRITETS);
        }

done:
        /*
         * If a truncation occurred we must call chain_sync() now in order
         * to trim the related data chains, otherwise a later expansion can
         * cause havoc.
         *
         * If an extend occurred that changed the DIRECTDATA state, we must
         * call inode_chain_sync now in order to prepare the inode's indirect
         * block table.
         *
         * WARNING! This means we are making an adjustment to the inode's
         * chain outside of sync/fsync, and not just to inode->meta, which
         * may result in some consistency issues if a crash were to occur
         * at just the wrong time.
         */
        if (ip->flags & HAMMER2_INODE_RESIZED)
                hammer2_inode_chain_sync(ip);

        /*
         * Cleanup.
         */
        hammer2_inode_unlock(ip);
        hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
        hammer2_knote(ip->vp, kflags);

        return (error);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
        hammer2_xop_readdir_t *xop;
        hammer2_blockref_t bref;
        hammer2_inode_t *ip;
        hammer2_tid_t inum;
        hammer2_key_t lkey;
        struct uio *uio;
        off_t *cookies;
        off_t saveoff;
        int cookie_index;
        int ncookies;
        int error;
        int eofflag;
        int r;

        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;
        eofflag = 0;
        error = 0;

        /*
         * Set up directory entry cookies if requested
         */
        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
        } else {
                ncookies = -1;
                cookies = NULL;
        }
        cookie_index = 0;

        hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

        /*
         * Handle artificial entries.  To ensure that only positive 64 bit
         * quantities are returned to userland we always strip off bit 63.
         * The hash code is designed such that codes 0x0000-0x7FFF are not
         * used, allowing us to use these codes for artificial entries.
         *
         * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
         * allow '..' to cross the mount point into (e.g.) the super-root.
         */
        if (saveoff == 0) {
                inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
                r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        if (saveoff == 1) {
                inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
                if (ip != ip->pmp->iroot)
                        inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
                r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

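        /*
         * Resume the scan at the saved offset.  The offset doubles as the
         * directory hash key; re-setting DIRHASH_VISIBLE (bit 63, stripped
         * above when returned to userland) moves it back into the key
         * space used for real directory entries.
         */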
        lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
        if (hammer2_debug & 0x0020)
                kprintf("readdir: lkey %016jx\n", lkey);
        if (error)
                goto done;

        xop = hammer2_xop_alloc(ip, 0);
        xop->lkey = lkey;
        hammer2_xop_start(&xop->head, &hammer2_readdir_desc);

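        /*
         * The XOP runs in backend threads against each cluster node;
         * hammer2_xop_collect() returns the merged results one entry at
         * a time until the scan terminates (ENOENT).
         */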
        for (;;) {
                const hammer2_inode_data_t *ripdata;
                const char *dname;
                int dtype;

                error = hammer2_xop_collect(&xop->head, 0);
                error = hammer2_error_to_errno(error);
                if (error) {
                        break;
                }
                if (cookie_index == ncookies)
                        break;
                if (hammer2_debug & 0x0020)
                        kprintf("cluster chain %p %p\n",
                                xop->head.cluster.focus,
                                (xop->head.cluster.focus ?
                                 xop->head.cluster.focus->data : (void *)-1));
                hammer2_cluster_bref(&xop->head.cluster, &bref);

                if (bref.type == HAMMER2_BREF_TYPE_INODE) {
                        ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
                        dtype = hammer2_get_dtype(ripdata->meta.type);
                        saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
                        r = vop_write_dirent(&error, uio,
                                             ripdata->meta.inum &
                                              HAMMER2_DIRHASH_USERMSK,
                                             dtype,
                                             ripdata->meta.name_len,
                                             ripdata->filename);
                        hammer2_xop_pdata(&xop->head);
                        if (r)
                                break;
                        if (cookies)
                                cookies[cookie_index] = saveoff;
                        ++cookie_index;
                } else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
                        uint16_t namlen;

                        dtype = hammer2_get_dtype(bref.embed.dirent.type);
                        saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
                        namlen = bref.embed.dirent.namlen;
                        if (namlen <= sizeof(bref.check.buf)) {
                                dname = bref.check.buf;
                        } else {
                                dname = hammer2_xop_gdata(&xop->head)->buf;
                        }
                        r = vop_write_dirent(&error, uio,
                                             bref.embed.dirent.inum, dtype,
                                             namlen, dname);
                        if (namlen > sizeof(bref.check.buf))
                                hammer2_xop_pdata(&xop->head);
                        if (r)
                                break;
                        if (cookies)
                                cookies[cookie_index] = saveoff;
                        ++cookie_index;
                } else {
                        /* XXX chain error */
                        kprintf("bad chain type readdir %d\n", bref.type);
                }
        }
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        if (error == ENOENT) {
                error = 0;
                eofflag = 1;
                saveoff = (hammer2_key_t)-1;
        } else {
                saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
        }
done:
        hammer2_inode_unlock(ip);
        if (ap->a_eofflag)
                *ap->a_eofflag = eofflag;
        if (hammer2_debug & 0x0020)
                kprintf("readdir: done at %016jx\n", saveoff);
        uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
        if (error && cookie_index == 0) {
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
        struct vnode *vp;
        hammer2_inode_t *ip;
        int error;

        vp = ap->a_vp;
        if (vp->v_type != VLNK)
                return (EINVAL);
        ip = VTOI(vp);

        error = hammer2_read_file(ip, ap->a_uio, 0);
        return (error);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
        struct vnode *vp;
        hammer2_inode_t *ip;
        struct uio *uio;
        int error;
        int seqcount;

        /*
         * Read operations supported on this vnode?
         */
        vp = ap->a_vp;
        if (vp->v_type == VDIR)
                return (EISDIR);
        if (vp->v_type != VREG)
                return (EINVAL);

        /*
         * Misc
         */
        ip = VTOI(vp);
        uio = ap->a_uio;
        error = 0;

        seqcount = ap->a_ioflag >> IO_SEQSHIFT;

        error = hammer2_read_file(ip, uio, seqcount);
        return (error);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
        hammer2_inode_t *ip;
        thread_t td;
        struct vnode *vp;
        struct uio *uio;
        int error;
        int seqcount;
        int ioflag;

        /*
         * Write operations supported on this vnode?
         */
        vp = ap->a_vp;
        if (vp->v_type != VREG)
                return (EINVAL);

        /*
         * Misc
         */
        ip = VTOI(vp);
        ioflag = ap->a_ioflag;
        uio = ap->a_uio;
        error = 0;
        if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
                return (EROFS);
        switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
        case 2:
                return (ENOSPC);
        case 1:
                ioflag |= IO_DIRECT;    /* semi-synchronous */
                /* fall through */
        default:
                break;
        }

        seqcount = ioflag >> IO_SEQSHIFT;

        /*
         * Check resource limit
         */
        if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
            uio->uio_offset + uio->uio_resid >
             td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
                return (EFBIG);
        }

        /*
         * The transaction interlocks against flush initiations
         * (note: but will run concurrently with the actual flush).
         *
         * To avoid deadlocking against the VM system, we must flag any
         * transaction related to the buffer cache or other direct
         * VM page manipulation.
         */
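        /*
         * UIO_NOCOPY writes are sourced from the buffer cache pages
         * themselves (e.g. pageouts via vop_stdputpages()), so flag the
         * transaction accordingly.
         */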
        if (uio->uio_segflg == UIO_NOCOPY) {
                hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
        } else {
                hammer2_trans_init(ip->pmp, 0);
        }
        error = hammer2_write_file(ip, uio, ioflag, seqcount);
        if (uio->uio_segflg == UIO_NOCOPY)
                hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE |
                                            HAMMER2_TRANS_SIDEQ);
        else
                hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);

        return (error);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio (the passed ip is not locked).
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
        hammer2_off_t size;
        struct buf *bp;
        int error;

        error = 0;

        /*
         * UIO read loop.
         *
         * WARNING! Assumes that the kernel interlocks size changes at the
         *          vnode level.
         */
        hammer2_mtx_sh(&ip->lock);
        hammer2_mtx_sh(&ip->truncate_lock);
        size = ip->meta.size;
        hammer2_mtx_unlock(&ip->lock);

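        /*
         * Each pass computes the logical block underlying uio_offset,
         * cluster-reads it (with read-ahead scaled by seqcount), then
         * copies out the portion intersecting the request and the EOF
         * snapshot taken above.
         */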
        while (uio->uio_resid > 0 && uio->uio_offset < size) {
                hammer2_key_t lbase;
                hammer2_key_t leof;
                int lblksize;
                int loff;
                int n;

                lblksize = hammer2_calc_logical(ip, uio->uio_offset,
                                                &lbase, &leof);

#if 1
                bp = NULL;
                error = cluster_readx(ip->vp, leof, lbase, lblksize,
                                      B_NOTMETA | B_KVABIO,
                                      uio->uio_resid,
                                      seqcount * MAXBSIZE,
                                      &bp);
#else
                if (uio->uio_segflg == UIO_NOCOPY) {
                        bp = getblk(ip->vp, lbase, lblksize,
                                    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
                        if (bp->b_flags & B_CACHE) {
                                int i;
                                int j = 0;
                                if (bp->b_xio.xio_npages != 16)
                                        kprintf("NPAGES BAD\n");
                                for (i = 0; i < bp->b_xio.xio_npages; ++i) {
                                        vm_page_t m;
                                        m = bp->b_xio.xio_pages[i];
                                        if (m == NULL || m->valid == 0) {
                                                kprintf("bp %016jx %016jx pg %d inv",
                                                        lbase, leof, i);
                                                if (m)
                                                        kprintf("m->object %p/%p", m->object, ip->vp->v_object);
                                                kprintf("\n");
                                                j = 1;
                                        }
                                }
                                if (j)
                                        kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
                        }
                        bqrelse(bp);
                }
                error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
#endif
                if (error) {
                        brelse(bp);
                        break;
                }
                bkvasync(bp);
                loff = (int)(uio->uio_offset - lbase);
                n = lblksize - loff;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > size - uio->uio_offset)
                        n = (int)(size - uio->uio_offset);
                bp->b_flags |= B_AGE;
                uiomovebp(bp, bp->b_data + loff, n, uio);
                bqrelse(bp);
        }
        hammer2_mtx_unlock(&ip->truncate_lock);

        return (error);
}

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
                   int ioflag, int seqcount)
{
        hammer2_key_t old_eof;
        hammer2_key_t new_eof;
        struct buf *bp;
        int kflags;
        int error;
        int modified;

        /*
         * Setup if append
         *
         * WARNING! Assumes that the kernel interlocks size changes at the
         *          vnode level.
         */
        hammer2_mtx_ex(&ip->lock);
        hammer2_mtx_sh(&ip->truncate_lock);
        if (ioflag & IO_APPEND)
                uio->uio_offset = ip->meta.size;
        old_eof = ip->meta.size;

        /*
         * Extend the file if necessary.  If the write fails at some point
         * we will truncate it back down to cover as much as we were able
         * to write.
         *
         * Doing this now makes it easier to calculate buffer sizes in
         * the loop.
         */
        kflags = 0;
        error = 0;
        modified = 0;

        if (uio->uio_offset + uio->uio_resid > old_eof) {
                new_eof = uio->uio_offset + uio->uio_resid;
                modified = 1;
                hammer2_extend_file(ip, new_eof);
                kflags |= NOTE_EXTEND;
        } else {
                new_eof = old_eof;
        }
        hammer2_mtx_unlock(&ip->lock);

        /*
         * UIO write loop
         */
        while (uio->uio_resid > 0) {
                hammer2_key_t lbase;
                int trivial;
                int endofblk;
                int lblksize;
                int loff;
                int n;

                /*
                 * Don't allow the buffer build to blow out the buffer
                 * cache.
                 */
                if ((ioflag & IO_RECURSE) == 0)
                        bwillwrite(HAMMER2_PBUFSIZE);

                /*
                 * This nominally tells us how much we can cluster and
                 * what the logical buffer size needs to be.  Currently
                 * we don't try to cluster the write and just handle one
                 * block at a time.
                 */
                lblksize = hammer2_calc_logical(ip, uio->uio_offset,
                                                &lbase, NULL);
                loff = (int)(uio->uio_offset - lbase);

                KKASSERT(lblksize <= MAXBSIZE);

                /*
                 * Calculate bytes to copy this transfer and whether the
                 * copy completely covers the buffer or not.
                 */
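                /*
                 * trivial != 0 means no read-modify-write is needed because
                 * the copy (or the new EOF) covers the entire buffer;
                 * endofblk != 0 means the copy runs through the end of the
                 * logical block.
                 */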
                trivial = 0;
                n = lblksize - loff;
                if (n > uio->uio_resid) {
                        n = uio->uio_resid;
                        if (loff == lbase && uio->uio_offset + n == new_eof)
                                trivial = 1;
                        endofblk = 0;
                } else {
                        if (loff == 0)
                                trivial = 1;
                        endofblk = 1;
                }
                if (lbase >= new_eof)
                        trivial = 1;

                /*
                 * Get the buffer
                 */
                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ip->vp, lbase, lblksize,
                                    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread_kvabio(ip->vp, lbase,
                                                     lblksize, &bp);
                        }
                } else if (trivial) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ip->vp, lbase, lblksize,
                                    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         *
                         * (The strategy code will detect zero-fill physical
                         * blocks for this case).
                         */
                        error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }

                if (error) {
                        brelse(bp);
                        break;
                }

                /*
                 * Ok, copy the data in
                 */
                bkvasync(bp);
                error = uiomovebp(bp, bp->b_data + loff, n, uio);
                kflags |= NOTE_WRITE;
                modified = 1;
                if (error) {
                        brelse(bp);
                        break;
                }

                /*
                 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
                 *          with IO_SYNC or IO_ASYNC set.  These writes
                 *          must be handled as the pageout daemon expects.
                 *
                 * NOTE!    H2 relies on cluster_write() here because it
                 *          cannot preallocate disk blocks at the logical
                 *          level due to not knowing what the compression
                 *          size will be at this time.
                 *
                 *          We must use cluster_write() here and we depend
                 *          on the write-behind feature to flush buffers
                 *          appropriately.  If we let the buffer daemons do
                 *          it the block allocations will be all over the
                 *          map.
                 */
                if (ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if ((ioflag & IO_DIRECT) && endofblk) {
                        bawrite(bp);
                } else if (ioflag & IO_ASYNC) {
                        bawrite(bp);
                } else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
                        bdwrite(bp);
                } else {
#if 1
                        bp->b_flags |= B_CLUSTEROK;
                        cluster_write(bp, new_eof, lblksize, seqcount);
#else
                        bp->b_flags |= B_CLUSTEROK;
                        bdwrite(bp);
#endif
                }
        }

        /*
         * Cleanup.  If we extended the file EOF but failed to write through,
         * the entire write is a failure and we have to back up.
         */
        if (error && new_eof != old_eof) {
                hammer2_mtx_unlock(&ip->truncate_lock);
                hammer2_mtx_ex(&ip->lock);              /* note lock order */
                hammer2_mtx_ex(&ip->truncate_lock);     /* note lock order */
                hammer2_truncate_file(ip, old_eof);
                if (ip->flags & HAMMER2_INODE_MODIFIED)
                        hammer2_inode_chain_sync(ip);
                hammer2_mtx_unlock(&ip->lock);
        } else if (modified) {
                struct vnode *vp = ip->vp;

                hammer2_mtx_ex(&ip->lock);
                hammer2_inode_modify(ip);
                if (uio->uio_segflg == UIO_NOCOPY) {
                        if (vp->v_flag & VLASTWRITETS) {
                                ip->meta.mtime =
                                    (unsigned long)vp->v_lastwrite_ts.tv_sec *
                                    1000000 +
                                    vp->v_lastwrite_ts.tv_nsec / 1000;
                        }
                } else {
                        hammer2_update_time(&ip->meta.mtime);
                        vclrflags(vp, VLASTWRITETS);
                }

#if 0
                /*
                 * REMOVED - handled by hammer2_extend_file().  Do not issue
                 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
                 * state changes.
                 *
                 * Under normal conditions we only issue a chain_sync if
                 * the inode's DIRECTDATA state changed.
                 */
                if (ip->flags & HAMMER2_INODE_RESIZED)
                        hammer2_inode_chain_sync(ip);
#endif
                hammer2_mtx_unlock(&ip->lock);
                hammer2_knote(ip->vp, kflags);
        }
        hammer2_trans_assert_strategy(ip->pmp);
        hammer2_mtx_unlock(&ip->truncate_lock);

        return error;
}

/*
 * Truncate the size of a file.  The inode must be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *          held due to the way our write thread works.  If the truncation
 *          occurs in the middle of a buffer, nvtruncbuf() is responsible
 *          for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *          vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *          if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
        int nblksize;

        hammer2_mtx_unlock(&ip->lock);
        if (ip->vp) {
                nblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
                nvtruncbuf(ip->vp, nsize,
                           nblksize, (int)nsize & (nblksize - 1),
                           0);
        }
        hammer2_mtx_ex(&ip->lock);
        KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
        ip->osize = ip->meta.size;
        ip->meta.size = nsize;
        atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
        hammer2_inode_modify(ip);
}

/*
 * Extend the size of a file.  The inode must be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *          vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *          of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
        hammer2_key_t osize;
        int oblksize;
        int nblksize;
        int error;

        KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
        hammer2_inode_modify(ip);
        osize = ip->meta.size;
        ip->osize = osize;
        ip->meta.size = nsize;

        /*
         * We must issue a chain_sync() when the DIRECTDATA state changes
         * to prevent confusion between the flush code and the in-memory
         * state.  This is not perfect because we are doing it outside of
         * a sync/fsync operation, so it might not be fully synchronized
         * with the meta-data topology flush.
         *
         * We must retain and re-dirty the buffer cache buffer containing
         * the direct data so it can be written to a real block.  It should
         * not be possible for a bread error to occur since the original data
         * is extracted from the inode structure directly.
         */
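        /*
         * (Small files live in the inode's embedded data area, DIRECTDATA
         * mode, up to HAMMER2_EMBEDDED_BYTES.  Growing past that boundary
         * converts the inode to a blockref table, hence the immediate
         * chain_sync() below.)
         */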
        if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
                if (osize) {
                        struct buf *bp;

                        oblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
                        error = bread_kvabio(ip->vp, 0, oblksize, &bp);
                        atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
                        hammer2_inode_chain_sync(ip);
                        if (error == 0) {
                                bheavy(bp);
                                bdwrite(bp);
                        } else {
                                brelse(bp);
                        }
                } else {
                        atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
                        hammer2_inode_chain_sync(ip);
                }
        }
        hammer2_mtx_unlock(&ip->lock);
        if (ip->vp) {
                oblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
                nblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
                nvextendbuf(ip->vp,
                            osize, nsize,
                            oblksize, nblksize,
                            -1, -1, 0);
        }
        hammer2_mtx_ex(&ip->lock);
}

static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
        hammer2_xop_nresolve_t *xop;
        hammer2_inode_t *ip;
        hammer2_inode_t *dip;
        struct namecache *ncp;
        struct vnode *vp;
        int error;

        dip = VTOI(ap->a_dvp);
        xop = hammer2_xop_alloc(dip, 0);

        ncp = ap->a_nch->ncp;
        hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

        /*
         * Note: In DragonFly the kernel handles '.' and '..'.
         */
        hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
        hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);

        error = hammer2_xop_collect(&xop->head, 0);
        error = hammer2_error_to_errno(error);
        if (error) {
                ip = NULL;
        } else {
                ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
        }
        hammer2_inode_unlock(dip);

        /*
         * Acquire the related vnode
         *
         * NOTE: For error processing, only ENOENT resolves the namecache
         *       entry to NULL, otherwise we just return the error and
         *       leave the namecache unresolved.
         *
         * WARNING: inode structure is locked exclusively via inode_get
         *          but chain was locked shared.  inode_unlock()
         *          will handle it properly.
         */
        if (ip) {
                vp = hammer2_igetv(ip, &error); /* error set to UNIX error */
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                } else if (error == ENOENT) {
                        cache_setvp(ap->a_nch, NULL);
                }
                hammer2_inode_unlock(ip);

                /*
                 * The vp should not be released until after we've disposed
                 * of our locks, because it might cause vop_inactive() to
                 * be called.
                 */
                if (vp)
                        vrele(vp);
        } else {
                error = ENOENT;
                cache_setvp(ap->a_nch, NULL);
        }
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
                ("resolve error %d/%p ap %p\n",
                 error, ap->a_nch->ncp->nc_vp, ap));

        return error;
}

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_tid_t inum;
        int error;

        dip = VTOI(ap->a_dvp);
        inum = dip->meta.iparent;
        *ap->a_vpp = NULL;

        if (inum) {
                error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
                                         inum, ap->a_vpp);
        } else {
                error = ENOENT;
        }
        return error;
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        struct namecache *ncp;
        const char *name;
        size_t name_len;
        hammer2_tid_t inum;
        int error;

        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
                return (EROFS);
        if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
                return (ENOSPC);

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;

        hammer2_trans_init(dip->pmp, 0);

        inum = hammer2_trans_newinum(dip->pmp);

        /*
         * Create the directory as an inode and then create the directory
         * entry.
         *
         * dip must be locked before nip to avoid deadlock.
         */
        hammer2_inode_lock(dip, 0);
        nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
                                          inum, &error);
        if (error) {
                error = hammer2_error_to_errno(error);
        } else {
                error = hammer2_dirent_create(dip, name, name_len,
                                              nip->meta.inum, nip->meta.type);
                /* returns UNIX error code */
        }
        if (error) {
                if (nip) {
                        hammer2_inode_unlink_finisher(nip, NULL);
                        hammer2_inode_unlock(nip);
                        nip = NULL;
                }
                *ap->a_vpp = NULL;
        } else {
                /*
                 * inode_depend() must occur before the igetv() because
                 * the igetv() can temporarily release the inode lock.
                 */
                hammer2_inode_depend(dip, nip); /* before igetv */
                *ap->a_vpp = hammer2_igetv(nip, &error);
                hammer2_inode_unlock(nip);
        }

        /*
         * Update dip's mtime
         *
         * We can use a shared inode lock and allow the meta.mtime update
         * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
         */
        if (error == 0) {
                uint64_t mtime;

                /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
                hammer2_update_time(&mtime);
                hammer2_inode_modify(dip);
                dip->meta.mtime = mtime;
                /*hammer2_inode_unlock(dip);*/
        }
        hammer2_inode_unlock(dip);

        hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
                hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
        }
        return error;
}
1505
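/*
 * Standard open; vop_stdopen() handles the generic bookkeeping.
 */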
1506 static
1507 int
1508 hammer2_vop_open(struct vop_open_args *ap)
1509 {
1510         return vop_stdopen(ap);
1511 }
1512
1513 /*
1514  * hammer2_vop_advlock { vp, id, op, fl, flags }
1515  */
1516 static
1517 int
1518 hammer2_vop_advlock(struct vop_advlock_args *ap)
1519 {
1520         hammer2_inode_t *ip = VTOI(ap->a_vp);
1521         hammer2_off_t size;
1522
1523         size = ip->meta.size;
1524         return (lf_advlock(ap, &ip->advlock, size));
1525 }
1526
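/*
 * Standard close; nothing hammer2-specific to do here.
 */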
1527 static
1528 int
1529 hammer2_vop_close(struct vop_close_args *ap)
1530 {
1531         return vop_stdclose(ap);
1532 }
1533
1534 /*
1535  * hammer2_vop_nlink { nch, dvp, vp, cred }
1536  *
1537  * Create a hardlink from (vp) to {dvp, nch}.
1538  */
1539 static
1540 int
1541 hammer2_vop_nlink(struct vop_nlink_args *ap)
1542 {
1543         hammer2_inode_t *tdip;  /* target directory to create link in */
1544         hammer2_inode_t *ip;    /* inode we are hardlinking to */
1545         struct namecache *ncp;
1546         const char *name;
1547         size_t name_len;
1548         int error;
1549         uint64_t cmtime;
1550
1551         if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1552                 return(EXDEV);
1553
1554         tdip = VTOI(ap->a_dvp);
1555         if (tdip->pmp->ronly || (tdip->pmp->flags & HAMMER2_PMPF_EMERG))
1556                 return (EROFS);
1557         if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
1558                 return (ENOSPC);
1559
1560         ncp = ap->a_nch->ncp;
1561         name = ncp->nc_name;
1562         name_len = ncp->nc_nlen;
1563
1564         /*
1565          * ip represents the file being hardlinked.  The file could be a
1566          * normal file or a hardlink target if it has already been hardlinked.
1567          * (With the new semantics, it will almost always be a hardlink
1568          * target.)
1569          *
1570          * Bump nlinks and potentially also create or move the hardlink
1571          * target in the parent directory common to (ip) and (tdip).  The
1572          * consolidation code can modify ip->cluster.  The returned cluster
1573          * is locked.
1574          */
1575         ip = VTOI(ap->a_vp);
1576         KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
1577         hammer2_trans_init(ip->pmp, 0);
1578
1579         /*
1580          * Target should be an indexed inode or there's no way we will ever
1581          * be able to find it!
1582          */
1583         KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1584
1585         hammer2_inode_lock4(tdip, ip, NULL, NULL);
1586
1587         hammer2_update_time(&cmtime);
1588
1589         /*
1590          * Create the directory entry and bump nlinks.
1591          * Also update ip's ctime.
1592          */
1593         error = hammer2_dirent_create(tdip, name, name_len,
1594                                       ip->meta.inum, ip->meta.type);
1595         hammer2_inode_modify(ip);
1596         ++ip->meta.nlinks;
1597         ip->meta.ctime = cmtime;
1598         if (error == 0) {
1599                 /*
1600                  * Update tdip's [cm]time
1601                  */
1602                 hammer2_inode_modify(tdip);
1603                 tdip->meta.mtime = cmtime;
1604                 tdip->meta.ctime = cmtime;
1605
1606                 cache_setunresolved(ap->a_nch);
1607                 cache_setvp(ap->a_nch, ap->a_vp);
1608         }
1609         hammer2_inode_unlock(ip);
1610         hammer2_inode_unlock(tdip);
1611
1612         hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
1613         hammer2_knote(ap->a_vp, NOTE_LINK);
1614         hammer2_knote(ap->a_dvp, NOTE_WRITE);
1615
1616         return error;
1617 }
1618
1619 /*
1620  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1621  *
1622  * The operating system has already ensured that the directory entry
1623  * does not exist and done all appropriate namespace locking.
1624  */
1625 static
1626 int
1627 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1628 {
1629         hammer2_inode_t *dip;
1630         hammer2_inode_t *nip;
1631         struct namecache *ncp;
1632         const char *name;
1633         size_t name_len;
1634         hammer2_tid_t inum;
1635         int error;
1636
1637         dip = VTOI(ap->a_dvp);
1638         if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
1639                 return (EROFS);
1640         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1641                 return (ENOSPC);
1642
1643         ncp = ap->a_nch->ncp;
1644         name = ncp->nc_name;
1645         name_len = ncp->nc_nlen;
1646         hammer2_trans_init(dip->pmp, 0);
1647
1648         inum = hammer2_trans_newinum(dip->pmp);
1649
1650         /*
1651          * Create the regular file as an inode and then create the directory
1652          * entry.
1653          *
1654          * dip must be locked before nip to avoid deadlock.
1655          */
1656         hammer2_inode_lock(dip, 0);
1657         nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1658                                           inum, &error);
1659         if (error) {
1660                 error = hammer2_error_to_errno(error);
1661         } else {
1662                 error = hammer2_dirent_create(dip, name, name_len,
1663                                               nip->meta.inum, nip->meta.type);
1664         }
1665         if (error) {
1666                 if (nip) {
1667                         hammer2_inode_unlink_finisher(nip, NULL);
1668                         hammer2_inode_unlock(nip);
1669                         nip = NULL;
1670                 }
1671                 *ap->a_vpp = NULL;
1672         } else {
1673                 hammer2_inode_depend(dip, nip); /* before igetv */
1674                 *ap->a_vpp = hammer2_igetv(nip, &error);
1675                 hammer2_inode_unlock(nip);
1676         }
1677
1678         /*
1679          * Update dip's mtime
1680          */
1681         if (error == 0) {
1682                 uint64_t mtime;
1683
1684                 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
1685                 hammer2_update_time(&mtime);
1686                 hammer2_inode_modify(dip);
1687                 dip->meta.mtime = mtime;
1688                 /*hammer2_inode_unlock(dip);*/
1689         }
1690         hammer2_inode_unlock(dip);
1691
1692         hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1693
1694         if (error == 0) {
1695                 cache_setunresolved(ap->a_nch);
1696                 cache_setvp(ap->a_nch, *ap->a_vpp);
1697                 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1698         }
1699         return error;
1700 }
1701
1702 /*
1703  * Make a device node (typically a fifo)
1704  */
1705 static
1706 int
1707 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1708 {
1709         hammer2_inode_t *dip;
1710         hammer2_inode_t *nip;
1711         struct namecache *ncp;
1712         const char *name;
1713         size_t name_len;
1714         hammer2_tid_t inum;
1715         int error;
1716
1717         dip = VTOI(ap->a_dvp);
1718         if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
1719                 return (EROFS);
1720         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1721                 return (ENOSPC);
1722
1723         ncp = ap->a_nch->ncp;
1724         name = ncp->nc_name;
1725         name_len = ncp->nc_nlen;
1726         hammer2_trans_init(dip->pmp, 0);
1727
1728         /*
1729          * Create the device inode and then create the directory entry.
1730          *
1731          * dip must be locked before nip to avoid deadlock.
1732          */
1733         inum = hammer2_trans_newinum(dip->pmp);
1734
1735         hammer2_inode_lock(dip, 0);
1736         nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1737                                           inum, &error);
1738         if (error) {
1739                 error = hammer2_error_to_errno(error);
1740         } else {
1741                 error = hammer2_dirent_create(dip, name, name_len,
1742                                               nip->meta.inum, nip->meta.type);
1743         }
1744         if (error) {
1745                 if (nip) {
1746                         hammer2_inode_unlink_finisher(nip, NULL);
1747                         hammer2_inode_unlock(nip);
1748                         nip = NULL;
1749                 }
1750                 *ap->a_vpp = NULL;
1751         } else {
1752                 hammer2_inode_depend(dip, nip); /* before igetv */
1753                 *ap->a_vpp = hammer2_igetv(nip, &error);
1754                 hammer2_inode_unlock(nip);
1755         }
1756
1757         /*
1758          * Update dip's mtime
1759          */
1760         if (error == 0) {
1761                 uint64_t mtime;
1762
1763                 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
1764                 hammer2_update_time(&mtime);
1765                 hammer2_inode_modify(dip);
1766                 dip->meta.mtime = mtime;
1767                 /*hammer2_inode_unlock(dip);*/
1768         }
1769         hammer2_inode_unlock(dip);
1770
1771         hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1772
1773         if (error == 0) {
1774                 cache_setunresolved(ap->a_nch);
1775                 cache_setvp(ap->a_nch, *ap->a_vpp);
1776                 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1777         }
1778         return error;
1779 }
1780
1781 /*
1782  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1783  */
1784 static
1785 int
1786 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1787 {
1788         hammer2_inode_t *dip;
1789         hammer2_inode_t *nip;
1790         struct namecache *ncp;
1791         const char *name;
1792         size_t name_len;
1793         hammer2_tid_t inum;
1794         int error;
1795
1796         dip = VTOI(ap->a_dvp);
1797         if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
1798                 return (EROFS);
1799         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1800                 return (ENOSPC);
1801
1802         ncp = ap->a_nch->ncp;
1803         name = ncp->nc_name;
1804         name_len = ncp->nc_nlen;
1805         hammer2_trans_init(dip->pmp, 0);
1806
1807         ap->a_vap->va_type = VLNK;      /* enforce type */
1808
1809         /*
1810          * Create the softlink as an inode and then create the directory
1811          * entry.
1812          *
1813          * dip must be locked before nip to avoid deadlock.
1814          */
1815         inum = hammer2_trans_newinum(dip->pmp);
1816
1817         hammer2_inode_lock(dip, 0);
1818         nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1819                                           inum, &error);
1820         if (error) {
1821                 error = hammer2_error_to_errno(error);
1822         } else {
1823                 error = hammer2_dirent_create(dip, name, name_len,
1824                                               nip->meta.inum, nip->meta.type);
1825         }
1826         if (error) {
1827                 if (nip) {
1828                         hammer2_inode_unlink_finisher(nip, NULL);
1829                         hammer2_inode_unlock(nip);
1830                         nip = NULL;
1831                 }
1832                 *ap->a_vpp = NULL;
1833                 hammer2_inode_unlock(dip);
1834                 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1835                 return error;
1836         }
1837         hammer2_inode_depend(dip, nip); /* before igetv */
1838         *ap->a_vpp = hammer2_igetv(nip, &error);
1839
1840         /*
1841          * Build the softlink (~like file data); the namecache is finalized below.
1842          */
1843         if (error == 0) {
1844                 size_t bytes;
1845                 struct uio auio;
1846                 struct iovec aiov;
1847
1848                 bytes = strlen(ap->a_target);
1849
1850                 hammer2_inode_unlock(nip);
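                /*
                 * Construct a kernel-space uio describing the target
                 * path and store it in the new inode as ordinary file
                 * data.
                 */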
1851                 bzero(&auio, sizeof(auio));
1852                 bzero(&aiov, sizeof(aiov));
1853                 auio.uio_iov = &aiov;
1854                 auio.uio_segflg = UIO_SYSSPACE;
1855                 auio.uio_rw = UIO_WRITE;
1856                 auio.uio_resid = bytes;
1857                 auio.uio_iovcnt = 1;
1858                 auio.uio_td = curthread;
1859                 aiov.iov_base = ap->a_target;
1860                 aiov.iov_len = bytes;
1861                 error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1862                 /* XXX handle error */
1863                 error = 0;
1864         } else {
1865                 hammer2_inode_unlock(nip);
1866         }
1867
1868         /*
1869          * Update dip's mtime
1870          */
1871         if (error == 0) {
1872                 uint64_t mtime;
1873
1874                 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
1875                 hammer2_update_time(&mtime);
1876                 hammer2_inode_modify(dip);
1877                 dip->meta.mtime = mtime;
1878                 /*hammer2_inode_unlock(dip);*/
1879         }
1880         hammer2_inode_unlock(dip);
1881
1882         hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1883
1884         /*
1885          * Finalize namecache
1886          */
1887         if (error == 0) {
1888                 cache_setunresolved(ap->a_nch);
1889                 cache_setvp(ap->a_nch, *ap->a_vpp);
1890                 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1891         }
1892         return error;
1893 }
1894
1895 /*
1896  * hammer2_vop_nremove { nch, dvp, cred }
1897  */
1898 static
1899 int
1900 hammer2_vop_nremove(struct vop_nremove_args *ap)
1901 {
1902         hammer2_xop_unlink_t *xop;
1903         hammer2_inode_t *dip;
1904         hammer2_inode_t *ip;
1905         struct vnode *vprecycle;
1906         struct namecache *ncp;
1907         int error;
1908
1909         dip = VTOI(ap->a_dvp);
1910         if (dip->pmp->ronly)
1911                 return (EROFS);
1912 #if 0
1913         /* allow removals, but expect the user to also run bulkfree */
1914         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1915                 return (ENOSPC);
1916 #endif
1917
1918         ncp = ap->a_nch->ncp;
1919
1920         if (hammer2_debug_inode && dip->meta.inum == hammer2_debug_inode) {
1921                 kprintf("hammer2: attempt to delete inside debug inode: %s\n",
1922                         ncp->nc_name);
1923                 while (hammer2_debug_inode &&
1924                        dip->meta.inum == hammer2_debug_inode) {
1925                         tsleep(&hammer2_debug_inode, 0, "h2debug", hz*5);
1926                 }
1927         }
1928
1929         hammer2_trans_init(dip->pmp, 0);
1930         hammer2_inode_lock(dip, 0);
1931
1932         /*
1933          * The unlink XOP unlinks the path from the directory and
1934          * locates and returns the cluster associated with the real inode.
1935          * We have to handle nlinks here on the frontend.
1936          */
1937         xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1938         hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1939
1940         xop->isdir = 0;
1941         xop->dopermanent = 0;
1942         hammer2_xop_start(&xop->head, &hammer2_unlink_desc);
1943
1944         /*
1945          * Collect the real inode and adjust nlinks; destroy the real
1946          * inode if nlinks transitions to 0 and it was the real inode
1947          * (else it has already been removed).
1948          */
1949         error = hammer2_xop_collect(&xop->head, 0);
1950         error = hammer2_error_to_errno(error);
1951         vprecycle = NULL;
1952
1953         if (error == 0) {
1954                 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
1955                 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1956                 if (ip) {
1957                         if (hammer2_debug_inode &&
1958                             ip->meta.inum == hammer2_debug_inode) {
1959                                 kprintf("hammer2: attempt to delete debug "
1960                                         "inode!\n");
1961                                 while (hammer2_debug_inode &&
1962                                        ip->meta.inum == hammer2_debug_inode) {
1963                                         tsleep(&hammer2_debug_inode, 0,
1964                                                "h2debug", hz*5);
1965                                 }
1966                         }
1967                         hammer2_inode_unlink_finisher(ip, &vprecycle);
1968                         hammer2_inode_depend(dip, ip); /* after modified */
1969                         hammer2_inode_unlock(ip);
1970                 }
1971         } else {
1972                 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1973         }
1974
1975         /*
1976          * Update dip's mtime
1977          */
1978         if (error == 0) {
1979                 uint64_t mtime;
1980
1981                 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
1982                 hammer2_update_time(&mtime);
1983                 hammer2_inode_modify(dip);
1984                 dip->meta.mtime = mtime;
1985                 /*hammer2_inode_unlock(dip);*/
1986         }
1987         hammer2_inode_unlock(dip);
1988
1989         hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1990         if (error == 0) {
1991                 cache_unlink(ap->a_nch);
1992                 hammer2_knote(ap->a_dvp, NOTE_WRITE);
1993         }
1994         if (vprecycle)
1995                 hammer2_inode_vprecycle(vprecycle);
1996
1997         return (error);
1998 }
1999
2000 /*
2001  * hammer2_vop_nrmdir { nch, dvp, cred }
2002  */
2003 static
2004 int
2005 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
2006 {
2007         hammer2_xop_unlink_t *xop;
2008         hammer2_inode_t *dip;
2009         hammer2_inode_t *ip;
2010         struct namecache *ncp;
2011         struct vnode *vprecycle;
2012         int error;
2013
2014         dip = VTOI(ap->a_dvp);
2015         if (dip->pmp->ronly)
2016                 return (EROFS);
2017 #if 0
2018         /* allow removals, but expect the user to also run bulkfree */
2019         if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
2020                 return (ENOSPC);
2021 #endif
2022
2023         hammer2_trans_init(dip->pmp, 0);
2024         hammer2_inode_lock(dip, 0);
2025
2026         xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
2027
2028         ncp = ap->a_nch->ncp;
2029         hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
2030         xop->isdir = 1;
2031         xop->dopermanent = 0;
2032         hammer2_xop_start(&xop->head, &hammer2_unlink_desc);
2033
2034         /*
2035          * Collect the real inode and adjust nlinks; destroy the real
2036          * inode if nlinks transitions to 0 and it was the real inode
2037          * (else it has already been removed).
2038          */
2039         error = hammer2_xop_collect(&xop->head, 0);
2040         error = hammer2_error_to_errno(error);
2041         vprecycle = NULL;
2042
2043         if (error == 0) {
2044                 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
2045                 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
2046                 if (ip) {
2047                         hammer2_inode_unlink_finisher(ip, &vprecycle);
2048                         hammer2_inode_depend(dip, ip);  /* after modified */
2049                         hammer2_inode_unlock(ip);
2050                 }
2051         } else {
2052                 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
2053         }
2054
2055         /*
2056          * Update dip's mtime
2057          */
2058         if (error == 0) {
2059                 uint64_t mtime;
2060
2061                 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
2062                 hammer2_update_time(&mtime);
2063                 hammer2_inode_modify(dip);
2064                 dip->meta.mtime = mtime;
2065                 /*hammer2_inode_unlock(dip);*/
2066         }
2067         hammer2_inode_unlock(dip);
2068
2069         hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
2070         if (error == 0) {
2071                 cache_unlink(ap->a_nch);
2072                 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
2073         }
2074         if (vprecycle)
2075                 hammer2_inode_vprecycle(vprecycle);
2076         return (error);
2077 }
2078
2079 /*
2080  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
2081  */
2082 static
2083 int
2084 hammer2_vop_nrename(struct vop_nrename_args *ap)
2085 {
2086         struct namecache *fncp;
2087         struct namecache *tncp;
2088         hammer2_inode_t *fdip;  /* source directory */
2089         hammer2_inode_t *tdip;  /* target directory */
2090         hammer2_inode_t *ip;    /* file being renamed */
2091         hammer2_inode_t *tip;   /* replaced target during rename or NULL */
2092         struct vnode *vprecycle;
2093         const char *fname;
2094         size_t fname_len;
2095         const char *tname;
2096         size_t tname_len;
2097         int error;
2098         int update_tdip;
2099         int update_fdip;
2100         hammer2_key_t tlhc;
2101
2102         if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
2103                 return(EXDEV);
2104         if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
2105                 return(EXDEV);
2106
2107         fdip = VTOI(ap->a_fdvp);        /* source directory */
2108         tdip = VTOI(ap->a_tdvp);        /* target directory */
2109
2110         if (fdip->pmp->ronly || (fdip->pmp->flags & HAMMER2_PMPF_EMERG))
2111                 return (EROFS);
2112         if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
2113                 return (ENOSPC);
2114
2115         fncp = ap->a_fnch->ncp;         /* entry name in source */
2116         fname = fncp->nc_name;
2117         fname_len = fncp->nc_nlen;
2118
2119         tncp = ap->a_tnch->ncp;         /* entry name in target */
2120         tname = tncp->nc_name;
2121         tname_len = tncp->nc_nlen;
2122
2123         hammer2_trans_init(tdip->pmp, 0);
2124
2125         update_tdip = 0;
2126         update_fdip = 0;
2127
2128         ip = VTOI(fncp->nc_vp);
2129         hammer2_inode_ref(ip);          /* extra ref */
2130
2131         /*
2132          * Look up the target name to determine whether a directory entry
2133          * is being overwritten.  We only hold related inode locks
2134          * temporarily; the operating system is expected to protect
2135          * against rename races.
2136          */
2137         tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
2138         if (tip)
2139                 hammer2_inode_ref(tip); /* extra ref */
2140
2141         /*
2142          * For now try to avoid deadlocks with a simple pointer address
2143          * test.  (tip) can be NULL.
2144          */
2145         error = 0;
2146         {
2147                 hammer2_inode_t *ip1 = fdip;
2148                 hammer2_inode_t *ip2 = tdip;
2149                 hammer2_inode_t *ip3 = ip;
2150                 hammer2_inode_t *ip4 = tip;     /* may be NULL */
2151
2152                 if (fdip > tdip) {
2153                         ip1 = tdip;
2154                         ip2 = fdip;
2155                 }
2156                 if (tip && ip > tip) {
2157                         ip3 = tip;
2158                         ip4 = ip;
2159                 }
2160                 hammer2_inode_lock4(ip1, ip2, ip3, ip4);
2161         }
2162
2163         /*
2164          * Resolve the collision space for (tdip, tname, tname_len)
2165          *
2166          * tdip must be held exclusively locked to prevent races since
2167          * multiple filenames can end up in the same collision space.
2168          */
2169         {
2170                 hammer2_xop_scanlhc_t *sxop;
2171                 hammer2_key_t lhcbase;
2172
2173                 tlhc = hammer2_dirhash(tname, tname_len);
2174                 lhcbase = tlhc;
2175                 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2176                 sxop->lhc = tlhc;
2177                 hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
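                /*
                 * Scan forward from the hashed key; the first key not
                 * already present in the directory becomes the new
                 * directory entry's key.
                 */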
2178                 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
2179                         if (tlhc != sxop->head.cluster.focus->bref.key)
2180                                 break;
2181                         ++tlhc;
2182                 }
2183                 error = hammer2_error_to_errno(error);
2184                 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
2185
2186                 if (error) {
2187                         if (error != ENOENT)
2188                                 goto done2;
2189                         ++tlhc;
2190                         error = 0;
2191                 }
2192                 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
2193                         error = ENOSPC;
2194                         goto done2;
2195                 }
2196         }
2197
2198         /*
2199          * Ready to go, issue the rename to the backend.  Note that meta-data
2200          * updates to the related inodes occur separately from the rename
2201          * operation.
2202          *
2203          * NOTE: While it is not necessary to update ip->meta.name*, doing
2204          *       so aids catastrophic recovery and debugging.
2205          */
2206         if (error == 0) {
2207                 hammer2_xop_nrename_t *xop4;
2208
2209                 xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
2210                 xop4->lhc = tlhc;
2211                 xop4->ip_key = ip->meta.name_key;
2212                 hammer2_xop_setip2(&xop4->head, ip);
2213                 hammer2_xop_setip3(&xop4->head, tdip);
2214                 if (tip && tip->meta.type == HAMMER2_OBJTYPE_DIRECTORY)
2215                     hammer2_xop_setip4(&xop4->head, tip);
2216                 hammer2_xop_setname(&xop4->head, fname, fname_len);
2217                 hammer2_xop_setname2(&xop4->head, tname, tname_len);
2218                 hammer2_xop_start(&xop4->head, &hammer2_nrename_desc);
2219
2220                 error = hammer2_xop_collect(&xop4->head, 0);
2221                 error = hammer2_error_to_errno(error);
2222                 hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);
2223
2224                 if (error == ENOENT)
2225                         error = 0;
2226
2227                 /*
2228                  * Update inode meta-data.
2229                  *
2230                  * WARNING!  The in-memory inode (ip) structure does not
2231                  *           maintain a copy of the inode's filename buffer.
2232                  */
2233                 if (error == 0 &&
2234                     (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
2235                         hammer2_inode_modify(ip);
2236                         ip->meta.name_len = tname_len;
2237                         ip->meta.name_key = tlhc;
2238                 }
2239                 if (error == 0) {
2240                         hammer2_inode_modify(ip);
2241                         ip->meta.iparent = tdip->meta.inum;
2242                 }
2243                 update_fdip = 1;
2244                 update_tdip = 1;
2245         }
2246
2247 done2:
2248         /*
2249          * We must adjust nlinks on the replaced target if it exists.
2250          * We must adjust nlinks on the original replace target if it exists.
2251          */
2252         vprecycle = NULL;
2253         if (error == 0 && tip) {
2254                 hammer2_inode_unlink_finisher(tip, &vprecycle);
2255         }
2256
2257         /*
2258          * Update directory mtimes to reflect that something changed.
2259          */
2260         if (update_fdip || update_tdip) {
2261                 uint64_t mtime;
2262
2263                 hammer2_update_time(&mtime);
2264                 if (update_fdip) {
2265                         hammer2_inode_modify(fdip);
2266                         fdip->meta.mtime = mtime;
2267                 }
2268                 if (update_tdip) {
2269                         hammer2_inode_modify(tdip);
2270                         tdip->meta.mtime = mtime;
2271                 }
2272         }
2273         if (tip) {
2274                 hammer2_inode_unlock(tip);
2275                 hammer2_inode_drop(tip);
2276         }
2277         hammer2_inode_unlock(ip);
2278         hammer2_inode_unlock(tdip);
2279         hammer2_inode_unlock(fdip);
2280         hammer2_inode_drop(ip);
2281         hammer2_trans_done(tdip->pmp, HAMMER2_TRANS_SIDEQ);
2282
2283         /*
2284          * Issue the namecache update after unlocking all the internal
2285          * hammer2 structures, otherwise we might deadlock.
2286          *
2287          * WARNING! The target namespace must be updated atomically,
2288          *          and we depend on cache_rename() to handle that for
2289          *          us.  Do not do a separate cache_unlink() because
2290          *          that leaves a small window of opportunity for other
2291          *          threads to allocate the target namespace before we
2292          *          manage to complete our rename.
2293          *
2294          * WARNING! cache_rename() (and cache_unlink()) will properly
2295          *          set VREF_FINALIZE on any attached vnode.  Do not
2296          *          call cache_setunresolved() manually before-hand as
2297          *          this will prevent the flag from being set later via
2298          *          cache_rename().  If VREF_FINALIZE is not properly set
2299          *          and the inode is no longer in the topology, related
2300          *          chains can remain dirty indefinitely.
2301          */
2302         if (error == 0 && tip) {
2303                 /*cache_unlink(ap->a_tnch); see above */
2304                 /*cache_setunresolved(ap->a_tnch); see above */
2305         }
2306         if (error == 0) {
2307                 cache_rename(ap->a_fnch, ap->a_tnch);
2308                 hammer2_knote(ap->a_fdvp, NOTE_WRITE);
2309                 hammer2_knote(ap->a_tdvp, NOTE_WRITE);
2310                 hammer2_knote(fncp->nc_vp, NOTE_RENAME);
2311         }
2312         if (vprecycle)
2313                 hammer2_inode_vprecycle(vprecycle);
2314
2315         return (error);
2316 }
2317
2318 /*
2319  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2320  */
2321 static
2322 int
2323 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2324 {
2325         hammer2_inode_t *ip;
2326         int error;
2327
2328         ip = VTOI(ap->a_vp);
2329
2330         error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2331                               ap->a_fflag, ap->a_cred);
2332         return (error);
2333 }
2334
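/*
 * Handle mount control operations.  MOUNTCTL_SET_EXPORT updates the
 * PFS's NFS export configuration via vfs_export(); everything else
 * falls through to vop_stdmountctl().
 */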
2335 static
2336 int
2337 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2338 {
2339         struct mount *mp;
2340         hammer2_pfs_t *pmp;
2341         int rc;
2342
2343         switch (ap->a_op) {
2344         case (MOUNTCTL_SET_EXPORT):
2345                 mp = ap->a_head.a_ops->head.vv_mount;
2346                 pmp = MPTOPMP(mp);
2347
2348                 if (ap->a_ctllen != sizeof(struct export_args))
2349                         rc = (EINVAL);
2350                 else
2351                         rc = vfs_export(mp, &pmp->export,
2352                                         (const struct export_args *)ap->a_ctl);
2353                 break;
2354         default:
2355                 rc = vop_stdmountctl(ap);
2356                 break;
2357         }
2358         return (rc);
2359 }
2360
2361 /*
2362  * KQFILTER
2363  */
2364 static void filt_hammer2detach(struct knote *kn);
2365 static int filt_hammer2read(struct knote *kn, long hint);
2366 static int filt_hammer2write(struct knote *kn, long hint);
2367 static int filt_hammer2vnode(struct knote *kn, long hint);
2368
2369 static struct filterops hammer2read_filtops =
2370         { FILTEROP_ISFD | FILTEROP_MPSAFE,
2371           NULL, filt_hammer2detach, filt_hammer2read };
2372 static struct filterops hammer2write_filtops =
2373         { FILTEROP_ISFD | FILTEROP_MPSAFE,
2374           NULL, filt_hammer2detach, filt_hammer2write };
2375 static struct filterops hammer2vnode_filtops =
2376         { FILTEROP_ISFD | FILTEROP_MPSAFE,
2377           NULL, filt_hammer2detach, filt_hammer2vnode };
2378
2379 static
2380 int
2381 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2382 {
2383         struct vnode *vp = ap->a_vp;
2384         struct knote *kn = ap->a_kn;
2385
2386         switch (kn->kn_filter) {
2387         case EVFILT_READ:
2388                 kn->kn_fop = &hammer2read_filtops;
2389                 break;
2390         case EVFILT_WRITE:
2391                 kn->kn_fop = &hammer2write_filtops;
2392                 break;
2393         case EVFILT_VNODE:
2394                 kn->kn_fop = &hammer2vnode_filtops;
2395                 break;
2396         default:
2397                 return (EOPNOTSUPP);
2398         }
2399
2400         kn->kn_hook = (caddr_t)vp;
2401
2402         knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2403
2404         return(0);
2405 }
2406
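/*
 * Detach a knote from the vnode it was attached to.
 */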
2407 static void
2408 filt_hammer2detach(struct knote *kn)
2409 {
2410         struct vnode *vp = (void *)kn->kn_hook;
2411
2412         knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2413 }
2414
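/*
 * Read filter: report the number of bytes readable between the file
 * position and EOF, clamped to INTPTR_MAX.  NOTE_REVOKE forces an
 * EOF/one-shot condition.
 */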
2415 static int
2416 filt_hammer2read(struct knote *kn, long hint)
2417 {
2418         struct vnode *vp = (void *)kn->kn_hook;
2419         hammer2_inode_t *ip = VTOI(vp);
2420         off_t off;
2421
2422         if (hint == NOTE_REVOKE) {
2423                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2424                 return(1);
2425         }
2426         off = ip->meta.size - kn->kn_fp->f_offset;
2427         kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2428         if (kn->kn_sfflags & NOTE_OLDAPI)
2429                 return(1);
2430         return (kn->kn_data != 0);
2431 }
2432
2433
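/*
 * Write filter: regular files are always considered writable.
 */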
2434 static int
2435 filt_hammer2write(struct knote *kn, long hint)
2436 {
2437         if (hint == NOTE_REVOKE)
2438                 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2439         kn->kn_data = 0;
2440         return (1);
2441 }
2442
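/*
 * Vnode filter: accumulate the events the caller subscribed to;
 * NOTE_REVOKE additionally signals EOF.
 */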
2443 static int
2444 filt_hammer2vnode(struct knote *kn, long hint)
2445 {
2446         if (kn->kn_sfflags & hint)
2447                 kn->kn_fflags |= hint;
2448         if (hint == NOTE_REVOKE) {
2449                 kn->kn_flags |= (EV_EOF | EV_NODATA);
2450                 return (1);
2451         }
2452         return (kn->kn_fflags != 0);
2453 }
2454
2455 /*
2456  * FIFO VOPS
2457  */
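/*
 * markatime is a no-op here apart from rejecting read-only and
 * emergency-mode mounts.
 */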
2458 static
2459 int
2460 hammer2_vop_markatime(struct vop_markatime_args *ap)
2461 {
2462         hammer2_inode_t *ip;
2463         struct vnode *vp;
2464
2465         vp = ap->a_vp;
2466         ip = VTOI(vp);
2467
2468         if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
2469                 return (EROFS);
2470         return(0);
2471 }
2472
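/*
 * Try the fifofs kqfilter first and fall back to the normal hammer2
 * kqfilter if fifofs does not handle the request.
 */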
2473 static
2474 int
2475 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2476 {
2477         int error;
2478
2479         error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2480         if (error)
2481                 error = hammer2_vop_kqfilter(ap);
2482         return(error);
2483 }
2484
2485 /*
2486  * VOPS vector
2487  */
2488 struct vop_ops hammer2_vnode_vops = {
2489         .vop_default    = vop_defaultop,
2490         .vop_fsync      = hammer2_vop_fsync,
2491         .vop_getpages   = vop_stdgetpages,
2492         .vop_putpages   = vop_stdputpages,
2493         .vop_access     = hammer2_vop_access,
2494         .vop_advlock    = hammer2_vop_advlock,
2495         .vop_close      = hammer2_vop_close,
2496         .vop_nlink      = hammer2_vop_nlink,
2497         .vop_ncreate    = hammer2_vop_ncreate,
2498         .vop_nsymlink   = hammer2_vop_nsymlink,
2499         .vop_nremove    = hammer2_vop_nremove,
2500         .vop_nrmdir     = hammer2_vop_nrmdir,
2501         .vop_nrename    = hammer2_vop_nrename,
2502         .vop_getattr    = hammer2_vop_getattr,
2503         .vop_getattr_lite = hammer2_vop_getattr_lite,
2504         .vop_setattr    = hammer2_vop_setattr,
2505         .vop_readdir    = hammer2_vop_readdir,
2506         .vop_readlink   = hammer2_vop_readlink,
2507         .vop_read       = hammer2_vop_read,
2508         .vop_write      = hammer2_vop_write,
2509         .vop_open       = hammer2_vop_open,
2510         .vop_inactive   = hammer2_vop_inactive,
2511         .vop_reclaim    = hammer2_vop_reclaim,
2512         .vop_nresolve   = hammer2_vop_nresolve,
2513         .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2514         .vop_nmkdir     = hammer2_vop_nmkdir,
2515         .vop_nmknod     = hammer2_vop_nmknod,
2516         .vop_ioctl      = hammer2_vop_ioctl,
2517         .vop_mountctl   = hammer2_vop_mountctl,
2518         .vop_bmap       = hammer2_vop_bmap,
2519         .vop_strategy   = hammer2_vop_strategy,
2520         .vop_kqfilter   = hammer2_vop_kqfilter
2521 };
2522
2523 struct vop_ops hammer2_spec_vops = {
2524         .vop_default =          vop_defaultop,
2525         .vop_fsync =            hammer2_vop_fsync,
2526         .vop_read =             vop_stdnoread,
2527         .vop_write =            vop_stdnowrite,
2528         .vop_access =           hammer2_vop_access,
2529         .vop_close =            hammer2_vop_close,
2530         .vop_markatime =        hammer2_vop_markatime,
2531         .vop_getattr =          hammer2_vop_getattr,
2532         .vop_inactive =         hammer2_vop_inactive,
2533         .vop_reclaim =          hammer2_vop_reclaim,
2534         .vop_setattr =          hammer2_vop_setattr
2535 };
2536
2537 struct vop_ops hammer2_fifo_vops = {
2538         .vop_default =          fifo_vnoperate,
2539         .vop_fsync =            hammer2_vop_fsync,
2540 #if 0
2541         .vop_read =             hammer2_vop_fiforead,
2542         .vop_write =            hammer2_vop_fifowrite,
2543 #endif
2544         .vop_access =           hammer2_vop_access,
2545 #if 0
2546         .vop_close =            hammer2_vop_fifoclose,
2547 #endif
2548         .vop_markatime =        hammer2_vop_markatime,
2549         .vop_getattr =          hammer2_vop_getattr,
2550         .vop_inactive =         hammer2_vop_inactive,
2551         .vop_reclaim =          hammer2_vop_reclaim,
2552         .vop_setattr =          hammer2_vop_setattr,
2553         .vop_kqfilter =         hammer2_vop_fifokqfilter
2554 };
2555