/* sys/vfs/hammer2/hammer2_vnops.c */
/*
 * Copyright (c) 2011-2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>

#include "hammer2.h"

#define ZFOFFSET	(-2LL)
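
/*
 * Two sentinel pseudo-offsets flow through the strategy code below:
 * NOOFFSET (from the kernel buffer layer) means no logical->physical
 * translation is cached and, after lookup, that the data is embedded
 * directly in the inode; ZFOFFSET (local, above) marks a hole which
 * reads back as zero-fill.
 */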

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, int ioflag);
static hammer2_off_t hammer2_assign_physical(hammer2_inode_t *ip,
				hammer2_key_t lbase, int lblksize, int *errorp);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static int hammer2_unlink_file(hammer2_inode_t *dip,
				const uint8_t *name, size_t name_len,
				int isdir, int adjlinks);

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp;
	struct hammer2_inode *ip;
#if 0
	struct hammer2_mount *hmp;
#endif

	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		return (0);
	}

	/*
	 * Detect updates to the embedded data which may be synchronized by
	 * the strategy code.  Simply mark the inode modified so it gets
	 * picked up by our normal flush.
	 */
	if (ip->chain.flags & HAMMER2_CHAIN_DIRTYEMBED) {
		hammer2_inode_lock_ex(ip);
		atomic_clear_int(&ip->chain.flags, HAMMER2_CHAIN_DIRTYEMBED);
		hammer2_chain_modify(ip->hmp, &ip->chain, 0);
		hammer2_inode_unlock_ex(ip);
	}

	/*
	 * Check for deleted inodes and recycle immediately.
	 */
	if (ip->chain.flags & HAMMER2_CHAIN_DELETED) {
		vrecycle(vp);
	}
	return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer2_inode *ip;
	struct hammer2_mount *hmp;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL)
		return (0);
	hmp = ip->hmp;

	hammer2_inode_lock_ex(ip);
	vp->v_data = NULL;
	ip->vp = NULL;
	if (ip->chain.flags & HAMMER2_CHAIN_DELETED)
		atomic_set_int(&ip->chain.flags, HAMMER2_CHAIN_DESTROYED);
	hammer2_chain_flush(hmp, &ip->chain);
	hammer2_inode_unlock_ex(ip);
	hammer2_chain_drop(hmp, &ip->chain);	/* vp ref */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	return (0);
}

static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	struct hammer2_inode *ip;
	struct hammer2_mount *hmp;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	hmp = ip->hmp;

	hammer2_inode_lock_ex(ip);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Detect updates to the embedded data which may be synchronized by
	 * the strategy code.  Simply mark the inode modified so it gets
	 * picked up by our normal flush.
	 */
	if (ip->chain.flags & HAMMER2_CHAIN_DIRTYEMBED) {
		atomic_clear_int(&ip->chain.flags, HAMMER2_CHAIN_DIRTYEMBED);
		hammer2_chain_modify(hmp, &ip->chain, 0);
	}

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	if (ap->a_flags & VOP_FSYNC_SYSCALL)
		hammer2_chain_flush(hmp, &ip->chain);
	hammer2_inode_unlock_ex(ip);
	return (0);
}

static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	uid = hammer2_to_unix_xid(&ip->ip_data.uid);
	gid = hammer2_to_unix_xid(&ip->ip_data.gid);

	error = vop_helper_access(ap, uid, gid, ip->ip_data.mode,
				  ip->ip_data.uflags);
	return (error);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	hmp = ip->hmp;

	hammer2_inode_lock_sh(ip);

	vap->va_fsid = hmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->ip_data.inum;
	vap->va_mode = ip->ip_data.mode;
	vap->va_nlink = ip->ip_data.nlinks;
	vap->va_uid = 0;
	vap->va_gid = 0;
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->ip_data.size;
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->ip_data.uflags;
	hammer2_time_to_timespec(ip->ip_data.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->ip_data.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->ip_data.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = vap->va_size;	/* XXX */
	vap->va_type = hammer2_get_vtype(ip);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->ip_data.uid;
	vap->va_gid_uuid = ip->ip_data.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock_sh(ip);

	return (0);
}

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	int doctime = 0;
	int domtime = 0;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	hmp = ip->hmp;

	if (hmp->ronly)
		return (EROFS);

	hammer2_inode_lock_ex(ip);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		u_int32_t flags;

		flags = ip->ip_data.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					hammer2_to_unix_xid(&ip->ip_data.uid),
					ap->a_cred);
		if (error == 0) {
			if (ip->ip_data.uflags != flags) {
				hammer2_chain_modify(hmp, &ip->chain, 0);
				ip->ip_data.uflags = flags;
				doctime = 1;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->ip_data.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
	}

	if (ip->ip_data.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	/* uid, gid */

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->ip_data.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->ip_data.size)
				break;
			if (vap->va_size < ip->ip_data.size) {
				hammer2_truncate_file(ip, vap->va_size);
			} else {
				hammer2_extend_file(ip, vap->va_size);
			}
			domtime = 1;
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
done:
	hammer2_inode_unlock_ex(ip);
	return (error);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	hammer2_inode_t *xip;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int dtype;
	int r;

	ip = VTOI(ap->a_vp);
	hmp = ip->hmp;
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
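	/*
	 * Example: a fresh scan (uio_offset 0) emits '.' (cookie 0) and
	 * '..' (cookie 1), then resumes at saveoff 2.  Real entries are
	 * scanned via
	 *
	 *	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	 *
	 * and the visibility bit is stripped again when saveoff is copied
	 * back to uio_offset, so userland only ever sees positive offsets.
	 */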
	error = 0;
	chain = (void *)(intptr_t)-1;	/* non-NULL for early goto done case */

	if (saveoff == 0) {
		r = vop_write_dirent(&error, uio,
				     ip->ip_data.inum &
				      HAMMER2_DIRHASH_USERMSK,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}
	if (saveoff == 1) {
		if (ip->pip == NULL || ip == hmp->iroot)
			xip = ip;
		else
			xip = ip->pip;

		r = vop_write_dirent(&error, uio,
				     xip->ip_data.inum &
				      HAMMER2_DIRHASH_USERMSK,
				     DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;

	parent = &ip->chain;
	error = hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
	if (error) {
		hammer2_chain_unlock(hmp, parent);
		goto done;
	}
	chain = hammer2_chain_lookup(hmp, &parent, lkey, lkey, 0);
	if (chain == NULL) {
		chain = hammer2_chain_lookup(hmp, &parent,
					     lkey, (hammer2_key_t)-1, 0);
	}
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			dtype = hammer2_get_dtype(chain->u.ip);
			saveoff = chain->bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     chain->u.ip->ip_data.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     chain->u.ip->ip_data.name_len,
					     chain->u.ip->ip_data.filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n",
				chain->bref.type);
		}

		/*
		 * Keys may not be returned in order so once we have a
		 * placemarker (chain) the scan must allow the full range
		 * or some entries will be missed.
		 */
		chain = hammer2_chain_next(hmp, &parent, chain,
					   0, (hammer2_key_t)-1, 0);
		if (chain) {
			saveoff = (chain->bref.key &
				   HAMMER2_DIRHASH_USERMSK) + 1;
		} else {
			saveoff = (hammer2_key_t)-1;
		}
		if (cookie_index == ncookies)
			break;
	}
	if (chain)
		hammer2_chain_unlock(hmp, chain);
	hammer2_chain_unlock(hmp, parent);
done:
	if (ap->a_eofflag)
		*ap->a_eofflag = (chain == NULL);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);
	hmp = ip->hmp;

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	hmp = ip->hmp;
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	thread_t td;
	struct vnode *vp;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigwrite;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	hmp = ip->hmp;
	uio = ap->a_uio;
	error = 0;
	if (hmp->ronly)
		return (EROFS);

	seqcount = ap->a_ioflag >> 16;
	bigwrite = (uio->uio_resid > 100 * 1024 * 1024);

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * ip must be locked if extending the file.
	 * ip must be locked to avoid racing a truncation.
	 *
	 * ip must be marked modified, particularly because the write
	 * might wind up being copied into the embedded data area.
	 */
	hammer2_inode_lock_ex(ip);
	hammer2_chain_modify(hmp, &ip->chain, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag);

	hammer2_inode_unlock_ex(ip);
	return (error);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ip_data.size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);
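		/*
		 * hammer2_calc_logical() returns the logical block size
		 * covering uio_offset and fills in lbase (the aligned
		 * logical base of that block) and leof (the aligned
		 * logical EOF), which bounds how far cluster_read() may
		 * read ahead below.
		 */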

		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * BKVASIZE,
				     &bp);

		if (error)
			break;
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ip_data.size - uio->uio_offset)
			n = (int)(ip->ip_data.size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomove((char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	return (error);
}

/*
 * Called with a locked inode (ip) to do the underlying write to a file or
 * to build the symlink target.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, int ioflag)
{
	hammer2_key_t old_eof;
	struct buf *bp;
	int kflags;
	int error;

	/*
	 * Set up append mode if requested
	 */
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->ip_data.size;
	kflags = 0;
	error = 0;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	old_eof = ip->ip_data.size;
	if (uio->uio_offset + uio->uio_resid > ip->ip_data.size) {
		hammer2_extend_file(ip, uio->uio_offset + uio->uio_resid);
		kflags |= NOTE_EXTEND;
	}

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int trivial;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0) {
			/*
			 * XXX should try to leave this unlocked through
			 * the whole loop
			 */
			hammer2_chain_unlock(ip->hmp, &ip->chain);
			bwillwrite(HAMMER2_PBUFSIZE);
			hammer2_chain_lock(ip->hmp, &ip->chain,
					   HAMMER2_RESOLVE_ALWAYS);
		}

		/* XXX bigwrite & signal check test */

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);
		loff = (int)(uio->uio_offset - lbase);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (uio->uio_offset + n == ip->ip_data.size)
				trivial = 1;
		} else if (loff == 0) {
			trivial = 1;
		}
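		/*
		 * Example (assuming a 64KB logical buffer): a 65536-byte
		 * copy at loff 0 fully covers the buffer (trivial == 1),
		 * so the old contents need not be read first.  A 4096-byte
		 * copy at loff 8192 leaves both ends uncovered
		 * (trivial == 0) and takes the read-modify-write path
		 * below.
		 */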

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * We have to assign physical storage to the buffer we intend
		 * to dirty or write now to avoid deadlocks in the strategy
		 * code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.  The
		 * strategy code will take care of it in that case.
		 */
		bp->b_bio2.bio_offset =
			hammer2_assign_physical(ip, lbase, lblksize, &error);
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		hammer2_chain_unlock(ip->hmp, &ip->chain);
		error = uiomove(bp->b_data + loff, n, uio);
		hammer2_chain_lock(ip->hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
		kflags |= NOTE_WRITE;

		if (error) {
			brelse(bp);
			break;
		}

		/* XXX update ino_data.mtime */

		/*
		 * Once we dirty a buffer any cached offset becomes invalid.
		 */
		bp->b_flags |= B_AGE;
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && loff + n == lblksize) {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but failed to write through
	 * the entire range, the write is a failure and we have to back up.
	 */
	if (error && ip->ip_data.size != old_eof)
		hammer2_truncate_file(ip, old_eof);
	/* hammer2_knote(ip->vp, kflags); */
	return error;
}

/*
 * Assign physical storage to a logical block.
 *
 * NOOFFSET is returned if the data is inode-embedded.  In this case the
 * strategy code will simply bcopy() the data into the inode.
 */
static
hammer2_off_t
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_key_t lbase,
			int lblksize, int *errorp)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_off_t pbase;

	*errorp = 0;
	hmp = ip->hmp;

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	parent = &ip->chain;
	hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);

	chain = hammer2_chain_lookup(hmp, &parent,
				     lbase, lbase,
				     HAMMER2_LOOKUP_NODATA);

	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		chain = hammer2_chain_create(hmp, parent, NULL,
					     lbase, HAMMER2_PBUFRADIX,
					     HAMMER2_BREF_TYPE_DATA,
					     lblksize);
		pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
	} else {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode.  The
			 * caller is responsible for marking the inode
			 * modified and copying the data to the embedded
			 * area.
			 */
			pbase = NOOFFSET;
			break;
		case HAMMER2_BREF_TYPE_DATA:
			if (chain->bytes != lblksize) {
				panic("hammer2_assign_physical: "
				      "size mismatch %d/%d\n",
				      lblksize, chain->bytes);
			}
			hammer2_chain_modify(hmp, chain,
					     HAMMER2_MODIFY_OPTDATA);
			pbase = chain->bref.data_off & ~HAMMER2_OFF_MASK_RADIX;
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			pbase = NOOFFSET;
			break;
		}
	}

	if (chain)
		hammer2_chain_unlock(hmp, chain);
	hammer2_chain_unlock(hmp, parent);

	return (pbase);
}
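
/*
 * Note: the lookup above uses HAMMER2_LOOKUP_NODATA so no device buffer
 * is instantiated for the data.  The write path performs its I/O through
 * the logical buffer cache buffer; letting a device buffer alias the same
 * media range could deadlock against the strategy code (see the comment
 * in hammer2_write_file()).
 */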

/*
 * Truncate the size of a file.
 *
 * This routine adjusts ip->ip_data.size smaller, destroying any related
 * data beyond the new EOF and potentially resizing the block straddling
 * the EOF.
 *
 * The inode must be locked.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_mount_t *hmp = ip->hmp;
	hammer2_key_t lbase;
	hammer2_key_t leof;
	struct buf *bp;
	int loff;
	int error;
	int oblksize;
	int nblksize;

	hammer2_chain_modify(hmp, &ip->chain, 0);
	bp = NULL;

	/*
	 * Destroy any logical buffer cache buffers beyond the file EOF.
	 *
	 * We call nvtruncbuf() w/ trivial == 1 to prevent it from messing
	 * around with the buffer straddling EOF, because we need to assign
	 * a new physical offset to it.
	 */
	if (ip->vp) {
		nvtruncbuf(ip->vp, nsize,
			   HAMMER2_PBUFSIZE, (int)nsize & HAMMER2_PBUFMASK,
			   1);
	}

	/*
	 * Setup for lookup/search
	 */
	parent = &ip->chain;
	error = hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
	if (error) {
		hammer2_chain_unlock(hmp, parent);
		/* XXX error reporting */
		return;
	}

	/*
	 * Handle the case where a chain/logical-buffer straddles the new
	 * EOF.  We told nvtruncbuf() above not to mess with the logical
	 * buffer straddling the EOF because we need to reassign its storage
	 * and can't let the strategy code do it for us.
	 */
	loff = (int)nsize & HAMMER2_PBUFMASK;
	if (loff && ip->vp) {
		oblksize = hammer2_calc_logical(ip, nsize, &lbase, &leof);
		error = bread(ip->vp, lbase, oblksize, &bp);
		KKASSERT(error == 0);
	}
	ip->ip_data.size = nsize;
	nblksize = hammer2_calc_logical(ip, nsize, &lbase, &leof);

	/*
	 * Fixup the chain element.  If we have a logical buffer in-hand
	 * we don't want to create a conflicting device buffer.
	 */
	if (loff && bp) {
		chain = hammer2_chain_lookup(hmp, &parent, lbase, lbase,
					     HAMMER2_LOOKUP_NODATA);
		if (chain) {
			allocbuf(bp, nblksize);
			switch(chain->bref.type) {
			case HAMMER2_BREF_TYPE_DATA:
				hammer2_chain_resize(hmp, chain,
					     hammer2_bytes_to_radix(nblksize),
					     HAMMER2_MODIFY_OPTDATA);
				bzero(bp->b_data + loff, nblksize - loff);
				bp->b_bio2.bio_offset = chain->bref.data_off &
							HAMMER2_OFF_MASK;
				break;
			case HAMMER2_BREF_TYPE_INODE:
				bzero(bp->b_data + loff, nblksize - loff);
				bp->b_bio2.bio_offset = NOOFFSET;
				break;
			default:
				panic("hammer2_truncate_file: bad type");
				break;
			}
			hammer2_chain_unlock(hmp, chain);
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		} else {
			/*
			 * Destroy clean buffer w/ wrong buffer size.  Retain
			 * backing store.
			 */
			bp->b_flags |= B_RELBUF;
			KKASSERT(bp->b_bio2.bio_offset == NOOFFSET);
			KKASSERT((bp->b_flags & B_DIRTY) == 0);
			bqrelse(bp);
		}
	} else if (loff) {
		/*
		 * WARNING: This utilizes a device buffer for the data.
		 *
		 * XXX case should not occur
		 */
		panic("hammer2_truncate_file: non-zero truncation, no-vnode");
		chain = hammer2_chain_lookup(hmp, &parent, lbase, lbase, 0);
		if (chain) {
			switch(chain->bref.type) {
			case HAMMER2_BREF_TYPE_DATA:
				hammer2_chain_resize(hmp, chain,
					     hammer2_bytes_to_radix(nblksize),
					     0);
				hammer2_chain_modify(hmp, chain, 0);
				bzero(chain->data->buf + loff,
				      nblksize - loff);
				break;
			case HAMMER2_BREF_TYPE_INODE:
				if (loff < HAMMER2_EMBEDDED_BYTES) {
					hammer2_chain_modify(hmp, chain, 0);
					bzero(chain->data->ipdata.u.data + loff,
					      HAMMER2_EMBEDDED_BYTES - loff);
				}
				break;
			}
			hammer2_chain_unlock(hmp, chain);
		}
	}

	/*
	 * Clean up any fragmentary VM pages now that we have properly
	 * resized the straddling buffer.  These pages are no longer
	 * part of the buffer.
	 */
	if (ip->vp) {
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   1);
	}

	/*
	 * Destroy any physical blocks after the new EOF point.
	 */
	lbase = (nsize + HAMMER2_PBUFMASK64) & ~HAMMER2_PBUFMASK64;
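	/*
	 * Example (assuming the common 64KB HAMMER2_PBUFSIZE): truncating
	 * to nsize 100KB rounds lbase up to 128KB, so the block straddling
	 * the new EOF at 64KB is preserved (it was resized above) and the
	 * loop below deletes only whole blocks at or beyond 128KB.
	 */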
	chain = hammer2_chain_lookup(hmp, &parent,
				     lbase, (hammer2_key_t)-1,
				     HAMMER2_LOOKUP_NODATA);
	while (chain) {
		/*
		 * Degenerate embedded data case, nothing to loop on.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_chain_unlock(hmp, chain);
			break;
		}

		/*
		 * Delete physical data blocks past the file EOF.
		 */
		if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
			hammer2_chain_delete(hmp, parent, chain);
		}
		/* XXX check parent if empty indirect block & delete */
		chain = hammer2_chain_next(hmp, &parent, chain,
					   lbase, (hammer2_key_t)-1,
					   HAMMER2_LOOKUP_NODATA);
	}
	hammer2_chain_unlock(hmp, parent);
}

/*
 * Extend the size of a file.  The inode must be locked.
 *
 * We may have to resize the block straddling the old EOF.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	struct buf *bp;
	hammer2_key_t osize;
	hammer2_key_t obase;
	hammer2_key_t nbase;
	hammer2_key_t leof;
	int oblksize;
	int nblksize;
	int nradix;
	int error;

	KKASSERT(ip->vp);
	hmp = ip->hmp;

	hammer2_chain_modify(hmp, &ip->chain, 0);

	/*
	 * Nothing to do if the direct-data case is still intact
	 */
	if ((ip->ip_data.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
	    nsize <= HAMMER2_EMBEDDED_BYTES) {
		ip->ip_data.size = nsize;
		return;
	}

	/*
	 * Calculate the blocksize at the original EOF and resize the block
	 * if necessary.  Adjust the file size in the inode.
	 */
	osize = ip->ip_data.size;
	oblksize = hammer2_calc_logical(ip, osize, &obase, &leof);
	ip->ip_data.size = nsize;
	nblksize = hammer2_calc_logical(ip, osize, &nbase, &leof);

	/*
	 * Do all required vnode operations, but do not mess with the
	 * buffer straddling the original EOF.
	 */
	nvextendbuf(ip->vp,
		    ip->ip_data.size, nsize,
		    0, nblksize,
		    0, (int)nsize & HAMMER2_PBUFMASK,
		    1);

	/*
	 * Early return if we have no more work to do.
	 */
	if (obase == nbase && oblksize == nblksize &&
	    (ip->ip_data.op_flags & HAMMER2_OPFLAG_DIRECTDATA) == 0) {
		return;
	}

	/*
	 * We have work to do, including possibly resizing the buffer
	 * at the EOF point and turning off DIRECTDATA mode.
	 */
	bp = NULL;
	if (((int)osize & HAMMER2_PBUFMASK)) {
		error = bread(ip->vp, obase, oblksize, &bp);
		KKASSERT(error == 0);

		if (obase != nbase) {
			allocbuf(bp, HAMMER2_PBUFSIZE);
		} else {
			allocbuf(bp, nblksize);
		}
		vfs_bio_clrbuf(bp);
	}

	/*
	 * Disable direct-data mode by loading up a buffer cache buffer
	 * with the data, then converting the inode data area into the
	 * inode indirect block array area.
	 */
	if (ip->ip_data.op_flags & HAMMER2_OPFLAG_DIRECTDATA) {
		ip->ip_data.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
		bzero(&ip->ip_data.u.blockset, sizeof(ip->ip_data.u.blockset));
	}
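	/*
	 * Once the file grows past HAMMER2_EMBEDDED_BYTES the inode's data
	 * union stops holding file bytes (u.data) and becomes the indirect
	 * blockset (u.blockset); the old embedded contents were read into
	 * the logical buffer by the bread() above and will be written back
	 * out as a regular data block.
	 */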

	/*
	 * Resize the chain element at the old EOF.
	 */
	if (((int)osize & HAMMER2_PBUFMASK)) {
		parent = &ip->chain;
		error = hammer2_chain_lock(hmp, parent,
					   HAMMER2_RESOLVE_ALWAYS);
		KKASSERT(error == 0);

		nradix = hammer2_bytes_to_radix(nblksize);

		chain = hammer2_chain_lookup(hmp, &parent,
					     obase, obase,
					     HAMMER2_LOOKUP_NODATA);
		if (chain == NULL) {
			chain = hammer2_chain_create(hmp, parent, NULL,
						     obase, nblksize,
						     HAMMER2_BREF_TYPE_DATA,
						     nradix);
		} else {
			KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_DATA);
			hammer2_chain_resize(hmp, chain, nradix,
					     HAMMER2_MODIFY_OPTDATA);
		}
		bp->b_bio2.bio_offset = chain->bref.data_off &
					HAMMER2_OFF_MASK;
		hammer2_chain_unlock(hmp, chain);
		bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		hammer2_chain_unlock(hmp, parent);
	}
}

static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_mount_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_key_t lhc;
	int error = 0;
	struct vnode *vp;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;
	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	lhc = hammer2_dirhash(name, name_len);
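	/*
	 * Directory entries are keyed by a hash of the filename, so the
	 * lookup below scans the collision range
	 * [lhc, lhc + HAMMER2_DIRHASH_LOMASK] and compares the stored
	 * filename to disambiguate entries whose names share hash bits.
	 */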

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	parent = &dip->chain;
	hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(hmp, &parent,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    chain->u.ip &&
		    name_len == chain->data->ipdata.name_len &&
		    bcmp(name, chain->data->ipdata.filename, name_len) == 0) {
			break;
		}
		chain = hammer2_chain_next(hmp, &parent, chain,
					   lhc, lhc + HAMMER2_DIRHASH_LOMASK,
					   0);
	}
	hammer2_chain_unlock(hmp, parent);

	if (chain) {
		vp = hammer2_igetv(chain->u.ip, &error);
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		hammer2_chain_unlock(hmp, chain);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	return error;
}

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	hammer2_mount_t *hmp;
	int error;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;

	if ((ip = dip->pip) == NULL) {
		*ap->a_vpp = NULL;
		return ENOENT;
	}
	hammer2_chain_lock(hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
	*ap->a_vpp = hammer2_igetv(ip, &error);
	hammer2_chain_unlock(hmp, &ip->chain);

	return error;
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_mount_t *hmp;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;
	if (hmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	error = hammer2_inode_create(hmp, ap->a_vap, ap->a_cred,
				     dip, name, name_len, &nip);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);
	hammer2_chain_unlock(hmp, &nip->chain);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	return error;
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 */
static
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	struct vnode *vp;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t lbeg;
	hammer2_key_t lend;
	hammer2_off_t pbeg;
	hammer2_off_t pbytes;
	hammer2_off_t array[HAMMER2_BMAP_COUNT][2];
	int loff;
	int ai;

	/*
	 * Only supported on regular files
	 *
	 * Only supported for read operations (required for cluster_read).
	 * The block allocation is delayed for write operations.
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EOPNOTSUPP);
	if (ap->a_cmd != BUF_CMD_READ)
		return (EOPNOTSUPP);

	ip = VTOI(vp);
	hmp = ip->hmp;
	bzero(array, sizeof(array));

	/*
	 * Calculate logical range
	 */
	KKASSERT((ap->a_loffset & HAMMER2_LBUFMASK64) == 0);
	lbeg = ap->a_loffset & HAMMER2_OFF_MASK_HI;
	lend = lbeg + HAMMER2_BMAP_COUNT * HAMMER2_PBUFSIZE - 1;
	if (lend < lbeg)
		lend = lbeg;
	loff = ap->a_loffset & HAMMER2_OFF_MASK_LO;

	parent = &ip->chain;
	hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(hmp, &parent,
				     lbeg, lend,
				     HAMMER2_LOOKUP_NODATA);
	if (chain == NULL) {
		*ap->a_doffsetp = ZFOFFSET;
		hammer2_chain_unlock(hmp, parent);
		return (0);
	}

	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
			ai = (chain->bref.key - lbeg) / HAMMER2_PBUFSIZE;
			KKASSERT(ai >= 0 && ai < HAMMER2_BMAP_COUNT);
			array[ai][0] = chain->bref.data_off & HAMMER2_OFF_MASK;
			array[ai][1] = chain->bytes;
		}
		chain = hammer2_chain_next(hmp, &parent, chain,
					   lbeg, lend,
					   HAMMER2_LOOKUP_NODATA);
	}
	hammer2_chain_unlock(hmp, parent);

	/*
	 * If the requested loffset is not mappable physically we can't
	 * bmap.  The caller will have to access the file data via a
	 * device buffer.
	 */
	if (array[0][0] == 0 || array[0][1] < loff + HAMMER2_LBUFSIZE) {
		*ap->a_doffsetp = NOOFFSET;
		return (0);
	}

	/*
	 * Calculate the physical disk offset range for array[0]
	 */
	pbeg = array[0][0] + loff;
	pbytes = array[0][1] - loff;

	for (ai = 1; ai < HAMMER2_BMAP_COUNT; ++ai) {
		if (array[ai][0] != pbeg + pbytes)
			break;
		pbytes += array[ai][1];
	}
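	/*
	 * array[ai] holds the (physical offset, byte count) of each
	 * logical block in the probed window; the loop above extends the
	 * run only while each block begins exactly where the previous one
	 * ended, yielding a single contiguous device range for the caller.
	 */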

	*ap->a_doffsetp = pbeg;
	if (ap->a_runp)
		*ap->a_runp = pbytes;
	return (0);
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);

	return (lf_advlock(ap, &ip->advlock, ip->ip_data.size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink to vp.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	hammer2_mount_t *hmp;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;
	if (hmp->ronly)
		return (EROFS);

	ip = VTOI(ap->a_vp);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	error = hammer2_hardlink_create(ip, dip, name, name_len);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	return error;
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_mount_t *hmp;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;
	if (hmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	error = hammer2_inode_create(hmp, ap->a_vap, ap->a_cred,
				     dip, name, name_len, &nip);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);
	hammer2_chain_unlock(hmp, &nip->chain);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	return error;
}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_mount_t *hmp;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;
	if (hmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	ap->a_vap->va_type = VLNK;	/* enforce type */

	error = hammer2_inode_create(hmp, ap->a_vap, ap->a_cred,
				     dip, name, name_len, &nip);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER2_EMBEDDED_BYTES) {
			KKASSERT(nip->ip_data.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(ap->a_target, nip->ip_data.u.data, bytes);
			nip->ip_data.size = bytes;
		} else {
			bzero(&auio, sizeof(auio));
			bzero(&aiov, sizeof(aiov));
			auio.uio_iov = &aiov;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_WRITE;
			auio.uio_resid = bytes;
			auio.uio_iovcnt = 1;
			auio.uio_td = curthread;
			aiov.iov_base = ap->a_target;
			aiov.iov_len = bytes;
			error = hammer2_write_file(nip, &auio, IO_APPEND);
			/* XXX handle error */
			error = 0;
		}
	}
	hammer2_chain_unlock(hmp, &nip->chain);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
	}
	return error;
}

/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_mount_t *hmp;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;
	if (hmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	error = hammer2_unlink_file(dip, name, name_len, 0, 1);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, NULL);
	}
	return (error);
}

/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_mount_t *hmp;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;
	if (hmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	error = hammer2_unlink_file(dip, name, name_len, 1, 1);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, NULL);
	}
	return (error);
}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;
	hammer2_inode_t *tdip;
	hammer2_inode_t *ip;
	hammer2_mount_t *hmp;
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return (EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return (EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	hmp = fdip->hmp;		/* check read-only filesystem */
	if (hmp->ronly)
		return (EROFS);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	ip = VTOI(fncp->nc_vp);		/* inode being moved */

	/*
	 * Keep a tight grip on the inode as removing it should disconnect
	 * it and we don't want to destroy it.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *	 unlinking elements from their directories.
	 */
	hammer2_chain_ref(hmp, &ip->chain);

	/*
	 * Remove target if it exists
	 */
	error = hammer2_unlink_file(tdip, tname, tname_len, -1, 1);
	if (error && error != ENOENT)
		goto done;
	cache_setunresolved(ap->a_tnch);
	cache_setvp(ap->a_tnch, NULL);

	/*
	 * Disconnect ip from the source directory, do not adjust
	 * the link count.  Note that rename doesn't need to understand
	 * whether this is a hardlink or not, we can just rename the
	 * forwarding entry and don't even have to adjust the related
	 * hardlink's link count.
	 */
	error = hammer2_unlink_file(fdip, fname, fname_len, -1, 0);
	if (error)
		goto done;

	if (ip->chain.parent != NULL)
		panic("hammer2_vop_nrename(): rename source != ip!");

	/*
	 * Reconnect ip to target directory.
	 */
	hammer2_chain_lock(hmp, &ip->chain, HAMMER2_RESOLVE_ALWAYS);
	error = hammer2_inode_connect(tdip, ip, tname, tname_len);

	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
	}
	hammer2_chain_unlock(hmp, &ip->chain);
done:
	hammer2_chain_drop(hmp, &ip->chain);	/* from ref up top */

	return (error);
}

/*
 * Unlink the file from the specified directory inode.  The directory inode
 * does not need to be locked.
 *
 * isdir determines whether a directory/non-directory check should be made.
 * No check is made if isdir is set to -1.
 *
 * adjlinks tells unlink that we want to adjust the nlinks count of the
 * inode.  When removing the last link for a NON forwarding entry we can
 * just ignore the link count... no point updating the inode that we are
 * about to dereference, it would just result in a lot of wasted I/O.
 *
 * However, if the entry is a forwarding entry (aka a hardlink), and adjlinks
 * is non-zero, we have to locate the hardlink and adjust its nlinks field.
 */
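/*
 * Call sites in this file use (isdir, adjlinks) as follows:
 *
 *	nremove: (0, 1)		must not be a directory
 *	nrmdir:	 (1, 1)		must be a directory
 *	nrename: (-1, 1)	remove existing target, any type
 *	nrename: (-1, 0)	disconnect source, leave nlinks alone
 */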
static
int
hammer2_unlink_file(hammer2_inode_t *dip, const uint8_t *name, size_t name_len,
		    int isdir, int adjlinks)
{
	hammer2_mount_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_chain_t *dparent;
	hammer2_chain_t *dchain;
	hammer2_key_t lhc;
	int error;

	error = 0;

	hmp = dip->hmp;
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Search for the filename in the directory
	 */
	parent = &dip->chain;
	hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);
	chain = hammer2_chain_lookup(hmp, &parent,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    chain->u.ip &&
		    name_len == chain->data->ipdata.name_len &&
		    bcmp(name, chain->data->ipdata.filename, name_len) == 0) {
			break;
		}
		chain = hammer2_chain_next(hmp, &parent, chain,
					   lhc, lhc + HAMMER2_DIRHASH_LOMASK,
					   0);
	}

	/*
	 * Not found or wrong type (isdir < 0 disables the type check).
	 */
	if (chain == NULL) {
		hammer2_chain_unlock(hmp, parent);
		return ENOENT;
	}
	if (chain->data->ipdata.type == HAMMER2_OBJTYPE_DIRECTORY &&
	    isdir == 0) {
		error = ENOTDIR;
		goto done;
	}
	if (chain->data->ipdata.type != HAMMER2_OBJTYPE_DIRECTORY &&
	    isdir == 1) {
		error = EISDIR;
		goto done;
	}

	/*
	 * If this is a directory the directory must be empty.  However, if
	 * isdir < 0 we are doing a rename and the directory does not have
	 * to be empty.
	 */
	if (chain->data->ipdata.type == HAMMER2_OBJTYPE_DIRECTORY &&
	    isdir >= 0) {
		dparent = chain;
		hammer2_chain_lock(hmp, dparent, HAMMER2_RESOLVE_ALWAYS);
		dchain = hammer2_chain_lookup(hmp, &dparent,
					      0, (hammer2_key_t)-1,
					      HAMMER2_LOOKUP_NODATA);
		if (dchain) {
			hammer2_chain_unlock(hmp, dchain);
			hammer2_chain_unlock(hmp, dparent);
			error = ENOTEMPTY;
			goto done;
		}
		hammer2_chain_unlock(hmp, dparent);
		dparent = NULL;
		/* dchain NULL */
	}

#if 0
	/*
	 * If adjlinks is non-zero this is a real deletion (otherwise it is
	 * probably a rename).  XXX
	 */
	if (adjlinks) {
		if (chain->data->ipdata.type == HAMMER2_OBJTYPE_HARDLINK) {
			/*hammer2_adjust_hardlink(chain->u.ip, -1);*/
			/* error handling */
		} else {
			waslastlink = 1;
		}
	} else {
		waslastlink = 0;
	}
#endif

	/*
	 * Found, the chain represents the inode.  Remove the parent reference
	 * to the chain.  The chain itself is no longer referenced and will
	 * be marked unmodified by hammer2_chain_delete(), avoiding unnecessary
	 * I/O.
	 */
	hammer2_chain_delete(hmp, parent, chain);
	/* XXX nlinks (hardlink special case) */
	/* XXX nlinks (parent directory) */

#if 0
	/*
	 * Destroy any associated vnode, but only if this was the last
	 * link.  XXX this might not be needed.
	 */
	if (chain->u.ip->vp) {
		struct vnode *vp;
		vp = hammer2_igetv(chain->u.ip, &error);
		if (error == 0) {
			vn_unlock(vp);
			/* hammer2_knote(vp, NOTE_DELETE); */
			cache_inval_vp(vp, CINV_DESTROY);
			vrele(vp);
		}
	}
#endif
	error = 0;

done:
	hammer2_chain_unlock(hmp, chain);
	hammer2_chain_unlock(hmp, parent);

	return error;
}


static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);

static
int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}

	return (error);
}

static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	hmp = ip->hmp;
	nbio = push_bio(bio);

	lbase = bio->bio_offset;
	chain = NULL;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	/*
	 * We must characterize the logical->physical translation if it
	 * has not already been cached.
	 *
	 * Physical data references < LBUFSIZE are never cached.  This
	 * includes both small-block allocations and inode-embedded data.
	 */
	if (nbio->bio_offset == NOOFFSET) {
		parent = &ip->chain;
		hammer2_chain_lock(hmp, parent, HAMMER2_RESOLVE_ALWAYS);

		chain = hammer2_chain_lookup(hmp, &parent, lbase, lbase,
					     HAMMER2_LOOKUP_NODATA);
		if (chain == NULL) {
			/*
			 * Data is zero-fill
			 */
			nbio->bio_offset = ZFOFFSET;
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			/*
			 * Data is embedded in the inode (do nothing)
			 */
			KKASSERT(chain == parent);
			hammer2_chain_unlock(hmp, chain);
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
			/*
			 * Data is on-media
			 */
			KKASSERT(bp->b_bcount == chain->bytes);
			nbio->bio_offset = chain->bref.data_off &
					   HAMMER2_OFF_MASK;
			hammer2_chain_unlock(hmp, chain);
			KKASSERT(nbio->bio_offset != 0);
		} else {
			panic("hammer2_strategy_read: unknown bref type");
		}
		hammer2_chain_unlock(hmp, parent);
	}

	if (hammer2_debug & 0x0020) {
		kprintf("read %016jx %016jx\n",
			bio->bio_offset, nbio->bio_offset);
	}

	if (nbio->bio_offset == ZFOFFSET) {
		/*
		 * Data is zero-fill
		 */
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(nbio);
	} else if (nbio->bio_offset != NOOFFSET) {
		/*
		 * Forward direct IO to the device
		 */
		vn_strategy(hmp->devvp, nbio);
	} else {
		/*
		 * Data is embedded in inode.
		 */
		bcopy(chain->data->ipdata.u.data, bp->b_data,
		      HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(nbio);
	}
	return (0);
}

static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	hmp = ip->hmp;
	nbio = push_bio(bio);

	KKASSERT((bio->bio_offset & HAMMER2_PBUFMASK64) == 0);
	KKASSERT(nbio->bio_offset != 0 && nbio->bio_offset != ZFOFFSET);

	if (nbio->bio_offset == NOOFFSET) {
		/*
		 * Must be embedded in the inode.
		 */
		KKASSERT(bio->bio_offset == 0);
		bcopy(bp->b_data, ip->ip_data.u.data, HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(nbio);

		/*
		 * This special flag does not follow the normal MODIFY1 rules
		 * because we might deadlock on ip.  Instead we depend on
		 * VOP_FSYNC() to detect the case.
		 */
		atomic_set_int(&ip->chain.flags, HAMMER2_CHAIN_DIRTYEMBED);
	} else {
		/*
		 * Forward direct IO to the device
		 */
		vn_strategy(hmp->devvp, nbio);
	}
	return (0);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	struct hammer2_mount *hmp;
	int rc;

	switch (ap->a_op) {
	case MOUNTCTL_SET_EXPORT:
		mp = ap->a_head.a_ops->head.vv_mount;
		hmp = MPTOH2(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = EINVAL;
		else
			rc = vfs_export(mp, &hmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	return (rc);
}

struct vop_ops hammer2_vnode_vops = {
	.vop_default		= vop_defaultop,
	.vop_fsync		= hammer2_vop_fsync,
	.vop_getpages		= vop_stdgetpages,
	.vop_putpages		= vop_stdputpages,
	.vop_access		= hammer2_vop_access,
	.vop_advlock		= hammer2_vop_advlock,
	.vop_close		= hammer2_vop_close,
	.vop_nlink		= hammer2_vop_nlink,
	.vop_ncreate		= hammer2_vop_ncreate,
	.vop_nsymlink		= hammer2_vop_nsymlink,
	.vop_nremove		= hammer2_vop_nremove,
	.vop_nrmdir		= hammer2_vop_nrmdir,
	.vop_nrename		= hammer2_vop_nrename,
	.vop_getattr		= hammer2_vop_getattr,
	.vop_setattr		= hammer2_vop_setattr,
	.vop_readdir		= hammer2_vop_readdir,
	.vop_readlink		= hammer2_vop_readlink,
	.vop_read		= hammer2_vop_read,
	.vop_write		= hammer2_vop_write,
	.vop_open		= hammer2_vop_open,
	.vop_inactive		= hammer2_vop_inactive,
	.vop_reclaim		= hammer2_vop_reclaim,
	.vop_nresolve		= hammer2_vop_nresolve,
	.vop_nlookupdotdot	= hammer2_vop_nlookupdotdot,
	.vop_nmkdir		= hammer2_vop_nmkdir,
	.vop_mountctl		= hammer2_vop_mountctl,
	.vop_bmap		= hammer2_vop_bmap,
	.vop_strategy		= hammer2_vop_strategy,
};

struct vop_ops hammer2_spec_vops = {

};

struct vop_ops hammer2_fifo_vops = {

};