/* sys/vfs/hammer2/hammer2_vnops.c - HAMMER2 vnode operations */
/*
 * Copyright (c) 2011-2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
703720e4
MD
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/kernel.h>
38#include <sys/fcntl.h>
39#include <sys/buf.h>
40#include <sys/proc.h>
41#include <sys/namei.h>
42#include <sys/mount.h>
43#include <sys/vnode.h>
f0206a67 44#include <sys/mountctl.h>
e028fa74 45#include <sys/dirent.h>
703720e4
MD
46
47#include "hammer2.h"
48
db71f61f
MD
49#define ZFOFFSET (-2LL)
50
703720e4
MD
51/*
52 * Last reference to a vnode is going away but it is still cached.
53 */
e118c14f 54static
703720e4 55int
e118c14f 56hammer2_vop_inactive(struct vop_inactive_args *ap)
703720e4
MD
57{
58 struct vnode *vp;
59 struct hammer2_inode *ip;
e118c14f 60#if 0
703720e4 61 struct hammer2_mount *hmp;
e118c14f 62#endif
703720e4 63
703720e4
MD
64 vp = ap->a_vp;
65 ip = VTOI(vp);
703720e4 66
df9ea374
MD
67 /*
68 * Degenerate case
69 */
70 if (ip == NULL) {
71 vrecycle(vp);
72 return (0);
73 }
74
703720e4
MD
75 return (0);
76}
77
/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer2_inode *ip;
	struct hammer2_mount *hmp;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL)
		return(0);
	hmp = ip->hmp;

	/*
	 * Sever the vp<->ip association under the exclusive inode lock,
	 * flush remaining dirty chain state, then drop the chain
	 * reference that the vnode association held.
	 */
	hammer2_inode_lock_ex(ip);
	vp->v_data = NULL;
	ip->vp = NULL;
	hammer2_chain_flush(hmp, &ip->chain, NULL);
	hammer2_inode_unlock_ex(ip);
	hammer2_chain_drop(hmp, &ip->chain);	/* vp ref removed */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	return (0);
}
111
e118c14f 112static
703720e4 113int
e118c14f 114hammer2_vop_fsync(struct vop_fsync_args *ap)
703720e4 115{
b7926f31
MD
116 struct hammer2_inode *ip;
117 struct hammer2_mount *hmp;
118 struct vnode *vp;
119
120 vp = ap->a_vp;
121 ip = VTOI(vp);
122 hmp = ip->hmp;
123
124 hammer2_inode_lock_ex(ip);
125 vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
126 hammer2_chain_flush(hmp, &ip->chain, NULL);
127 hammer2_inode_unlock_ex(ip);
128 return (0);
703720e4
MD
129}
130
e118c14f 131static
703720e4 132int
e118c14f 133hammer2_vop_access(struct vop_access_args *ap)
703720e4 134{
37494cab
MD
135 hammer2_inode_t *ip = VTOI(ap->a_vp);
136 uid_t uid;
137 gid_t gid;
138 int error;
139
140 uid = hammer2_to_unix_xid(&ip->ip_data.uid);
141 gid = hammer2_to_unix_xid(&ip->ip_data.gid);
142
143 error = vop_helper_access(ap, uid, gid, ip->ip_data.mode,
144 ip->ip_data.uflags);
145 return (error);
703720e4
MD
146}
147
e118c14f 148static
703720e4 149int
e118c14f 150hammer2_vop_getattr(struct vop_getattr_args *ap)
703720e4 151{
cd4b3d92
MD
152 hammer2_mount_t *hmp;
153 hammer2_inode_t *ip;
703720e4
MD
154 struct vnode *vp;
155 struct vattr *vap;
703720e4
MD
156
157 vp = ap->a_vp;
158 vap = ap->a_vap;
159
cd4b3d92
MD
160 ip = VTOI(vp);
161 hmp = ip->hmp;
162
703720e4
MD
163 hammer2_inode_lock_sh(ip);
164
cd4b3d92
MD
165 vap->va_fsid = hmp->mp->mnt_stat.f_fsid.val[0];
166 vap->va_fileid = ip->ip_data.inum;
167 vap->va_mode = ip->ip_data.mode;
168 vap->va_nlink = ip->ip_data.nlinks;
703720e4
MD
169 vap->va_uid = 0;
170 vap->va_gid = 0;
cd4b3d92
MD
171 vap->va_rmajor = 0;
172 vap->va_rminor = 0;
173 vap->va_size = ip->ip_data.size;
df9ea374 174 vap->va_blocksize = HAMMER2_PBUFSIZE;
cd4b3d92
MD
175 vap->va_flags = ip->ip_data.uflags;
176 hammer2_time_to_timespec(ip->ip_data.ctime, &vap->va_ctime);
177 hammer2_time_to_timespec(ip->ip_data.mtime, &vap->va_mtime);
178 hammer2_time_to_timespec(ip->ip_data.mtime, &vap->va_atime);
179 vap->va_gen = 1;
180 vap->va_bytes = vap->va_size;
181 vap->va_type = hammer2_get_vtype(ip);
182 vap->va_filerev = 0;
183 vap->va_uid_uuid = ip->ip_data.uid;
184 vap->va_gid_uuid = ip->ip_data.gid;
185 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
186 VA_FSID_UUID_VALID;
703720e4
MD
187
188 hammer2_inode_unlock_sh(ip);
189
190 return (0);
191}
192
/*
 * Read directory entries.  Emits the artificial '.' and '..' entries
 * first, then scans the directory's chain topology for real inodes.
 * Fills the cookie array for the caller when requested.
 */
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	hammer2_inode_t *xip;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;		/* running directory offset / resume point */
	int cookie_index;
	int ncookies;
	int error;
	int dtype;
	int r;

	ip = VTOI(ap->a_vp);
	hmp = ip->hmp;
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	/*
	 * Setup cookies directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for articial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	error = 0;
	chain = (void *)(intptr_t)-1;	/* non-NULL early done means not eof */

	if (saveoff == 0) {
		r = vop_write_dirent(&error, uio,
				     ip->ip_data.inum &
				      HAMMER2_DIRHASH_USERMSK,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}
	if (saveoff == 1) {
		/* '..' points at self at the mount root / orphaned root */
		if (ip->pip == NULL || ip == hmp->iroot)
			xip = ip;
		else
			xip = ip->pip;

		r = vop_write_dirent(&error, uio,
				     xip->ip_data.inum &
				      HAMMER2_DIRHASH_USERMSK,
				     DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;

	parent = &ip->chain;
	hammer2_chain_ref(hmp, parent);
	error = hammer2_chain_lock(hmp, parent);
	if (error) {
		hammer2_chain_put(hmp, parent);
		goto done;
	}
	chain = hammer2_chain_lookup(hmp, &parent, lkey, (hammer2_key_t)-1, 0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			dtype = hammer2_get_dtype(chain->u.ip);
			saveoff = chain->bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     chain->u.ip->ip_data.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype, chain->u.ip->ip_data.name_len,
					     chain->u.ip->ip_data.filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n",
				chain->bref.type);
		}

		/*
		 * Keys may not be returned in order so once we have a
		 * placemarker (chain) the scan must allow the full range
		 * or some entries will be missed.
		 */
		chain = hammer2_chain_next(hmp, &parent, chain,
					   0, (hammer2_key_t)-1, 0);
		if (chain) {
			saveoff = (chain->bref.key &
				   HAMMER2_DIRHASH_USERMSK) + 1;
		} else {
			saveoff = (hammer2_key_t)-1;
		}
		if (cookie_index == ncookies)
			break;
	}
	hammer2_chain_put(hmp, parent);
	if (chain)
		hammer2_chain_put(hmp, chain);
done:
	/* eof only when the scan ran off the end (chain == NULL) */
	if (ap->a_eofflag)
		*ap->a_eofflag = (chain == NULL);
	uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		/* nothing emitted: free the cookie array on error */
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
344
/*
 * Read data from a regular file through the buffer cache, using
 * cluster_read() for read-ahead.
 */
static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	struct buf *bp;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	hmp = ip->hmp;
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;	/* sequential access heuristic */
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	/*
	 * UIO read loop: one logical buffer per iteration, clamped at EOF.
	 */
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ip_data.size) {
		hammer2_key_t off_hi;	/* buffer-aligned base offset */
		int off_lo;		/* offset within the buffer */
		int n;

		off_hi = uio->uio_offset & ~HAMMER2_LBUFMASK64;
		off_lo = (int)(uio->uio_offset & HAMMER2_LBUFMASK64);

		/* XXX bigread & signal check test */

		error = cluster_read(vp, ip->ip_data.size, off_hi,
				     HAMMER2_LBUFSIZE, HAMMER2_PBUFSIZE,
				     seqcount * BKVASIZE, &bp);
		if (error)
			break;
		/* clamp copy to request, buffer remainder, and EOF */
		n = HAMMER2_LBUFSIZE - off_lo;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ip_data.size - uio->uio_offset)
			n = (int)(ip->ip_data.size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomove((char *)bp->b_data + off_lo, n, uio);
		bqrelse(bp);
	}
	return (error);
}
703720e4 405
e118c14f 406static
47902fef 407int
e118c14f 408hammer2_vop_write(struct vop_write_args *ap)
47902fef 409{
db71f61f
MD
410 thread_t td;
411 struct vnode *vp;
412 hammer2_mount_t *hmp;
413 hammer2_inode_t *ip;
414 struct buf *bp;
415 struct uio *uio;
416 int error;
417 int kflags;
418 int seqcount;
419 int bigwrite;
420
421 /*
422 * Read operations supported on this vnode?
423 */
424 vp = ap->a_vp;
425 if (vp->v_type != VREG)
426 return (EINVAL);
427
428 /*
429 * Misc
430 */
431 ip = VTOI(vp);
432 hmp = ip->hmp;
433 uio = ap->a_uio;
434 error = 0;
435 kflags = 0;
436 if (hmp->ronly)
437 return (EROFS);
438
439 seqcount = ap->a_ioflag >> 16;
440 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
441
442 /*
443 * Check resource limit
444 */
445 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
446 uio->uio_offset + uio->uio_resid >
447 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
448 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
449 return (EFBIG);
450 }
451
452 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
453
454 /*
455 * UIO read loop
456 */
457 while (uio->uio_resid > 0) {
458 hammer2_key_t nsize;
459 hammer2_key_t off_hi;
460 int fixsize;
461 int off_lo;
462 int n;
463 int trivial;
464 int endofblk;
465
466 off_hi = uio->uio_offset & ~HAMMER2_LBUFMASK64;
467 off_lo = (int)(uio->uio_offset & HAMMER2_LBUFMASK64);
468
469 n = HAMMER2_LBUFSIZE - off_lo;
470 if (n > uio->uio_resid) {
471 n = uio->uio_resid;
472 endofblk = 0;
473 } else {
474 endofblk = 1;
475 }
476 nsize = uio->uio_offset + n;
477
478 /* XXX bigwrite & signal check test */
479
480 /*
481 * Don't allow the buffer build to blow out the buffer
482 * cache.
483 */
484 if ((ap->a_ioflag & IO_RECURSE) == 0)
485 bwillwrite(HAMMER2_LBUFSIZE);
486
487 /*
488 * Extend the size of the file as needed
489 * XXX lock.
490 */
491 if (nsize > ip->ip_data.size) {
492 if (uio->uio_offset > ip->ip_data.size)
493 trivial = 0;
494 else
495 trivial = 1;
496 nvextendbuf(vp, ip->ip_data.size, nsize,
497 HAMMER2_LBUFSIZE, HAMMER2_LBUFSIZE,
498 (int)(ip->ip_data.size & HAMMER2_LBUFMASK),
499 (int)(nsize),
500 trivial);
501 kflags |= NOTE_EXTEND;
502 fixsize = 1;
503 } else {
504 fixsize = 0;
505 }
506
507 if (uio->uio_segflg == UIO_NOCOPY) {
508 /*
509 * Issuing a write with the same data backing the
510 * buffer. Instantiate the buffer to collect the
511 * backing vm pages, then read-in any missing bits.
512 *
513 * This case is used by vop_stdputpages().
514 */
515 bp = getblk(vp, off_hi,
516 HAMMER2_LBUFSIZE, GETBLK_BHEAVY, 0);
517 if ((bp->b_flags & B_CACHE) == 0) {
518 bqrelse(bp);
519 error = bread(ap->a_vp,
520 off_hi, HAMMER2_LBUFSIZE, &bp);
521 }
522 } else if (off_lo == 0 && uio->uio_resid >= HAMMER2_LBUFSIZE) {
523 /*
524 * Even though we are entirely overwriting the buffer
525 * we may still have to zero it out to avoid a
526 * mmap/write visibility issue.
527 */
528 bp = getblk(vp, off_hi,
529 HAMMER2_LBUFSIZE, GETBLK_BHEAVY, 0);
530 if ((bp->b_flags & B_CACHE) == 0)
531 vfs_bio_clrbuf(bp);
532 } else if (off_hi >= ip->ip_data.size) {
533 /*
534 * If the base offset of the buffer is beyond the
535 * file EOF, we don't have to issue a read.
536 */
537 bp = getblk(vp, off_hi,
538 HAMMER2_LBUFSIZE, GETBLK_BHEAVY, 0);
539 vfs_bio_clrbuf(bp);
540 } else {
541 /*
542 * Partial overwrite, read in any missing bits then
543 * replace the portion being written.
544 */
545 error = bread(vp, off_hi, HAMMER2_LBUFSIZE, &bp);
546 if (error == 0)
547 bheavy(bp);
548 }
549
550 if (error == 0) {
551 /* release lock */
552 error = uiomove(bp->b_data + off_lo, n, uio);
553 /* acquire lock */
554 }
555
556 if (error) {
557 brelse(bp);
558 if (fixsize) {
559 nvtruncbuf(vp, ip->ip_data.size,
560 HAMMER2_LBUFSIZE, HAMMER2_LBUFSIZE);
561 }
562 break;
563 }
564 kflags |= NOTE_WRITE;
565 if (ip->ip_data.size < uio->uio_offset)
566 ip->ip_data.size = uio->uio_offset;
567 /* XXX update ino_data.mtime */
568
569 /*
570 * Once we dirty a buffer any cached offset becomes invalid.
571 */
572 bp->b_bio2.bio_offset = NOOFFSET;
573 bp->b_flags |= B_AGE;
574 if (ap->a_ioflag & IO_SYNC) {
575 bwrite(bp);
576 } else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
577 bawrite(bp);
578 } else if (ap->a_ioflag & IO_ASYNC) {
579 bawrite(bp);
580 } else {
581 bdwrite(bp);
582 }
583 }
584 /* hammer2_knote(vp, kflags); */
585 return (error);
703720e4
MD
586}
587
/*
 * Resolve a filename in a directory by scanning the directory hash
 * range for a matching inode chain, then instantiating its vnode and
 * caching the result in the namecache.
 */
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_mount_t *hmp;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_key_t lhc;
	int error = 0;
	struct vnode *vp;

	dip = VTOI(ap->a_dvp);
	hmp = dip->hmp;
	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	lhc = hammer2_dirhash(name, name_len);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	parent = &dip->chain;
	hammer2_chain_ref(hmp, parent);
	hammer2_chain_lock(hmp, parent);
	/* scan all entries colliding in the directory-hash bucket */
	chain = hammer2_chain_lookup(hmp, &parent,
				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
				     0);
	while (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
		    chain->u.ip &&
		    name_len == chain->data->ipdata.name_len &&
		    bcmp(name, chain->data->ipdata.filename, name_len) == 0) {
			break;
		}
		chain = hammer2_chain_next(hmp, &parent, chain,
					   lhc, lhc + HAMMER2_DIRHASH_LOMASK,
					   0);
	}
	hammer2_chain_put(hmp, parent);

	if (chain) {
		vp = hammer2_igetv(chain->u.ip, &error);
		if (error == 0) {
			/* igetv returns a locked, referenced vnode */
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		hammer2_chain_put(hmp, chain);
	} else {
		/* negative cache entry */
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	return error;
}
646
647static
648int
649hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
650{
651 hammer2_inode_t *dip;
652 hammer2_inode_t *ip;
653 hammer2_mount_t *hmp;
654 int error;
655
656 dip = VTOI(ap->a_dvp);
657 hmp = dip->hmp;
658
659 if ((ip = dip->pip) == NULL) {
660 *ap->a_vpp = NULL;
661 return ENOENT;
662 }
663 hammer2_chain_ref(hmp, &ip->chain);
664 hammer2_chain_lock(hmp, &ip->chain);
665 *ap->a_vpp = hammer2_igetv(ip, &error);
666 hammer2_chain_put(hmp, &ip->chain);
667
668 return error;
669}
670
671static
672int
673hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
674{
675 hammer2_mount_t *hmp;
676 hammer2_inode_t *dip;
677 hammer2_inode_t *nip;
678 struct namecache *ncp;
679 const uint8_t *name;
680 size_t name_len;
681 int error;
682
683 dip = VTOI(ap->a_dvp);
684 hmp = dip->hmp;
db71f61f
MD
685 if (hmp->ronly)
686 return (EROFS);
687
37494cab
MD
688 ncp = ap->a_nch->ncp;
689 name = ncp->nc_name;
690 name_len = ncp->nc_nlen;
691
692 error = hammer2_create_inode(hmp, ap->a_vap, ap->a_cred,
693 dip, name, name_len, &nip);
694 if (error) {
695 KKASSERT(nip == NULL);
696 *ap->a_vpp = NULL;
697 return error;
698 }
699 *ap->a_vpp = hammer2_igetv(nip, &error);
700 hammer2_chain_put(hmp, &nip->chain);
701
702 if (error == 0) {
703 cache_setunresolved(ap->a_nch);
704 cache_setvp(ap->a_nch, *ap->a_vpp);
705 }
706 return error;
703720e4
MD
707}
708
db71f61f
MD
/*
 * Return the largest contiguous physical disk range for the logical
 * request.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * NOTE(review): *runp / *runb are never filled in here — presumably
 * callers tolerate that; confirm against cluster_read usage.
 */
static
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	struct vnode *vp;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t off_hi;

	/*
	 * Only supported on regular files
	 *
	 * Only supported for read operations (required for cluster_read).
	 * The block allocation is delayed for write operations.
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EOPNOTSUPP);
	if (ap->a_cmd != BUF_CMD_READ)
		return (EOPNOTSUPP);

	ip = VTOI(vp);
	hmp = ip->hmp;
	off_hi = ap->a_loffset & HAMMER2_OFF_MASK_HI;
	KKASSERT((ap->a_loffset & HAMMER2_LBUFMASK64) == 0);

	parent = &ip->chain;
	hammer2_chain_ref(hmp, parent);
	hammer2_chain_lock(hmp, parent);
	chain = hammer2_chain_lookup(hmp, &parent, off_hi, off_hi, 0);
	if (chain) {
		*ap->a_doffsetp = (chain->bref.data_off & ~HAMMER2_LBUFMASK64);
		hammer2_chain_put(hmp, chain);
	} else {
		*ap->a_doffsetp = ZFOFFSET;	/* zero-fill hole */
	}
	hammer2_chain_put(hmp, parent);
	return (0);
}
756
static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	/* No hammer2-specific open-time state; use the stock handler. */
	return vop_stdopen(ap);
}
763
c667909f
MD
static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	/* No hammer2-specific close-time state; use the stock handler. */
	return vop_stdclose(ap);
}
770
771/*
772 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
773 *
774 * The operating system has already ensured that the directory entry
775 * does not exist and done all appropriate namespace locking.
776 */
777static
778int
779hammer2_vop_ncreate(struct vop_ncreate_args *ap)
780{
781 hammer2_mount_t *hmp;
782 hammer2_inode_t *dip;
783 hammer2_inode_t *nip;
784 struct namecache *ncp;
785 const uint8_t *name;
786 size_t name_len;
787 int error;
788
789 dip = VTOI(ap->a_dvp);
790 hmp = dip->hmp;
791 if (hmp->ronly)
792 return (EROFS);
793
794 ncp = ap->a_nch->ncp;
795 name = ncp->nc_name;
796 name_len = ncp->nc_nlen;
797
798 error = hammer2_create_inode(hmp, ap->a_vap, ap->a_cred,
799 dip, name, name_len, &nip);
800 if (error) {
801 KKASSERT(nip == NULL);
802 *ap->a_vpp = NULL;
803 return error;
804 }
805 *ap->a_vpp = hammer2_igetv(nip, &error);
806 hammer2_chain_put(hmp, &nip->chain);
807
808 if (error == 0) {
809 cache_setunresolved(ap->a_nch);
810 cache_setvp(ap->a_nch, *ap->a_vpp);
811 }
812 return error;
813}
814
db71f61f
MD
815static int hammer2_strategy_read(struct vop_strategy_args *ap);
816static int hammer2_strategy_write(struct vop_strategy_args *ap);
817
e118c14f 818static
703720e4 819int
e118c14f 820hammer2_vop_strategy(struct vop_strategy_args *ap)
703720e4 821{
703720e4
MD
822 struct bio *biop;
823 struct buf *bp;
703720e4
MD
824 int error;
825
703720e4
MD
826 biop = ap->a_bio;
827 bp = biop->bio_buf;
703720e4
MD
828
829 switch(bp->b_cmd) {
9c2e0de0 830 case BUF_CMD_READ:
db71f61f
MD
831 error = hammer2_strategy_read(ap);
832 break;
9c2e0de0 833 case BUF_CMD_WRITE:
db71f61f
MD
834 error = hammer2_strategy_write(ap);
835 break;
703720e4
MD
836 default:
837 bp->b_error = error = EINVAL;
838 bp->b_flags |= B_ERROR;
839 biodone(biop);
840 break;
841 }
842
843 return (error);
844}
845
db71f61f
MD
/*
 * Strategy read: translate the logical offset to a device offset via
 * a chain lookup, then either zero-fill (hole) or forward the bio to
 * the underlying device.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t off_hi;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	hmp = ip->hmp;
	nbio = push_bio(bio);

	/* translate only when no device offset is cached on the bio */
	if (nbio->bio_offset == NOOFFSET) {
		off_hi = bio->bio_offset & HAMMER2_OFF_MASK_HI;
		KKASSERT((bio->bio_offset & HAMMER2_LBUFMASK64) == 0);

		parent = &ip->chain;
		hammer2_chain_ref(hmp, parent);
		hammer2_chain_lock(hmp, parent);

		/*
		 * Specifying NOLOCK avoids unnecessary bread()s of the
		 * chain element's content.  We just need the block device
		 * offset.
		 */
		/* NOTE(review): kprintf()s below look like debug output */
		kprintf("lookup data logical %016jx\n", off_hi);
		chain = hammer2_chain_lookup(hmp, &parent, off_hi, off_hi,
					     HAMMER2_LOOKUP_NOLOCK);
		if (chain) {
			kprintf("lookup success\n");
			nbio->bio_offset = (chain->bref.data_off &
					    ~HAMMER2_LBUFMASK64);
			/* NOLOCK lookup: drop only the ref, no unlock */
			hammer2_chain_drop(hmp, chain);
		} else {
			kprintf("lookup zero-fill\n");
			nbio->bio_offset = ZFOFFSET;
		}
		hammer2_chain_put(hmp, parent);
	}
	if (nbio->bio_offset == ZFOFFSET) {
		/* hole: satisfy the read with zeros, no device I/O */
		bp->b_resid = 0;
		bp->b_error = 0;
		vfs_bio_clrbuf(bp);
		biodone(nbio);
	} else {
		kprintf("data read %016jx\n", nbio->bio_offset);
		vn_strategy(hmp->devvp, nbio);
	}
	return (0);
}
903
/*
 * Strategy write: copy the buffer's data into the chain element at
 * the logical offset, creating a new data chain for holes.  The bio
 * is completed synchronously; no device I/O is issued here.
 */
static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_mount_t *hmp;
	hammer2_inode_t *ip;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t off_hi;
	int off_lo;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	hmp = ip->hmp;
	nbio = push_bio(bio);

	/*
	 * Our bmap doesn't support writes atm, and a vop_write should
	 * clear the physical disk offset cache for the copy-on-write
	 * operation.
	 */
	KKASSERT(nbio->bio_offset == NOOFFSET);

	off_hi = bio->bio_offset & HAMMER2_OFF_MASK_HI;
	off_lo = bio->bio_offset & HAMMER2_OFF_MASK_LO;
	KKASSERT((bio->bio_offset & HAMMER2_LBUFMASK64) == 0);

	parent = &ip->chain;
	hammer2_chain_ref(hmp, parent);
	hammer2_chain_lock(hmp, parent);
	chain = hammer2_chain_lookup(hmp, &parent, off_hi, off_hi, 0);
	if (chain) {
		/* existing block: copy-on-write modify then overwrite */
		hammer2_chain_modify(hmp, chain);
		bcopy(bp->b_data, chain->data->buf + off_lo, bp->b_bcount);
		hammer2_chain_put(hmp, chain);
	} else {
		/* hole: allocate a new data chain element */
		chain = hammer2_chain_create(hmp, parent,
					     off_hi, HAMMER2_PBUFRADIX,
					     HAMMER2_BREF_TYPE_DATA,
					     HAMMER2_PBUFSIZE);
		/*
		 * NOTE(review): the chain_create result is used without a
		 * NULL/error check — presumably it cannot fail here; verify.
		 */
		bcopy(bp->b_data, chain->data->buf + off_lo, bp->b_bcount);
		hammer2_chain_put(hmp, chain);
	}
	hammer2_chain_put(hmp, parent);

	bp->b_resid = 0;
	bp->b_error = 0;
	biodone(nbio);

	return (0);
}
959
e118c14f 960static
f0206a67 961int
e118c14f 962hammer2_vop_mountctl(struct vop_mountctl_args *ap)
f0206a67
VS
963{
964 struct mount *mp;
965 struct hammer2_mount *hmp;
966 int rc;
967
968 switch (ap->a_op) {
969 case (MOUNTCTL_SET_EXPORT):
970 mp = ap->a_head.a_ops->head.vv_mount;
971 hmp = MPTOH2(mp);
972
973 if (ap->a_ctllen != sizeof(struct export_args))
974 rc = (EINVAL);
975 else
10c5dee0
MD
976 rc = vfs_export(mp, &hmp->export,
977 (const struct export_args *)ap->a_ctl);
f0206a67
VS
978 break;
979 default:
980 rc = vop_stdmountctl(ap);
981 break;
982 }
983 return (rc);
984}
985
703720e4
MD
/*
 * Vnode operations vector for regular hammer2 vnodes.  Operations not
 * listed fall through to vop_defaultop.
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
};

/* Placeholder vectors for special-file and fifo vnodes (empty for now). */
struct vop_ops hammer2_spec_vops = {

};

struct vop_ops hammer2_fifo_vops = {

};