1/*
2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.51 2008/05/09 22:17:43 dillon Exp $
35 */
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/fcntl.h>
41#include <sys/namecache.h>
42#include <sys/vnode.h>
43#include <sys/lockf.h>
44#include <sys/event.h>
45#include <sys/stat.h>
46#include <sys/dirent.h>
47#include <vm/vm_extern.h>
48#include <vfs/fifofs/fifo.h>
49#include "hammer.h"
50
51/*
52 * USERFS VNOPS
53 */
54/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
55static int hammer_vop_fsync(struct vop_fsync_args *);
56static int hammer_vop_read(struct vop_read_args *);
57static int hammer_vop_write(struct vop_write_args *);
58static int hammer_vop_access(struct vop_access_args *);
59static int hammer_vop_advlock(struct vop_advlock_args *);
60static int hammer_vop_close(struct vop_close_args *);
61static int hammer_vop_ncreate(struct vop_ncreate_args *);
62static int hammer_vop_getattr(struct vop_getattr_args *);
63static int hammer_vop_nresolve(struct vop_nresolve_args *);
64static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
65static int hammer_vop_nlink(struct vop_nlink_args *);
66static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
67static int hammer_vop_nmknod(struct vop_nmknod_args *);
68static int hammer_vop_open(struct vop_open_args *);
69static int hammer_vop_pathconf(struct vop_pathconf_args *);
70static int hammer_vop_print(struct vop_print_args *);
71static int hammer_vop_readdir(struct vop_readdir_args *);
72static int hammer_vop_readlink(struct vop_readlink_args *);
73static int hammer_vop_nremove(struct vop_nremove_args *);
74static int hammer_vop_nrename(struct vop_nrename_args *);
75static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
76static int hammer_vop_setattr(struct vop_setattr_args *);
77static int hammer_vop_strategy(struct vop_strategy_args *);
78static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
79static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
80static int hammer_vop_ioctl(struct vop_ioctl_args *);
81static int hammer_vop_mountctl(struct vop_mountctl_args *);
82
83static int hammer_vop_fifoclose (struct vop_close_args *);
84static int hammer_vop_fiforead (struct vop_read_args *);
85static int hammer_vop_fifowrite (struct vop_write_args *);
86
87static int hammer_vop_specclose (struct vop_close_args *);
88static int hammer_vop_specread (struct vop_read_args *);
89static int hammer_vop_specwrite (struct vop_write_args *);
90
91struct vop_ops hammer_vnode_vops = {
92 .vop_default = vop_defaultop,
93 .vop_fsync = hammer_vop_fsync,
94 .vop_getpages = vop_stdgetpages,
95 .vop_putpages = vop_stdputpages,
96 .vop_read = hammer_vop_read,
97 .vop_write = hammer_vop_write,
98 .vop_access = hammer_vop_access,
99 .vop_advlock = hammer_vop_advlock,
100 .vop_close = hammer_vop_close,
101 .vop_ncreate = hammer_vop_ncreate,
102 .vop_getattr = hammer_vop_getattr,
103 .vop_inactive = hammer_vop_inactive,
104 .vop_reclaim = hammer_vop_reclaim,
105 .vop_nresolve = hammer_vop_nresolve,
106 .vop_nlookupdotdot = hammer_vop_nlookupdotdot,
107 .vop_nlink = hammer_vop_nlink,
108 .vop_nmkdir = hammer_vop_nmkdir,
109 .vop_nmknod = hammer_vop_nmknod,
110 .vop_open = hammer_vop_open,
111 .vop_pathconf = hammer_vop_pathconf,
112 .vop_print = hammer_vop_print,
113 .vop_readdir = hammer_vop_readdir,
114 .vop_readlink = hammer_vop_readlink,
115 .vop_nremove = hammer_vop_nremove,
116 .vop_nrename = hammer_vop_nrename,
117 .vop_nrmdir = hammer_vop_nrmdir,
118 .vop_setattr = hammer_vop_setattr,
119 .vop_strategy = hammer_vop_strategy,
120 .vop_nsymlink = hammer_vop_nsymlink,
121 .vop_nwhiteout = hammer_vop_nwhiteout,
122 .vop_ioctl = hammer_vop_ioctl,
123 .vop_mountctl = hammer_vop_mountctl
124};
125
126struct vop_ops hammer_spec_vops = {
127 .vop_default = spec_vnoperate,
128 .vop_fsync = hammer_vop_fsync,
129 .vop_read = hammer_vop_specread,
130 .vop_write = hammer_vop_specwrite,
131 .vop_access = hammer_vop_access,
132 .vop_close = hammer_vop_specclose,
133 .vop_getattr = hammer_vop_getattr,
134 .vop_inactive = hammer_vop_inactive,
135 .vop_reclaim = hammer_vop_reclaim,
136 .vop_setattr = hammer_vop_setattr
137};
138
139struct vop_ops hammer_fifo_vops = {
140 .vop_default = fifo_vnoperate,
141 .vop_fsync = hammer_vop_fsync,
142 .vop_read = hammer_vop_fiforead,
143 .vop_write = hammer_vop_fifowrite,
144 .vop_access = hammer_vop_access,
145 .vop_close = hammer_vop_fifoclose,
146 .vop_getattr = hammer_vop_getattr,
147 .vop_inactive = hammer_vop_inactive,
148 .vop_reclaim = hammer_vop_reclaim,
149 .vop_setattr = hammer_vop_setattr
150};
151
152static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
153 struct vnode *dvp, struct ucred *cred, int flags);
154static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
155static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
156
157#if 0
158static
159int
160hammer_vop_vnoperate(struct vop_generic_args *ap)
161{
162 return (VOCALL(&hammer_vnode_vops, ap));
163}
164#endif
165
166/*
167 * hammer_vop_fsync { vp, waitfor }
168 */
169static
170int
171hammer_vop_fsync(struct vop_fsync_args *ap)
172{
173 hammer_inode_t ip = VTOI(ap->a_vp);
174
175 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
176 vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
177 if (ap->a_waitfor == MNT_WAIT)
178 hammer_wait_inode(ip);
179 return (ip->error);
180}
181
182/*
183 * hammer_vop_read { vp, uio, ioflag, cred }
184 */
185static
186int
187hammer_vop_read(struct vop_read_args *ap)
188{
189 struct hammer_transaction trans;
190 hammer_inode_t ip;
191 off_t offset;
192 struct buf *bp;
193 struct uio *uio;
194 int error;
195 int n;
196 int seqcount;
197
198 if (ap->a_vp->v_type != VREG)
199 return (EINVAL);
200 ip = VTOI(ap->a_vp);
201 error = 0;
202 seqcount = ap->a_ioflag >> 16;
203
204 hammer_start_transaction(&trans, ip->hmp);
205
206 /*
207 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
208 */
209 uio = ap->a_uio;
210 while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_rec.ino_size) {
211 offset = uio->uio_offset & HAMMER_BUFMASK;
212#if 0
213 error = cluster_read(ap->a_vp, ip->ino_rec.ino_size,
214 uio->uio_offset - offset, HAMMER_BUFSIZE,
215 MAXBSIZE, seqcount, &bp);
216#endif
217 error = bread(ap->a_vp, uio->uio_offset - offset,
218 HAMMER_BUFSIZE, &bp);
219 if (error) {
220 brelse(bp);
221 break;
222 }
223 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
224 n = HAMMER_BUFSIZE - offset;
225 if (n > uio->uio_resid)
226 n = uio->uio_resid;
227 if (n > ip->ino_rec.ino_size - uio->uio_offset)
228 n = (int)(ip->ino_rec.ino_size - uio->uio_offset);
229 error = uiomove((char *)bp->b_data + offset, n, uio);
230 if (error) {
231 bqrelse(bp);
232 break;
233 }
234 bqrelse(bp);
235 }
236 if ((ip->flags & HAMMER_INODE_RO) == 0 &&
237 (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
238 ip->ino_rec.ino_atime = trans.time;
239 hammer_modify_inode(&trans, ip, HAMMER_INODE_ITIMES);
240 }
241 hammer_done_transaction(&trans);
242 return (error);
243}
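/*
 * Editorial sketch (not part of the original source): the buffer cache
 * windowing used by hammer_vop_read() above.  A logical offset splits
 * into a HAMMER_BUFSIZE-aligned block base plus an intra-block offset,
 * and the copy length is clamped to the block end, the remaining
 * request, and EOF.  example_read_window() is a hypothetical helper,
 * not a HAMMER API; it only restates the locals used in the loop.
 */
#if 0
static int
example_read_window(off_t uio_offset, int uio_resid, off_t ino_size)
{
	off_t offset = uio_offset & HAMMER_BUFMASK;	/* intra-block */
	int n = HAMMER_BUFSIZE - (int)offset;		/* to block end */

	if (n > uio_resid)
		n = uio_resid;				/* remaining request */
	if (n > ino_size - uio_offset)
		n = (int)(ino_size - uio_offset);	/* EOF clamp */
	/* bread() the block at uio_offset - offset, then uiomove() */
	/* n bytes starting at b_data + offset */
	return (n);
}
#endif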
244
245/*
246 * hammer_vop_write { vp, uio, ioflag, cred }
247 */
248static
249int
250hammer_vop_write(struct vop_write_args *ap)
251{
252 struct hammer_transaction trans;
253 struct hammer_inode *ip;
254 struct uio *uio;
255 off_t offset;
256 struct buf *bp;
257 int error;
258 int n;
259 int flags;
260 int count;
261
262 if (ap->a_vp->v_type != VREG)
263 return (EINVAL);
264 ip = VTOI(ap->a_vp);
265 error = 0;
266
267 if (ip->flags & HAMMER_INODE_RO)
268 return (EROFS);
269
270 /*
271 * Create a transaction to cover the operations we perform.
272 */
273 hammer_start_transaction(&trans, ip->hmp);
274 uio = ap->a_uio;
275
276 /*
277 * Check append mode
278 */
279 if (ap->a_ioflag & IO_APPEND)
280 uio->uio_offset = ip->ino_rec.ino_size;
281
282 /*
283 * Check for illegal write offsets. Valid range is 0...2^63-1
284 */
285 if (uio->uio_offset < 0 || uio->uio_offset + uio->uio_resid <= 0) {
286 hammer_done_transaction(&trans);
287 return (EFBIG);
288 }
289
290 /*
291 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
292 */
293 count = 0;
294 while (uio->uio_resid > 0) {
295 int fixsize = 0;
296
297 /*
298 * Do not allow huge writes to deadlock the buffer cache
299 */
300 if ((++count & 15) == 0) {
301 vn_unlock(ap->a_vp);
302 if ((ap->a_ioflag & IO_NOBWILL) == 0)
303 bwillwrite();
304 vn_lock(ap->a_vp, LK_EXCLUSIVE|LK_RETRY);
305 }
306
307 offset = uio->uio_offset & HAMMER_BUFMASK;
308 n = HAMMER_BUFSIZE - offset;
309 if (n > uio->uio_resid)
310 n = uio->uio_resid;
311 if (uio->uio_offset + n > ip->ino_rec.ino_size) {
312 vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
313 fixsize = 1;
314 }
315
316 if (uio->uio_segflg == UIO_NOCOPY) {
317 /*
318 * Issuing a write with the same data backing the
319 * buffer. Instantiate the buffer to collect the
320 * backing vm pages, then read-in any missing bits.
321 *
322 * This case is used by vop_stdputpages().
323 */
324 bp = getblk(ap->a_vp, uio->uio_offset - offset,
325 HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
326 if ((bp->b_flags & B_CACHE) == 0) {
327 bqrelse(bp);
328 error = bread(ap->a_vp,
329 uio->uio_offset - offset,
330 HAMMER_BUFSIZE, &bp);
331 }
332 } else if (offset == 0 && uio->uio_resid >= HAMMER_BUFSIZE) {
333 /*
334 * Even though we are entirely overwriting the buffer
335 * we may still have to zero it out to avoid a
336 * mmap/write visibility issue.
337 */
338 bp = getblk(ap->a_vp, uio->uio_offset - offset,
339 HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
340 if ((bp->b_flags & B_CACHE) == 0)
341 vfs_bio_clrbuf(bp);
342 } else if (uio->uio_offset - offset >= ip->ino_rec.ino_size) {
343 /*
344 * If the base offset of the buffer is beyond the
345 * file EOF, we don't have to issue a read.
346 */
347 bp = getblk(ap->a_vp, uio->uio_offset - offset,
348 HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
349 vfs_bio_clrbuf(bp);
350 } else {
351 /*
352 * Partial overwrite, read in any missing bits then
353 * replace the portion being written.
354 */
355 error = bread(ap->a_vp, uio->uio_offset - offset,
356 HAMMER_BUFSIZE, &bp);
357 if (error == 0)
358 bheavy(bp);
359 }
360 if (error == 0)
361 error = uiomove((char *)bp->b_data + offset, n, uio);
362
363 /*
364 * If we screwed up we have to undo any VM size changes we
365 * made.
366 */
367 if (error) {
368 brelse(bp);
369 if (fixsize) {
370 vtruncbuf(ap->a_vp, ip->ino_rec.ino_size,
371 HAMMER_BUFSIZE);
372 }
373 break;
374 }
375 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
376 if (ip->ino_rec.ino_size < uio->uio_offset) {
377 ip->ino_rec.ino_size = uio->uio_offset;
378 flags = HAMMER_INODE_RDIRTY;
379 vnode_pager_setsize(ap->a_vp, ip->ino_rec.ino_size);
380 } else {
381 flags = 0;
382 }
383 ip->ino_rec.ino_mtime = trans.time;
384 flags |= HAMMER_INODE_ITIMES | HAMMER_INODE_BUFS;
385 hammer_modify_inode(&trans, ip, flags);
386
387 if (ap->a_ioflag & IO_SYNC) {
388 bwrite(bp);
389 } else if (ap->a_ioflag & IO_DIRECT) {
390 bawrite(bp);
391#if 0
392 } else if ((ap->a_ioflag >> 16) == IO_SEQMAX &&
393 (uio->uio_offset & HAMMER_BUFMASK) == 0) {
394 /*
395 * XXX HAMMER can only fsync the whole inode,
396 * doing it on every buffer would be a bad idea.
397 */
398 /*
399 * If seqcount indicates sequential operation and
400 * we just finished filling a buffer, push it out
401 * now to prevent the buffer cache from becoming
402 * too full, which would trigger non-optimal
403 * flushes.
404 */
405 bdwrite(bp);
406#endif
407 } else {
408 bdwrite(bp);
409 }
410 }
411 hammer_done_transaction(&trans);
412 return (error);
413}
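/*
 * Editorial sketch (not part of the original source): the four buffer
 * acquisition cases of hammer_vop_write() above reduced to a single
 * predicate.  example_write_needs_read() is a hypothetical helper, not
 * a HAMMER API; 'offset' is the intra-block offset, 'resid' the bytes
 * remaining in the uio and 'blk_base' the block's base file offset.
 */
#if 0
static int	/* non-zero if the block must be read from the media */
example_write_needs_read(int offset, int resid, off_t blk_base,
			 off_t ino_size, int is_nocopy)
{
	if (is_nocopy)
		return (1);	/* UIO_NOCOPY: read-in any missing bits */
	if (offset == 0 && resid >= HAMMER_BUFSIZE)
		return (0);	/* full overwrite: clear, no read */
	if (blk_base >= ino_size)
		return (0);	/* block wholly beyond EOF: clear only */
	return (1);		/* partial overwrite: read, then copy */
}
#endif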
414
415/*
416 * hammer_vop_access { vp, mode, cred }
417 */
418static
419int
420hammer_vop_access(struct vop_access_args *ap)
421{
422 struct hammer_inode *ip = VTOI(ap->a_vp);
423 uid_t uid;
424 gid_t gid;
425 int error;
426
427 uid = hammer_to_unix_xid(&ip->ino_data.uid);
428 gid = hammer_to_unix_xid(&ip->ino_data.gid);
429
430 error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
431 ip->ino_data.uflags);
432 return (error);
433}
434
435/*
436 * hammer_vop_advlock { vp, id, op, fl, flags }
437 */
438static
439int
440hammer_vop_advlock(struct vop_advlock_args *ap)
441{
442 struct hammer_inode *ip = VTOI(ap->a_vp);
443
444 return (lf_advlock(ap, &ip->advlock, ip->ino_rec.ino_size));
445}
446
447/*
448 * hammer_vop_close { vp, fflag }
449 */
450static
451int
452hammer_vop_close(struct vop_close_args *ap)
453{
454 return (vop_stdclose(ap));
455}
456
457/*
458 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
459 *
460 * The operating system has already ensured that the directory entry
461 * does not exist and done all appropriate namespace locking.
462 */
463static
464int
465hammer_vop_ncreate(struct vop_ncreate_args *ap)
466{
467 struct hammer_transaction trans;
468 struct hammer_inode *dip;
469 struct hammer_inode *nip;
470 struct nchandle *nch;
471 int error;
472
473 nch = ap->a_nch;
474 dip = VTOI(ap->a_dvp);
475
476 if (dip->flags & HAMMER_INODE_RO)
477 return (EROFS);
478
479 /*
480 * Create a transaction to cover the operations we perform.
481 */
482 hammer_start_transaction(&trans, dip->hmp);
483
484 /*
485 * Create a new filesystem object of the requested type. The
486 * returned inode will be referenced and shared-locked to prevent
487 * it from being moved to the flusher.
488 */
489
490 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
491 if (error) {
492 hkprintf("hammer_create_inode error %d\n", error);
493 hammer_done_transaction(&trans);
494 *ap->a_vpp = NULL;
495 return (error);
496 }
497
498 /*
499 * Add the new filesystem object to the directory. This will also
500 * bump the inode's link count.
501 */
502 error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
503 if (error)
504 hkprintf("hammer_ip_add_directory error %d\n", error);
505
506 /*
507 * Finish up.
508 */
509 if (error) {
510 hammer_rel_inode(nip, 0);
511 hammer_done_transaction(&trans);
512 *ap->a_vpp = NULL;
513 } else {
514 error = hammer_get_vnode(nip, ap->a_vpp);
515 hammer_done_transaction(&trans);
516 hammer_rel_inode(nip, 0);
517 if (error == 0) {
518 cache_setunresolved(ap->a_nch);
519 cache_setvp(ap->a_nch, *ap->a_vpp);
520 }
521 }
522 return (error);
523}
524
525/*
526 * hammer_vop_getattr { vp, vap }
527 */
528static
529int
530hammer_vop_getattr(struct vop_getattr_args *ap)
531{
532 struct hammer_inode *ip = VTOI(ap->a_vp);
533 struct vattr *vap = ap->a_vap;
534
535#if 0
536 if (cache_check_fsmid_vp(ap->a_vp, &ip->fsmid) &&
537 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0 &&
538 ip->obj_asof == XXX
539 ) {
540 /* LAZYMOD XXX */
541 }
542 hammer_itimes(ap->a_vp);
543#endif
544
545 vap->va_fsid = ip->hmp->fsid_udev;
546 vap->va_fileid = ip->ino_rec.base.base.obj_id;
547 vap->va_mode = ip->ino_data.mode;
548 vap->va_nlink = ip->ino_rec.ino_nlinks;
549 vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
550 vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
551 vap->va_rmajor = 0;
552 vap->va_rminor = 0;
553 vap->va_size = ip->ino_rec.ino_size;
554 hammer_to_timespec(ip->ino_rec.ino_atime, &vap->va_atime);
555 hammer_to_timespec(ip->ino_rec.ino_mtime, &vap->va_mtime);
556 hammer_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
557 vap->va_flags = ip->ino_data.uflags;
558 vap->va_gen = 1; /* hammer inums are unique for all time */
559 vap->va_blocksize = HAMMER_BUFSIZE;
560 vap->va_bytes = (ip->ino_rec.ino_size + 63) & ~63;
561 vap->va_type = hammer_get_vnode_type(ip->ino_rec.base.base.obj_type);
562 vap->va_filerev = 0; /* XXX */
563 /* mtime uniquely identifies any adjustments made to the file */
564 vap->va_fsmid = ip->ino_rec.ino_mtime;
565 vap->va_uid_uuid = ip->ino_data.uid;
566 vap->va_gid_uuid = ip->ino_data.gid;
567 vap->va_fsid_uuid = ip->hmp->fsid;
568 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
569 VA_FSID_UUID_VALID;
570
571 switch (ip->ino_rec.base.base.obj_type) {
572 case HAMMER_OBJTYPE_CDEV:
573 case HAMMER_OBJTYPE_BDEV:
574 vap->va_rmajor = ip->ino_data.rmajor;
575 vap->va_rminor = ip->ino_data.rminor;
576 break;
577 default:
578 break;
579 }
580
581 return(0);
582}
583
584/*
585 * hammer_vop_nresolve { nch, dvp, cred }
586 *
587 * Locate the requested directory entry.
588 */
589static
590int
591hammer_vop_nresolve(struct vop_nresolve_args *ap)
592{
593 struct hammer_transaction trans;
594 struct namecache *ncp;
595 hammer_inode_t dip;
596 hammer_inode_t ip;
597 hammer_tid_t asof;
598 struct hammer_cursor cursor;
599 union hammer_record_ondisk *rec;
600 struct vnode *vp;
601 int64_t namekey;
602 int error;
603 int i;
604 int nlen;
605 int flags;
606 u_int64_t obj_id;
607
608 /*
609 * Misc initialization, plus handle as-of name extensions. Look for
610 * the '@@' extension. Note that as-of files and directories cannot
611 * be modified.
612 */
613 dip = VTOI(ap->a_dvp);
614 ncp = ap->a_nch->ncp;
615 asof = dip->obj_asof;
616 nlen = ncp->nc_nlen;
617 flags = dip->flags;
618
619 hammer_simple_transaction(&trans, dip->hmp);
620
621 for (i = 0; i < nlen; ++i) {
622 if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
623 asof = hammer_str_to_tid(ncp->nc_name + i + 2);
624 flags |= HAMMER_INODE_RO;
625 break;
626 }
627 }
628 nlen = i;
629
630 /*
631 * If there is no path component the time extension is relative to
632 * dip.
633 */
634 if (nlen == 0) {
635 ip = hammer_get_inode(&trans, &dip->cache[1], dip->obj_id,
636 asof, flags, &error);
637 if (error == 0) {
638 error = hammer_get_vnode(ip, &vp);
639 hammer_rel_inode(ip, 0);
640 } else {
641 vp = NULL;
642 }
643 if (error == 0) {
644 vn_unlock(vp);
645 cache_setvp(ap->a_nch, vp);
646 vrele(vp);
647 }
648 goto done;
649 }
650
651 /*
652 * Calculate the namekey and setup the key range for the scan. This
653 * works kinda like a chained hash table where the lower 32 bits
654 * of the namekey synthesize the chain.
655 *
656 * The key range is inclusive of both key_beg and key_end.
657 */
658 namekey = hammer_directory_namekey(ncp->nc_name, nlen);
659
660 error = hammer_init_cursor(&trans, &cursor, &dip->cache[0], dip);
661 cursor.key_beg.obj_id = dip->obj_id;
662 cursor.key_beg.key = namekey;
663 cursor.key_beg.create_tid = 0;
664 cursor.key_beg.delete_tid = 0;
665 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
666 cursor.key_beg.obj_type = 0;
667
668 cursor.key_end = cursor.key_beg;
669 cursor.key_end.key |= 0xFFFFFFFFULL;
670 cursor.asof = asof;
671 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
672
673 /*
674 * Scan all matching records (the chain), locate the one matching
675 * the requested path component.
676 *
677 * The hammer_ip_*() functions merge in-memory records with on-disk
678 * records for the purposes of the search.
679 */
680 obj_id = 0;
681
682 if (error == 0) {
683 rec = NULL;
684 error = hammer_ip_first(&cursor);
685 while (error == 0) {
686 error = hammer_ip_resolve_data(&cursor);
687 if (error)
688 break;
689 rec = cursor.record;
690 if (nlen == rec->entry.base.data_len &&
691 bcmp(ncp->nc_name, cursor.data, nlen) == 0) {
692 obj_id = rec->entry.obj_id;
693 break;
694 }
695 error = hammer_ip_next(&cursor);
696 }
697 }
698 hammer_done_cursor(&cursor);
699 if (error == 0) {
700 ip = hammer_get_inode(&trans, &dip->cache[1],
701 obj_id, asof, flags, &error);
702 if (error == 0) {
703 error = hammer_get_vnode(ip, &vp);
704 hammer_rel_inode(ip, 0);
705 } else {
706 vp = NULL;
707 }
708 if (error == 0) {
709 vn_unlock(vp);
710 cache_setvp(ap->a_nch, vp);
711 vrele(vp);
712 }
713 } else if (error == ENOENT) {
714 cache_setvp(ap->a_nch, NULL);
715 }
716done:
717 hammer_done_transaction(&trans);
718 return (error);
719}
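/*
 * Editorial sketch (not part of the original source): the directory
 * key range built by hammer_vop_nresolve() above.
 * hammer_directory_namekey() hashes the name into the upper bits of
 * the 64 bit key; ORing 0xFFFFFFFF into the low 32 bits produces the
 * inclusive end of the hash chain, so [*begp, *endp] spans every
 * possible collision entry for the name.  example_dirent_key_range()
 * is a hypothetical helper, not a HAMMER API.
 */
#if 0
static void
example_dirent_key_range(char *name, int nlen,
			 int64_t *begp, int64_t *endp)
{
	int64_t namekey = hammer_directory_namekey(name, nlen);

	*begp = namekey;			/* head of the chain */
	*endp = namekey | 0xFFFFFFFFULL;	/* inclusive chain end */
}
#endif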
720
721/*
722 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
723 *
724 * Locate the parent directory of a directory vnode.
725 *
726 * dvp is referenced but not locked. *vpp must be returned referenced and
727 * locked. A parent_obj_id of 0 does not necessarily indicate that we are
 728 * at the root; instead it could indicate that the directory we were in was
729 * removed.
730 *
731 * NOTE: as-of sequences are not linked into the directory structure. If
 732 * we are at the root with a different asof than the mount point, reload
733 * the same directory with the mount point's asof. I'm not sure what this
734 * will do to NFS. We encode ASOF stamps in NFS file handles so it might not
735 * get confused, but it hasn't been tested.
736 */
737static
738int
739hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
740{
741 struct hammer_transaction trans;
742 struct hammer_inode *dip;
743 struct hammer_inode *ip;
744 int64_t parent_obj_id;
745 hammer_tid_t asof;
746 int error;
747
748 dip = VTOI(ap->a_dvp);
749 asof = dip->obj_asof;
750 parent_obj_id = dip->ino_data.parent_obj_id;
751
752 if (parent_obj_id == 0) {
753 if (dip->obj_id == HAMMER_OBJID_ROOT &&
754 asof != dip->hmp->asof) {
755 parent_obj_id = dip->obj_id;
756 asof = dip->hmp->asof;
757 *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
758 ksnprintf(*ap->a_fakename, 19, "0x%016llx",
759 dip->obj_asof);
760 } else {
761 *ap->a_vpp = NULL;
762 return ENOENT;
763 }
764 }
765
766 hammer_simple_transaction(&trans, dip->hmp);
767
768 ip = hammer_get_inode(&trans, &dip->cache[1], parent_obj_id,
769 asof, dip->flags, &error);
770 if (ip) {
771 error = hammer_get_vnode(ip, ap->a_vpp);
772 hammer_rel_inode(ip, 0);
773 } else {
774 *ap->a_vpp = NULL;
775 }
776 hammer_done_transaction(&trans);
777 return (error);
778}
779
780/*
781 * hammer_vop_nlink { nch, dvp, vp, cred }
782 */
783static
784int
785hammer_vop_nlink(struct vop_nlink_args *ap)
786{
787 struct hammer_transaction trans;
788 struct hammer_inode *dip;
789 struct hammer_inode *ip;
790 struct nchandle *nch;
791 int error;
792
793 nch = ap->a_nch;
794 dip = VTOI(ap->a_dvp);
795 ip = VTOI(ap->a_vp);
796
797 if (dip->flags & HAMMER_INODE_RO)
798 return (EROFS);
799 if (ip->flags & HAMMER_INODE_RO)
800 return (EROFS);
801
802 /*
803 * Create a transaction to cover the operations we perform.
804 */
805 hammer_start_transaction(&trans, dip->hmp);
806
807 /*
808 * Add the filesystem object to the directory. Note that neither
809 * dip nor ip are referenced or locked, but their vnodes are
810 * referenced. This function will bump the inode's link count.
811 */
812 error = hammer_ip_add_directory(&trans, dip, nch->ncp, ip);
813
814 /*
815 * Finish up.
816 */
817 if (error == 0) {
818 cache_setunresolved(nch);
819 cache_setvp(nch, ap->a_vp);
820 }
821 hammer_done_transaction(&trans);
822 return (error);
823}
824
825/*
826 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
827 *
828 * The operating system has already ensured that the directory entry
829 * does not exist and done all appropriate namespace locking.
830 */
831static
832int
833hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
834{
835 struct hammer_transaction trans;
836 struct hammer_inode *dip;
837 struct hammer_inode *nip;
838 struct nchandle *nch;
839 int error;
840
841 nch = ap->a_nch;
842 dip = VTOI(ap->a_dvp);
843
844 if (dip->flags & HAMMER_INODE_RO)
845 return (EROFS);
846
847 /*
848 * Create a transaction to cover the operations we perform.
849 */
850 hammer_start_transaction(&trans, dip->hmp);
851
852 /*
853 * Create a new filesystem object of the requested type. The
854 * returned inode will be referenced but not locked.
855 */
856 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
857 if (error) {
858 hkprintf("hammer_mkdir error %d\n", error);
859 hammer_done_transaction(&trans);
860 *ap->a_vpp = NULL;
861 return (error);
862 }
863 /*
864 * Add the new filesystem object to the directory. This will also
865 * bump the inode's link count.
866 */
867 error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
868 if (error)
869 hkprintf("hammer_mkdir (add) error %d\n", error);
870
871 /*
872 * Finish up.
873 */
874 if (error) {
875 hammer_rel_inode(nip, 0);
876 *ap->a_vpp = NULL;
877 } else {
878 error = hammer_get_vnode(nip, ap->a_vpp);
879 hammer_rel_inode(nip, 0);
880 if (error == 0) {
881 cache_setunresolved(ap->a_nch);
882 cache_setvp(ap->a_nch, *ap->a_vpp);
883 }
884 }
885 hammer_done_transaction(&trans);
886 return (error);
887}
888
889/*
890 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
891 *
892 * The operating system has already ensured that the directory entry
893 * does not exist and done all appropriate namespace locking.
894 */
895static
896int
897hammer_vop_nmknod(struct vop_nmknod_args *ap)
898{
899 struct hammer_transaction trans;
900 struct hammer_inode *dip;
901 struct hammer_inode *nip;
902 struct nchandle *nch;
903 int error;
904
905 nch = ap->a_nch;
906 dip = VTOI(ap->a_dvp);
907
908 if (dip->flags & HAMMER_INODE_RO)
909 return (EROFS);
910
911 /*
912 * Create a transaction to cover the operations we perform.
913 */
914 hammer_start_transaction(&trans, dip->hmp);
915
916 /*
917 * Create a new filesystem object of the requested type. The
918 * returned inode will be referenced but not locked.
919 */
920 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
921 if (error) {
922 hammer_done_transaction(&trans);
923 *ap->a_vpp = NULL;
924 return (error);
925 }
926
927 /*
928 * Add the new filesystem object to the directory. This will also
929 * bump the inode's link count.
930 */
931 error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
932
933 /*
934 * Finish up.
935 */
936 if (error) {
937 hammer_rel_inode(nip, 0);
938 *ap->a_vpp = NULL;
939 } else {
940 error = hammer_get_vnode(nip, ap->a_vpp);
941 hammer_rel_inode(nip, 0);
942 if (error == 0) {
943 cache_setunresolved(ap->a_nch);
944 cache_setvp(ap->a_nch, *ap->a_vpp);
945 }
946 }
947 hammer_done_transaction(&trans);
948 return (error);
949}
950
951/*
952 * hammer_vop_open { vp, mode, cred, fp }
953 */
954static
955int
956hammer_vop_open(struct vop_open_args *ap)
957{
958 if ((ap->a_mode & FWRITE) && (VTOI(ap->a_vp)->flags & HAMMER_INODE_RO))
959 return (EROFS);
960
961 return(vop_stdopen(ap));
962}
963
964/*
965 * hammer_vop_pathconf { vp, name, retval }
966 */
967static
968int
969hammer_vop_pathconf(struct vop_pathconf_args *ap)
970{
971 return EOPNOTSUPP;
972}
973
974/*
975 * hammer_vop_print { vp }
976 */
977static
978int
979hammer_vop_print(struct vop_print_args *ap)
980{
981 return EOPNOTSUPP;
982}
983
984/*
985 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
986 */
987static
988int
989hammer_vop_readdir(struct vop_readdir_args *ap)
990{
991 struct hammer_transaction trans;
992 struct hammer_cursor cursor;
993 struct hammer_inode *ip;
994 struct uio *uio;
995 hammer_record_ondisk_t rec;
996 hammer_base_elm_t base;
997 int error;
998 int cookie_index;
999 int ncookies;
1000 off_t *cookies;
1001 off_t saveoff;
1002 int r;
1003
1004 ip = VTOI(ap->a_vp);
1005 uio = ap->a_uio;
1006 saveoff = uio->uio_offset;
1007
1008 if (ap->a_ncookies) {
1009 ncookies = uio->uio_resid / 16 + 1;
1010 if (ncookies > 1024)
1011 ncookies = 1024;
1012 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1013 cookie_index = 0;
1014 } else {
1015 ncookies = -1;
1016 cookies = NULL;
1017 cookie_index = 0;
1018 }
1019
1020 hammer_simple_transaction(&trans, ip->hmp);
1021
1022 /*
1023 * Handle artificial entries
1024 */
1025 error = 0;
1026 if (saveoff == 0) {
1027 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1028 if (r)
1029 goto done;
1030 if (cookies)
1031 cookies[cookie_index] = saveoff;
1032 ++saveoff;
1033 ++cookie_index;
1034 if (cookie_index == ncookies)
1035 goto done;
1036 }
1037 if (saveoff == 1) {
1038 if (ip->ino_data.parent_obj_id) {
1039 r = vop_write_dirent(&error, uio,
1040 ip->ino_data.parent_obj_id,
1041 DT_DIR, 2, "..");
1042 } else {
1043 r = vop_write_dirent(&error, uio,
1044 ip->obj_id, DT_DIR, 2, "..");
1045 }
1046 if (r)
1047 goto done;
1048 if (cookies)
1049 cookies[cookie_index] = saveoff;
1050 ++saveoff;
1051 ++cookie_index;
1052 if (cookie_index == ncookies)
1053 goto done;
1054 }
1055
1056 /*
1057 * Key range (begin and end inclusive) to scan. Directory keys
1058 * directly translate to a 64 bit 'seek' position.
1059 */
1060 hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1061 cursor.key_beg.obj_id = ip->obj_id;
1062 cursor.key_beg.create_tid = 0;
1063 cursor.key_beg.delete_tid = 0;
1064 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1065 cursor.key_beg.obj_type = 0;
1066 cursor.key_beg.key = saveoff;
1067
1068 cursor.key_end = cursor.key_beg;
1069 cursor.key_end.key = HAMMER_MAX_KEY;
1070 cursor.asof = ip->obj_asof;
1071 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1072
1073 error = hammer_ip_first(&cursor);
1074
1075 while (error == 0) {
1076 error = hammer_ip_resolve_record_and_data(&cursor);
1077 if (error)
1078 break;
1079 rec = cursor.record;
1080 base = &rec->base.base;
1081 saveoff = base->key;
1082
1083 if (base->obj_id != ip->obj_id)
1084 panic("readdir: bad record at %p", cursor.node);
1085
1086 r = vop_write_dirent(
1087 &error, uio, rec->entry.obj_id,
1088 hammer_get_dtype(rec->entry.base.base.obj_type),
1089 rec->entry.base.data_len,
1090 (void *)cursor.data);
1091 if (r)
1092 break;
1093 ++saveoff;
1094 if (cookies)
1095 cookies[cookie_index] = base->key;
1096 ++cookie_index;
1097 if (cookie_index == ncookies)
1098 break;
1099 error = hammer_ip_next(&cursor);
1100 }
1101 hammer_done_cursor(&cursor);
1102
1103done:
1104 hammer_done_transaction(&trans);
1105
1106 if (ap->a_eofflag)
1107 *ap->a_eofflag = (error == ENOENT);
1108 uio->uio_offset = saveoff;
1109 if (error && cookie_index == 0) {
1110 if (error == ENOENT)
1111 error = 0;
1112 if (cookies) {
1113 kfree(cookies, M_TEMP);
1114 *ap->a_ncookies = 0;
1115 *ap->a_cookies = NULL;
1116 }
1117 } else {
1118 if (error == ENOENT)
1119 error = 0;
1120 if (cookies) {
1121 *ap->a_ncookies = cookie_index;
1122 *ap->a_cookies = cookies;
1123 }
1124 }
1125 return(error);
1126}
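/*
 * Editorial sketch (not part of the original source): cookie sizing in
 * hammer_vop_readdir() above.  At most uio_resid/16 + 1 cookies are
 * allocated, capped at 1024; offsets 0 and 1 name the synthesized "."
 * and ".." entries and every real entry stores its 64 bit directory
 * key directly as its seek cookie.  example_readdir_ncookies() is a
 * hypothetical helper, not a HAMMER API.
 */
#if 0
static int
example_readdir_ncookies(int uio_resid)
{
	int ncookies = uio_resid / 16 + 1;	/* worst-case estimate */

	if (ncookies > 1024)
		ncookies = 1024;		/* allocation cap */
	return (ncookies);
}
#endif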
1127
1128/*
1129 * hammer_vop_readlink { vp, uio, cred }
1130 */
1131static
1132int
1133hammer_vop_readlink(struct vop_readlink_args *ap)
1134{
1135 struct hammer_transaction trans;
1136 struct hammer_cursor cursor;
1137 struct hammer_inode *ip;
1138 int error;
1139
1140 ip = VTOI(ap->a_vp);
1141
1142 hammer_simple_transaction(&trans, ip->hmp);
1143
1144 hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1145
1146 /*
1147 * Key range (begin and end inclusive) to scan. Directory keys
1148 * directly translate to a 64 bit 'seek' position.
1149 */
1150 cursor.key_beg.obj_id = ip->obj_id;
1151 cursor.key_beg.create_tid = 0;
1152 cursor.key_beg.delete_tid = 0;
1153 cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1154 cursor.key_beg.obj_type = 0;
1155 cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
1156 cursor.asof = ip->obj_asof;
1157 cursor.flags |= HAMMER_CURSOR_ASOF;
1158
1159 error = hammer_ip_lookup(&cursor);
1160 if (error == 0) {
1161 error = hammer_ip_resolve_data(&cursor);
1162 if (error == 0) {
1163 error = uiomove((char *)cursor.data,
1164 cursor.record->base.data_len,
1165 ap->a_uio);
1166 }
1167 }
1168 hammer_done_cursor(&cursor);
1169 hammer_done_transaction(&trans);
1170 return(error);
1171}
1172
1173/*
1174 * hammer_vop_nremove { nch, dvp, cred }
1175 */
1176static
1177int
1178hammer_vop_nremove(struct vop_nremove_args *ap)
1179{
1180 struct hammer_transaction trans;
1181 int error;
1182
1183 hammer_start_transaction(&trans, VTOI(ap->a_dvp)->hmp);
1184 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
1185 hammer_done_transaction(&trans);
1186
1187 return (error);
1188}
1189
1190/*
1191 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1192 */
1193static
1194int
1195hammer_vop_nrename(struct vop_nrename_args *ap)
1196{
1197 struct hammer_transaction trans;
1198 struct namecache *fncp;
1199 struct namecache *tncp;
1200 struct hammer_inode *fdip;
1201 struct hammer_inode *tdip;
1202 struct hammer_inode *ip;
1203 struct hammer_cursor cursor;
1204 union hammer_record_ondisk *rec;
1205 int64_t namekey;
1206 int error;
1207
1208 fdip = VTOI(ap->a_fdvp);
1209 tdip = VTOI(ap->a_tdvp);
1210 fncp = ap->a_fnch->ncp;
1211 tncp = ap->a_tnch->ncp;
1212 ip = VTOI(fncp->nc_vp);
1213 KKASSERT(ip != NULL);
1214
1215 if (fdip->flags & HAMMER_INODE_RO)
1216 return (EROFS);
1217 if (tdip->flags & HAMMER_INODE_RO)
1218 return (EROFS);
1219 if (ip->flags & HAMMER_INODE_RO)
1220 return (EROFS);
1221
1222 hammer_start_transaction(&trans, fdip->hmp);
1223
1224 /*
1225 * Remove tncp from the target directory and then link ip as
1226 * tncp. XXX pass trans to dounlink
1227 *
1228 * Force the inode sync-time to match the transaction so it is
1229 * in-sync with the creation of the target directory entry.
1230 */
1231 error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
1232 if (error == 0 || error == ENOENT) {
1233 error = hammer_ip_add_directory(&trans, tdip, tncp, ip);
1234 if (error == 0) {
1235 ip->ino_data.parent_obj_id = tdip->obj_id;
1236 hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
1237 }
1238 }
1239 if (error)
1240 goto failed; /* XXX */
1241
1242 /*
1243 * Locate the record in the originating directory and remove it.
1244 *
1245 * Calculate the namekey and setup the key range for the scan. This
1246 * works kinda like a chained hash table where the lower 32 bits
1247 * of the namekey synthesize the chain.
1248 *
1249 * The key range is inclusive of both key_beg and key_end.
1250 */
1251 namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
1252retry:
1253 hammer_init_cursor(&trans, &cursor, &fdip->cache[0], fdip);
1254 cursor.key_beg.obj_id = fdip->obj_id;
1255 cursor.key_beg.key = namekey;
1256 cursor.key_beg.create_tid = 0;
1257 cursor.key_beg.delete_tid = 0;
1258 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1259 cursor.key_beg.obj_type = 0;
1260
1261 cursor.key_end = cursor.key_beg;
1262 cursor.key_end.key |= 0xFFFFFFFFULL;
1263 cursor.asof = fdip->obj_asof;
1264 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1265
1266 /*
1267 * Scan all matching records (the chain), locate the one matching
1268 * the requested path component.
1269 *
1270 * The hammer_ip_*() functions merge in-memory records with on-disk
1271 * records for the purposes of the search.
1272 */
1273 error = hammer_ip_first(&cursor);
1274 while (error == 0) {
1275 if (hammer_ip_resolve_data(&cursor) != 0)
1276 break;
1277 rec = cursor.record;
1278 if (fncp->nc_nlen == rec->entry.base.data_len &&
1279 bcmp(fncp->nc_name, cursor.data, fncp->nc_nlen) == 0) {
1280 break;
1281 }
1282 error = hammer_ip_next(&cursor);
1283 }
1284
1285 /*
1286 * If all is ok we have to get the inode so we can adjust nlinks.
1287 *
1288 * WARNING: hammer_ip_del_directory() may have to terminate the
1289 * cursor to avoid a recursion. It's ok to call hammer_done_cursor()
1290 * twice.
1291 */
1292 if (error == 0)
1293 error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
1294
1295 /*
1296 * XXX A deadlock here will break rename's atomicity for the purposes
1297 * of crash recovery.
1298 */
1299 if (error == EDEADLK) {
1300 hammer_done_cursor(&cursor);
1301 goto retry;
1302 }
1303
1304 /*
1305 * Cleanup and tell the kernel that the rename succeeded.
1306 */
1307 hammer_done_cursor(&cursor);
1308 if (error == 0)
1309 cache_rename(ap->a_fnch, ap->a_tnch);
1310
1311failed:
1312 hammer_done_transaction(&trans);
1313 return (error);
1314}
1315
1316/*
1317 * hammer_vop_nrmdir { nch, dvp, cred }
1318 */
1319static
1320int
1321hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
1322{
1323 struct hammer_transaction trans;
1324 int error;
1325
1326 hammer_start_transaction(&trans, VTOI(ap->a_dvp)->hmp);
1327 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
1328 hammer_done_transaction(&trans);
1329
1330 return (error);
1331}
1332
1333/*
1334 * hammer_vop_setattr { vp, vap, cred }
1335 */
1336static
1337int
1338hammer_vop_setattr(struct vop_setattr_args *ap)
1339{
1340 struct hammer_transaction trans;
1341 struct vattr *vap;
1342 struct hammer_inode *ip;
1343 int modflags;
1344 int error;
1345 int truncating;
1346 off_t aligned_size;
1347 u_int32_t flags;
1348 uuid_t uuid;
1349
1350 vap = ap->a_vap;
1351 ip = ap->a_vp->v_data;
1352 modflags = 0;
1353
1354 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1355 return(EROFS);
1356 if (ip->flags & HAMMER_INODE_RO)
1357 return (EROFS);
1358
1359 hammer_start_transaction(&trans, ip->hmp);
1360 error = 0;
1361
1362 if (vap->va_flags != VNOVAL) {
1363 flags = ip->ino_data.uflags;
1364 error = vop_helper_setattr_flags(&flags, vap->va_flags,
1365 hammer_to_unix_xid(&ip->ino_data.uid),
1366 ap->a_cred);
1367 if (error == 0) {
1368 if (ip->ino_data.uflags != flags) {
1369 ip->ino_data.uflags = flags;
1370 modflags |= HAMMER_INODE_DDIRTY;
1371 }
1372 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
1373 error = 0;
1374 goto done;
1375 }
1376 }
1377 goto done;
1378 }
1379 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
1380 error = EPERM;
1381 goto done;
1382 }
1383 if (vap->va_uid != (uid_t)VNOVAL) {
1384 hammer_guid_to_uuid(&uuid, vap->va_uid);
1385 if (bcmp(&uuid, &ip->ino_data.uid, sizeof(uuid)) != 0) {
1386 ip->ino_data.uid = uuid;
1387 modflags |= HAMMER_INODE_DDIRTY;
1388 }
1389 }
1390 if (vap->va_gid != (uid_t)VNOVAL) {
1391 hammer_guid_to_uuid(&uuid, vap->va_gid);
1392 if (bcmp(&uuid, &ip->ino_data.gid, sizeof(uuid)) != 0) {
1393 ip->ino_data.gid = uuid;
1394 modflags |= HAMMER_INODE_DDIRTY;
1395 }
1396 }
1397 while (vap->va_size != VNOVAL && ip->ino_rec.ino_size != vap->va_size) {
1398 switch(ap->a_vp->v_type) {
1399 case VREG:
1400 if (vap->va_size == ip->ino_rec.ino_size)
1401 break;
1402 /*
1403 * XXX breaks atomicity; we can deadlock the backend
1404 * if we do not release the lock. Probably not a
1405 * big deal here.
1406 */
1407 if (vap->va_size < ip->ino_rec.ino_size) {
1408 vtruncbuf(ap->a_vp, vap->va_size,
1409 HAMMER_BUFSIZE);
1410 truncating = 1;
1411 } else {
1412 vnode_pager_setsize(ap->a_vp, vap->va_size);
1413 truncating = 0;
1414 }
1415 ip->ino_rec.ino_size = vap->va_size;
1416 modflags |= HAMMER_INODE_RDIRTY;
1417 aligned_size = (vap->va_size + HAMMER_BUFMASK) &
1418 ~HAMMER_BUFMASK64;
1419
1420 /*
1421 * on-media truncation is cached in the inode until
1422 * the inode is synchronized.
1423 */
1424 if (truncating) {
1425 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
1426 ip->flags |= HAMMER_INODE_TRUNCATED;
1427 ip->trunc_off = vap->va_size;
1428 } else if (ip->trunc_off > vap->va_size) {
1429 ip->trunc_off = vap->va_size;
1430 }
1431 }
1432
1433 /*
1434 * If truncating we have to clean out a portion of
1435 * the last block on-disk. We do this in the
1436 * front-end buffer cache.
1437 */
1438 if (truncating && vap->va_size < aligned_size) {
1439 struct buf *bp;
1440 int offset;
1441
1442 offset = vap->va_size & HAMMER_BUFMASK;
1443 error = bread(ap->a_vp,
1444 aligned_size - HAMMER_BUFSIZE,
1445 HAMMER_BUFSIZE, &bp);
1446 if (error == 0) {
1447 bzero(bp->b_data + offset,
1448 HAMMER_BUFSIZE - offset);
1449 bdwrite(bp);
1450 } else {
1451 brelse(bp);
1452 }
1453 }
1454 break;
1455 case VDATABASE:
1456 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
1457 ip->flags |= HAMMER_INODE_TRUNCATED;
1458 ip->trunc_off = vap->va_size;
1459 } else if (ip->trunc_off > vap->va_size) {
1460 ip->trunc_off = vap->va_size;
1461 }
1462 ip->ino_rec.ino_size = vap->va_size;
1463 modflags |= HAMMER_INODE_RDIRTY;
1464 break;
1465 default:
1466 error = EINVAL;
1467 goto done;
1468 }
1469 break;
1470 }
1471 if (vap->va_atime.tv_sec != VNOVAL) {
1472 ip->ino_rec.ino_atime =
1473 hammer_timespec_to_transid(&vap->va_atime);
1474 modflags |= HAMMER_INODE_ITIMES;
1475 }
1476 if (vap->va_mtime.tv_sec != VNOVAL) {
1477 ip->ino_rec.ino_mtime =
1478 hammer_timespec_to_transid(&vap->va_mtime);
1479 modflags |= HAMMER_INODE_ITIMES;
1480 }
1481 if (vap->va_mode != (mode_t)VNOVAL) {
1482 if (ip->ino_data.mode != vap->va_mode) {
1483 ip->ino_data.mode = vap->va_mode;
1484 modflags |= HAMMER_INODE_DDIRTY;
1485 }
1486 }
1487done:
1488 if (error == 0)
1489 hammer_modify_inode(&trans, ip, modflags);
1490 hammer_done_transaction(&trans);
1491 return (error);
1492}
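/*
 * Editorial sketch (not part of the original source): the tail zeroing
 * performed by the VREG truncation case above.  The new size is
 * rounded up to a HAMMER_BUFSIZE boundary; when the new EOF falls
 * inside that last block, the bytes from EOF to the end of the block
 * are zeroed through the front-end buffer cache.
 * example_truncate_tail() is a hypothetical helper, not a HAMMER API.
 */
#if 0
static void
example_truncate_tail(off_t va_size)
{
	off_t aligned_size = (va_size + HAMMER_BUFMASK) &
			     ~HAMMER_BUFMASK64;
	int offset = (int)(va_size & HAMMER_BUFMASK);

	if (va_size < aligned_size) {
		/*
		 * bread() the block at aligned_size - HAMMER_BUFSIZE,
		 * bzero(b_data + offset, HAMMER_BUFSIZE - offset) and
		 * bdwrite() it back.
		 */
	}
}
#endif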
1493
1494/*
1495 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1496 */
1497static
1498int
1499hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
1500{
1501 struct hammer_transaction trans;
1502 struct hammer_inode *dip;
1503 struct hammer_inode *nip;
1504 struct nchandle *nch;
1505 hammer_record_t record;
1506 int error;
1507 int bytes;
1508
1509 ap->a_vap->va_type = VLNK;
1510
1511 nch = ap->a_nch;
1512 dip = VTOI(ap->a_dvp);
1513
1514 if (dip->flags & HAMMER_INODE_RO)
1515 return (EROFS);
1516
1517 /*
1518 * Create a transaction to cover the operations we perform.
1519 */
1520 hammer_start_transaction(&trans, dip->hmp);
1521
1522 /*
1523 * Create a new filesystem object of the requested type. The
1524 * returned inode will be referenced but not locked.
1525 */
1526
1527 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
1528 if (error) {
1529 hammer_done_transaction(&trans);
1530 *ap->a_vpp = NULL;
1531 return (error);
1532 }
1533
1534 /*
1535 * Add a record representing the symlink. The symlink target is
1536 * stored as pure data, not a string, and is not \0-terminated.
1537 */
1538 if (error == 0) {
1539 record = hammer_alloc_mem_record(nip);
1540 record->type = HAMMER_MEM_RECORD_GENERAL;
1541 bytes = strlen(ap->a_target);
1542
1543 record->rec.base.base.key = HAMMER_FIXKEY_SYMLINK;
1544 record->rec.base.base.rec_type = HAMMER_RECTYPE_FIX;
1545 record->rec.base.data_len = bytes;
1546 record->rec.base.signature = HAMMER_RECORD_SIGNATURE_GOOD;
1547 record->data = (void *)ap->a_target;
1548 /* will be reallocated by routine below */
1549 error = hammer_ip_add_record(&trans, record);
1550
1551 /*
1552 * Set the file size to the length of the link.
1553 */
1554 if (error == 0) {
1555 nip->ino_rec.ino_size = bytes;
1556 hammer_modify_inode(&trans, nip, HAMMER_INODE_RDIRTY);
1557 }
1558 }
1559 if (error == 0)
1560 error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
1561
1562 /*
1563 * Finish up.
1564 */
1565 if (error) {
1566 hammer_rel_inode(nip, 0);
1567 *ap->a_vpp = NULL;
1568 } else {
1569 error = hammer_get_vnode(nip, ap->a_vpp);
1570 hammer_rel_inode(nip, 0);
1571 if (error == 0) {
1572 cache_setunresolved(ap->a_nch);
1573 cache_setvp(ap->a_nch, *ap->a_vpp);
1574 }
1575 }
1576 hammer_done_transaction(&trans);
1577 return (error);
1578}
1579
1580/*
1581 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
1582 */
1583static
1584int
1585hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
1586{
1587 struct hammer_transaction trans;
1588 int error;
1589
1590 hammer_start_transaction(&trans, VTOI(ap->a_dvp)->hmp);
1591 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
1592 ap->a_cred, ap->a_flags);
1593 hammer_done_transaction(&trans);
1594
1595 return (error);
1596}
1597
1598/*
1599 * hammer_vop_ioctl { vp, command, data, fflag, cred }
1600 */
1601static
1602int
1603hammer_vop_ioctl(struct vop_ioctl_args *ap)
1604{
1605 struct hammer_inode *ip = ap->a_vp->v_data;
1606
1607 return(hammer_ioctl(ip, ap->a_command, ap->a_data,
1608 ap->a_fflag, ap->a_cred));
1609}
1610
1611static
1612int
1613hammer_vop_mountctl(struct vop_mountctl_args *ap)
1614{
1615 struct mount *mp;
1616 int error;
1617
1618 mp = ap->a_head.a_ops->head.vv_mount;
1619
1620 switch(ap->a_op) {
1621 case MOUNTCTL_SET_EXPORT:
1622 if (ap->a_ctllen != sizeof(struct export_args))
1623 error = EINVAL;
1624 else error = hammer_vfs_export(mp, ap->a_op,
1625 (const struct export_args *)ap->a_ctl);
1626 break;
1627 default:
1628 error = journal_mountctl(ap);
1629 break;
1630 }
1631 return(error);
1632}
1633
1634/*
1635 * hammer_vop_strategy { vp, bio }
1636 *
1637 * Strategy call, used for regular file read & write only. Note that the
1638 * bp may represent a cluster.
1639 *
1640 * To simplify operation and allow better optimizations in the future,
1641 * this code does not make any assumptions with regards to buffer alignment
1642 * or size.
1643 */
1644static
1645int
1646hammer_vop_strategy(struct vop_strategy_args *ap)
1647{
1648 struct buf *bp;
1649 int error;
1650
1651 bp = ap->a_bio->bio_buf;
1652
1653 switch(bp->b_cmd) {
1654 case BUF_CMD_READ:
1655 error = hammer_vop_strategy_read(ap);
1656 break;
1657 case BUF_CMD_WRITE:
1658 error = hammer_vop_strategy_write(ap);
1659 break;
1660 default:
1661 bp->b_error = error = EINVAL;
1662 bp->b_flags |= B_ERROR;
1663 biodone(ap->a_bio);
1664 break;
1665 }
1666 return (error);
1667}
1668
1669/*
1670 * Read from a regular file. Iterate the related records and fill in the
1671 * BIO/BUF. Gaps are zero-filled.
1672 *
1673 * The support code in hammer_object.c should be used to deal with mixed
1674 * in-memory and on-disk records.
1675 *
1676 * XXX atime update
1677 */
1678static
1679int
1680hammer_vop_strategy_read(struct vop_strategy_args *ap)
1681{
1682 struct hammer_transaction trans;
1683 struct hammer_inode *ip;
1684 struct hammer_cursor cursor;
1685 hammer_record_ondisk_t rec;
1686 hammer_base_elm_t base;
1687 struct bio *bio;
1688 struct buf *bp;
1689 int64_t rec_offset;
1690 int64_t ran_end;
1691 int64_t tmp64;
1692 int error;
1693 int boff;
1694 int roff;
1695 int n;
1696
1697 bio = ap->a_bio;
1698 bp = bio->bio_buf;
1699 ip = ap->a_vp->v_data;
1700
1701 hammer_simple_transaction(&trans, ip->hmp);
1702 hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1703
1704 /*
1705 * Key range (begin and end inclusive) to scan. Note that the key's
1706 * stored in the actual records represent BASE+LEN, not BASE. The
1707 * first record containing bio_offset will have a key > bio_offset.
1708 */
1709 cursor.key_beg.obj_id = ip->obj_id;
1710 cursor.key_beg.create_tid = 0;
1711 cursor.key_beg.delete_tid = 0;
1712 cursor.key_beg.obj_type = 0;
1713 cursor.key_beg.key = bio->bio_offset + 1;
1714 cursor.asof = ip->obj_asof;
1715 cursor.flags |= HAMMER_CURSOR_ASOF | HAMMER_CURSOR_DATAEXTOK;
1716
1717 cursor.key_end = cursor.key_beg;
1718 KKASSERT(ip->ino_rec.base.base.obj_type == HAMMER_OBJTYPE_REGFILE);
1719#if 0
1720 if (ip->ino_rec.base.base.obj_type == HAMMER_OBJTYPE_DBFILE) {
1721 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
1722 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
1723 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1724 } else
1725#endif
1726 {
1727 ran_end = bio->bio_offset + bp->b_bufsize;
1728 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
1729 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
1730 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
1731 if (tmp64 < ran_end)
1732 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1733 else
1734 cursor.key_end.key = ran_end + MAXPHYS + 1;
1735 }
1736 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
1737
1738 error = hammer_ip_first(&cursor);
1739 boff = 0;
1740
1741 while (error == 0) {
1742 error = hammer_ip_resolve_data(&cursor);
1743 if (error)
1744 break;
1745 rec = cursor.record;
1746 base = &rec->base.base;
1747
1748 rec_offset = base->key - rec->data.base.data_len;
1749
1750 /*
1751 * Calculate the gap, if any, and zero-fill it.
1752 */
1753 n = (int)(rec_offset - (bio->bio_offset + boff));
1754 if (n > 0) {
1755 if (n > bp->b_bufsize - boff)
1756 n = bp->b_bufsize - boff;
1757 bzero((char *)bp->b_data + boff, n);
1758 boff += n;
1759 n = 0;
1760 }
1761
1762 /*
1763 * Calculate the data offset in the record and the number
1764 * of bytes we can copy.
1765 *
1766 * Note there is a degenerate case here where boff may
1767 * already be at bp->b_bufsize.
1768 */
1769 roff = -n;
1770 rec_offset += roff;
1771 n = rec->data.base.data_len - roff;
1772 KKASSERT(n > 0);
1773 if (n > bp->b_bufsize - boff)
1774 n = bp->b_bufsize - boff;
1775
1776 /*
1777 * If we cached a truncation point on our front-end the
1778 * on-disk version may still have physical records beyond
1779 * that point. Truncate visibility.
1780 */
1781 if (ip->trunc_off <= rec_offset)
1782 n = 0;
1783 else if (ip->trunc_off < rec_offset + n)
1784 n = (int)(ip->trunc_off - rec_offset);
1785
1786 /*
1787 * Copy
1788 */
1789 if (n) {
1790 bcopy((char *)cursor.data + roff,
1791 (char *)bp->b_data + boff, n);
1792 boff += n;
1793 }
1794 if (boff == bp->b_bufsize)
1795 break;
1796 error = hammer_ip_next(&cursor);
1797 }
1798 hammer_done_cursor(&cursor);
1799 hammer_done_transaction(&trans);
1800
1801 /*
1802 * There may have been a gap after the last record
1803 */
1804 if (error == ENOENT)
1805 error = 0;
1806 if (error == 0 && boff != bp->b_bufsize) {
1807 KKASSERT(boff < bp->b_bufsize);
1808 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
1809 /* boff = bp->b_bufsize; */
1810 }
1811 bp->b_resid = 0;
1812 bp->b_error = error;
1813 if (error)
1814 bp->b_flags |= B_ERROR;
1815 biodone(ap->a_bio);
1816 return(error);
1817}
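/*
 * Editorial sketch (not part of the original source): the record
 * overlap math in hammer_vop_strategy_read() above.  A data record's
 * B-Tree key stores BASE+LEN rather than BASE, so the record's
 * starting file offset is key - data_len.  A positive distance from
 * the current buffer position is a gap to zero-fill; a non-positive
 * one is the number of record bytes to skip before copying.
 * example_record_gap() is a hypothetical helper, not a HAMMER API.
 */
#if 0
static int
example_record_gap(int64_t rec_key, int data_len,
		   int64_t bio_offset, int boff)
{
	int64_t rec_offset = rec_key - data_len; /* record base offset */

	/* > 0: bytes to zero-fill; <= 0: -roff into the record data */
	return ((int)(rec_offset - (bio_offset + boff)));
}
#endif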
1818
1819/*
1820 * Write to a regular file. Because this is a strategy call the OS is
1821 * trying to actually sync data to the media. HAMMER can only flush
1822 * the entire inode (so the TID remains properly synchronized).
1823 *
1824 * Basically all we do here is place the bio on the inode's flush queue
1825 * and activate the flusher.
1826 */
1827static
1828int
1829hammer_vop_strategy_write(struct vop_strategy_args *ap)
1830{
1831 hammer_inode_t ip;
1832 struct bio *bio;
1833 struct buf *bp;
1834
1835 bio = ap->a_bio;
1836 bp = bio->bio_buf;
1837 ip = ap->a_vp->v_data;
1838
1839 if (ip->flags & HAMMER_INODE_RO) {
1840 bp->b_error = EROFS;
1841 bp->b_flags |= B_ERROR;
1842 biodone(ap->a_bio);
1843 return(EROFS);
1844 }
1845
1846 /*
1847 * If the inode is being flushed we cannot re-queue buffers
1848 * that it may already have flushed, or we could end up with duplicate
1849 * records in the database.
1850 */
1851 BUF_KERNPROC(bp);
1852 if (ip->flags & HAMMER_INODE_WRITE_ALT)
1853 TAILQ_INSERT_TAIL(&ip->bio_alt_list, bio, bio_act);
1854 else
1855 TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
1856 ++hammer_bio_count;
1857 hammer_modify_inode(NULL, ip, HAMMER_INODE_BUFS);
1858
1859 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1860#if 0
1861 /*
1862 * XXX
1863 *
1864 * If the write was not part of an integrated flush operation then
1865 * signal a flush.
1866 */
1867 if (ip->flush_state != HAMMER_FST_FLUSH ||
1868 (ip->flags & HAMMER_INODE_WRITE_ALT)) {
1869 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1870 }
1871#endif
1872 return(0);
1873}
1874
1875/*
1876 * Backend code which actually performs the write to the media. This
1877 * routine is typically called from the flusher. The bio will be disposed
1878 * of (biodone'd) by this routine.
1879 *
1880 * Iterate the related records and mark for deletion. If existing edge
1881 * records (left and right side) overlap our write they have to be marked
1882 * deleted and new records created, usually referencing a portion of the
1883 * original data. Then add a record to represent the buffer.
1884 */
1885int
1886hammer_dowrite(hammer_cursor_t cursor, hammer_inode_t ip, struct bio *bio)
1887{
1888 struct buf *bp = bio->bio_buf;
1889 int error;
1890
1891 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1892
1893 /*
1894 * If the inode is going or gone, just throw away any frontend
1895 * buffers.
1896 */
1897 if (ip->flags & HAMMER_INODE_DELETED) {
1898 bp->b_resid = 0;
1899 biodone(bio);
1900 --hammer_bio_count;
1901 return(0);
1902 }
1903
1904 /*
1905 * Delete any records overlapping our range. This function will
1906 * (eventually) properly truncate partial overlaps.
1907 */
1908 if (ip->sync_ino_rec.base.base.obj_type == HAMMER_OBJTYPE_DBFILE) {
1909 error = hammer_ip_delete_range(cursor, ip, bio->bio_offset,
1910 bio->bio_offset);
1911 } else {
1912 error = hammer_ip_delete_range(cursor, ip, bio->bio_offset,
1913 bio->bio_offset +
1914 bp->b_bufsize - 1);
1915 }
1916
1917 /*
1918 * Add a single record to cover the write. We can write a record
1919 * with only the actual file data - for example, a small 200 byte
1920 * file does not have to write out a 16K record.
1921 *
1922 * While the data size does not have to be aligned, we still do it
1923 * to reduce fragmentation in a future allocation model.
1924 */
1925 if (error == 0) {
1926 int limit_size;
1927
1928 if (ip->sync_ino_rec.ino_size - bio->bio_offset >
1929 bp->b_bufsize) {
1930 limit_size = bp->b_bufsize;
1931 } else {
1932 limit_size = (int)(ip->sync_ino_rec.ino_size -
1933 bio->bio_offset);
1934 KKASSERT(limit_size >= 0);
1935 limit_size = (limit_size + 63) & ~63;
1936 }
1937 if (limit_size) {
1938 error = hammer_ip_sync_data(cursor, ip, bio->bio_offset,
1939 bp->b_data, limit_size);
1940 }
1941 }
1942 if (error)
1943 Debugger("hammer_dowrite: error");
1944
1945 if (error) {
1946 bp->b_resid = bp->b_bufsize;
1947 bp->b_error = error;
1948 bp->b_flags |= B_ERROR;
1949 } else {
1950 bp->b_resid = 0;
1951 }
1952 biodone(bio);
1953 --hammer_bio_count;
1954 return(error);
1955}
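/*
 * Editorial sketch (not part of the original source): the record size
 * chosen by hammer_dowrite() above.  Only bytes up to the inode's
 * synchronized EOF are written, rounded up to a 64 byte boundary to
 * reduce fragmentation, so a small file never writes out a full 16K
 * record.  example_limit_size() is a hypothetical helper, not a
 * HAMMER API.
 */
#if 0
static int
example_limit_size(int64_t sync_ino_size, int64_t bio_offset, int bufsize)
{
	int limit_size;

	if (sync_ino_size - bio_offset > bufsize)
		limit_size = bufsize;	/* whole buffer is valid data */
	else
		limit_size = (int)(sync_ino_size - bio_offset);
	return ((limit_size + 63) & ~63);	/* 64 byte alignment */
}
#endif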
1956
1957/*
1958 * dounlink - disconnect a directory entry
1959 *
1960 * XXX whiteout support not really in yet
1961 */
1962static int
1963hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
1964 struct vnode *dvp, struct ucred *cred, int flags)
1965{
1966 struct namecache *ncp;
1967 hammer_inode_t dip;
1968 hammer_inode_t ip;
1969 hammer_record_ondisk_t rec;
1970 struct hammer_cursor cursor;
1971 int64_t namekey;
1972 int error;
1973
1974 /*
1975 * Calculate the namekey and setup the key range for the scan. This
1976 * works kinda like a chained hash table where the lower 32 bits
1977 * of the namekey synthesize the chain.
1978 *
1979 * The key range is inclusive of both key_beg and key_end.
1980 */
1981 dip = VTOI(dvp);
1982 ncp = nch->ncp;
1983
1984 if (dip->flags & HAMMER_INODE_RO)
1985 return (EROFS);
1986
1987 namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
1988retry:
1989 hammer_init_cursor(trans, &cursor, &dip->cache[0], dip);
1990 cursor.key_beg.obj_id = dip->obj_id;
1991 cursor.key_beg.key = namekey;
1992 cursor.key_beg.create_tid = 0;
1993 cursor.key_beg.delete_tid = 0;
1994 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1995 cursor.key_beg.obj_type = 0;
1996
1997 cursor.key_end = cursor.key_beg;
1998 cursor.key_end.key |= 0xFFFFFFFFULL;
1999 cursor.asof = dip->obj_asof;
2000 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2001
2002 /*
2003 * Scan all matching records (the chain), locate the one matching
2004 * the requested path component. The scan terminates with error
2005 * set to 0 on a match, ENOENT when the chain is exhausted, or
2006 * some other error code on failure.
2007 *
2008 * The hammer_ip_*() functions merge in-memory records with on-disk
2009 * records for the purposes of the search.
2010 */
2011 rec = NULL;
2012 error = hammer_ip_first(&cursor);
2013
2014 while (error == 0) {
2015 error = hammer_ip_resolve_data(&cursor);
2016 if (error)
2017 break;
2018 rec = cursor.record;
2019 if (ncp->nc_nlen == rec->entry.base.data_len &&
2020 bcmp(ncp->nc_name, cursor.data, ncp->nc_nlen) == 0) {
2021 break;
2022 }
2023 error = hammer_ip_next(&cursor);
2024 }
2025
2026 /*
2027 * If all is ok we have to get the inode so we can adjust nlinks.
2028 *
2029 * If the target is a directory, it must be empty.
2030 */
2031 if (error == 0) {
2032 ip = hammer_get_inode(trans, &dip->cache[1],
2033 rec->entry.obj_id,
2034 dip->hmp->asof, 0, &error);
2035 if (error == ENOENT) {
2036 kprintf("obj_id %016llx\n", rec->entry.obj_id);
2037 Debugger("ENOENT unlinking object that should exist");
2038 }
2039
2040 /*
2041 * If we are trying to remove a directory the directory must
2042 * be empty.
2043 *
2044 * WARNING: hammer_ip_check_directory_empty() may have to
2045 * terminate the cursor to avoid a deadlock. It is ok to
2046 * call hammer_done_cursor() twice.
2047 */
2048 if (error == 0 && ip->ino_rec.base.base.obj_type ==
2049 HAMMER_OBJTYPE_DIRECTORY) {
2050 error = hammer_ip_check_directory_empty(trans, &cursor,
2051 ip);
2052 }
2053
2054 /*
2055 * Delete the directory entry.
2056 *
2057 * WARNING: hammer_ip_del_directory() may have to terminate
2058 * the cursor to avoid a deadlock. It is ok to call
2059 * hammer_done_cursor() twice.
2060 */
2061 if (error == 0) {
2062 error = hammer_ip_del_directory(trans, &cursor,
2063 dip, ip);
2064 }
2065 if (error == 0) {
2066 cache_setunresolved(nch);
2067 cache_setvp(nch, NULL);
2068 /* XXX locking */
2069 if (ip->vp)
2070 cache_inval_vp(ip->vp, CINV_DESTROY);
2071 }
2072 hammer_rel_inode(ip, 0);
2073 }
2074 hammer_done_cursor(&cursor);
2075 if (error == EDEADLK)
2076 goto retry;
2077
2078 return (error);
2079}
2080
2081/************************************************************************
2082 * FIFO AND SPECFS OPS *
2083 ************************************************************************
2084 *
2085 */
2086
2087static int
2088hammer_vop_fifoclose (struct vop_close_args *ap)
2089{
2090 /* XXX update itimes */
2091 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
2092}
2093
2094static int
2095hammer_vop_fiforead (struct vop_read_args *ap)
2096{
2097 int error;
2098
2099 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2100 /* XXX update access time */
2101 return (error);
2102}
2103
2104static int
2105hammer_vop_fifowrite (struct vop_write_args *ap)
2106{
2107 int error;
2108
2109 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2110 /* XXX update access time */
2111 return (error);
2112}
2113
2114static int
2115hammer_vop_specclose (struct vop_close_args *ap)
2116{
2117 /* XXX update itimes */
2118 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2119}
2120
2121static int
2122hammer_vop_specread (struct vop_read_args *ap)
2123{
2124 /* XXX update access time */
2125 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2126}
2127
2128static int
2129hammer_vop_specwrite (struct vop_write_args *ap)
2130{
2131 /* XXX update last change time */
2132 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2133}
2134