Fix a path in the comment.
[dragonfly.git] / sys / vfs / hammer / hammer_vnops.c
427e5fc6 1/*
b84de5af 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
427e5fc6
MD
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
af209b0f 34 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.65 2008/06/10 22:30:21 dillon Exp $
427e5fc6
MD
35 */
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/fcntl.h>
41#include <sys/namecache.h>
42#include <sys/vnode.h>
43#include <sys/lockf.h>
44#include <sys/event.h>
45#include <sys/stat.h>
b3deaf57 46#include <sys/dirent.h>
c0ade690 47#include <vm/vm_extern.h>
7a04d74f 48#include <vfs/fifofs/fifo.h>
427e5fc6
MD
49#include "hammer.h"
50
51/*
52 * USERFS VNOPS
53 */
54/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
66325755
MD
55static int hammer_vop_fsync(struct vop_fsync_args *);
56static int hammer_vop_read(struct vop_read_args *);
57static int hammer_vop_write(struct vop_write_args *);
58static int hammer_vop_access(struct vop_access_args *);
59static int hammer_vop_advlock(struct vop_advlock_args *);
60static int hammer_vop_close(struct vop_close_args *);
61static int hammer_vop_ncreate(struct vop_ncreate_args *);
62static int hammer_vop_getattr(struct vop_getattr_args *);
63static int hammer_vop_nresolve(struct vop_nresolve_args *);
64static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
65static int hammer_vop_nlink(struct vop_nlink_args *);
66static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
67static int hammer_vop_nmknod(struct vop_nmknod_args *);
68static int hammer_vop_open(struct vop_open_args *);
69static int hammer_vop_pathconf(struct vop_pathconf_args *);
70static int hammer_vop_print(struct vop_print_args *);
71static int hammer_vop_readdir(struct vop_readdir_args *);
72static int hammer_vop_readlink(struct vop_readlink_args *);
73static int hammer_vop_nremove(struct vop_nremove_args *);
74static int hammer_vop_nrename(struct vop_nrename_args *);
75static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
76static int hammer_vop_setattr(struct vop_setattr_args *);
77static int hammer_vop_strategy(struct vop_strategy_args *);
78static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
79static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
7dc57964 80static int hammer_vop_ioctl(struct vop_ioctl_args *);
513ca7d7 81static int hammer_vop_mountctl(struct vop_mountctl_args *);
427e5fc6 82
7a04d74f
MD
83static int hammer_vop_fifoclose (struct vop_close_args *);
84static int hammer_vop_fiforead (struct vop_read_args *);
85static int hammer_vop_fifowrite (struct vop_write_args *);
86
87static int hammer_vop_specclose (struct vop_close_args *);
88static int hammer_vop_specread (struct vop_read_args *);
89static int hammer_vop_specwrite (struct vop_write_args *);
90
427e5fc6
MD
91struct vop_ops hammer_vnode_vops = {
92 .vop_default = vop_defaultop,
93 .vop_fsync = hammer_vop_fsync,
c0ade690
MD
94 .vop_getpages = vop_stdgetpages,
95 .vop_putpages = vop_stdputpages,
427e5fc6
MD
96 .vop_read = hammer_vop_read,
97 .vop_write = hammer_vop_write,
98 .vop_access = hammer_vop_access,
99 .vop_advlock = hammer_vop_advlock,
100 .vop_close = hammer_vop_close,
101 .vop_ncreate = hammer_vop_ncreate,
102 .vop_getattr = hammer_vop_getattr,
103 .vop_inactive = hammer_vop_inactive,
104 .vop_reclaim = hammer_vop_reclaim,
105 .vop_nresolve = hammer_vop_nresolve,
106 .vop_nlookupdotdot = hammer_vop_nlookupdotdot,
107 .vop_nlink = hammer_vop_nlink,
108 .vop_nmkdir = hammer_vop_nmkdir,
109 .vop_nmknod = hammer_vop_nmknod,
110 .vop_open = hammer_vop_open,
111 .vop_pathconf = hammer_vop_pathconf,
112 .vop_print = hammer_vop_print,
113 .vop_readdir = hammer_vop_readdir,
114 .vop_readlink = hammer_vop_readlink,
115 .vop_nremove = hammer_vop_nremove,
116 .vop_nrename = hammer_vop_nrename,
117 .vop_nrmdir = hammer_vop_nrmdir,
118 .vop_setattr = hammer_vop_setattr,
119 .vop_strategy = hammer_vop_strategy,
120 .vop_nsymlink = hammer_vop_nsymlink,
7dc57964 121 .vop_nwhiteout = hammer_vop_nwhiteout,
513ca7d7
MD
122 .vop_ioctl = hammer_vop_ioctl,
123 .vop_mountctl = hammer_vop_mountctl
427e5fc6
MD
124};
125
7a04d74f
MD
126struct vop_ops hammer_spec_vops = {
127 .vop_default = spec_vnoperate,
128 .vop_fsync = hammer_vop_fsync,
129 .vop_read = hammer_vop_specread,
130 .vop_write = hammer_vop_specwrite,
131 .vop_access = hammer_vop_access,
132 .vop_close = hammer_vop_specclose,
133 .vop_getattr = hammer_vop_getattr,
134 .vop_inactive = hammer_vop_inactive,
135 .vop_reclaim = hammer_vop_reclaim,
136 .vop_setattr = hammer_vop_setattr
137};
138
139struct vop_ops hammer_fifo_vops = {
140 .vop_default = fifo_vnoperate,
141 .vop_fsync = hammer_vop_fsync,
142 .vop_read = hammer_vop_fiforead,
143 .vop_write = hammer_vop_fifowrite,
144 .vop_access = hammer_vop_access,
145 .vop_close = hammer_vop_fifoclose,
146 .vop_getattr = hammer_vop_getattr,
147 .vop_inactive = hammer_vop_inactive,
148 .vop_reclaim = hammer_vop_reclaim,
149 .vop_setattr = hammer_vop_setattr
150};
151
0832c9bb
MD
152#ifdef DEBUG_TRUNCATE
153struct hammer_inode *HammerTruncIp;
154#endif
155
b84de5af
MD
156static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
157 struct vnode *dvp, struct ucred *cred, int flags);
8cd0a023
MD
158static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
159static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
0832c9bb
MD
160static void hammer_cleanup_write_io(hammer_inode_t ip);
161static void hammer_update_rsv_databufs(hammer_inode_t ip);
8cd0a023 162
427e5fc6
MD
163#if 0
164static
165int
166hammer_vop_vnoperate(struct vop_generic_args *ap)
167{
168 return (VOCALL(&hammer_vnode_vops, ap));
169}
170#endif
171
66325755
MD
172/*
173 * hammer_vop_fsync { vp, waitfor }
174 */
427e5fc6
MD
175static
176int
66325755 177hammer_vop_fsync(struct vop_fsync_args *ap)
427e5fc6 178{
b84de5af 179 hammer_inode_t ip = VTOI(ap->a_vp);
c0ade690 180
e8599db1 181 vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
af209b0f 182 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
b84de5af
MD
183 if (ap->a_waitfor == MNT_WAIT)
184 hammer_wait_inode(ip);
059819e3 185 return (ip->error);
427e5fc6
MD
186}
187
66325755
MD
188/*
189 * hammer_vop_read { vp, uio, ioflag, cred }
190 */
427e5fc6
MD
191static
192int
66325755 193hammer_vop_read(struct vop_read_args *ap)
427e5fc6 194{
66325755 195 struct hammer_transaction trans;
c0ade690 196 hammer_inode_t ip;
66325755
MD
197 off_t offset;
198 struct buf *bp;
199 struct uio *uio;
200 int error;
201 int n;
8cd0a023 202 int seqcount;
66325755
MD
203
204 if (ap->a_vp->v_type != VREG)
205 return (EINVAL);
206 ip = VTOI(ap->a_vp);
207 error = 0;
8cd0a023 208 seqcount = ap->a_ioflag >> 16;
66325755 209
8cd0a023 210 hammer_start_transaction(&trans, ip->hmp);
66325755
MD
211
212 /*
213 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
214 */
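	/*
	 * Each pass below works on the HAMMER_BUFSIZE-aligned buffer
	 * containing uio_offset: 'offset' is the byte offset within that
	 * buffer, and 'n' is clamped to the buffer tail, the remaining
	 * request, and the file EOF before uiomove() copies the data out.
	 */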
215 uio = ap->a_uio;
11ad5ade 216 while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
66325755 217 offset = uio->uio_offset & HAMMER_BUFMASK;
c0ade690 218#if 0
11ad5ade 219 error = cluster_read(ap->a_vp, ip->ino_data.size,
8cd0a023
MD
220 uio->uio_offset - offset, HAMMER_BUFSIZE,
221 MAXBSIZE, seqcount, &bp);
c0ade690
MD
222#endif
223 error = bread(ap->a_vp, uio->uio_offset - offset,
224 HAMMER_BUFSIZE, &bp);
66325755
MD
225 if (error) {
226 brelse(bp);
227 break;
228 }
c0ade690 229 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
66325755
MD
230 n = HAMMER_BUFSIZE - offset;
231 if (n > uio->uio_resid)
232 n = uio->uio_resid;
11ad5ade
MD
233 if (n > ip->ino_data.size - uio->uio_offset)
234 n = (int)(ip->ino_data.size - uio->uio_offset);
66325755 235 error = uiomove((char *)bp->b_data + offset, n, uio);
66325755 236 bqrelse(bp);
af209b0f
MD
237 if (error)
238 break;
66325755 239 }
b84de5af
MD
240 if ((ip->flags & HAMMER_INODE_RO) == 0 &&
241 (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
11ad5ade 242 ip->ino_leaf.atime = trans.time;
47637bff 243 hammer_modify_inode(ip, HAMMER_INODE_ITIMES);
b84de5af
MD
244 }
245 hammer_done_transaction(&trans);
66325755 246 return (error);
427e5fc6
MD
247}
248
66325755
MD
249/*
250 * hammer_vop_write { vp, uio, ioflag, cred }
251 */
427e5fc6
MD
252static
253int
66325755 254hammer_vop_write(struct vop_write_args *ap)
427e5fc6 255{
66325755
MD
256 struct hammer_transaction trans;
257 struct hammer_inode *ip;
258 struct uio *uio;
47637bff
MD
259 int rel_offset;
260 off_t base_offset;
66325755
MD
261 struct buf *bp;
262 int error;
263 int n;
c0ade690 264 int flags;
059819e3 265 int count;
66325755
MD
266
267 if (ap->a_vp->v_type != VREG)
268 return (EINVAL);
269 ip = VTOI(ap->a_vp);
270 error = 0;
271
d113fda1
MD
272 if (ip->flags & HAMMER_INODE_RO)
273 return (EROFS);
274
66325755
MD
275 /*
276 * Create a transaction to cover the operations we perform.
277 */
8cd0a023 278 hammer_start_transaction(&trans, ip->hmp);
66325755
MD
279 uio = ap->a_uio;
280
281 /*
282 * Check append mode
283 */
284 if (ap->a_ioflag & IO_APPEND)
11ad5ade 285 uio->uio_offset = ip->ino_data.size;
66325755
MD
286
287 /*
af209b0f
MD
288 * Check for illegal write offsets. Valid range is 0...2^63-1.
289 *
290 * NOTE: the base_off assignment is required to work around what
291 * I consider to be a GCC-4 optimization bug.
66325755 292 */
af209b0f
MD
293 if (uio->uio_offset < 0) {
294 hammer_done_transaction(&trans);
295 return (EFBIG);
296 }
297 base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
298 if (uio->uio_resid > 0 && base_offset <= 0) {
b84de5af 299 hammer_done_transaction(&trans);
66325755 300 return (EFBIG);
9c448776 301 }
66325755
MD
302
303 /*
304 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
305 */
059819e3 306 count = 0;
66325755 307 while (uio->uio_resid > 0) {
d5ef456e
MD
308 int fixsize = 0;
309
e63644f0
MD
310 if ((error = hammer_checkspace(trans.hmp)) != 0)
311 break;
312
059819e3 313 /*
47637bff
MD
314 * Do not allow HAMMER to blow out the buffer cache.
315 *
316 * Do not allow HAMMER to blow out system memory by
317 * accumulating too many records. Records are decoupled
318 * from the buffer cache.
319 *
320 * Always check at the beginning so separate writes are
321 * not able to bypass this code.
0832c9bb
MD
322 *
323 * WARNING: Cannot unlock vp when doing a NOCOPY write as
324 * part of a putpages operation. Doing so could cause us
325 * to deadlock against the VM system when we try to re-lock.
059819e3 326 */
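		/*
		 * Every 16th pass: unless this is a NOCOPY (putpages)
		 * write the vnode is unlocked and, barring IO_NOBWILL,
		 * bwillwrite() throttles dirty buffer growth; if too many
		 * in-memory records have accumulated (rsv_recs over
		 * hammer_limit_irecs) the inode is flushed and waited on
		 * before the vnode is re-locked and the copy continues.
		 */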
47637bff 327 if ((count++ & 15) == 0) {
0832c9bb
MD
328 if (uio->uio_segflg != UIO_NOCOPY) {
329 vn_unlock(ap->a_vp);
330 if ((ap->a_ioflag & IO_NOBWILL) == 0)
331 bwillwrite();
332 }
47637bff
MD
333 if (ip->rsv_recs > hammer_limit_irecs) {
334 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
335 hammer_wait_inode(ip);
336 }
0832c9bb
MD
337 if (uio->uio_segflg != UIO_NOCOPY)
338 vn_lock(ap->a_vp, LK_EXCLUSIVE|LK_RETRY);
059819e3
MD
339 }
340
47637bff
MD
341 rel_offset = (int)(uio->uio_offset & HAMMER_BUFMASK);
342 base_offset = uio->uio_offset & ~HAMMER_BUFMASK64;
343 n = HAMMER_BUFSIZE - rel_offset;
d5ef456e
MD
344 if (n > uio->uio_resid)
345 n = uio->uio_resid;
11ad5ade 346 if (uio->uio_offset + n > ip->ino_data.size) {
d5ef456e
MD
347 vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
348 fixsize = 1;
349 }
350
c0ade690
MD
351 if (uio->uio_segflg == UIO_NOCOPY) {
352 /*
353 * Issuing a write with the same data backing the
354 * buffer. Instantiate the buffer to collect the
355 * backing vm pages, then read-in any missing bits.
356 *
357 * This case is used by vop_stdputpages().
358 */
47637bff 359 bp = getblk(ap->a_vp, base_offset,
d5ef456e 360 HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
c0ade690
MD
361 if ((bp->b_flags & B_CACHE) == 0) {
362 bqrelse(bp);
47637bff 363 error = bread(ap->a_vp, base_offset,
c0ade690 364 HAMMER_BUFSIZE, &bp);
c0ade690 365 }
47637bff 366 } else if (rel_offset == 0 && uio->uio_resid >= HAMMER_BUFSIZE) {
c0ade690 367 /*
a5fddc16
MD
368 * Even though we are entirely overwriting the buffer
369 * we may still have to zero it out to avoid a
370 * mmap/write visibility issue.
c0ade690 371 */
47637bff 372 bp = getblk(ap->a_vp, base_offset,
d5ef456e 373 HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
a5fddc16
MD
374 if ((bp->b_flags & B_CACHE) == 0)
375 vfs_bio_clrbuf(bp);
47637bff 376 } else if (base_offset >= ip->ino_data.size) {
c0ade690 377 /*
a5fddc16
MD
378 * If the base offset of the buffer is beyond the
379 * file EOF, we don't have to issue a read.
c0ade690 380 */
47637bff 381 bp = getblk(ap->a_vp, base_offset,
d5ef456e 382 HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
66325755
MD
383 vfs_bio_clrbuf(bp);
384 } else {
c0ade690
MD
385 /*
386 * Partial overwrite, read in any missing bits then
387 * replace the portion being written.
388 */
47637bff 389 error = bread(ap->a_vp, base_offset,
66325755 390 HAMMER_BUFSIZE, &bp);
d5ef456e
MD
391 if (error == 0)
392 bheavy(bp);
66325755 393 }
47637bff
MD
394 if (error == 0) {
395 error = uiomove((char *)bp->b_data + rel_offset,
396 n, uio);
397 }
d5ef456e
MD
398
399 /*
400 * If we screwed up we have to undo any VM size changes we
401 * made.
402 */
66325755
MD
403 if (error) {
404 brelse(bp);
d5ef456e 405 if (fixsize) {
11ad5ade 406 vtruncbuf(ap->a_vp, ip->ino_data.size,
d5ef456e
MD
407 HAMMER_BUFSIZE);
408 }
66325755
MD
409 break;
410 }
c0ade690 411 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
11ad5ade
MD
412 if (ip->ino_data.size < uio->uio_offset) {
413 ip->ino_data.size = uio->uio_offset;
414 flags = HAMMER_INODE_DDIRTY;
415 vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
c0ade690 416 } else {
d113fda1 417 flags = 0;
66325755 418 }
11ad5ade 419 ip->ino_data.mtime = trans.time;
f3b0f382 420 flags |= HAMMER_INODE_ITIMES | HAMMER_INODE_BUFS;
11ad5ade 421 flags |= HAMMER_INODE_DDIRTY; /* XXX mtime */
47637bff 422 hammer_modify_inode(ip, flags);
32c90105 423
0832c9bb
MD
424 /*
425 * Try to keep track of cached dirty data.
426 */
e63644f0
MD
427 if ((bp->b_flags & B_DIRTY) == 0) {
428 ++ip->rsv_databufs;
429 ++ip->hmp->rsv_databufs;
430 }
431
47637bff
MD
432 /*
433 * Final buffer disposition.
434 */
66325755
MD
435 if (ap->a_ioflag & IO_SYNC) {
436 bwrite(bp);
437 } else if (ap->a_ioflag & IO_DIRECT) {
66325755 438 bawrite(bp);
47637bff 439#if 1
059819e3 440 } else if ((ap->a_ioflag >> 16) == IO_SEQMAX &&
34d829f7
MD
441 (uio->uio_offset & HAMMER_BUFMASK) == 0) {
442 /*
443 * If seqcount indicates sequential operation and
444 * we just finished filling a buffer, push it out
445 * now to prevent the buffer cache from becoming
446 * too full, which would trigger non-optimal
447 * flushes.
448 */
47637bff 449 bawrite(bp);
059819e3 450#endif
66325755 451 } else {
66325755
MD
452 bdwrite(bp);
453 }
454 }
b84de5af 455 hammer_done_transaction(&trans);
66325755 456 return (error);
427e5fc6
MD
457}
458
66325755
MD
459/*
460 * hammer_vop_access { vp, mode, cred }
461 */
427e5fc6
MD
462static
463int
66325755 464hammer_vop_access(struct vop_access_args *ap)
427e5fc6 465{
66325755
MD
466 struct hammer_inode *ip = VTOI(ap->a_vp);
467 uid_t uid;
468 gid_t gid;
469 int error;
470
471 uid = hammer_to_unix_xid(&ip->ino_data.uid);
472 gid = hammer_to_unix_xid(&ip->ino_data.gid);
473
474 error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
475 ip->ino_data.uflags);
476 return (error);
427e5fc6
MD
477}
478
66325755
MD
479/*
480 * hammer_vop_advlock { vp, id, op, fl, flags }
481 */
427e5fc6
MD
482static
483int
66325755 484hammer_vop_advlock(struct vop_advlock_args *ap)
427e5fc6 485{
66325755
MD
486 struct hammer_inode *ip = VTOI(ap->a_vp);
487
11ad5ade 488 return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
427e5fc6
MD
489}
490
66325755
MD
491/*
492 * hammer_vop_close { vp, fflag }
493 */
427e5fc6
MD
494static
495int
66325755 496hammer_vop_close(struct vop_close_args *ap)
427e5fc6 497{
a89aec1b 498 return (vop_stdclose(ap));
427e5fc6
MD
499}
500
66325755
MD
501/*
502 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
503 *
504 * The operating system has already ensured that the directory entry
505 * does not exist and done all appropriate namespace locking.
506 */
427e5fc6
MD
507static
508int
66325755 509hammer_vop_ncreate(struct vop_ncreate_args *ap)
427e5fc6 510{
66325755
MD
511 struct hammer_transaction trans;
512 struct hammer_inode *dip;
513 struct hammer_inode *nip;
514 struct nchandle *nch;
515 int error;
516
517 nch = ap->a_nch;
518 dip = VTOI(ap->a_dvp);
519
d113fda1
MD
520 if (dip->flags & HAMMER_INODE_RO)
521 return (EROFS);
e63644f0
MD
522 if ((error = hammer_checkspace(dip->hmp)) != 0)
523 return (error);
d113fda1 524
66325755
MD
525 /*
526 * Create a transaction to cover the operations we perform.
527 */
8cd0a023 528 hammer_start_transaction(&trans, dip->hmp);
66325755
MD
529
530 /*
531 * Create a new filesystem object of the requested type. The
b84de5af
MD
532 * returned inode will be referenced and shared-locked to prevent
533 * it from being moved to the flusher.
66325755 534 */
8cd0a023
MD
535
536 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
66325755 537 if (error) {
77062c8a 538 hkprintf("hammer_create_inode error %d\n", error);
b84de5af 539 hammer_done_transaction(&trans);
66325755
MD
540 *ap->a_vpp = NULL;
541 return (error);
542 }
66325755
MD
543
544 /*
545 * Add the new filesystem object to the directory. This will also
546 * bump the inode's link count.
547 */
a89aec1b 548 error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
0b075555 549 if (error)
77062c8a 550 hkprintf("hammer_ip_add_directory error %d\n", error);
66325755
MD
551
552 /*
553 * Finish up.
554 */
555 if (error) {
a89aec1b 556 hammer_rel_inode(nip, 0);
b84de5af 557 hammer_done_transaction(&trans);
66325755
MD
558 *ap->a_vpp = NULL;
559 } else {
e8599db1 560 error = hammer_get_vnode(nip, ap->a_vpp);
b84de5af 561 hammer_done_transaction(&trans);
a89aec1b
MD
562 hammer_rel_inode(nip, 0);
563 if (error == 0) {
564 cache_setunresolved(ap->a_nch);
565 cache_setvp(ap->a_nch, *ap->a_vpp);
566 }
66325755
MD
567 }
568 return (error);
427e5fc6
MD
569}
570
66325755
MD
571/*
572 * hammer_vop_getattr { vp, vap }
98f7132d
MD
573 *
574 * Retrieve an inode's attribute information. When accessing inodes
575 * historically we fake the atime field to ensure consistent results.
576 * The atime field is stored in the B-Tree element and allowed to be
577 * updated without cycling the element.
66325755 578 */
427e5fc6
MD
579static
580int
66325755 581hammer_vop_getattr(struct vop_getattr_args *ap)
427e5fc6 582{
66325755
MD
583 struct hammer_inode *ip = VTOI(ap->a_vp);
584 struct vattr *vap = ap->a_vap;
585
586#if 0
587 if (cache_check_fsmid_vp(ap->a_vp, &ip->fsmid) &&
588 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0 &&
7f7c1f84 589 ip->obj_asof == XXX
66325755
MD
590 ) {
591 /* LAZYMOD XXX */
592 }
593 hammer_itimes(ap->a_vp);
594#endif
595
596 vap->va_fsid = ip->hmp->fsid_udev;
11ad5ade 597 vap->va_fileid = ip->ino_leaf.base.obj_id;
66325755 598 vap->va_mode = ip->ino_data.mode;
11ad5ade 599 vap->va_nlink = ip->ino_data.nlinks;
66325755
MD
600 vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
601 vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
602 vap->va_rmajor = 0;
603 vap->va_rminor = 0;
11ad5ade 604 vap->va_size = ip->ino_data.size;
98f7132d
MD
605 if (ip->flags & HAMMER_INODE_RO)
606 hammer_to_timespec(ip->ino_data.mtime, &vap->va_atime);
607 else
608 hammer_to_timespec(ip->ino_leaf.atime, &vap->va_atime);
11ad5ade 609 hammer_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
66325755
MD
610 hammer_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
611 vap->va_flags = ip->ino_data.uflags;
612 vap->va_gen = 1; /* hammer inums are unique for all time */
bf686dbe 613 vap->va_blocksize = HAMMER_BUFSIZE;
11ad5ade
MD
614 vap->va_bytes = (ip->ino_data.size + 63) & ~63;
615 vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
66325755
MD
616 vap->va_filerev = 0; /* XXX */
617 /* mtime uniquely identifies any adjustments made to the file */
11ad5ade 618 vap->va_fsmid = ip->ino_data.mtime;
66325755
MD
619 vap->va_uid_uuid = ip->ino_data.uid;
620 vap->va_gid_uuid = ip->ino_data.gid;
621 vap->va_fsid_uuid = ip->hmp->fsid;
622 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
623 VA_FSID_UUID_VALID;
7a04d74f 624
11ad5ade 625 switch (ip->ino_data.obj_type) {
7a04d74f
MD
626 case HAMMER_OBJTYPE_CDEV:
627 case HAMMER_OBJTYPE_BDEV:
628 vap->va_rmajor = ip->ino_data.rmajor;
629 vap->va_rminor = ip->ino_data.rminor;
630 break;
631 default:
632 break;
633 }
634
66325755 635 return(0);
427e5fc6
MD
636}
637
66325755
MD
638/*
639 * hammer_vop_nresolve { nch, dvp, cred }
640 *
641 * Locate the requested directory entry.
642 */
427e5fc6
MD
643static
644int
66325755 645hammer_vop_nresolve(struct vop_nresolve_args *ap)
427e5fc6 646{
36f82b23 647 struct hammer_transaction trans;
66325755 648 struct namecache *ncp;
7f7c1f84
MD
649 hammer_inode_t dip;
650 hammer_inode_t ip;
651 hammer_tid_t asof;
8cd0a023 652 struct hammer_cursor cursor;
66325755
MD
653 struct vnode *vp;
654 int64_t namekey;
655 int error;
7f7c1f84
MD
656 int i;
657 int nlen;
d113fda1 658 int flags;
6a37e7e4 659 u_int64_t obj_id;
7f7c1f84
MD
660
661 /*
662 * Misc initialization, plus handle as-of name extensions. Look for
663 * the '@@' extension. Note that as-of files and directories cannot
664 * be modified.
7f7c1f84
MD
665 */
666 dip = VTOI(ap->a_dvp);
667 ncp = ap->a_nch->ncp;
668 asof = dip->obj_asof;
669 nlen = ncp->nc_nlen;
d113fda1 670 flags = dip->flags;
7f7c1f84 671
36f82b23
MD
672 hammer_simple_transaction(&trans, dip->hmp);
673
7f7c1f84
MD
674 for (i = 0; i < nlen; ++i) {
675 if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
d113fda1 676 asof = hammer_str_to_tid(ncp->nc_name + i + 2);
d113fda1 677 flags |= HAMMER_INODE_RO;
7f7c1f84
MD
678 break;
679 }
680 }
681 nlen = i;
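	/*
	 * At this point a name of the form "component@@<tid>" has been
	 * split: 'asof' holds the transaction id parsed from the text
	 * after the '@@', the lookup is forced read-only, and nlen has
	 * been shortened to the component in front of the '@@'.
	 */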
66325755 682
d113fda1
MD
683 /*
684 * If there is no path component the time extension is relative to
685 * dip.
686 */
687 if (nlen == 0) {
36f82b23 688 ip = hammer_get_inode(&trans, &dip->cache[1], dip->obj_id,
61aeeb33 689 asof, flags, &error);
d113fda1 690 if (error == 0) {
e8599db1 691 error = hammer_get_vnode(ip, &vp);
d113fda1
MD
692 hammer_rel_inode(ip, 0);
693 } else {
694 vp = NULL;
695 }
696 if (error == 0) {
697 vn_unlock(vp);
698 cache_setvp(ap->a_nch, vp);
699 vrele(vp);
700 }
36f82b23 701 goto done;
d113fda1
MD
702 }
703
8cd0a023
MD
704 /*
705 * Calculate the namekey and setup the key range for the scan. This
706 * works kinda like a chained hash table where the lower 32 bits
707 * of the namekey synthesize the chain.
708 *
709 * The key range is inclusive of both key_beg and key_end.
710 */
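	/*
	 * hammer_directory_namekey() hashes the component name into the
	 * upper bits of the 64 bit key; the low 32 bits are reserved for
	 * hash collisions, so the scan below covers namekey through
	 * namekey | 0xFFFFFFFF and compares each entry's name to find
	 * the exact match.
	 */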
7f7c1f84 711 namekey = hammer_directory_namekey(ncp->nc_name, nlen);
66325755 712
4e17f465 713 error = hammer_init_cursor(&trans, &cursor, &dip->cache[0], dip);
2f85fa4d 714 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
8cd0a023
MD
715 cursor.key_beg.obj_id = dip->obj_id;
716 cursor.key_beg.key = namekey;
d5530d22 717 cursor.key_beg.create_tid = 0;
8cd0a023
MD
718 cursor.key_beg.delete_tid = 0;
719 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
720 cursor.key_beg.obj_type = 0;
66325755 721
8cd0a023
MD
722 cursor.key_end = cursor.key_beg;
723 cursor.key_end.key |= 0xFFFFFFFFULL;
d5530d22
MD
724 cursor.asof = asof;
725 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
66325755
MD
726
727 /*
8cd0a023 728 * Scan all matching records (the chain), locate the one matching
a89aec1b 729 * the requested path component.
8cd0a023
MD
730 *
731 * The hammer_ip_*() functions merge in-memory records with on-disk
732 * records for the purposes of the search.
66325755 733 */
6a37e7e4
MD
734 obj_id = 0;
735
4e17f465 736 if (error == 0) {
4e17f465
MD
737 error = hammer_ip_first(&cursor);
738 while (error == 0) {
739 error = hammer_ip_resolve_data(&cursor);
740 if (error)
741 break;
11ad5ade
MD
742 if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
743 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
744 obj_id = cursor.data->entry.obj_id;
4e17f465
MD
745 break;
746 }
747 error = hammer_ip_next(&cursor);
66325755
MD
748 }
749 }
6a37e7e4 750 hammer_done_cursor(&cursor);
66325755 751 if (error == 0) {
36f82b23 752 ip = hammer_get_inode(&trans, &dip->cache[1],
6a37e7e4 753 obj_id, asof, flags, &error);
7f7c1f84 754 if (error == 0) {
e8599db1 755 error = hammer_get_vnode(ip, &vp);
7f7c1f84
MD
756 hammer_rel_inode(ip, 0);
757 } else {
758 vp = NULL;
759 }
66325755
MD
760 if (error == 0) {
761 vn_unlock(vp);
762 cache_setvp(ap->a_nch, vp);
763 vrele(vp);
764 }
765 } else if (error == ENOENT) {
766 cache_setvp(ap->a_nch, NULL);
767 }
36f82b23 768done:
b84de5af 769 hammer_done_transaction(&trans);
66325755 770 return (error);
427e5fc6
MD
771}
772
66325755
MD
773/*
774 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
775 *
776 * Locate the parent directory of a directory vnode.
777 *
778 * dvp is referenced but not locked. *vpp must be returned referenced and
779 * locked. A parent_obj_id of 0 does not necessarily indicate that we are
780 * at the root; instead it could indicate that the directory we were in was
781 * removed.
42c7d26b
MD
782 *
783 * NOTE: as-of sequences are not linked into the directory structure. If
784 * we are at the root with a different asof than the mount point, reload
785 * the same directory with the mount point's asof. I'm not sure what this
786 * will do to NFS. We encode ASOF stamps in NFS file handles so it might not
787 * get confused, but it hasn't been tested.
66325755 788 */
427e5fc6
MD
789static
790int
66325755 791hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
427e5fc6 792{
36f82b23 793 struct hammer_transaction trans;
66325755 794 struct hammer_inode *dip;
d113fda1 795 struct hammer_inode *ip;
42c7d26b
MD
796 int64_t parent_obj_id;
797 hammer_tid_t asof;
d113fda1 798 int error;
66325755
MD
799
800 dip = VTOI(ap->a_dvp);
42c7d26b
MD
801 asof = dip->obj_asof;
802 parent_obj_id = dip->ino_data.parent_obj_id;
803
804 if (parent_obj_id == 0) {
805 if (dip->obj_id == HAMMER_OBJID_ROOT &&
806 asof != dip->hmp->asof) {
807 parent_obj_id = dip->obj_id;
808 asof = dip->hmp->asof;
809 *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
810 ksnprintf(*ap->a_fakename, 19, "0x%016llx",
811 dip->obj_asof);
812 } else {
813 *ap->a_vpp = NULL;
814 return ENOENT;
815 }
66325755 816 }
d113fda1 817
36f82b23
MD
818 hammer_simple_transaction(&trans, dip->hmp);
819
820 ip = hammer_get_inode(&trans, &dip->cache[1], parent_obj_id,
42c7d26b 821 asof, dip->flags, &error);
36f82b23 822 if (ip) {
e8599db1 823 error = hammer_get_vnode(ip, ap->a_vpp);
36f82b23
MD
824 hammer_rel_inode(ip, 0);
825 } else {
d113fda1 826 *ap->a_vpp = NULL;
d113fda1 827 }
b84de5af 828 hammer_done_transaction(&trans);
d113fda1 829 return (error);
427e5fc6
MD
830}
831
66325755
MD
832/*
833 * hammer_vop_nlink { nch, dvp, vp, cred }
834 */
427e5fc6
MD
835static
836int
66325755 837hammer_vop_nlink(struct vop_nlink_args *ap)
427e5fc6 838{
66325755
MD
839 struct hammer_transaction trans;
840 struct hammer_inode *dip;
841 struct hammer_inode *ip;
842 struct nchandle *nch;
843 int error;
844
845 nch = ap->a_nch;
846 dip = VTOI(ap->a_dvp);
847 ip = VTOI(ap->a_vp);
848
d113fda1
MD
849 if (dip->flags & HAMMER_INODE_RO)
850 return (EROFS);
851 if (ip->flags & HAMMER_INODE_RO)
852 return (EROFS);
e63644f0
MD
853 if ((error = hammer_checkspace(dip->hmp)) != 0)
854 return (error);
d113fda1 855
66325755
MD
856 /*
857 * Create a transaction to cover the operations we perform.
858 */
8cd0a023 859 hammer_start_transaction(&trans, dip->hmp);
66325755
MD
860
861 /*
862 * Add the filesystem object to the directory. Note that neither
863 * dip nor ip are referenced or locked, but their vnodes are
864 * referenced. This function will bump the inode's link count.
865 */
a89aec1b 866 error = hammer_ip_add_directory(&trans, dip, nch->ncp, ip);
66325755
MD
867
868 /*
869 * Finish up.
870 */
b84de5af 871 if (error == 0) {
6b4f890b
MD
872 cache_setunresolved(nch);
873 cache_setvp(nch, ap->a_vp);
66325755 874 }
b84de5af 875 hammer_done_transaction(&trans);
66325755 876 return (error);
427e5fc6
MD
877}
878
66325755
MD
879/*
880 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
881 *
882 * The operating system has already ensured that the directory entry
883 * does not exist and done all appropriate namespace locking.
884 */
427e5fc6
MD
885static
886int
66325755 887hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
427e5fc6 888{
66325755
MD
889 struct hammer_transaction trans;
890 struct hammer_inode *dip;
891 struct hammer_inode *nip;
892 struct nchandle *nch;
893 int error;
894
895 nch = ap->a_nch;
896 dip = VTOI(ap->a_dvp);
897
d113fda1
MD
898 if (dip->flags & HAMMER_INODE_RO)
899 return (EROFS);
e63644f0
MD
900 if ((error = hammer_checkspace(dip->hmp)) != 0)
901 return (error);
d113fda1 902
66325755
MD
903 /*
904 * Create a transaction to cover the operations we perform.
905 */
8cd0a023 906 hammer_start_transaction(&trans, dip->hmp);
66325755
MD
907
908 /*
909 * Create a new filesystem object of the requested type. The
8cd0a023 910 * returned inode will be referenced but not locked.
66325755 911 */
8cd0a023 912 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
66325755 913 if (error) {
77062c8a 914 hkprintf("hammer_mkdir error %d\n", error);
b84de5af 915 hammer_done_transaction(&trans);
66325755
MD
916 *ap->a_vpp = NULL;
917 return (error);
918 }
66325755
MD
919 /*
920 * Add the new filesystem object to the directory. This will also
921 * bump the inode's link count.
922 */
a89aec1b 923 error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
0b075555 924 if (error)
77062c8a 925 hkprintf("hammer_mkdir (add) error %d\n", error);
66325755
MD
926
927 /*
928 * Finish up.
929 */
930 if (error) {
a89aec1b 931 hammer_rel_inode(nip, 0);
66325755
MD
932 *ap->a_vpp = NULL;
933 } else {
e8599db1 934 error = hammer_get_vnode(nip, ap->a_vpp);
a89aec1b
MD
935 hammer_rel_inode(nip, 0);
936 if (error == 0) {
937 cache_setunresolved(ap->a_nch);
938 cache_setvp(ap->a_nch, *ap->a_vpp);
939 }
66325755 940 }
b84de5af 941 hammer_done_transaction(&trans);
66325755 942 return (error);
427e5fc6
MD
943}
944
66325755
MD
945/*
946 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
947 *
948 * The operating system has already ensured that the directory entry
949 * does not exist and done all appropriate namespace locking.
950 */
427e5fc6
MD
951static
952int
66325755 953hammer_vop_nmknod(struct vop_nmknod_args *ap)
427e5fc6 954{
66325755
MD
955 struct hammer_transaction trans;
956 struct hammer_inode *dip;
957 struct hammer_inode *nip;
958 struct nchandle *nch;
959 int error;
960
961 nch = ap->a_nch;
962 dip = VTOI(ap->a_dvp);
963
d113fda1
MD
964 if (dip->flags & HAMMER_INODE_RO)
965 return (EROFS);
e63644f0
MD
966 if ((error = hammer_checkspace(dip->hmp)) != 0)
967 return (error);
d113fda1 968
66325755
MD
969 /*
970 * Create a transaction to cover the operations we perform.
971 */
8cd0a023 972 hammer_start_transaction(&trans, dip->hmp);
66325755
MD
973
974 /*
975 * Create a new filesystem object of the requested type. The
8cd0a023 976 * returned inode will be referenced but not locked.
66325755 977 */
8cd0a023 978 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
66325755 979 if (error) {
b84de5af 980 hammer_done_transaction(&trans);
66325755
MD
981 *ap->a_vpp = NULL;
982 return (error);
983 }
66325755
MD
984
985 /*
986 * Add the new filesystem object to the directory. This will also
987 * bump the inode's link count.
988 */
a89aec1b 989 error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
66325755
MD
990
991 /*
992 * Finish up.
993 */
994 if (error) {
a89aec1b 995 hammer_rel_inode(nip, 0);
66325755
MD
996 *ap->a_vpp = NULL;
997 } else {
e8599db1 998 error = hammer_get_vnode(nip, ap->a_vpp);
a89aec1b
MD
999 hammer_rel_inode(nip, 0);
1000 if (error == 0) {
1001 cache_setunresolved(ap->a_nch);
1002 cache_setvp(ap->a_nch, *ap->a_vpp);
1003 }
66325755 1004 }
b84de5af 1005 hammer_done_transaction(&trans);
66325755 1006 return (error);
427e5fc6
MD
1007}
1008
66325755
MD
1009/*
1010 * hammer_vop_open { vp, mode, cred, fp }
1011 */
427e5fc6
MD
1012static
1013int
66325755 1014hammer_vop_open(struct vop_open_args *ap)
427e5fc6 1015{
9f5097dc
MD
1016 hammer_inode_t ip;
1017
1018 ip = VTOI(ap->a_vp);
1019
1020 if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
d113fda1 1021 return (EROFS);
a89aec1b 1022 return(vop_stdopen(ap));
427e5fc6
MD
1023}
1024
66325755
MD
1025/*
1026 * hammer_vop_pathconf { vp, name, retval }
1027 */
427e5fc6
MD
1028static
1029int
66325755 1030hammer_vop_pathconf(struct vop_pathconf_args *ap)
427e5fc6
MD
1031{
1032 return EOPNOTSUPP;
1033}
1034
66325755
MD
1035/*
1036 * hammer_vop_print { vp }
1037 */
427e5fc6
MD
1038static
1039int
66325755 1040hammer_vop_print(struct vop_print_args *ap)
427e5fc6
MD
1041{
1042 return EOPNOTSUPP;
1043}
1044
66325755 1045/*
6b4f890b 1046 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
66325755 1047 */
427e5fc6
MD
1048static
1049int
66325755 1050hammer_vop_readdir(struct vop_readdir_args *ap)
427e5fc6 1051{
36f82b23 1052 struct hammer_transaction trans;
6b4f890b
MD
1053 struct hammer_cursor cursor;
1054 struct hammer_inode *ip;
1055 struct uio *uio;
6b4f890b
MD
1056 hammer_base_elm_t base;
1057 int error;
1058 int cookie_index;
1059 int ncookies;
1060 off_t *cookies;
1061 off_t saveoff;
1062 int r;
1063
1064 ip = VTOI(ap->a_vp);
1065 uio = ap->a_uio;
b3deaf57
MD
1066 saveoff = uio->uio_offset;
1067
1068 if (ap->a_ncookies) {
1069 ncookies = uio->uio_resid / 16 + 1;
1070 if (ncookies > 1024)
1071 ncookies = 1024;
1072 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1073 cookie_index = 0;
1074 } else {
1075 ncookies = -1;
1076 cookies = NULL;
1077 cookie_index = 0;
1078 }
1079
36f82b23
MD
1080 hammer_simple_transaction(&trans, ip->hmp);
1081
b3deaf57
MD
1082 /*
1083 * Handle artificial entries
1084 */
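	/*
	 * "." and ".." are synthesized at offsets 0 and 1; real entries
	 * come from the B-Tree scan further down, which resumes at the
	 * directory key stored in saveoff, and that key doubles as the
	 * seek offset / cookie handed back to the caller.
	 */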
1085 error = 0;
1086 if (saveoff == 0) {
1087 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1088 if (r)
1089 goto done;
1090 if (cookies)
1091 cookies[cookie_index] = saveoff;
1092 ++saveoff;
1093 ++cookie_index;
1094 if (cookie_index == ncookies)
1095 goto done;
1096 }
1097 if (saveoff == 1) {
1098 if (ip->ino_data.parent_obj_id) {
1099 r = vop_write_dirent(&error, uio,
1100 ip->ino_data.parent_obj_id,
1101 DT_DIR, 2, "..");
1102 } else {
1103 r = vop_write_dirent(&error, uio,
1104 ip->obj_id, DT_DIR, 2, "..");
1105 }
1106 if (r)
1107 goto done;
1108 if (cookies)
1109 cookies[cookie_index] = saveoff;
1110 ++saveoff;
1111 ++cookie_index;
1112 if (cookie_index == ncookies)
1113 goto done;
1114 }
6b4f890b
MD
1115
1116 /*
1117 * Key range (begin and end inclusive) to scan. Directory keys
1118 * directly translate to a 64 bit 'seek' position.
1119 */
4e17f465 1120 hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
2f85fa4d 1121 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
6b4f890b 1122 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1123 cursor.key_beg.create_tid = 0;
6b4f890b
MD
1124 cursor.key_beg.delete_tid = 0;
1125 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1126 cursor.key_beg.obj_type = 0;
b3deaf57 1127 cursor.key_beg.key = saveoff;
6b4f890b
MD
1128
1129 cursor.key_end = cursor.key_beg;
1130 cursor.key_end.key = HAMMER_MAX_KEY;
d5530d22
MD
1131 cursor.asof = ip->obj_asof;
1132 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
6b4f890b 1133
4e17f465 1134 error = hammer_ip_first(&cursor);
6b4f890b
MD
1135
1136 while (error == 0) {
11ad5ade 1137 error = hammer_ip_resolve_data(&cursor);
6b4f890b
MD
1138 if (error)
1139 break;
11ad5ade 1140 base = &cursor.leaf->base;
6b4f890b 1141 saveoff = base->key;
11ad5ade 1142 KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
6b4f890b 1143
7a04d74f
MD
1144 if (base->obj_id != ip->obj_id)
1145 panic("readdir: bad record at %p", cursor.node);
1146
6b4f890b 1147 r = vop_write_dirent(
11ad5ade
MD
1148 &error, uio, cursor.data->entry.obj_id,
1149 hammer_get_dtype(cursor.leaf->base.obj_type),
1150 cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
1151 (void *)cursor.data->entry.name);
6b4f890b
MD
1152 if (r)
1153 break;
1154 ++saveoff;
1155 if (cookies)
1156 cookies[cookie_index] = base->key;
1157 ++cookie_index;
1158 if (cookie_index == ncookies)
1159 break;
1160 error = hammer_ip_next(&cursor);
1161 }
1162 hammer_done_cursor(&cursor);
1163
b3deaf57 1164done:
b84de5af 1165 hammer_done_transaction(&trans);
36f82b23 1166
6b4f890b
MD
1167 if (ap->a_eofflag)
1168 *ap->a_eofflag = (error == ENOENT);
6b4f890b
MD
1169 uio->uio_offset = saveoff;
1170 if (error && cookie_index == 0) {
b3deaf57
MD
1171 if (error == ENOENT)
1172 error = 0;
6b4f890b
MD
1173 if (cookies) {
1174 kfree(cookies, M_TEMP);
1175 *ap->a_ncookies = 0;
1176 *ap->a_cookies = NULL;
1177 }
1178 } else {
7a04d74f
MD
1179 if (error == ENOENT)
1180 error = 0;
6b4f890b
MD
1181 if (cookies) {
1182 *ap->a_ncookies = cookie_index;
1183 *ap->a_cookies = cookies;
1184 }
1185 }
1186 return(error);
427e5fc6
MD
1187}
1188
66325755
MD
1189/*
1190 * hammer_vop_readlink { vp, uio, cred }
1191 */
427e5fc6
MD
1192static
1193int
66325755 1194hammer_vop_readlink(struct vop_readlink_args *ap)
427e5fc6 1195{
36f82b23 1196 struct hammer_transaction trans;
7a04d74f
MD
1197 struct hammer_cursor cursor;
1198 struct hammer_inode *ip;
1199 int error;
1200
1201 ip = VTOI(ap->a_vp);
36f82b23 1202
2f85fa4d
MD
1203 /*
1204 * Shortcut if the symlink data was stuffed into ino_data.
1205 */
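	/*
	 * Symlinks no longer than HAMMER_INODE_BASESYMLEN bytes are
	 * embedded directly in ino_data.ext.symlink and copied out here;
	 * longer targets are stored as a separate HAMMER_RECTYPE_FIX
	 * record keyed by HAMMER_FIXKEY_SYMLINK and located with the
	 * cursor below.
	 */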
1206 if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
1207 error = uiomove(ip->ino_data.ext.symlink,
1208 ip->ino_data.size, ap->a_uio);
1209 return(error);
1210 }
36f82b23 1211
2f85fa4d
MD
1212 /*
1213 * Long version
1214 */
1215 hammer_simple_transaction(&trans, ip->hmp);
4e17f465 1216 hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
7a04d74f
MD
1217
1218 /*
1219 * Key range (begin and end inclusive) to scan. Directory keys
1220 * directly translate to a 64 bit 'seek' position.
1221 */
2f85fa4d 1222 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC; /* XXX */
7a04d74f 1223 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1224 cursor.key_beg.create_tid = 0;
7a04d74f
MD
1225 cursor.key_beg.delete_tid = 0;
1226 cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1227 cursor.key_beg.obj_type = 0;
1228 cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
d5530d22
MD
1229 cursor.asof = ip->obj_asof;
1230 cursor.flags |= HAMMER_CURSOR_ASOF;
7a04d74f 1231
45a014dc 1232 error = hammer_ip_lookup(&cursor);
7a04d74f
MD
1233 if (error == 0) {
1234 error = hammer_ip_resolve_data(&cursor);
1235 if (error == 0) {
11ad5ade
MD
1236 KKASSERT(cursor.leaf->data_len >=
1237 HAMMER_SYMLINK_NAME_OFF);
1238 error = uiomove(cursor.data->symlink.name,
1239 cursor.leaf->data_len -
1240 HAMMER_SYMLINK_NAME_OFF,
7a04d74f
MD
1241 ap->a_uio);
1242 }
1243 }
1244 hammer_done_cursor(&cursor);
b84de5af 1245 hammer_done_transaction(&trans);
7a04d74f 1246 return(error);
427e5fc6
MD
1247}
1248
66325755
MD
1249/*
1250 * hammer_vop_nremove { nch, dvp, cred }
1251 */
427e5fc6
MD
1252static
1253int
66325755 1254hammer_vop_nremove(struct vop_nremove_args *ap)
427e5fc6 1255{
b84de5af 1256 struct hammer_transaction trans;
e63644f0 1257 struct hammer_inode *dip;
b84de5af
MD
1258 int error;
1259
e63644f0
MD
1260 dip = VTOI(ap->a_dvp);
1261
1262 if (hammer_nohistory(dip) == 0 &&
1263 (error = hammer_checkspace(dip->hmp)) != 0) {
1264 return (error);
1265 }
1266
1267 hammer_start_transaction(&trans, dip->hmp);
b84de5af
MD
1268 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
1269 hammer_done_transaction(&trans);
1270
1271 return (error);
427e5fc6
MD
1272}
1273
66325755
MD
1274/*
1275 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1276 */
427e5fc6
MD
1277static
1278int
66325755 1279hammer_vop_nrename(struct vop_nrename_args *ap)
427e5fc6 1280{
8cd0a023
MD
1281 struct hammer_transaction trans;
1282 struct namecache *fncp;
1283 struct namecache *tncp;
1284 struct hammer_inode *fdip;
1285 struct hammer_inode *tdip;
1286 struct hammer_inode *ip;
1287 struct hammer_cursor cursor;
8cd0a023 1288 int64_t namekey;
11ad5ade 1289 int nlen, error;
8cd0a023
MD
1290
1291 fdip = VTOI(ap->a_fdvp);
1292 tdip = VTOI(ap->a_tdvp);
1293 fncp = ap->a_fnch->ncp;
1294 tncp = ap->a_tnch->ncp;
b3deaf57
MD
1295 ip = VTOI(fncp->nc_vp);
1296 KKASSERT(ip != NULL);
d113fda1
MD
1297
1298 if (fdip->flags & HAMMER_INODE_RO)
1299 return (EROFS);
1300 if (tdip->flags & HAMMER_INODE_RO)
1301 return (EROFS);
1302 if (ip->flags & HAMMER_INODE_RO)
1303 return (EROFS);
e63644f0
MD
1304 if ((error = hammer_checkspace(fdip->hmp)) != 0)
1305 return (error);
d113fda1 1306
8cd0a023
MD
1307 hammer_start_transaction(&trans, fdip->hmp);
1308
1309 /*
b3deaf57
MD
1310 * Remove tncp from the target directory and then link ip as
1311 * tncp. XXX pass trans to dounlink
42c7d26b
MD
1312 *
1313 * Force the inode sync-time to match the transaction so it is
1314 * in-sync with the creation of the target directory entry.
8cd0a023 1315 */
b84de5af 1316 error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
42c7d26b 1317 if (error == 0 || error == ENOENT) {
b3deaf57 1318 error = hammer_ip_add_directory(&trans, tdip, tncp, ip);
42c7d26b
MD
1319 if (error == 0) {
1320 ip->ino_data.parent_obj_id = tdip->obj_id;
47637bff 1321 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
42c7d26b
MD
1322 }
1323 }
b3deaf57
MD
1324 if (error)
1325 goto failed; /* XXX */
8cd0a023
MD
1326
1327 /*
1328 * Locate the record in the originating directory and remove it.
1329 *
1330 * Calculate the namekey and setup the key range for the scan. This
1331 * works kinda like a chained hash table where the lower 32 bits
1332 * of the namekey synthesize the chain.
1333 *
1334 * The key range is inclusive of both key_beg and key_end.
1335 */
1336 namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
6a37e7e4 1337retry:
4e17f465 1338 hammer_init_cursor(&trans, &cursor, &fdip->cache[0], fdip);
2f85fa4d 1339 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
8cd0a023
MD
1340 cursor.key_beg.obj_id = fdip->obj_id;
1341 cursor.key_beg.key = namekey;
d5530d22 1342 cursor.key_beg.create_tid = 0;
8cd0a023
MD
1343 cursor.key_beg.delete_tid = 0;
1344 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1345 cursor.key_beg.obj_type = 0;
1346
1347 cursor.key_end = cursor.key_beg;
1348 cursor.key_end.key |= 0xFFFFFFFFULL;
d5530d22
MD
1349 cursor.asof = fdip->obj_asof;
1350 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
8cd0a023
MD
1351
1352 /*
1353 * Scan all matching records (the chain), locate the one matching
a89aec1b 1354 * the requested path component.
8cd0a023
MD
1355 *
1356 * The hammer_ip_*() functions merge in-memory records with on-disk
1357 * records for the purposes of the search.
1358 */
4e17f465 1359 error = hammer_ip_first(&cursor);
a89aec1b 1360 while (error == 0) {
8cd0a023
MD
1361 if (hammer_ip_resolve_data(&cursor) != 0)
1362 break;
11ad5ade
MD
1363 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
1364 KKASSERT(nlen > 0);
1365 if (fncp->nc_nlen == nlen &&
1366 bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
8cd0a023
MD
1367 break;
1368 }
a89aec1b 1369 error = hammer_ip_next(&cursor);
8cd0a023 1370 }
8cd0a023
MD
1371
1372 /*
1373 * If all is ok we have to get the inode so we can adjust nlinks.
6a37e7e4
MD
1374 *
1375 * WARNING: hammer_ip_del_directory() may have to terminate the
1376 * cursor to avoid a recursion. It's ok to call hammer_done_cursor()
1377 * twice.
8cd0a023 1378 */
9944ae54 1379 if (error == 0)
6a37e7e4 1380 error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
b84de5af
MD
1381
1382 /*
1383 * XXX A deadlock here will break rename's atomicity for the purposes
1384 * of crash recovery.
1385 */
1386 if (error == EDEADLK) {
b84de5af 1387 hammer_done_cursor(&cursor);
b84de5af
MD
1388 goto retry;
1389 }
1390
1391 /*
1392 * Cleanup and tell the kernel that the rename succeeded.
1393 */
c0ade690 1394 hammer_done_cursor(&cursor);
6a37e7e4
MD
1395 if (error == 0)
1396 cache_rename(ap->a_fnch, ap->a_tnch);
b84de5af 1397
b3deaf57 1398failed:
b84de5af 1399 hammer_done_transaction(&trans);
8cd0a023 1400 return (error);
427e5fc6
MD
1401}
1402
66325755
MD
1403/*
1404 * hammer_vop_nrmdir { nch, dvp, cred }
1405 */
427e5fc6
MD
1406static
1407int
66325755 1408hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
427e5fc6 1409{
b84de5af 1410 struct hammer_transaction trans;
e63644f0 1411 struct hammer_inode *dip;
b84de5af
MD
1412 int error;
1413
e63644f0
MD
1414 dip = VTOI(ap->a_dvp);
1415
1416 if (hammer_nohistory(dip) == 0 &&
1417 (error = hammer_checkspace(dip->hmp)) != 0) {
1418 return (error);
1419 }
1420
1421 hammer_start_transaction(&trans, dip->hmp);
b84de5af
MD
1422 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
1423 hammer_done_transaction(&trans);
1424
1425 return (error);
427e5fc6
MD
1426}
1427
66325755
MD
1428/*
1429 * hammer_vop_setattr { vp, vap, cred }
1430 */
427e5fc6
MD
1431static
1432int
66325755 1433hammer_vop_setattr(struct vop_setattr_args *ap)
427e5fc6 1434{
8cd0a023
MD
1435 struct hammer_transaction trans;
1436 struct vattr *vap;
1437 struct hammer_inode *ip;
1438 int modflags;
1439 int error;
d5ef456e 1440 int truncating;
b84de5af 1441 off_t aligned_size;
8cd0a023 1442 u_int32_t flags;
8cd0a023
MD
1443
1444 vap = ap->a_vap;
1445 ip = ap->a_vp->v_data;
1446 modflags = 0;
1447
1448 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1449 return(EROFS);
d113fda1
MD
1450 if (ip->flags & HAMMER_INODE_RO)
1451 return (EROFS);
e63644f0
MD
1452 if (hammer_nohistory(ip) == 0 &&
1453 (error = hammer_checkspace(ip->hmp)) != 0) {
1454 return (error);
1455 }
8cd0a023
MD
1456
1457 hammer_start_transaction(&trans, ip->hmp);
1458 error = 0;
1459
1460 if (vap->va_flags != VNOVAL) {
1461 flags = ip->ino_data.uflags;
1462 error = vop_helper_setattr_flags(&flags, vap->va_flags,
1463 hammer_to_unix_xid(&ip->ino_data.uid),
1464 ap->a_cred);
1465 if (error == 0) {
1466 if (ip->ino_data.uflags != flags) {
1467 ip->ino_data.uflags = flags;
1468 modflags |= HAMMER_INODE_DDIRTY;
1469 }
1470 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
1471 error = 0;
1472 goto done;
1473 }
1474 }
1475 goto done;
1476 }
1477 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
1478 error = EPERM;
1479 goto done;
1480 }
7538695e
MD
1481 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
1482 mode_t cur_mode = ip->ino_data.mode;
1483 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
1484 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
1485 uuid_t uuid_uid;
1486 uuid_t uuid_gid;
1487
1488 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
1489 ap->a_cred,
1490 &cur_uid, &cur_gid, &cur_mode);
1491 if (error == 0) {
1492 hammer_guid_to_uuid(&uuid_uid, cur_uid);
1493 hammer_guid_to_uuid(&uuid_gid, cur_gid);
1494 if (bcmp(&uuid_uid, &ip->ino_data.uid,
1495 sizeof(uuid_uid)) ||
1496 bcmp(&uuid_gid, &ip->ino_data.gid,
1497 sizeof(uuid_gid)) ||
1498 ip->ino_data.mode != cur_mode
1499 ) {
1500 ip->ino_data.uid = uuid_uid;
1501 ip->ino_data.gid = uuid_gid;
1502 ip->ino_data.mode = cur_mode;
1503 }
8cd0a023
MD
1504 modflags |= HAMMER_INODE_DDIRTY;
1505 }
1506 }
11ad5ade 1507 while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
8cd0a023
MD
1508 switch(ap->a_vp->v_type) {
1509 case VREG:
11ad5ade 1510 if (vap->va_size == ip->ino_data.size)
d5ef456e 1511 break;
b84de5af
MD
1512 /*
1513 * XXX break atomicity, we can deadlock the backend
1514 * if we do not release the lock. Probably not a
1515 * big deal here.
1516 */
11ad5ade 1517 if (vap->va_size < ip->ino_data.size) {
c0ade690
MD
1518 vtruncbuf(ap->a_vp, vap->va_size,
1519 HAMMER_BUFSIZE);
d5ef456e
MD
1520 truncating = 1;
1521 } else {
c0ade690 1522 vnode_pager_setsize(ap->a_vp, vap->va_size);
d5ef456e 1523 truncating = 0;
c0ade690 1524 }
11ad5ade
MD
1525 ip->ino_data.size = vap->va_size;
1526 modflags |= HAMMER_INODE_DDIRTY;
76376933 1527 aligned_size = (vap->va_size + HAMMER_BUFMASK) &
b84de5af 1528 ~HAMMER_BUFMASK64;
d5ef456e 1529
b84de5af
MD
1530 /*
1531 * on-media truncation is cached in the inode until
1532 * the inode is synchronized.
1533 */
d5ef456e 1534 if (truncating) {
47637bff 1535 hammer_ip_frontend_trunc(ip, vap->va_size);
0832c9bb
MD
1536 hammer_update_rsv_databufs(ip);
1537#ifdef DEBUG_TRUNCATE
1538 if (HammerTruncIp == NULL)
1539 HammerTruncIp = ip;
1540#endif
b84de5af
MD
1541 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
1542 ip->flags |= HAMMER_INODE_TRUNCATED;
1543 ip->trunc_off = vap->va_size;
0832c9bb
MD
1544#ifdef DEBUG_TRUNCATE
1545 if (ip == HammerTruncIp)
1546 kprintf("truncate1 %016llx\n", ip->trunc_off);
1547#endif
b84de5af
MD
1548 } else if (ip->trunc_off > vap->va_size) {
1549 ip->trunc_off = vap->va_size;
0832c9bb
MD
1550#ifdef DEBUG_TRUNCATE
1551 if (ip == HammerTruncIp)
1552 kprintf("truncate2 %016llx\n", ip->trunc_off);
1553#endif
1554 } else {
1555#ifdef DEBUG_TRUNCATE
1556 if (ip == HammerTruncIp)
1557 kprintf("truncate3 %016llx (ignored)\n", vap->va_size);
1558#endif
b84de5af 1559 }
d5ef456e 1560 }
b84de5af 1561
d5ef456e
MD
1562 /*
1563 * If truncating we have to clean out a portion of
b84de5af
MD
1564 * the last block on-disk. We do this in the
1565 * front-end buffer cache.
d5ef456e 1566 */
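			/*
			 * The buffer that still spans the new EOF is read
			 * back in, the bytes from the new EOF to the end of
			 * that HAMMER_BUFSIZE buffer are zeroed, and the
			 * buffer is pushed out as a delayed write so no
			 * stale data lingers past the truncation point.
			 */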
b84de5af 1567 if (truncating && vap->va_size < aligned_size) {
d5ef456e
MD
1568 struct buf *bp;
1569 int offset;
1570
47637bff
MD
1571 aligned_size -= HAMMER_BUFSIZE;
1572
d5ef456e 1573 offset = vap->va_size & HAMMER_BUFMASK;
47637bff 1574 error = bread(ap->a_vp, aligned_size,
d5ef456e 1575 HAMMER_BUFSIZE, &bp);
47637bff 1576 hammer_ip_frontend_trunc(ip, aligned_size);
d5ef456e
MD
1577 if (error == 0) {
1578 bzero(bp->b_data + offset,
1579 HAMMER_BUFSIZE - offset);
1580 bdwrite(bp);
1581 } else {
47637bff 1582 kprintf("ERROR %d\n", error);
d5ef456e
MD
1583 brelse(bp);
1584 }
1585 }
76376933 1586 break;
8cd0a023 1587 case VDATABASE:
b84de5af
MD
1588 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
1589 ip->flags |= HAMMER_INODE_TRUNCATED;
1590 ip->trunc_off = vap->va_size;
1591 } else if (ip->trunc_off > vap->va_size) {
1592 ip->trunc_off = vap->va_size;
1593 }
47637bff 1594 hammer_ip_frontend_trunc(ip, vap->va_size);
11ad5ade
MD
1595 ip->ino_data.size = vap->va_size;
1596 modflags |= HAMMER_INODE_DDIRTY;
8cd0a023
MD
1597 break;
1598 default:
1599 error = EINVAL;
1600 goto done;
1601 }
d26d0ae9 1602 break;
8cd0a023
MD
1603 }
1604 if (vap->va_atime.tv_sec != VNOVAL) {
11ad5ade 1605 ip->ino_leaf.atime =
8cd0a023
MD
1606 hammer_timespec_to_transid(&vap->va_atime);
1607 modflags |= HAMMER_INODE_ITIMES;
1608 }
1609 if (vap->va_mtime.tv_sec != VNOVAL) {
11ad5ade 1610 ip->ino_data.mtime =
8cd0a023
MD
1611 hammer_timespec_to_transid(&vap->va_mtime);
1612 modflags |= HAMMER_INODE_ITIMES;
98f7132d 1613 modflags |= HAMMER_INODE_DDIRTY; /* XXX mtime */
8cd0a023
MD
1614 }
1615 if (vap->va_mode != (mode_t)VNOVAL) {
7538695e
MD
1616 mode_t cur_mode = ip->ino_data.mode;
1617 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
1618 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
1619
1620 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
1621 cur_uid, cur_gid, &cur_mode);
1622 if (error == 0 && ip->ino_data.mode != cur_mode) {
1623 ip->ino_data.mode = cur_mode;
8cd0a023
MD
1624 modflags |= HAMMER_INODE_DDIRTY;
1625 }
1626 }
1627done:
b84de5af 1628 if (error == 0)
47637bff 1629 hammer_modify_inode(ip, modflags);
b84de5af 1630 hammer_done_transaction(&trans);
8cd0a023 1631 return (error);
427e5fc6
MD
1632}
1633
66325755
MD
1634/*
1635 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1636 */
427e5fc6
MD
1637static
1638int
66325755 1639hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
427e5fc6 1640{
7a04d74f
MD
1641 struct hammer_transaction trans;
1642 struct hammer_inode *dip;
1643 struct hammer_inode *nip;
1644 struct nchandle *nch;
1645 hammer_record_t record;
1646 int error;
1647 int bytes;
1648
1649 ap->a_vap->va_type = VLNK;
1650
1651 nch = ap->a_nch;
1652 dip = VTOI(ap->a_dvp);
1653
d113fda1
MD
1654 if (dip->flags & HAMMER_INODE_RO)
1655 return (EROFS);
e63644f0
MD
1656 if ((error = hammer_checkspace(dip->hmp)) != 0)
1657 return (error);
d113fda1 1658
7a04d74f
MD
1659 /*
1660 * Create a transaction to cover the operations we perform.
1661 */
1662 hammer_start_transaction(&trans, dip->hmp);
1663
1664 /*
1665 * Create a new filesystem object of the requested type. The
1666 * returned inode will be referenced but not locked.
1667 */
1668
1669 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
1670 if (error) {
b84de5af 1671 hammer_done_transaction(&trans);
7a04d74f
MD
1672 *ap->a_vpp = NULL;
1673 return (error);
1674 }
1675
7a04d74f
MD
1676 /*
1677 * Add a record representing the symlink. symlink stores the link
1678 * as pure data, not a string, and is not \0 terminated.
1679 */
1680 if (error == 0) {
7a04d74f
MD
1681 bytes = strlen(ap->a_target);
1682
2f85fa4d
MD
1683 if (bytes <= HAMMER_INODE_BASESYMLEN) {
1684 bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
1685 } else {
1686 record = hammer_alloc_mem_record(nip, bytes);
1687 record->type = HAMMER_MEM_RECORD_GENERAL;
1688
1689 record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
1690 record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
1691 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
1692 record->leaf.data_len = bytes;
1693 KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
1694 bcopy(ap->a_target, record->data->symlink.name, bytes);
1695 error = hammer_ip_add_record(&trans, record);
1696 }
42c7d26b
MD
1697
1698 /*
1699 * Set the file size to the length of the link.
1700 */
1701 if (error == 0) {
11ad5ade 1702 nip->ino_data.size = bytes;
47637bff 1703 hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
42c7d26b 1704 }
7a04d74f 1705 }
1f07f686
MD
1706 if (error == 0)
1707 error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
7a04d74f
MD
1708
1709 /*
1710 * Finish up.
1711 */
1712 if (error) {
1713 hammer_rel_inode(nip, 0);
7a04d74f
MD
1714 *ap->a_vpp = NULL;
1715 } else {
e8599db1 1716 error = hammer_get_vnode(nip, ap->a_vpp);
7a04d74f
MD
1717 hammer_rel_inode(nip, 0);
1718 if (error == 0) {
1719 cache_setunresolved(ap->a_nch);
1720 cache_setvp(ap->a_nch, *ap->a_vpp);
1721 }
1722 }
b84de5af 1723 hammer_done_transaction(&trans);
7a04d74f 1724 return (error);
427e5fc6
MD
1725}
1726
66325755
MD
1727/*
1728 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
1729 */
427e5fc6
MD
1730static
1731int
66325755 1732hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
427e5fc6 1733{
b84de5af 1734 struct hammer_transaction trans;
e63644f0 1735 struct hammer_inode *dip;
b84de5af
MD
1736 int error;
1737
e63644f0
MD
1738 dip = VTOI(ap->a_dvp);
1739
1740 if (hammer_nohistory(dip) == 0 &&
1741 (error = hammer_checkspace(dip->hmp)) != 0) {
1742 return (error);
1743 }
1744
1745 hammer_start_transaction(&trans, dip->hmp);
b84de5af
MD
1746 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
1747 ap->a_cred, ap->a_flags);
1748 hammer_done_transaction(&trans);
1749
1750 return (error);
427e5fc6
MD
1751}
1752
7dc57964
MD
1753/*
1754 * hammer_vop_ioctl { vp, command, data, fflag, cred }
1755 */
1756static
1757int
1758hammer_vop_ioctl(struct vop_ioctl_args *ap)
1759{
1760 struct hammer_inode *ip = ap->a_vp->v_data;
1761
1762 return(hammer_ioctl(ip, ap->a_command, ap->a_data,
1763 ap->a_fflag, ap->a_cred));
1764}
1765
513ca7d7
MD
1766static
1767int
1768hammer_vop_mountctl(struct vop_mountctl_args *ap)
1769{
1770 struct mount *mp;
1771 int error;
1772
1773 mp = ap->a_head.a_ops->head.vv_mount;
1774
1775 switch(ap->a_op) {
1776 case MOUNTCTL_SET_EXPORT:
1777 if (ap->a_ctllen != sizeof(struct export_args))
1778 error = EINVAL;
else
1779 error = hammer_vfs_export(mp, ap->a_op,
1780 (const struct export_args *)ap->a_ctl);
1781 break;
1782 default:
1783 error = journal_mountctl(ap);
1784 break;
1785 }
1786 return(error);
1787}
1788
66325755
MD
1789/*
1790 * hammer_vop_strategy { vp, bio }
8cd0a023
MD
1791 *
1792 * Strategy call, used for regular file read & write only. Note that the
1793 * bp may represent a cluster.
1794 *
1795 * To simplify operation and allow better optimizations in the future,
1796 * this code does not make any assumptions with regards to buffer alignment
1797 * or size.
66325755 1798 */
427e5fc6
MD
1799static
1800int
66325755 1801hammer_vop_strategy(struct vop_strategy_args *ap)
427e5fc6 1802{
8cd0a023
MD
1803 struct buf *bp;
1804 int error;
1805
1806 bp = ap->a_bio->bio_buf;
1807
1808 switch(bp->b_cmd) {
1809 case BUF_CMD_READ:
1810 error = hammer_vop_strategy_read(ap);
1811 break;
1812 case BUF_CMD_WRITE:
1813 error = hammer_vop_strategy_write(ap);
1814 break;
1815 default:
059819e3
MD
1816 bp->b_error = error = EINVAL;
1817 bp->b_flags |= B_ERROR;
1818 biodone(ap->a_bio);
8cd0a023
MD
1819 break;
1820 }
8cd0a023 1821 return (error);
427e5fc6
MD
1822}
1823
8cd0a023
MD
1824/*
1825 * Read from a regular file. Iterate the related records and fill in the
1826 * BIO/BUF. Gaps are zero-filled.
1827 *
1828 * The support code in hammer_object.c should be used to deal with mixed
1829 * in-memory and on-disk records.
1830 *
1831 * XXX atime update
1832 */
1833static
1834int
1835hammer_vop_strategy_read(struct vop_strategy_args *ap)
1836{
36f82b23
MD
1837 struct hammer_transaction trans;
1838 struct hammer_inode *ip;
8cd0a023 1839 struct hammer_cursor cursor;
8cd0a023
MD
1840 hammer_base_elm_t base;
1841 struct bio *bio;
1842 struct buf *bp;
1843 int64_t rec_offset;
a89aec1b 1844 int64_t ran_end;
195c19a1 1845 int64_t tmp64;
8cd0a023
MD
1846 int error;
1847 int boff;
1848 int roff;
1849 int n;
1850
1851 bio = ap->a_bio;
1852 bp = bio->bio_buf;
36f82b23 1853 ip = ap->a_vp->v_data;
8cd0a023 1854
36f82b23 1855 hammer_simple_transaction(&trans, ip->hmp);
47637bff 1856 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
8cd0a023
MD
1857
1858 /*
1859 * Key range (begin and end inclusive) to scan. Note that the key's
c0ade690
MD
1860 * stored in the actual records represent BASE+LEN, not BASE. The
1861 * first record containing bio_offset will have a key > bio_offset.
8cd0a023 1862 */
2f85fa4d 1863 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
8cd0a023 1864 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1865 cursor.key_beg.create_tid = 0;
8cd0a023 1866 cursor.key_beg.delete_tid = 0;
8cd0a023 1867 cursor.key_beg.obj_type = 0;
c0ade690 1868 cursor.key_beg.key = bio->bio_offset + 1;
d5530d22 1869 cursor.asof = ip->obj_asof;
47197d71 1870 cursor.flags |= HAMMER_CURSOR_ASOF | HAMMER_CURSOR_DATAEXTOK;
8cd0a023
MD
1871
1872 cursor.key_end = cursor.key_beg;
11ad5ade 1873 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
b84de5af 1874#if 0
11ad5ade 1875 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
a89aec1b
MD
1876 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
1877 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
1878 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
b84de5af
MD
1879 } else
1880#endif
1881 {
c0ade690 1882 ran_end = bio->bio_offset + bp->b_bufsize;
a89aec1b
MD
1883 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
1884 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
195c19a1
MD
1885 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
1886 if (tmp64 < ran_end)
a89aec1b
MD
1887 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1888 else
7f7c1f84 1889 cursor.key_end.key = ran_end + MAXPHYS + 1;
a89aec1b 1890 }
d26d0ae9 1891 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
8cd0a023 1892
4e17f465 1893 error = hammer_ip_first(&cursor);
8cd0a023
MD
1894 boff = 0;
1895
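/*
 * boff tracks how much of the bio's buffer has been filled so far.
 */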
a89aec1b 1896 while (error == 0) {
47637bff
MD
1897 /*
1898 * Get the base file offset of the record. The key for
 1899 * data records is (base + bytes) rather than (base).
1900 */
11ad5ade 1901 base = &cursor.leaf->base;
11ad5ade 1902 rec_offset = base->key - cursor.leaf->data_len;
8cd0a023 1903
66325755 1904 /*
a89aec1b 1905 * Calculate the gap, if any, and zero-fill it.
1fef775e
MD
1906 *
 1907 * n is the offset of the start of the record versus our
1908 * current seek offset in the bio.
66325755 1909 */
8cd0a023
MD
1910 n = (int)(rec_offset - (bio->bio_offset + boff));
1911 if (n > 0) {
a89aec1b
MD
1912 if (n > bp->b_bufsize - boff)
1913 n = bp->b_bufsize - boff;
8cd0a023
MD
1914 bzero((char *)bp->b_data + boff, n);
1915 boff += n;
1916 n = 0;
66325755 1917 }
8cd0a023
MD
1918
1919 /*
1920 * Calculate the data offset in the record and the number
1921 * of bytes we can copy.
a89aec1b 1922 *
1fef775e
MD
1923 * There are two degenerate cases. First, boff may already
 1924 * be at bp->b_bufsize. Second, the data offset within
1925 * the record may exceed the record's size.
8cd0a023
MD
1926 */
1927 roff = -n;
b84de5af 1928 rec_offset += roff;
11ad5ade 1929 n = cursor.leaf->data_len - roff;
1fef775e
MD
1930 if (n <= 0) {
1931 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
1932 n = 0;
1933 } else if (n > bp->b_bufsize - boff) {
8cd0a023 1934 n = bp->b_bufsize - boff;
1fef775e 1935 }
059819e3 1936
b84de5af 1937 /*
47637bff
MD
1938 * Deal with cached truncations. This cool bit of code
1939 * allows truncate()/ftruncate() to avoid having to sync
1940 * the file.
1941 *
1942 * If the frontend is truncated then all backend records are
1943 * subject to the frontend's truncation.
1944 *
1945 * If the backend is truncated then backend records on-disk
1946 * (but not in-memory) are subject to the backend's
1947 * truncation. In-memory records owned by the backend
1948 * represent data written after the truncation point on the
1949 * backend and must not be truncated.
1950 *
1951 * Truncate operations deal with frontend buffer cache
1952 * buffers and frontend-owned in-memory records synchronously.
b84de5af 1953 */
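/*
 * Example with hypothetical numbers: if trunc_off is 8192 and the
 * record covers offsets [4096, 16384), n is clamped to 4096 so no
 * data past the truncation point is copied; the remainder of the
 * buffer is zero-filled.
 */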
47637bff
MD
1954 if (ip->flags & HAMMER_INODE_TRUNCATED) {
1955 if (hammer_cursor_ondisk(&cursor) ||
1956 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
1957 if (ip->trunc_off <= rec_offset)
1958 n = 0;
1959 else if (ip->trunc_off < rec_offset + n)
1960 n = (int)(ip->trunc_off - rec_offset);
1961 }
1962 }
1963 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1964 if (hammer_cursor_ondisk(&cursor)) {
1965 if (ip->sync_trunc_off <= rec_offset)
1966 n = 0;
1967 else if (ip->sync_trunc_off < rec_offset + n)
1968 n = (int)(ip->sync_trunc_off - rec_offset);
1969 }
1970 }
b84de5af
MD
1971
1972 /*
47637bff
MD
1973 * Try to issue a direct read into our bio if possible,
1974 * otherwise resolve the element data into a hammer_buffer
1975 * and copy.
1976 *
1977 * WARNING: If we hit the else clause.
b84de5af 1978 */
cebe9493 1979 if (roff == 0 && boff == 0 && n == bp->b_bufsize &&
47637bff
MD
1980 (rec_offset & HAMMER_BUFMASK) == 0) {
1981 error = hammer_io_direct_read(trans.hmp, cursor.leaf,
1982 bio);
1983 goto done;
1984 } else if (n) {
1985 error = hammer_ip_resolve_data(&cursor);
1986 if (error == 0) {
1987 bcopy((char *)cursor.data + roff,
1988 (char *)bp->b_data + boff, n);
1989 }
b84de5af 1990 }
47637bff
MD
1991 if (error)
1992 break;
1993
1994 /*
1995 * Iterate until we have filled the request.
1996 */
1997 boff += n;
8cd0a023 1998 if (boff == bp->b_bufsize)
66325755 1999 break;
a89aec1b 2000 error = hammer_ip_next(&cursor);
66325755
MD
2001 }
2002
2003 /*
8cd0a023 2004 * There may have been a gap after the last record
66325755 2005 */
8cd0a023
MD
2006 if (error == ENOENT)
2007 error = 0;
2008 if (error == 0 && boff != bp->b_bufsize) {
7f7c1f84 2009 KKASSERT(boff < bp->b_bufsize);
8cd0a023
MD
2010 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2011 /* boff = bp->b_bufsize; */
2012 }
2013 bp->b_resid = 0;
059819e3
MD
2014 bp->b_error = error;
2015 if (error)
2016 bp->b_flags |= B_ERROR;
2017 biodone(ap->a_bio);
47637bff
MD
2018
2019done:
2020 if (cursor.node)
2021 hammer_cache_node(cursor.node, &ip->cache[1]);
2022 hammer_done_cursor(&cursor);
2023 hammer_done_transaction(&trans);
8cd0a023
MD
2024 return(error);
2025}
2026
2027/*
059819e3
MD
2028 * Write to a regular file. Because this is a strategy call the OS is
2029 * trying to actually sync data to the media. HAMMER can only flush
2030 * the entire inode (so the TID remains properly synchronized).
8cd0a023 2031 *
059819e3
MD
2032 * Basically all we do here is place the bio on the inode's flush queue
2033 * and activate the flusher.
8cd0a023
MD
2034 */
2035static
2036int
2037hammer_vop_strategy_write(struct vop_strategy_args *ap)
2038{
47637bff 2039 hammer_record_t record;
af209b0f 2040 hammer_mount_t hmp;
8cd0a023
MD
2041 hammer_inode_t ip;
2042 struct bio *bio;
2043 struct buf *bp;
0832c9bb
MD
2044 int bytes;
2045 int error;
8cd0a023
MD
2046
2047 bio = ap->a_bio;
2048 bp = bio->bio_buf;
2049 ip = ap->a_vp->v_data;
af209b0f 2050 hmp = ip->hmp;
d113fda1 2051
059819e3
MD
2052 if (ip->flags & HAMMER_INODE_RO) {
2053 bp->b_error = EROFS;
2054 bp->b_flags |= B_ERROR;
2055 biodone(ap->a_bio);
e63644f0 2056 hammer_cleanup_write_io(ip);
059819e3
MD
2057 return(EROFS);
2058 }
b84de5af 2059
29ce0677
MD
2060 /*
2061 * Interlock with inode destruction (no in-kernel or directory
2062 * topology visibility). If we queue new IO while trying to
2063 * destroy the inode we can deadlock the vtrunc call in
2064 * hammer_inode_unloadable_check().
2065 */
2066 if (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
2067 bp->b_resid = 0;
2068 biodone(ap->a_bio);
e63644f0 2069 hammer_cleanup_write_io(ip);
29ce0677
MD
2070 return(0);
2071 }
2072
b84de5af 2073 /*
47637bff
MD
2074 * Attempt to reserve space and issue a direct-write from the
2075 * front-end. If we can't we will queue the BIO to the flusher.
0832c9bb
MD
2076 * The bulk/direct-write code will still bcopy if writing less
 2077 * than full-sized blocks (at the end of a file).
47637bff
MD
2078 *
 2079 * If we can, the I/O is issued and an in-memory record will
0832c9bb 2080 * be installed to reference the storage until the flusher can get to
47637bff
MD
2081 * it.
2082 *
2083 * Since we own the high level bio the front-end will not try to
0832c9bb 2084 * do a direct-read until the write completes.
47637bff 2085 */
0832c9bb
MD
2086 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
2087 KKASSERT(bio->bio_offset < ip->ino_data.size);
2088 if (bio->bio_offset + bp->b_bufsize <= ip->ino_data.size)
2089 bytes = bp->b_bufsize;
b84de5af 2090 else
0832c9bb
MD
2091 bytes = (int)(ip->ino_data.size - bio->bio_offset);
2092
2093 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
2094 bytes, &error);
2095 if (record) {
af209b0f 2096 hammer_io_direct_write(hmp, &record->leaf, bio);
0832c9bb 2097 hammer_rel_mem_record(record);
af209b0f
MD
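/*
 * Signal the flusher when too many in-memory records have been
 * reserved, either filesystem-wide or on this inode.
 */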
2098 if (hmp->rsv_recs > hammer_limit_recs &&
2099 ip->rsv_recs > hammer_limit_irecs / 10) {
0832c9bb 2100 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
af209b0f
MD
2101 } else if (ip->rsv_recs > hammer_limit_irecs) {
2102 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2103 }
0832c9bb
MD
2104 } else {
2105 bp->b_error = error;
2106 bp->b_flags |= B_ERROR;
2107 biodone(ap->a_bio);
2108 }
2109 hammer_cleanup_write_io(ip);
2110 return(error);
059819e3
MD
2111}
2112
2113/*
47637bff
MD
2114 * Clean-up after disposing of a dirty frontend buffer's data.
2115 * This is somewhat heuristical so try to be robust.
059819e3 2116 */
0832c9bb 2117static void
e63644f0
MD
2118hammer_cleanup_write_io(hammer_inode_t ip)
2119{
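/*
 * Release one reserved dirty-data buffer from the per-inode and
 * per-mount counts.
 */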
2120 if (ip->rsv_databufs) {
2121 --ip->rsv_databufs;
2122 --ip->hmp->rsv_databufs;
2123 }
2124}
2125
0832c9bb
MD
2126/*
 2127 * We can lose track of dirty buffer cache buffers if we truncate; this
 2128 * routine resynchronizes the count.
2129 */
2130static
2131void
2132hammer_update_rsv_databufs(hammer_inode_t ip)
2133{
2134 struct buf *bp;
2135 int delta;
2136 int n;
2137
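/*
 * Count the dirty buffers currently queued on the vnode (if any)
 * and adjust the reservations to match.
 */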
2138 if (ip->vp) {
2139 n = 0;
2140 RB_FOREACH(bp, buf_rb_tree, &ip->vp->v_rbdirty_tree) {
2141 ++n;
2142 }
2143 } else {
2144 n = 0;
2145 }
2146 delta = n - ip->rsv_databufs;
2147 ip->rsv_databufs += delta;
2148 ip->hmp->rsv_databufs += delta;
2149}
2150
8cd0a023
MD
2151/*
2152 * dounlink - disconnect a directory entry
2153 *
2154 * XXX whiteout support not really in yet
2155 */
2156static int
b84de5af
MD
2157hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
2158 struct vnode *dvp, struct ucred *cred, int flags)
8cd0a023 2159{
8cd0a023
MD
2160 struct namecache *ncp;
2161 hammer_inode_t dip;
2162 hammer_inode_t ip;
8cd0a023 2163 struct hammer_cursor cursor;
8cd0a023 2164 int64_t namekey;
11ad5ade 2165 int nlen, error;
8cd0a023
MD
2166
2167 /*
 2168 * Calculate the namekey and set up the key range for the scan. This
2169 * works kinda like a chained hash table where the lower 32 bits
2170 * of the namekey synthesize the chain.
2171 *
2172 * The key range is inclusive of both key_beg and key_end.
2173 */
2174 dip = VTOI(dvp);
2175 ncp = nch->ncp;
d113fda1
MD
2176
2177 if (dip->flags & HAMMER_INODE_RO)
2178 return (EROFS);
2179
6a37e7e4
MD
2180 namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
2181retry:
4e17f465 2182 hammer_init_cursor(trans, &cursor, &dip->cache[0], dip);
2f85fa4d 2183 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
8cd0a023
MD
2184 cursor.key_beg.obj_id = dip->obj_id;
2185 cursor.key_beg.key = namekey;
d5530d22 2186 cursor.key_beg.create_tid = 0;
8cd0a023
MD
2187 cursor.key_beg.delete_tid = 0;
2188 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2189 cursor.key_beg.obj_type = 0;
2190
2191 cursor.key_end = cursor.key_beg;
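/*
 * Wildcard the low 32 bits of the key so the scan covers the whole
 * hash chain for this directory entry name.
 */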
2192 cursor.key_end.key |= 0xFFFFFFFFULL;
d5530d22
MD
2193 cursor.asof = dip->obj_asof;
2194 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
8cd0a023 2195
8cd0a023
MD
2196 /*
 2197 * Scan all matching records (the chain) and locate the one matching
 2198 * the requested path component. error contains the
 2199 * error code on search termination and could be 0, ENOENT, or
 2200 * something else.
2201 *
2202 * The hammer_ip_*() functions merge in-memory records with on-disk
2203 * records for the purposes of the search.
2204 */
4e17f465
MD
2205 error = hammer_ip_first(&cursor);
2206
a89aec1b
MD
2207 while (error == 0) {
2208 error = hammer_ip_resolve_data(&cursor);
2209 if (error)
66325755 2210 break;
11ad5ade
MD
2211 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2212 KKASSERT(nlen > 0);
2213 if (ncp->nc_nlen == nlen &&
2214 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
66325755
MD
2215 break;
2216 }
a89aec1b 2217 error = hammer_ip_next(&cursor);
66325755 2218 }
8cd0a023
MD
2219
2220 /*
2221 * If all is ok we have to get the inode so we can adjust nlinks.
269c5eab
MD
2222 * To avoid a deadlock with the flusher we must release the inode
2223 * lock on the directory when acquiring the inode for the entry.
b3deaf57
MD
2224 *
2225 * If the target is a directory, it must be empty.
8cd0a023 2226 */
66325755 2227 if (error == 0) {
269c5eab 2228 hammer_unlock(&cursor.ip->lock);
b84de5af 2229 ip = hammer_get_inode(trans, &dip->cache[1],
11ad5ade 2230 cursor.data->entry.obj_id,
d113fda1 2231 dip->hmp->asof, 0, &error);
269c5eab 2232 hammer_lock_sh(&cursor.ip->lock);
46fe7ae1 2233 if (error == ENOENT) {
11ad5ade 2234 kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
10a5d1ba 2235 Debugger("ENOENT unlinking object that should exist");
46fe7ae1 2236 }
1f07f686
MD
2237
2238 /*
2239 * If we are trying to remove a directory the directory must
2240 * be empty.
2241 *
2242 * WARNING: hammer_ip_check_directory_empty() may have to
2243 * terminate the cursor to avoid a deadlock. It is ok to
2244 * call hammer_done_cursor() twice.
2245 */
11ad5ade 2246 if (error == 0 && ip->ino_data.obj_type ==
b3deaf57 2247 HAMMER_OBJTYPE_DIRECTORY) {
98f7132d 2248 error = hammer_ip_check_directory_empty(trans, ip);
b3deaf57 2249 }
1f07f686 2250
6a37e7e4 2251 /*
1f07f686
MD
2252 * Delete the directory entry.
2253 *
6a37e7e4 2254 * WARNING: hammer_ip_del_directory() may have to terminate
1f07f686 2255 * the cursor to avoid a deadlock. It is ok to call
6a37e7e4
MD
2256 * hammer_done_cursor() twice.
2257 */
b84de5af 2258 if (error == 0) {
b84de5af
MD
2259 error = hammer_ip_del_directory(trans, &cursor,
2260 dip, ip);
b84de5af 2261 }
269c5eab 2262 hammer_done_cursor(&cursor);
8cd0a023
MD
2263 if (error == 0) {
2264 cache_setunresolved(nch);
2265 cache_setvp(nch, NULL);
2266 /* XXX locking */
2267 if (ip->vp)
2268 cache_inval_vp(ip->vp, CINV_DESTROY);
2269 }
af209b0f
MD
2270 if (ip)
2271 hammer_rel_inode(ip, 0);
269c5eab
MD
2272 } else {
2273 hammer_done_cursor(&cursor);
66325755 2274 }
6a37e7e4
MD
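/*
 * If the cursor had to be terminated to avoid a deadlock the whole
 * operation returns EDEADLK; restart it from scratch.
 */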
2275 if (error == EDEADLK)
2276 goto retry;
9c448776 2277
66325755 2278 return (error);
66325755
MD
2279}
2280
7a04d74f
MD
2281/************************************************************************
2282 * FIFO AND SPECFS OPS *
2283 ************************************************************************
2284 *
2285 */
2286
2287static int
2288hammer_vop_fifoclose (struct vop_close_args *ap)
2289{
2290 /* XXX update itimes */
2291 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
2292}
2293
2294static int
2295hammer_vop_fiforead (struct vop_read_args *ap)
2296{
2297 int error;
2298
2299 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2300 /* XXX update access time */
2301 return (error);
2302}
2303
2304static int
2305hammer_vop_fifowrite (struct vop_write_args *ap)
2306{
2307 int error;
2308
2309 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2310 /* XXX update access time */
2311 return (error);
2312}
2313
2314static int
2315hammer_vop_specclose (struct vop_close_args *ap)
2316{
2317 /* XXX update itimes */
2318 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2319}
2320
2321static int
2322hammer_vop_specread (struct vop_read_args *ap)
2323{
2324 /* XXX update access time */
2325 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2326}
2327
2328static int
2329hammer_vop_specwrite (struct vop_write_args *ap)
2330{
2331 /* XXX update last change time */
2332 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2333}
2334