/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.64 2008/06/10 08:06:28 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>
#include "hammer.h"

/*
 * USERFS VNOPS
 */
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_pathconf(struct vop_pathconf_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);

static int hammer_vop_specclose (struct vop_close_args *);
static int hammer_vop_specread (struct vop_read_args *);
static int hammer_vop_specwrite (struct vop_write_args *);

struct vop_ops hammer_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_fsync =		hammer_vop_fsync,
	.vop_getpages =		vop_stdgetpages,
	.vop_putpages =		vop_stdputpages,
	.vop_read =		hammer_vop_read,
	.vop_write =		hammer_vop_write,
	.vop_access =		hammer_vop_access,
	.vop_advlock =		hammer_vop_advlock,
	.vop_close =		hammer_vop_close,
	.vop_ncreate =		hammer_vop_ncreate,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_nresolve =		hammer_vop_nresolve,
	.vop_nlookupdotdot =	hammer_vop_nlookupdotdot,
	.vop_nlink =		hammer_vop_nlink,
	.vop_nmkdir =		hammer_vop_nmkdir,
	.vop_nmknod =		hammer_vop_nmknod,
	.vop_open =		hammer_vop_open,
	.vop_pathconf =		hammer_vop_pathconf,
	.vop_print =		hammer_vop_print,
	.vop_readdir =		hammer_vop_readdir,
	.vop_readlink =		hammer_vop_readlink,
	.vop_nremove =		hammer_vop_nremove,
	.vop_nrename =		hammer_vop_nrename,
	.vop_nrmdir =		hammer_vop_nrmdir,
	.vop_setattr =		hammer_vop_setattr,
	.vop_strategy =		hammer_vop_strategy,
	.vop_nsymlink =		hammer_vop_nsymlink,
	.vop_nwhiteout =	hammer_vop_nwhiteout,
	.vop_ioctl =		hammer_vop_ioctl,
	.vop_mountctl =		hammer_vop_mountctl
};

struct vop_ops hammer_spec_vops = {
	.vop_default =		spec_vnoperate,
	.vop_fsync =		hammer_vop_fsync,
	.vop_read =		hammer_vop_specread,
	.vop_write =		hammer_vop_specwrite,
	.vop_access =		hammer_vop_access,
	.vop_close =		hammer_vop_specclose,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_setattr =		hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
	.vop_default =		fifo_vnoperate,
	.vop_fsync =		hammer_vop_fsync,
	.vop_read =		hammer_vop_fiforead,
	.vop_write =		hammer_vop_fifowrite,
	.vop_access =		hammer_vop_access,
	.vop_close =		hammer_vop_fifoclose,
	.vop_getattr =		hammer_vop_getattr,
	.vop_inactive =		hammer_vop_inactive,
	.vop_reclaim =		hammer_vop_reclaim,
	.vop_setattr =		hammer_vop_setattr
};

#ifdef DEBUG_TRUNCATE
struct hammer_inode *HammerTruncIp;
#endif

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
			   struct vnode *dvp, struct ucred *cred, int flags);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
static void hammer_cleanup_write_io(hammer_inode_t ip);
static void hammer_update_rsv_databufs(hammer_inode_t ip);

#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *)
{
	return (VOCALL(&hammer_vnode_vops, ap));
}
#endif

/*
 * hammer_vop_fsync { vp, waitfor }
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

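	/*
	 * Kick off an inode flush, sync the vnode's dirty buffers, and only
	 * block waiting on the flusher when MNT_WAIT semantics are
	 * requested.
	 */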
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
	if (ap->a_waitfor == MNT_WAIT)
		hammer_wait_inode(ip);
	return (ip->error);
}

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
	struct hammer_transaction trans;
	hammer_inode_t ip;
	off_t offset;
	struct buf *bp;
	struct uio *uio;
	int error;
	int n;
	int seqcount;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;
	seqcount = ap->a_ioflag >> 16;

	hammer_start_transaction(&trans, ip->hmp);

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	uio = ap->a_uio;
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
		offset = uio->uio_offset & HAMMER_BUFMASK;
#if 0
		error = cluster_read(ap->a_vp, ip->ino_data.size,
				     uio->uio_offset - offset, HAMMER_BUFSIZE,
				     MAXBSIZE, seqcount, &bp);
#endif
		error = bread(ap->a_vp, uio->uio_offset - offset,
			      HAMMER_BUFSIZE, &bp);
		if (error) {
			brelse(bp);
			break;
		}
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		n = HAMMER_BUFSIZE - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ino_data.size - uio->uio_offset)
			n = (int)(ip->ino_data.size - uio->uio_offset);
		error = uiomove((char *)bp->b_data + offset, n, uio);
		if (error) {
			bqrelse(bp);
			break;
		}
		bqrelse(bp);
	}
	if ((ip->flags & HAMMER_INODE_RO) == 0 &&
	    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
		ip->ino_leaf.atime = trans.time;
		hammer_modify_inode(ip, HAMMER_INODE_ITIMES);
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct uio *uio;
	int rel_offset;
	off_t base_offset;
	struct buf *bp;
	int error;
	int n;
	int flags;
	int count;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;

	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, ip->hmp);
	uio = ap->a_uio;

	/*
	 * Check append mode
	 */
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = ip->ino_data.size;

	/*
	 * Check for illegal write offsets.  Valid range is 0...2^63-1
	 */
	if (uio->uio_offset < 0 || uio->uio_offset + uio->uio_resid <= 0) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	count = 0;
	while (uio->uio_resid > 0) {
		int fixsize = 0;

		if ((error = hammer_checkspace(trans.hmp)) != 0)
			break;

		/*
		 * Do not allow HAMMER to blow out the buffer cache.
		 *
		 * Do not allow HAMMER to blow out system memory by
		 * accumulating too many records.  Records are decoupled
		 * from the buffer cache.
		 *
		 * Always check at the beginning so separate writes are
		 * not able to bypass this code.
		 *
		 * WARNING: Cannot unlock vp when doing a NOCOPY write as
		 * part of a putpages operation.  Doing so could cause us
		 * to deadlock against the VM system when we try to re-lock.
		 */
		if ((count++ & 15) == 0) {
			if (uio->uio_segflg != UIO_NOCOPY) {
				vn_unlock(ap->a_vp);
				if ((ap->a_ioflag & IO_NOBWILL) == 0)
					bwillwrite();
			}
			if (ip->rsv_recs > hammer_limit_irecs) {
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
				hammer_wait_inode(ip);
			}
			if (uio->uio_segflg != UIO_NOCOPY)
				vn_lock(ap->a_vp, LK_EXCLUSIVE|LK_RETRY);
		}

		rel_offset = (int)(uio->uio_offset & HAMMER_BUFMASK);
		base_offset = uio->uio_offset & ~HAMMER_BUFMASK64;
		n = HAMMER_BUFSIZE - rel_offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (uio->uio_offset + n > ip->ino_data.size) {
			vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
			fixsize = 1;
		}

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ap->a_vp, base_offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ap->a_vp, base_offset,
					      HAMMER_BUFSIZE, &bp);
			}
		} else if (rel_offset == 0 && uio->uio_resid >= HAMMER_BUFSIZE) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ap->a_vp, base_offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else if (base_offset >= ip->ino_data.size) {
			/*
			 * If the base offset of the buffer is beyond the
			 * file EOF, we don't have to issue a read.
			 */
			bp = getblk(ap->a_vp, base_offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 */
			error = bread(ap->a_vp, base_offset,
				      HAMMER_BUFSIZE, &bp);
			if (error == 0)
				bheavy(bp);
		}
		if (error == 0) {
			error = uiomove((char *)bp->b_data + rel_offset,
					n, uio);
		}

		/*
		 * If we screwed up we have to undo any VM size changes we
		 * made.
		 */
		if (error) {
			brelse(bp);
			if (fixsize) {
				vtruncbuf(ap->a_vp, ip->ino_data.size,
					  HAMMER_BUFSIZE);
			}
			break;
		}
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		if (ip->ino_data.size < uio->uio_offset) {
			ip->ino_data.size = uio->uio_offset;
			flags = HAMMER_INODE_DDIRTY;
			vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
		} else {
			flags = 0;
		}
		ip->ino_data.mtime = trans.time;
		flags |= HAMMER_INODE_ITIMES | HAMMER_INODE_BUFS;
		flags |= HAMMER_INODE_DDIRTY;	/* XXX mtime */
		hammer_modify_inode(ip, flags);

		/*
		 * Try to keep track of cached dirty data.
		 */
		if ((bp->b_flags & B_DIRTY) == 0) {
			++ip->rsv_databufs;
			++ip->hmp->rsv_databufs;
		}

		/*
		 * Final buffer disposition.
		 */
		if (ap->a_ioflag & IO_SYNC) {
			bwrite(bp);
		} else if (ap->a_ioflag & IO_DIRECT) {
			bawrite(bp);
#if 1
		} else if ((ap->a_ioflag >> 16) == IO_SEQMAX &&
			   (uio->uio_offset & HAMMER_BUFMASK) == 0) {
			/*
			 * If seqcount indicates sequential operation and
			 * we just finished filling a buffer, push it out
			 * now to prevent the buffer cache from becoming
			 * too full, which would trigger non-optimal
			 * flushes.
			 */
			bawrite(bp);
#endif
		} else {
			bdwrite(bp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	uid = hammer_to_unix_xid(&ip->ino_data.uid);
	gid = hammer_to_unix_xid(&ip->ino_data.gid);

	error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
				  ip->ino_data.uflags);
	return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
	return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced and shared-locked to prevent
	 * it from being moved to the flusher.
	 */

	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hkprintf("hammer_create_inode error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error)
		hkprintf("hammer_ip_add_directory error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_done_transaction(&trans);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	return (error);
}

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	struct vattr *vap = ap->a_vap;

#if 0
	if (cache_check_fsmid_vp(ap->a_vp, &ip->fsmid) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0 &&
	    ip->obj_asof == XXX
	) {
		/* LAZYMOD XXX */
	}
	hammer_itimes(ap->a_vp);
#endif

	vap->va_fsid = ip->hmp->fsid_udev;
	vap->va_fileid = ip->ino_leaf.base.obj_id;
	vap->va_mode = ip->ino_data.mode;
	vap->va_nlink = ip->ino_data.nlinks;
	vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
	vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->ino_data.size;
	if (ip->flags & HAMMER_INODE_RO)
		hammer_to_timespec(ip->ino_data.mtime, &vap->va_atime);
	else
		hammer_to_timespec(ip->ino_leaf.atime, &vap->va_atime);
	hammer_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
	hammer_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
	vap->va_flags = ip->ino_data.uflags;
	vap->va_gen = 1;	/* hammer inums are unique for all time */
	vap->va_blocksize = HAMMER_BUFSIZE;
	vap->va_bytes = (ip->ino_data.size + 63) & ~63;
	vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
	vap->va_filerev = 0;	/* XXX */
	/* mtime uniquely identifies any adjustments made to the file */
	vap->va_fsmid = ip->ino_data.mtime;
	vap->va_uid_uuid = ip->ino_data.uid;
	vap->va_gid_uuid = ip->ino_data.gid;
	vap->va_fsid_uuid = ip->hmp->fsid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	switch (ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		vap->va_rmajor = ip->ino_data.rmajor;
		vap->va_rminor = ip->ino_data.rminor;
		break;
	default:
		break;
	}

	return(0);
}

/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	hammer_tid_t asof;
	struct hammer_cursor cursor;
	struct vnode *vp;
	int64_t namekey;
	int error;
	int i;
	int nlen;
	int flags;
	u_int64_t obj_id;

	/*
	 * Misc initialization, plus handle as-of name extensions.  Look for
	 * the '@@' extension.  Note that as-of files and directories cannot
	 * be modified.
	 */
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	asof = dip->obj_asof;
	nlen = ncp->nc_nlen;
	flags = dip->flags;

	hammer_simple_transaction(&trans, dip->hmp);

	for (i = 0; i < nlen; ++i) {
		if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
			asof = hammer_str_to_tid(ncp->nc_name + i + 2);
			flags |= HAMMER_INODE_RO;
			break;
		}
	}
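	/*
	 * nlen is clipped to cover only the name component, excluding any
	 * '@@' as-of extension parsed above.
	 */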
	nlen = i;

	/*
	 * If there is no path component the time extension is relative to
	 * dip.
	 */
	if (nlen == 0) {
		ip = hammer_get_inode(&trans, &dip->cache[1], dip->obj_id,
				      asof, flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		goto done;
	}

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(ncp->nc_name, nlen);

	error = hammer_init_cursor(&trans, &cursor, &dip->cache[0], dip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	obj_id = 0;

	if (error == 0) {
		error = hammer_ip_first(&cursor);
		while (error == 0) {
			error = hammer_ip_resolve_data(&cursor);
			if (error)
				break;
			if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
			    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
				obj_id = cursor.data->entry.obj_id;
				break;
			}
			error = hammer_ip_next(&cursor);
		}
	}
	hammer_done_cursor(&cursor);
	if (error == 0) {
		ip = hammer_get_inode(&trans, &dip->cache[1],
				      obj_id, asof, flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
done:
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root; instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might
 * not get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	int64_t parent_obj_id;
	hammer_tid_t asof;
	int error;

	dip = VTOI(ap->a_dvp);
	asof = dip->obj_asof;
	parent_obj_id = dip->ino_data.parent_obj_id;

	if (parent_obj_id == 0) {
		if (dip->obj_id == HAMMER_OBJID_ROOT &&
		    asof != dip->hmp->asof) {
			parent_obj_id = dip->obj_id;
			asof = dip->hmp->asof;
			*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
			ksnprintf(*ap->a_fakename, 19, "0x%016llx",
				  dip->obj_asof);
		} else {
			*ap->a_vpp = NULL;
			return ENOENT;
		}
	}

	hammer_simple_transaction(&trans, dip->hmp);

	ip = hammer_get_inode(&trans, &dip->cache[1], parent_obj_id,
			      asof, dip->flags, &error);
	if (ip) {
		error = hammer_get_vnode(ip, ap->a_vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*ap->a_vpp = NULL;
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);
	ip = VTOI(ap->a_vp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Add the filesystem object to the directory.  Note that neither
	 * dip nor ip are referenced or locked, but their vnodes are
	 * referenced.  This function will bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, ip);

	/*
	 * Finish up.
	 */
	if (error == 0) {
		cache_setunresolved(nch);
		cache_setvp(nch, ap->a_vp);
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hkprintf("hammer_mkdir error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}
	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error)
		hkprintf("hammer_mkdir (add) error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
	hammer_inode_t ip;

	ip = VTOI(ap->a_vp);

	if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
		return (EROFS);
	return(vop_stdopen(ap));
}

/*
 * hammer_vop_pathconf { vp, name, retval }
 */
static
int
hammer_vop_pathconf(struct vop_pathconf_args *ap)
{
	return EOPNOTSUPP;
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
	return EOPNOTSUPP;
}

/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	struct uio *uio;
	hammer_base_elm_t base;
	int error;
	int cookie_index;
	int ncookies;
	off_t *cookies;
	off_t saveoff;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	hammer_simple_transaction(&trans, ip->hmp);

	/*
	 * Handle artificial entries
	 */
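	/*
	 * "." and ".." are synthesized at directory offsets 0 and 1; the
	 * B-Tree scan further below resumes from the saved offset.
	 */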
	error = 0;
	if (saveoff == 0) {
		r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}
	if (saveoff == 1) {
		if (ip->ino_data.parent_obj_id) {
			r = vop_write_dirent(&error, uio,
					     ip->ino_data.parent_obj_id,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, uio,
					     ip->obj_id, DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = saveoff;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		base = &cursor.leaf->base;
		saveoff = base->key;
		KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

		if (base->obj_id != ip->obj_id)
			panic("readdir: bad record at %p", cursor.node);

		r = vop_write_dirent(
			     &error, uio, cursor.data->entry.obj_id,
			     hammer_get_dtype(cursor.leaf->base.obj_type),
			     cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
			     (void *)cursor.data->entry.name);
		if (r)
			break;
		++saveoff;
		if (cookies)
			cookies[cookie_index] = base->key;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
		error = hammer_ip_next(&cursor);
	}
	hammer_done_cursor(&cursor);

done:
	hammer_done_transaction(&trans);

	if (ap->a_eofflag)
		*ap->a_eofflag = (error == ENOENT);
	uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return(error);
}

/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(ap->a_vp);

	/*
	 * Shortcut if the symlink data was stuffed into ino_data.
	 */
	if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
		error = uiomove(ip->ino_data.ext.symlink,
				ip->ino_data.size, ap->a_uio);
		return(error);
	}

	/*
	 * Long version
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;	/* XXX */
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error == 0) {
			KKASSERT(cursor.leaf->data_len >=
				 HAMMER_SYMLINK_NAME_OFF);
			error = uiomove(cursor.data->symlink.name,
					cursor.leaf->data_len -
					HAMMER_SYMLINK_NAME_OFF,
					ap->a_uio);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

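	/*
	 * The free space check is only enforced when history is being
	 * retained for this inode, since deletions that retain history
	 * consume additional media space.
	 */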
	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *fncp;
	struct namecache *tncp;
	struct hammer_inode *fdip;
	struct hammer_inode *tdip;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	int nlen, error;

	fdip = VTOI(ap->a_fdvp);
	tdip = VTOI(ap->a_tdvp);
	fncp = ap->a_fnch->ncp;
	tncp = ap->a_tnch->ncp;
	ip = VTOI(fncp->nc_vp);
	KKASSERT(ip != NULL);

	if (fdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (tdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(fdip->hmp)) != 0)
		return (error);

	hammer_start_transaction(&trans, fdip->hmp);

	/*
	 * Remove tncp from the target directory and then link ip as
	 * tncp. XXX pass trans to dounlink
	 *
	 * Force the inode sync-time to match the transaction so it is
	 * in-sync with the creation of the target directory entry.
	 */
	error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
	if (error == 0 || error == ENOENT) {
		error = hammer_ip_add_directory(&trans, tdip, tncp, ip);
		if (error == 0) {
			ip->ino_data.parent_obj_id = tdip->obj_id;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error)
		goto failed; /* XXX */

	/*
	 * Locate the record in the originating directory and remove it.
	 *
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
retry:
	hammer_init_cursor(&trans, &cursor, &fdip->cache[0], fdip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = fdip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = fdip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);
	while (error == 0) {
		if (hammer_ip_resolve_data(&cursor) != 0)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (fncp->nc_nlen == nlen &&
		    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 *
	 * WARNING: hammer_ip_del_directory() may have to terminate the
	 * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
	 * twice.
	 */
	if (error == 0)
		error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

	/*
	 * XXX A deadlock here will break rename's atomicity for the
	 * purposes of crash recovery.
	 */
	if (error == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * Cleanup and tell the kernel that the rename succeeded.
	 */
	hammer_done_cursor(&cursor);
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

failed:
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
	struct hammer_transaction trans;
	struct vattr *vap;
	struct hammer_inode *ip;
	int modflags;
	int error;
	int truncating;
	off_t aligned_size;
	u_int32_t flags;

	vap = ap->a_vap;
	ip = ap->a_vp->v_data;
	modflags = 0;

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return(EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (hammer_nohistory(ip) == 0 &&
	    (error = hammer_checkspace(ip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, ip->hmp);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		flags = ip->ino_data.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					 hammer_to_unix_xid(&ip->ino_data.uid),
					 ap->a_cred);
		if (error == 0) {
			if (ip->ino_data.uflags != flags) {
				ip->ino_data.uflags = flags;
				modflags |= HAMMER_INODE_DDIRTY;
			}
			if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
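	/*
	 * HAMMER stores uid/gid as uuids.  Run the standard chown checks
	 * against the unix ids and convert the results back to uuid form
	 * before comparing with and updating the inode.
	 */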
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer_guid_to_uuid(&uuid_uid, cur_uid);
			hammer_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->ino_data.uid,
				 sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->ino_data.gid,
				 sizeof(uuid_gid)) ||
			    ip->ino_data.mode != cur_mode
			) {
				ip->ino_data.uid = uuid_uid;
				ip->ino_data.gid = uuid_gid;
				ip->ino_data.mode = cur_mode;
			}
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
	while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
		switch(ap->a_vp->v_type) {
		case VREG:
			if (vap->va_size == ip->ino_data.size)
				break;
			/*
			 * XXX break atomicity, we can deadlock the backend
			 * if we do not release the lock.  Probably not a
			 * big deal here.
			 */
			if (vap->va_size < ip->ino_data.size) {
				vtruncbuf(ap->a_vp, vap->va_size,
					  HAMMER_BUFSIZE);
				truncating = 1;
			} else {
				vnode_pager_setsize(ap->a_vp, vap->va_size);
				truncating = 0;
			}
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;
			aligned_size = (vap->va_size + HAMMER_BUFMASK) &
				       ~HAMMER_BUFMASK64;

			/*
			 * on-media truncation is cached in the inode until
			 * the inode is synchronized.
			 */
			if (truncating) {
				hammer_ip_frontend_trunc(ip, vap->va_size);
				hammer_update_rsv_databufs(ip);
#ifdef DEBUG_TRUNCATE
				if (HammerTruncIp == NULL)
					HammerTruncIp = ip;
#endif
				if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
					ip->flags |= HAMMER_INODE_TRUNCATED;
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate1 %016llx\n", ip->trunc_off);
#endif
				} else if (ip->trunc_off > vap->va_size) {
					ip->trunc_off = vap->va_size;
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate2 %016llx\n", ip->trunc_off);
#endif
				} else {
#ifdef DEBUG_TRUNCATE
					if (ip == HammerTruncIp)
						kprintf("truncate3 %016llx (ignored)\n", vap->va_size);
#endif
				}
			}

			/*
			 * If truncating we have to clean out a portion of
			 * the last block on-disk.  We do this in the
			 * front-end buffer cache.
			 */
			if (truncating && vap->va_size < aligned_size) {
				struct buf *bp;
				int offset;

				aligned_size -= HAMMER_BUFSIZE;

				offset = vap->va_size & HAMMER_BUFMASK;
				error = bread(ap->a_vp, aligned_size,
					      HAMMER_BUFSIZE, &bp);
				hammer_ip_frontend_trunc(ip, aligned_size);
				if (error == 0) {
					bzero(bp->b_data + offset,
					      HAMMER_BUFSIZE - offset);
					bdwrite(bp);
				} else {
					kprintf("ERROR %d\n", error);
					brelse(bp);
				}
			}
			break;
		case VDATABASE:
			if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
				ip->flags |= HAMMER_INODE_TRUNCATED;
				ip->trunc_off = vap->va_size;
			} else if (ip->trunc_off > vap->va_size) {
				ip->trunc_off = vap->va_size;
			}
			hammer_ip_frontend_trunc(ip, vap->va_size);
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;
			break;
		default:
			error = EINVAL;
			goto done;
		}
		break;
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		ip->ino_leaf.atime =
			hammer_timespec_to_transid(&vap->va_atime);
		modflags |= HAMMER_INODE_ITIMES;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		ip->ino_data.mtime =
			hammer_timespec_to_transid(&vap->va_mtime);
		modflags |= HAMMER_INODE_ITIMES;
		modflags |= HAMMER_INODE_DDIRTY;	/* XXX mtime */
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->ino_data.mode != cur_mode) {
			ip->ino_data.mode = cur_mode;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
done:
	if (error == 0)
		hammer_modify_inode(ip, modflags);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	hammer_record_t record;
	int error;
	int bytes;

	ap->a_vap->va_type = VLNK;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */

	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add a record representing the symlink.  symlink stores the link
	 * as pure data, not a string, and is not \0 terminated.
	 */
	if (error == 0) {
		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER_INODE_BASESYMLEN) {
			bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
		} else {
			record = hammer_alloc_mem_record(nip, bytes);
			record->type = HAMMER_MEM_RECORD_GENERAL;

			record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
			record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
			record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
			record->leaf.data_len = bytes;
			KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
			bcopy(ap->a_target, record->data->symlink.name, bytes);
			error = hammer_ip_add_record(&trans, record);
		}

		/*
		 * Set the file size to the length of the link.
		 */
		if (error == 0) {
			nip->ino_data.size = bytes;
			hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error == 0)
		error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
				ap->a_cred, ap->a_flags);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
	struct hammer_inode *ip = ap->a_vp->v_data;

	return(hammer_ioctl(ip, ap->a_command, ap->a_data,
			    ap->a_fflag, ap->a_cred));
}

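/*
 * hammer_vop_mountctl { op, ctl, ctllen }
 *
 * MOUNTCTL_SET_EXPORT updates the NFS export configuration via
 * hammer_vfs_export().  All other operations are passed through to the
 * generic journaling code.
 */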
513ca7d7
MD
1760static
1761int
1762hammer_vop_mountctl(struct vop_mountctl_args *ap)
1763{
1764 struct mount *mp;
1765 int error;
1766
1767 mp = ap->a_head.a_ops->head.vv_mount;
1768
1769 switch(ap->a_op) {
1770 case MOUNTCTL_SET_EXPORT:
 1771 		if (ap->a_ctllen != sizeof(struct export_args))
 1772 			error = EINVAL;
 1773 		else
 1774 			error = hammer_vfs_export(mp, ap->a_op,
 					(const struct export_args *)ap->a_ctl);
1775 break;
1776 default:
1777 error = journal_mountctl(ap);
1778 break;
1779 }
1780 return(error);
1781}
1782
66325755
MD
1783/*
1784 * hammer_vop_strategy { vp, bio }
8cd0a023
MD
1785 *
1786 * Strategy call, used for regular file read & write only. Note that the
1787 * bp may represent a cluster.
1788 *
1789 * To simplify operation and allow better optimizations in the future,
 1790 	 * this code does not make any assumptions with regard to buffer alignment
1791 * or size.
66325755 1792 */
427e5fc6
MD
1793static
1794int
66325755 1795hammer_vop_strategy(struct vop_strategy_args *ap)
427e5fc6 1796{
8cd0a023
MD
1797 struct buf *bp;
1798 int error;
1799
1800 bp = ap->a_bio->bio_buf;
1801
1802 switch(bp->b_cmd) {
1803 case BUF_CMD_READ:
1804 error = hammer_vop_strategy_read(ap);
1805 break;
1806 case BUF_CMD_WRITE:
1807 error = hammer_vop_strategy_write(ap);
1808 break;
1809 default:
059819e3
MD
1810 bp->b_error = error = EINVAL;
1811 bp->b_flags |= B_ERROR;
1812 biodone(ap->a_bio);
8cd0a023
MD
1813 break;
1814 }
8cd0a023 1815 return (error);
427e5fc6
MD
1816}
1817
8cd0a023
MD
1818/*
1819 * Read from a regular file. Iterate the related records and fill in the
1820 * BIO/BUF. Gaps are zero-filled.
1821 *
1822 * The support code in hammer_object.c should be used to deal with mixed
1823 * in-memory and on-disk records.
1824 *
1825 * XXX atime update
1826 */
1827static
1828int
1829hammer_vop_strategy_read(struct vop_strategy_args *ap)
1830{
36f82b23
MD
1831 struct hammer_transaction trans;
1832 struct hammer_inode *ip;
8cd0a023 1833 struct hammer_cursor cursor;
8cd0a023
MD
1834 hammer_base_elm_t base;
1835 struct bio *bio;
1836 struct buf *bp;
1837 int64_t rec_offset;
a89aec1b 1838 int64_t ran_end;
195c19a1 1839 int64_t tmp64;
8cd0a023
MD
1840 int error;
1841 int boff;
1842 int roff;
1843 int n;
1844
1845 bio = ap->a_bio;
1846 bp = bio->bio_buf;
36f82b23 1847 ip = ap->a_vp->v_data;
8cd0a023 1848
36f82b23 1849 hammer_simple_transaction(&trans, ip->hmp);
47637bff 1850 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
8cd0a023
MD
1851
1852 /*
 1853 	 * Key range (begin and end inclusive) to scan.  Note that the keys
c0ade690
MD
1854 * stored in the actual records represent BASE+LEN, not BASE. The
1855 * first record containing bio_offset will have a key > bio_offset.
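	 *
	 * Illustration (made-up offsets, assuming 16K HAMMER buffers): a data
	 * record covering file offsets [16384, 32768) is keyed at 32768, so a
	 * bio at offset 16384 starts its scan at key 16385 and picks up that
	 * record, while a record ending exactly at offset 16384 (key 16384)
	 * is correctly skipped.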
8cd0a023 1856 */
2f85fa4d 1857 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
8cd0a023 1858 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1859 cursor.key_beg.create_tid = 0;
8cd0a023 1860 cursor.key_beg.delete_tid = 0;
8cd0a023 1861 cursor.key_beg.obj_type = 0;
c0ade690 1862 cursor.key_beg.key = bio->bio_offset + 1;
d5530d22 1863 cursor.asof = ip->obj_asof;
47197d71 1864 cursor.flags |= HAMMER_CURSOR_ASOF | HAMMER_CURSOR_DATAEXTOK;
8cd0a023
MD
1865
1866 cursor.key_end = cursor.key_beg;
11ad5ade 1867 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
b84de5af 1868#if 0
11ad5ade 1869 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
a89aec1b
MD
1870 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
1871 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
1872 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
b84de5af
MD
1873 } else
1874#endif
1875 {
c0ade690 1876 ran_end = bio->bio_offset + bp->b_bufsize;
a89aec1b
MD
1877 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
1878 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
195c19a1
MD
1879 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
1880 if (tmp64 < ran_end)
a89aec1b
MD
1881 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1882 else
7f7c1f84 1883 cursor.key_end.key = ran_end + MAXPHYS + 1;
a89aec1b 1884 }
d26d0ae9 1885 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
8cd0a023 1886
4e17f465 1887 error = hammer_ip_first(&cursor);
8cd0a023
MD
1888 boff = 0;
1889
a89aec1b 1890 while (error == 0) {
47637bff
MD
1891 /*
1892 * Get the base file offset of the record. The key for
 1893 	 * data records is (base + bytes) rather than (base).
1894 */
11ad5ade 1895 base = &cursor.leaf->base;
11ad5ade 1896 rec_offset = base->key - cursor.leaf->data_len;
8cd0a023 1897
66325755 1898 /*
a89aec1b 1899 * Calculate the gap, if any, and zero-fill it.
1fef775e
MD
1900 *
 1901 	 * n is the offset of the start of the record versus our
1902 * current seek offset in the bio.
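	 *
	 * Illustration (made-up numbers): with bio_offset 0, boff 0, and the
	 * next record starting at file offset 4096 (a hole in the file),
	 * n is 4096 and the first 4096 bytes of the buffer are zeroed before
	 * boff advances past the gap.  A negative n means the record starts
	 * before our current position and is handled via roff below.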
66325755 1903 */
8cd0a023
MD
1904 n = (int)(rec_offset - (bio->bio_offset + boff));
1905 if (n > 0) {
a89aec1b
MD
1906 if (n > bp->b_bufsize - boff)
1907 n = bp->b_bufsize - boff;
8cd0a023
MD
1908 bzero((char *)bp->b_data + boff, n);
1909 boff += n;
1910 n = 0;
66325755 1911 }
8cd0a023
MD
1912
1913 /*
1914 * Calculate the data offset in the record and the number
1915 * of bytes we can copy.
a89aec1b 1916 *
1fef775e
MD
1917 * There are two degenerate cases. First, boff may already
 1918 	 * be at bp->b_bufsize.  Second, the data offset within
1919 * the record may exceed the record's size.
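	 *
	 * Illustration (made-up numbers): if the record begins 4096 bytes
	 * before our current position, n was -4096, so roff becomes 4096 and
	 * we skip that much of the record's data, leaving data_len - 4096
	 * bytes to copy, clipped to the space remaining in the buffer.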
8cd0a023
MD
1920 */
1921 roff = -n;
b84de5af 1922 rec_offset += roff;
11ad5ade 1923 n = cursor.leaf->data_len - roff;
1fef775e
MD
1924 if (n <= 0) {
1925 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
1926 n = 0;
1927 } else if (n > bp->b_bufsize - boff) {
8cd0a023 1928 n = bp->b_bufsize - boff;
1fef775e 1929 }
059819e3 1930
b84de5af 1931 /*
47637bff
MD
1932 * Deal with cached truncations. This cool bit of code
1933 * allows truncate()/ftruncate() to avoid having to sync
1934 * the file.
1935 *
1936 * If the frontend is truncated then all backend records are
1937 * subject to the frontend's truncation.
1938 *
1939 * If the backend is truncated then backend records on-disk
1940 * (but not in-memory) are subject to the backend's
1941 * truncation. In-memory records owned by the backend
1942 * represent data written after the truncation point on the
1943 * backend and must not be truncated.
1944 *
1945 * Truncate operations deal with frontend buffer cache
1946 * buffers and frontend-owned in-memory records synchronously.
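	 *
	 * Illustration (made-up offsets): with trunc_off at 6144, an on-disk
	 * record at rec_offset 4096 that could supply 8192 bytes is clipped
	 * to 2048 bytes, and a record lying entirely at or beyond trunc_off
	 * contributes nothing; the zero-fill after the loop covers the
	 * truncated range.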
b84de5af 1947 */
47637bff
MD
1948 if (ip->flags & HAMMER_INODE_TRUNCATED) {
1949 if (hammer_cursor_ondisk(&cursor) ||
1950 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
1951 if (ip->trunc_off <= rec_offset)
1952 n = 0;
1953 else if (ip->trunc_off < rec_offset + n)
1954 n = (int)(ip->trunc_off - rec_offset);
1955 }
1956 }
1957 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1958 if (hammer_cursor_ondisk(&cursor)) {
1959 if (ip->sync_trunc_off <= rec_offset)
1960 n = 0;
1961 else if (ip->sync_trunc_off < rec_offset + n)
1962 n = (int)(ip->sync_trunc_off - rec_offset);
1963 }
1964 }
b84de5af
MD
1965
1966 /*
47637bff
MD
1967 * Try to issue a direct read into our bio if possible,
1968 * otherwise resolve the element data into a hammer_buffer
1969 * and copy.
1970 *
 1971 	 * WARNING: If we hit the else clause the data is resolved through
 	 * a hammer_buffer and copied out with bcopy(), which is the slower
 	 * path.
b84de5af 1972 */
cebe9493 1973 if (roff == 0 && boff == 0 && n == bp->b_bufsize &&
47637bff
MD
1974 (rec_offset & HAMMER_BUFMASK) == 0) {
1975 error = hammer_io_direct_read(trans.hmp, cursor.leaf,
1976 bio);
1977 goto done;
1978 } else if (n) {
1979 error = hammer_ip_resolve_data(&cursor);
1980 if (error == 0) {
1981 bcopy((char *)cursor.data + roff,
1982 (char *)bp->b_data + boff, n);
1983 }
b84de5af 1984 }
47637bff
MD
1985 if (error)
1986 break;
1987
1988 /*
1989 * Iterate until we have filled the request.
1990 */
1991 boff += n;
8cd0a023 1992 if (boff == bp->b_bufsize)
66325755 1993 break;
a89aec1b 1994 error = hammer_ip_next(&cursor);
66325755
MD
1995 }
1996
1997 /*
8cd0a023 1998 * There may have been a gap after the last record
66325755 1999 */
8cd0a023
MD
2000 if (error == ENOENT)
2001 error = 0;
2002 if (error == 0 && boff != bp->b_bufsize) {
7f7c1f84 2003 KKASSERT(boff < bp->b_bufsize);
8cd0a023
MD
2004 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2005 /* boff = bp->b_bufsize; */
2006 }
2007 bp->b_resid = 0;
059819e3
MD
2008 bp->b_error = error;
2009 if (error)
2010 bp->b_flags |= B_ERROR;
2011 biodone(ap->a_bio);
47637bff
MD
2012
2013done:
2014 if (cursor.node)
2015 hammer_cache_node(cursor.node, &ip->cache[1]);
2016 hammer_done_cursor(&cursor);
2017 hammer_done_transaction(&trans);
8cd0a023
MD
2018 return(error);
2019}
2020
2021/*
059819e3
MD
2022 * Write to a regular file. Because this is a strategy call the OS is
2023 * trying to actually sync data to the media. HAMMER can only flush
2024 * the entire inode (so the TID remains properly synchronized).
8cd0a023 2025 *
059819e3
MD
 2026 	 * Basically all we do here is arrange a direct write of the data from
 2027 	 * the front-end, install an in-memory record covering the storage,
 	 * and activate the flusher.
8cd0a023
MD
2028 */
2029static
2030int
2031hammer_vop_strategy_write(struct vop_strategy_args *ap)
2032{
47637bff 2033 hammer_record_t record;
8cd0a023
MD
2034 hammer_inode_t ip;
2035 struct bio *bio;
2036 struct buf *bp;
0832c9bb
MD
2037 int bytes;
2038 int error;
8cd0a023
MD
2039
2040 bio = ap->a_bio;
2041 bp = bio->bio_buf;
2042 ip = ap->a_vp->v_data;
d113fda1 2043
059819e3
MD
2044 if (ip->flags & HAMMER_INODE_RO) {
2045 bp->b_error = EROFS;
2046 bp->b_flags |= B_ERROR;
2047 biodone(ap->a_bio);
e63644f0 2048 hammer_cleanup_write_io(ip);
059819e3
MD
2049 return(EROFS);
2050 }
b84de5af 2051
29ce0677
MD
2052 /*
2053 * Interlock with inode destruction (no in-kernel or directory
2054 * topology visibility). If we queue new IO while trying to
2055 * destroy the inode we can deadlock the vtrunc call in
2056 * hammer_inode_unloadable_check().
2057 */
2058 if (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
2059 bp->b_resid = 0;
2060 biodone(ap->a_bio);
e63644f0 2061 hammer_cleanup_write_io(ip);
29ce0677
MD
2062 return(0);
2063 }
2064
b84de5af 2065 /*
47637bff
MD
2066 * Attempt to reserve space and issue a direct-write from the
2067 * front-end. If we can't we will queue the BIO to the flusher.
0832c9bb
MD
2068 * The bulk/direct-write code will still bcopy if writing less
 2069 	 * than full-sized blocks (at the end of a file).
47637bff
MD
2070 *
 2071 	 * If we can, the I/O is issued and an in-memory record will
0832c9bb 2072 * be installed to reference the storage until the flusher can get to
47637bff
MD
2073 * it.
2074 *
2075 * Since we own the high level bio the front-end will not try to
0832c9bb 2076 * do a direct-read until the write completes.
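	 *
	 * Illustration (made-up sizes, assuming 16K buffers): for a file of
	 * 100000 bytes the buffer at bio_offset 98304 only covers 1696 valid
	 * bytes, so bytes is clipped to 1696 and the bulk/direct-write code
	 * bcopy's that partial block as noted above.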
47637bff 2077 */
0832c9bb
MD
2078 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
2079 KKASSERT(bio->bio_offset < ip->ino_data.size);
2080 if (bio->bio_offset + bp->b_bufsize <= ip->ino_data.size)
2081 bytes = bp->b_bufsize;
b84de5af 2082 else
0832c9bb
MD
2083 bytes = (int)(ip->ino_data.size - bio->bio_offset);
2084
2085 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
2086 bytes, &error);
2087 if (record) {
2088 hammer_io_direct_write(ip->hmp, &record->leaf, bio);
2089 hammer_rel_mem_record(record);
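		/*
		 * Poke the flusher immediately once this inode has reserved
		 * more than half of the in-memory record limit; otherwise a
		 * normal queued flush is sufficient.
		 */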
2090 if (ip->rsv_recs > hammer_limit_irecs / 2)
2091 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2092 else
2093 hammer_flush_inode(ip, 0);
2094 } else {
2095 bp->b_error = error;
2096 bp->b_flags |= B_ERROR;
2097 biodone(ap->a_bio);
2098 }
2099 hammer_cleanup_write_io(ip);
2100 return(error);
059819e3
MD
2101}
2102
2103/*
47637bff
MD
2104 * Clean-up after disposing of a dirty frontend buffer's data.
 2105 	 * This is somewhat heuristic, so try to be robust.
059819e3 2106 */
0832c9bb 2107static void
e63644f0
MD
2108hammer_cleanup_write_io(hammer_inode_t ip)
2109{
2110 if (ip->rsv_databufs) {
2111 --ip->rsv_databufs;
2112 --ip->hmp->rsv_databufs;
2113 }
2114}
2115
0832c9bb
MD
2116/*
 2117 	 * We can lose track of dirty buffer cache buffers if we truncate;
 2118 	 * this routine resynchronizes the count.
2119 */
2120static
2121void
2122hammer_update_rsv_databufs(hammer_inode_t ip)
2123{
2124 struct buf *bp;
2125 int delta;
2126 int n;
2127
2128 if (ip->vp) {
2129 n = 0;
2130 RB_FOREACH(bp, buf_rb_tree, &ip->vp->v_rbdirty_tree) {
2131 ++n;
2132 }
2133 } else {
2134 n = 0;
2135 }
2136 delta = n - ip->rsv_databufs;
2137 ip->rsv_databufs += delta;
2138 ip->hmp->rsv_databufs += delta;
2139}
2140
8cd0a023
MD
2141/*
2142 * dounlink - disconnect a directory entry
2143 *
2144 * XXX whiteout support not really in yet
2145 */
2146static int
b84de5af
MD
2147hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
2148 struct vnode *dvp, struct ucred *cred, int flags)
8cd0a023 2149{
8cd0a023
MD
2150 struct namecache *ncp;
2151 hammer_inode_t dip;
2152 hammer_inode_t ip;
8cd0a023 2153 struct hammer_cursor cursor;
8cd0a023 2154 int64_t namekey;
11ad5ade 2155 int nlen, error;
8cd0a023
MD
2156
2157 /*
2158 * Calculate the namekey and setup the key range for the scan. This
2159 * works kinda like a chained hash table where the lower 32 bits
2160 * of the namekey synthesize the chain.
2161 *
2162 * The key range is inclusive of both key_beg and key_end.
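	 *
	 * Illustration: the name hash occupies the upper 32 bits of the
	 * 64-bit key and the low 32 bits start at zero, so scanning
	 * [namekey, namekey | 0xFFFFFFFF] visits every entry whose name
	 * hashed to the same value; the loop below compares the actual
	 * names to resolve collisions.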
2163 */
2164 dip = VTOI(dvp);
2165 ncp = nch->ncp;
d113fda1
MD
2166
2167 if (dip->flags & HAMMER_INODE_RO)
2168 return (EROFS);
2169
6a37e7e4
MD
2170 namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
2171retry:
4e17f465 2172 hammer_init_cursor(trans, &cursor, &dip->cache[0], dip);
2f85fa4d 2173 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
8cd0a023
MD
2174 cursor.key_beg.obj_id = dip->obj_id;
2175 cursor.key_beg.key = namekey;
d5530d22 2176 cursor.key_beg.create_tid = 0;
8cd0a023
MD
2177 cursor.key_beg.delete_tid = 0;
2178 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2179 cursor.key_beg.obj_type = 0;
2180
2181 cursor.key_end = cursor.key_beg;
2182 cursor.key_end.key |= 0xFFFFFFFFULL;
d5530d22
MD
2183 cursor.asof = dip->obj_asof;
2184 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
8cd0a023 2185
8cd0a023
MD
2186 /*
2187 * Scan all matching records (the chain), locate the one matching
 2188 	 * the requested path component.  The loop's error variable holds the
 2189 	 * error code on search termination and can be 0, ENOENT, or
 2190 	 * something else.
2191 *
2192 * The hammer_ip_*() functions merge in-memory records with on-disk
2193 * records for the purposes of the search.
2194 */
4e17f465
MD
2195 error = hammer_ip_first(&cursor);
2196
a89aec1b
MD
2197 while (error == 0) {
2198 error = hammer_ip_resolve_data(&cursor);
2199 if (error)
66325755 2200 break;
11ad5ade
MD
2201 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2202 KKASSERT(nlen > 0);
2203 if (ncp->nc_nlen == nlen &&
2204 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
66325755
MD
2205 break;
2206 }
a89aec1b 2207 error = hammer_ip_next(&cursor);
66325755 2208 }
8cd0a023
MD
2209
2210 /*
2211 * If all is ok we have to get the inode so we can adjust nlinks.
269c5eab
MD
2212 * To avoid a deadlock with the flusher we must release the inode
2213 * lock on the directory when acquiring the inode for the entry.
b3deaf57
MD
2214 *
2215 * If the target is a directory, it must be empty.
8cd0a023 2216 */
66325755 2217 if (error == 0) {
269c5eab 2218 hammer_unlock(&cursor.ip->lock);
b84de5af 2219 ip = hammer_get_inode(trans, &dip->cache[1],
11ad5ade 2220 cursor.data->entry.obj_id,
d113fda1 2221 dip->hmp->asof, 0, &error);
269c5eab 2222 hammer_lock_sh(&cursor.ip->lock);
46fe7ae1 2223 if (error == ENOENT) {
11ad5ade 2224 kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
10a5d1ba 2225 Debugger("ENOENT unlinking object that should exist");
46fe7ae1 2226 }
1f07f686
MD
2227
2228 /*
2229 * If we are trying to remove a directory the directory must
2230 * be empty.
2231 *
2232 * WARNING: hammer_ip_check_directory_empty() may have to
2233 * terminate the cursor to avoid a deadlock. It is ok to
2234 * call hammer_done_cursor() twice.
2235 */
11ad5ade 2236 if (error == 0 && ip->ino_data.obj_type ==
b3deaf57 2237 HAMMER_OBJTYPE_DIRECTORY) {
98f7132d 2238 error = hammer_ip_check_directory_empty(trans, ip);
b3deaf57 2239 }
1f07f686 2240
6a37e7e4 2241 /*
1f07f686
MD
2242 * Delete the directory entry.
2243 *
6a37e7e4 2244 * WARNING: hammer_ip_del_directory() may have to terminate
1f07f686 2245 * the cursor to avoid a deadlock. It is ok to call
6a37e7e4
MD
2246 * hammer_done_cursor() twice.
2247 */
b84de5af 2248 if (error == 0) {
b84de5af
MD
2249 error = hammer_ip_del_directory(trans, &cursor,
2250 dip, ip);
b84de5af 2251 }
269c5eab 2252 hammer_done_cursor(&cursor);
8cd0a023
MD
2253 if (error == 0) {
2254 cache_setunresolved(nch);
2255 cache_setvp(nch, NULL);
2256 /* XXX locking */
2257 if (ip->vp)
2258 cache_inval_vp(ip->vp, CINV_DESTROY);
2259 }
a89aec1b 2260 hammer_rel_inode(ip, 0);
269c5eab
MD
2261 } else {
2262 hammer_done_cursor(&cursor);
66325755 2263 }
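	/*
	 * A deadlock while positioned on the B-Tree unwinds the cursor and
	 * returns EDEADLK, in which case the entire lookup is retried from
	 * the top.
	 */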
6a37e7e4
MD
2264 if (error == EDEADLK)
2265 goto retry;
9c448776 2266
66325755 2267 return (error);
66325755
MD
2268}
2269
7a04d74f
MD
2270/************************************************************************
2271 * FIFO AND SPECFS OPS *
2272 ************************************************************************
2273 *
2274 */
2275
2276static int
2277hammer_vop_fifoclose (struct vop_close_args *ap)
2278{
2279 /* XXX update itimes */
2280 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
2281}
2282
2283static int
2284hammer_vop_fiforead (struct vop_read_args *ap)
2285{
2286 int error;
2287
2288 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2289 /* XXX update access time */
2290 return (error);
2291}
2292
2293static int
2294hammer_vop_fifowrite (struct vop_write_args *ap)
2295{
2296 int error;
2297
2298 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2299 /* XXX update access time */
2300 return (error);
2301}
2302
2303static int
2304hammer_vop_specclose (struct vop_close_args *ap)
2305{
2306 /* XXX update itimes */
2307 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2308}
2309
2310static int
2311hammer_vop_specread (struct vop_read_args *ap)
2312{
2313 /* XXX update access time */
2314 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2315}
2316
2317static int
2318hammer_vop_specwrite (struct vop_write_args *ap)
2319{
2320 /* XXX update last change time */
2321 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2322}
2323