/*
 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.55 2008/05/22 04:14:01 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>
#include "hammer.h"

/*
 * USERFS VNOPS
 */
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_pathconf(struct vop_pathconf_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);

static int hammer_vop_specclose (struct vop_close_args *);
static int hammer_vop_specread (struct vop_read_args *);
static int hammer_vop_specwrite (struct vop_write_args *);

struct vop_ops hammer_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer_vop_fsync,
	.vop_getpages = vop_stdgetpages,
	.vop_putpages = vop_stdputpages,
	.vop_read = hammer_vop_read,
	.vop_write = hammer_vop_write,
	.vop_access = hammer_vop_access,
	.vop_advlock = hammer_vop_advlock,
	.vop_close = hammer_vop_close,
	.vop_ncreate = hammer_vop_ncreate,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_nresolve = hammer_vop_nresolve,
	.vop_nlookupdotdot = hammer_vop_nlookupdotdot,
	.vop_nlink = hammer_vop_nlink,
	.vop_nmkdir = hammer_vop_nmkdir,
	.vop_nmknod = hammer_vop_nmknod,
	.vop_open = hammer_vop_open,
	.vop_pathconf = hammer_vop_pathconf,
	.vop_print = hammer_vop_print,
	.vop_readdir = hammer_vop_readdir,
	.vop_readlink = hammer_vop_readlink,
	.vop_nremove = hammer_vop_nremove,
	.vop_nrename = hammer_vop_nrename,
	.vop_nrmdir = hammer_vop_nrmdir,
	.vop_setattr = hammer_vop_setattr,
	.vop_strategy = hammer_vop_strategy,
	.vop_nsymlink = hammer_vop_nsymlink,
	.vop_nwhiteout = hammer_vop_nwhiteout,
	.vop_ioctl = hammer_vop_ioctl,
	.vop_mountctl = hammer_vop_mountctl
};

struct vop_ops hammer_spec_vops = {
	.vop_default = spec_vnoperate,
	.vop_fsync = hammer_vop_fsync,
	.vop_read = hammer_vop_specread,
	.vop_write = hammer_vop_specwrite,
	.vop_access = hammer_vop_access,
	.vop_close = hammer_vop_specclose,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_setattr = hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
	.vop_default = fifo_vnoperate,
	.vop_fsync = hammer_vop_fsync,
	.vop_read = hammer_vop_fiforead,
	.vop_write = hammer_vop_fifowrite,
	.vop_access = hammer_vop_access,
	.vop_close = hammer_vop_fifoclose,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_setattr = hammer_vop_setattr
};

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
			   struct vnode *dvp, struct ucred *cred, int flags);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);

#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *)
{
	return (VOCALL(&hammer_vnode_vops, ap));
}
#endif

/*
 * hammer_vop_fsync { vp, waitfor }
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
	if (ap->a_waitfor == MNT_WAIT)
		hammer_wait_inode(ip);
	return (ip->error);
}

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
	struct hammer_transaction trans;
	hammer_inode_t ip;
	off_t offset;
	struct buf *bp;
	struct uio *uio;
	int error;
	int n;
	int seqcount;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;
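	/* the upper bits of ioflag carry the sequential-access hint (seqcount) */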
	seqcount = ap->a_ioflag >> 16;

	hammer_start_transaction(&trans, ip->hmp);

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	uio = ap->a_uio;
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
		offset = uio->uio_offset & HAMMER_BUFMASK;
#if 0
		error = cluster_read(ap->a_vp, ip->ino_data.size,
				     uio->uio_offset - offset, HAMMER_BUFSIZE,
				     MAXBSIZE, seqcount, &bp);
#endif
		error = bread(ap->a_vp, uio->uio_offset - offset,
			      HAMMER_BUFSIZE, &bp);
		if (error) {
			brelse(bp);
			break;
		}
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		n = HAMMER_BUFSIZE - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ino_data.size - uio->uio_offset)
			n = (int)(ip->ino_data.size - uio->uio_offset);
		error = uiomove((char *)bp->b_data + offset, n, uio);
		if (error) {
			bqrelse(bp);
			break;
		}
		bqrelse(bp);
	}
	if ((ip->flags & HAMMER_INODE_RO) == 0 &&
	    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
		ip->ino_leaf.atime = trans.time;
		hammer_modify_inode(&trans, ip, HAMMER_INODE_ITIMES);
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct uio *uio;
	off_t offset;
	struct buf *bp;
	int error;
	int n;
	int flags;
	int count;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;

	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, ip->hmp);
	uio = ap->a_uio;

	/*
	 * Check append mode
	 */
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = ip->ino_data.size;

	/*
	 * Check for illegal write offsets.  Valid range is 0...2^63-1
	 */
	if (uio->uio_offset < 0 || uio->uio_offset + uio->uio_resid <= 0) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	count = 0;
	while (uio->uio_resid > 0) {
		int fixsize = 0;

		/*
		 * Do not allow huge writes to deadlock the buffer cache
		 */
		if ((++count & 15) == 0) {
			vn_unlock(ap->a_vp);
			if ((ap->a_ioflag & IO_NOBWILL) == 0)
				bwillwrite();
			vn_lock(ap->a_vp, LK_EXCLUSIVE|LK_RETRY);
		}

		offset = uio->uio_offset & HAMMER_BUFMASK;
		n = HAMMER_BUFSIZE - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (uio->uio_offset + n > ip->ino_data.size) {
			vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
			fixsize = 1;
		}

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ap->a_vp, uio->uio_offset - offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ap->a_vp,
					      uio->uio_offset - offset,
					      HAMMER_BUFSIZE, &bp);
			}
		} else if (offset == 0 && uio->uio_resid >= HAMMER_BUFSIZE) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ap->a_vp, uio->uio_offset - offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else if (uio->uio_offset - offset >= ip->ino_data.size) {
			/*
			 * If the base offset of the buffer is beyond the
			 * file EOF, we don't have to issue a read.
			 */
			bp = getblk(ap->a_vp, uio->uio_offset - offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 */
			error = bread(ap->a_vp, uio->uio_offset - offset,
				      HAMMER_BUFSIZE, &bp);
			if (error == 0)
				bheavy(bp);
		}
		if (error == 0)
			error = uiomove((char *)bp->b_data + offset, n, uio);

		/*
		 * If we screwed up we have to undo any VM size changes we
		 * made.
		 */
		if (error) {
			brelse(bp);
			if (fixsize) {
				vtruncbuf(ap->a_vp, ip->ino_data.size,
					  HAMMER_BUFSIZE);
			}
			break;
		}
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		if (ip->ino_data.size < uio->uio_offset) {
			ip->ino_data.size = uio->uio_offset;
			flags = HAMMER_INODE_DDIRTY;
			vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
		} else {
			flags = 0;
		}
		ip->ino_data.mtime = trans.time;
		flags |= HAMMER_INODE_ITIMES | HAMMER_INODE_BUFS;
		flags |= HAMMER_INODE_DDIRTY;	/* XXX mtime */
		hammer_modify_inode(&trans, ip, flags);

		if (ap->a_ioflag & IO_SYNC) {
			bwrite(bp);
		} else if (ap->a_ioflag & IO_DIRECT) {
			bawrite(bp);
#if 0
		} else if ((ap->a_ioflag >> 16) == IO_SEQMAX &&
			   (uio->uio_offset & HAMMER_BUFMASK) == 0) {
			/*
			 * XXX HAMMER can only fsync the whole inode,
			 * doing it on every buffer would be a bad idea.
			 */
			/*
			 * If seqcount indicates sequential operation and
			 * we just finished filling a buffer, push it out
			 * now to prevent the buffer cache from becoming
			 * too full, which would trigger non-optimal
			 * flushes.
			 */
			bdwrite(bp);
#endif
		} else {
			bdwrite(bp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	uid = hammer_to_unix_xid(&ip->ino_data.uid);
	gid = hammer_to_unix_xid(&ip->ino_data.gid);

	error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
				  ip->ino_data.uflags);
	return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
	return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced and shared-locked to prevent
	 * it from being moved to the flusher.
	 */

	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hkprintf("hammer_create_inode error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error)
		hkprintf("hammer_ip_add_directory error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_done_transaction(&trans);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	return (error);
}

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	struct vattr *vap = ap->a_vap;

#if 0
	if (cache_check_fsmid_vp(ap->a_vp, &ip->fsmid) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0 &&
	    ip->obj_asof == XXX
	) {
		/* LAZYMOD XXX */
	}
	hammer_itimes(ap->a_vp);
#endif

	vap->va_fsid = ip->hmp->fsid_udev;
	vap->va_fileid = ip->ino_leaf.base.obj_id;
	vap->va_mode = ip->ino_data.mode;
	vap->va_nlink = ip->ino_data.nlinks;
	vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
	vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->ino_data.size;
	if (ip->flags & HAMMER_INODE_RO)
		hammer_to_timespec(ip->ino_data.mtime, &vap->va_atime);
	else
		hammer_to_timespec(ip->ino_leaf.atime, &vap->va_atime);
	hammer_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
	hammer_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
	vap->va_flags = ip->ino_data.uflags;
	vap->va_gen = 1;	/* hammer inums are unique for all time */
	vap->va_blocksize = HAMMER_BUFSIZE;
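	/* report the byte count rounded up to a 64-byte boundary */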
	vap->va_bytes = (ip->ino_data.size + 63) & ~63;
	vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
	vap->va_filerev = 0;	/* XXX */
	/* mtime uniquely identifies any adjustments made to the file */
	vap->va_fsmid = ip->ino_data.mtime;
	vap->va_uid_uuid = ip->ino_data.uid;
	vap->va_gid_uuid = ip->ino_data.gid;
	vap->va_fsid_uuid = ip->hmp->fsid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	switch (ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		vap->va_rmajor = ip->ino_data.rmajor;
		vap->va_rminor = ip->ino_data.rminor;
		break;
	default:
		break;
	}

	return(0);
}

/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	hammer_tid_t asof;
	struct hammer_cursor cursor;
	struct vnode *vp;
	int64_t namekey;
	int error;
	int i;
	int nlen;
	int flags;
	u_int64_t obj_id;

	/*
	 * Misc initialization, plus handle as-of name extensions.  Look for
	 * the '@@' extension.  Note that as-of files and directories cannot
	 * be modified.
	 */
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	asof = dip->obj_asof;
	nlen = ncp->nc_nlen;
	flags = dip->flags;

	hammer_simple_transaction(&trans, dip->hmp);

	for (i = 0; i < nlen; ++i) {
		if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
			asof = hammer_str_to_tid(ncp->nc_name + i + 2);
			flags |= HAMMER_INODE_RO;
			break;
		}
	}
	nlen = i;

	/*
	 * If there is no path component the time extension is relative to
	 * dip.
	 */
	if (nlen == 0) {
		ip = hammer_get_inode(&trans, &dip->cache[1], dip->obj_id,
				      asof, flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		goto done;
	}

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(ncp->nc_name, nlen);

	error = hammer_init_cursor(&trans, &cursor, &dip->cache[0], dip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	obj_id = 0;

	if (error == 0) {
		error = hammer_ip_first(&cursor);
		while (error == 0) {
			error = hammer_ip_resolve_data(&cursor);
			if (error)
				break;
			if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
			    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
				obj_id = cursor.data->entry.obj_id;
				break;
			}
			error = hammer_ip_next(&cursor);
		}
	}
	hammer_done_cursor(&cursor);
	if (error == 0) {
		ip = hammer_get_inode(&trans, &dip->cache[1],
				      obj_id, asof, flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
done:
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	int64_t parent_obj_id;
	hammer_tid_t asof;
	int error;

	dip = VTOI(ap->a_dvp);
	asof = dip->obj_asof;
	parent_obj_id = dip->ino_data.parent_obj_id;

	if (parent_obj_id == 0) {
		if (dip->obj_id == HAMMER_OBJID_ROOT &&
		    asof != dip->hmp->asof) {
			parent_obj_id = dip->obj_id;
			asof = dip->hmp->asof;
			*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
			ksnprintf(*ap->a_fakename, 19, "0x%016llx",
				  dip->obj_asof);
		} else {
			*ap->a_vpp = NULL;
			return ENOENT;
		}
	}

	hammer_simple_transaction(&trans, dip->hmp);

	ip = hammer_get_inode(&trans, &dip->cache[1], parent_obj_id,
			      asof, dip->flags, &error);
	if (ip) {
		error = hammer_get_vnode(ip, ap->a_vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*ap->a_vpp = NULL;
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);
	ip = VTOI(ap->a_vp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Add the filesystem object to the directory.  Note that neither
	 * dip nor ip are referenced or locked, but their vnodes are
	 * referenced.  This function will bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, ip);

	/*
	 * Finish up.
	 */
	if (error == 0) {
		cache_setunresolved(nch);
		cache_setvp(nch, ap->a_vp);
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hkprintf("hammer_mkdir error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}
	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error)
		hkprintf("hammer_mkdir (add) error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
	if ((ap->a_mode & FWRITE) && (VTOI(ap->a_vp)->flags & HAMMER_INODE_RO))
		return (EROFS);

	return(vop_stdopen(ap));
}

/*
 * hammer_vop_pathconf { vp, name, retval }
 */
static
int
hammer_vop_pathconf(struct vop_pathconf_args *ap)
{
	return EOPNOTSUPP;
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
	return EOPNOTSUPP;
}

/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	struct uio *uio;
	hammer_base_elm_t base;
	int error;
	int cookie_index;
	int ncookies;
	off_t *cookies;
	off_t saveoff;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	hammer_simple_transaction(&trans, ip->hmp);

	/*
	 * Handle artificial entries
	 */
	error = 0;
	if (saveoff == 0) {
		r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}
	if (saveoff == 1) {
		if (ip->ino_data.parent_obj_id) {
			r = vop_write_dirent(&error, uio,
					     ip->ino_data.parent_obj_id,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, uio,
					     ip->obj_id, DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = saveoff;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		base = &cursor.leaf->base;
		saveoff = base->key;
		KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

		if (base->obj_id != ip->obj_id)
			panic("readdir: bad record at %p", cursor.node);

		r = vop_write_dirent(
			     &error, uio, cursor.data->entry.obj_id,
			     hammer_get_dtype(cursor.leaf->base.obj_type),
			     cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
			     (void *)cursor.data->entry.name);
		if (r)
			break;
		++saveoff;
		if (cookies)
			cookies[cookie_index] = base->key;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
		error = hammer_ip_next(&cursor);
	}
	hammer_done_cursor(&cursor);

done:
	hammer_done_transaction(&trans);

	if (ap->a_eofflag)
		*ap->a_eofflag = (error == ENOENT);
	uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return(error);
}

/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(ap->a_vp);

	/*
	 * Shortcut if the symlink data was stuffed into ino_data.
	 */
	if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
		error = uiomove(ip->ino_data.ext.symlink,
				ip->ino_data.size, ap->a_uio);
		return(error);
	}

	/*
	 * Long version
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;	/* XXX */
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error == 0) {
			KKASSERT(cursor.leaf->data_len >=
				 HAMMER_SYMLINK_NAME_OFF);
			error = uiomove(cursor.data->symlink.name,
					cursor.leaf->data_len -
					HAMMER_SYMLINK_NAME_OFF,
					ap->a_uio);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
	struct hammer_transaction trans;
	int error;

	hammer_start_transaction(&trans, VTOI(ap->a_dvp)->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *fncp;
	struct namecache *tncp;
	struct hammer_inode *fdip;
	struct hammer_inode *tdip;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	int nlen, error;

	fdip = VTOI(ap->a_fdvp);
	tdip = VTOI(ap->a_tdvp);
	fncp = ap->a_fnch->ncp;
	tncp = ap->a_tnch->ncp;
	ip = VTOI(fncp->nc_vp);
	KKASSERT(ip != NULL);

	if (fdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (tdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	hammer_start_transaction(&trans, fdip->hmp);

	/*
	 * Remove tncp from the target directory and then link ip as
	 * tncp.  XXX pass trans to dounlink
	 *
	 * Force the inode sync-time to match the transaction so it is
	 * in-sync with the creation of the target directory entry.
	 */
	error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
	if (error == 0 || error == ENOENT) {
		error = hammer_ip_add_directory(&trans, tdip, tncp, ip);
		if (error == 0) {
			ip->ino_data.parent_obj_id = tdip->obj_id;
			hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error)
		goto failed; /* XXX */

	/*
	 * Locate the record in the originating directory and remove it.
	 *
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
retry:
	hammer_init_cursor(&trans, &cursor, &fdip->cache[0], fdip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = fdip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = fdip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);
	while (error == 0) {
		if (hammer_ip_resolve_data(&cursor) != 0)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (fncp->nc_nlen == nlen &&
		    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 *
	 * WARNING: hammer_ip_del_directory() may have to terminate the
	 * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
	 * twice.
	 */
	if (error == 0)
		error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

	/*
	 * XXX A deadlock here will break rename's atomicity for the purposes
	 * of crash recovery.
	 */
	if (error == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * Cleanup and tell the kernel that the rename succeeded.
	 */
	hammer_done_cursor(&cursor);
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

failed:
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct hammer_transaction trans;
	int error;

	hammer_start_transaction(&trans, VTOI(ap->a_dvp)->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
	struct hammer_transaction trans;
	struct vattr *vap;
	struct hammer_inode *ip;
	int modflags;
	int error;
	int truncating;
	off_t aligned_size;
	u_int32_t flags;
	uuid_t uuid;

	vap = ap->a_vap;
	ip = ap->a_vp->v_data;
	modflags = 0;

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return(EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	hammer_start_transaction(&trans, ip->hmp);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		flags = ip->ino_data.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					hammer_to_unix_xid(&ip->ino_data.uid),
					ap->a_cred);
		if (error == 0) {
			if (ip->ino_data.uflags != flags) {
				ip->ino_data.uflags = flags;
				modflags |= HAMMER_INODE_DDIRTY;
			}
			if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL) {
		hammer_guid_to_uuid(&uuid, vap->va_uid);
		if (bcmp(&uuid, &ip->ino_data.uid, sizeof(uuid)) != 0) {
			ip->ino_data.uid = uuid;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
	if (vap->va_gid != (uid_t)VNOVAL) {
		hammer_guid_to_uuid(&uuid, vap->va_gid);
		if (bcmp(&uuid, &ip->ino_data.gid, sizeof(uuid)) != 0) {
			ip->ino_data.gid = uuid;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
	while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
		switch(ap->a_vp->v_type) {
		case VREG:
			if (vap->va_size == ip->ino_data.size)
				break;
			/*
			 * XXX break atomicity, we can deadlock the backend
			 * if we do not release the lock.  Probably not a
			 * big deal here.
			 */
			if (vap->va_size < ip->ino_data.size) {
				vtruncbuf(ap->a_vp, vap->va_size,
					  HAMMER_BUFSIZE);
				truncating = 1;
			} else {
				vnode_pager_setsize(ap->a_vp, vap->va_size);
				truncating = 0;
			}
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;
			aligned_size = (vap->va_size + HAMMER_BUFMASK) &
				       ~HAMMER_BUFMASK64;

			/*
			 * on-media truncation is cached in the inode until
			 * the inode is synchronized.
			 */
			if (truncating) {
				if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
					ip->flags |= HAMMER_INODE_TRUNCATED;
					ip->trunc_off = vap->va_size;
				} else if (ip->trunc_off > vap->va_size) {
					ip->trunc_off = vap->va_size;
				}
			}

			/*
			 * If truncating we have to clean out a portion of
			 * the last block on-disk.  We do this in the
			 * front-end buffer cache.
			 */
			if (truncating && vap->va_size < aligned_size) {
				struct buf *bp;
				int offset;

				offset = vap->va_size & HAMMER_BUFMASK;
				error = bread(ap->a_vp,
					      aligned_size - HAMMER_BUFSIZE,
					      HAMMER_BUFSIZE, &bp);
				if (error == 0) {
					bzero(bp->b_data + offset,
					      HAMMER_BUFSIZE - offset);
					bdwrite(bp);
				} else {
					brelse(bp);
				}
			}
			break;
		case VDATABASE:
			if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
				ip->flags |= HAMMER_INODE_TRUNCATED;
				ip->trunc_off = vap->va_size;
			} else if (ip->trunc_off > vap->va_size) {
				ip->trunc_off = vap->va_size;
			}
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;
			break;
		default:
			error = EINVAL;
			goto done;
		}
		break;
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		ip->ino_leaf.atime =
			hammer_timespec_to_transid(&vap->va_atime);
		modflags |= HAMMER_INODE_ITIMES;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		ip->ino_data.mtime =
			hammer_timespec_to_transid(&vap->va_mtime);
		modflags |= HAMMER_INODE_ITIMES;
		modflags |= HAMMER_INODE_DDIRTY;	/* XXX mtime */
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ip->ino_data.mode != vap->va_mode) {
			ip->ino_data.mode = vap->va_mode;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
done:
	if (error == 0)
		hammer_modify_inode(&trans, ip, modflags);
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	hammer_record_t record;
	int error;
	int bytes;

	ap->a_vap->va_type = VLNK;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */

	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add a record representing the symlink.  symlink stores the link
	 * as pure data, not a string, and is not \0 terminated.
	 */
	if (error == 0) {
		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER_INODE_BASESYMLEN) {
			bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
		} else {
			record = hammer_alloc_mem_record(nip, bytes);
			record->type = HAMMER_MEM_RECORD_GENERAL;

			record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
			record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
			record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
			record->leaf.data_len = bytes;
			KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
			bcopy(ap->a_target, record->data->symlink.name, bytes);
			error = hammer_ip_add_record(&trans, record);
		}

		/*
		 * Set the file size to the length of the link.
		 */
		if (error == 0) {
			nip->ino_data.size = bytes;
			hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error == 0)
		error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
	struct hammer_transaction trans;
	int error;

	hammer_start_transaction(&trans, VTOI(ap->a_dvp)->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
				ap->a_cred, ap->a_flags);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
	struct hammer_inode *ip = ap->a_vp->v_data;

	return(hammer_ioctl(ip, ap->a_command, ap->a_data,
			    ap->a_fflag, ap->a_cred));
}

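/*
 * hammer_vop_mountctl
 *
 * Handle mount-level control operations.  MOUNTCTL_SET_EXPORT updates the
 * NFS export configuration via hammer_vfs_export(); everything else is
 * passed through to the journaling code.
 */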
513ca7d7
MD
1640static
1641int
1642hammer_vop_mountctl(struct vop_mountctl_args *ap)
1643{
1644 struct mount *mp;
1645 int error;
1646
1647 mp = ap->a_head.a_ops->head.vv_mount;
1648
1649 switch(ap->a_op) {
1650 case MOUNTCTL_SET_EXPORT:
1651 if (ap->a_ctllen != sizeof(struct export_args))
1652 error = EINVAL;
1653 error = hammer_vfs_export(mp, ap->a_op,
1654 (const struct export_args *)ap->a_ctl);
1655 break;
1656 default:
1657 error = journal_mountctl(ap);
1658 break;
1659 }
1660 return(error);
1661}
1662
66325755
MD
1663/*
1664 * hammer_vop_strategy { vp, bio }
8cd0a023
MD
1665 *
1666 * Strategy call, used for regular file read & write only. Note that the
1667 * bp may represent a cluster.
1668 *
1669 * To simplify operation and allow better optimizations in the future,
1670 * this code does not make any assumptions with regards to buffer alignment
1671 * or size.
66325755 1672 */
427e5fc6
MD
1673static
1674int
66325755 1675hammer_vop_strategy(struct vop_strategy_args *ap)
427e5fc6 1676{
8cd0a023
MD
1677 struct buf *bp;
1678 int error;
1679
1680 bp = ap->a_bio->bio_buf;
1681
1682 switch(bp->b_cmd) {
1683 case BUF_CMD_READ:
1684 error = hammer_vop_strategy_read(ap);
1685 break;
1686 case BUF_CMD_WRITE:
1687 error = hammer_vop_strategy_write(ap);
1688 break;
1689 default:
059819e3
MD
1690 bp->b_error = error = EINVAL;
1691 bp->b_flags |= B_ERROR;
1692 biodone(ap->a_bio);
8cd0a023
MD
1693 break;
1694 }
8cd0a023 1695 return (error);
427e5fc6
MD
1696}
1697
8cd0a023
MD
1698/*
1699 * Read from a regular file. Iterate the related records and fill in the
1700 * BIO/BUF. Gaps are zero-filled.
1701 *
1702 * The support code in hammer_object.c should be used to deal with mixed
1703 * in-memory and on-disk records.
1704 *
1705 * XXX atime update
1706 */
1707static
1708int
1709hammer_vop_strategy_read(struct vop_strategy_args *ap)
1710{
36f82b23
MD
1711 struct hammer_transaction trans;
1712 struct hammer_inode *ip;
8cd0a023 1713 struct hammer_cursor cursor;
8cd0a023
MD
1714 hammer_base_elm_t base;
1715 struct bio *bio;
1716 struct buf *bp;
1717 int64_t rec_offset;
a89aec1b 1718 int64_t ran_end;
195c19a1 1719 int64_t tmp64;
8cd0a023
MD
1720 int error;
1721 int boff;
1722 int roff;
1723 int n;
1724
1725 bio = ap->a_bio;
1726 bp = bio->bio_buf;
36f82b23 1727 ip = ap->a_vp->v_data;
8cd0a023 1728
36f82b23 1729 hammer_simple_transaction(&trans, ip->hmp);
4e17f465 1730 hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
8cd0a023
MD
1731
1732 /*
1733 * Key range (begin and end inclusive) to scan. Note that the key's
c0ade690
MD
1734 * stored in the actual records represent BASE+LEN, not BASE. The
1735 * first record containing bio_offset will have a key > bio_offset.
8cd0a023 1736 */
2f85fa4d 1737 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
8cd0a023 1738 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1739 cursor.key_beg.create_tid = 0;
8cd0a023 1740 cursor.key_beg.delete_tid = 0;
8cd0a023 1741 cursor.key_beg.obj_type = 0;
c0ade690 1742 cursor.key_beg.key = bio->bio_offset + 1;
d5530d22 1743 cursor.asof = ip->obj_asof;
47197d71 1744 cursor.flags |= HAMMER_CURSOR_ASOF | HAMMER_CURSOR_DATAEXTOK;
8cd0a023
MD
1745
1746 cursor.key_end = cursor.key_beg;
11ad5ade 1747 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
b84de5af 1748#if 0
11ad5ade 1749 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
a89aec1b
MD
1750 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
1751 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
1752 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
b84de5af
MD
1753 } else
1754#endif
1755 {
c0ade690 1756 ran_end = bio->bio_offset + bp->b_bufsize;
a89aec1b
MD
1757 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
1758 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
195c19a1
MD
1759 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
1760 if (tmp64 < ran_end)
a89aec1b
MD
1761 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1762 else
7f7c1f84 1763 cursor.key_end.key = ran_end + MAXPHYS + 1;
a89aec1b 1764 }
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;

        error = hammer_ip_first(&cursor);
        boff = 0;

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                base = &cursor.leaf->base;

                rec_offset = base->key - cursor.leaf->data_len;

                /*
                 * Calculate the gap, if any, and zero-fill it.
                 */
                n = (int)(rec_offset - (bio->bio_offset + boff));
                if (n > 0) {
                        if (n > bp->b_bufsize - boff)
                                n = bp->b_bufsize - boff;
                        bzero((char *)bp->b_data + boff, n);
                        boff += n;
                        n = 0;
                }
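                /*
                 * Worked example (added, not in the original source): with
                 * bio_offset 0, boff 0 and a record whose data begins at
                 * file offset 4096, n computes to 4096 and that many bytes
                 * of the buffer are zero-filled.  If instead the record
                 * begins before the current position, n is negative and
                 * the copy code below uses roff = -n to skip that many
                 * leading bytes of the record's data.
                 */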

                /*
                 * Calculate the data offset in the record and the number
                 * of bytes we can copy.
                 *
                 * Note there is a degenerate case here where boff may
                 * already be at bp->b_bufsize.
                 */
                roff = -n;
                rec_offset += roff;
                n = cursor.leaf->data_len - roff;
                KKASSERT(n > 0);
                if (n > bp->b_bufsize - boff)
                        n = bp->b_bufsize - boff;

                /*
                 * If we cached a truncation point on our front-end the
                 * on-disk version may still have physical records beyond
                 * that point.  Truncate visibility.
                 */
                if (ip->trunc_off <= rec_offset)
                        n = 0;
                else if (ip->trunc_off < rec_offset + n)
                        n = (int)(ip->trunc_off - rec_offset);
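                /*
                 * Example (added for clarity, not in the original): if the
                 * front-end cached trunc_off = 8192 and this record covers
                 * offsets 4096..20479, only n = 8192 - 4096 = 4096 bytes
                 * remain visible; a record lying entirely at or beyond the
                 * truncation point contributes nothing (n = 0).
                 */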

                /*
                 * Copy
                 */
                if (n) {
                        bcopy((char *)cursor.data + roff,
                              (char *)bp->b_data + boff, n);
                        boff += n;
                }
                if (boff == bp->b_bufsize)
                        break;
                error = hammer_ip_next(&cursor);
        }
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);

        /*
         * There may have been a gap after the last record
         */
        if (error == ENOENT)
                error = 0;
        if (error == 0 && boff != bp->b_bufsize) {
                KKASSERT(boff < bp->b_bufsize);
                bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
                /* boff = bp->b_bufsize; */
        }
        bp->b_resid = 0;
        bp->b_error = error;
        if (error)
                bp->b_flags |= B_ERROR;
        biodone(ap->a_bio);
        return(error);
}

/*
 * Write to a regular file.  Because this is a strategy call the OS is
 * trying to actually sync data to the media.  HAMMER can only flush
 * the entire inode (so the TID remains properly synchronized).
 *
 * Basically all we do here is place the bio on the inode's flush queue
 * and activate the flusher.
 */
static
int
hammer_vop_strategy_write(struct vop_strategy_args *ap)
{
        hammer_inode_t ip;
        struct bio *bio;
        struct buf *bp;

        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = ap->a_vp->v_data;

        if (ip->flags & HAMMER_INODE_RO) {
                bp->b_error = EROFS;
                bp->b_flags |= B_ERROR;
                biodone(ap->a_bio);
                return(EROFS);
        }

        /*
         * Interlock with inode destruction (no in-kernel or directory
         * topology visibility).  If we queue new IO while trying to
         * destroy the inode we can deadlock the vtrunc call in
         * hammer_inode_unloadable_check().
         */
        if (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
                bp->b_resid = 0;
                biodone(ap->a_bio);
                return(0);
        }

        /*
         * If the inode is being flushed we cannot re-queue buffers that it
         * may have already flushed, or we could wind up with duplicate
         * records in the database.
         */
        BUF_KERNPROC(bp);
        if (ip->flags & HAMMER_INODE_WRITE_ALT)
                TAILQ_INSERT_TAIL(&ip->bio_alt_list, bio, bio_act);
        else
                TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
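        /*
         * Descriptive note (added; an assumption based on the comment
         * above): while a flush is in progress (HAMMER_INODE_WRITE_ALT),
         * newly dirtied buffers are staged on bio_alt_list so they are not
         * mixed into the flush already underway; a subsequent flush cycle
         * is expected to pick them up.
         */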
        ++hammer_bio_count;
        hammer_modify_inode(NULL, ip, HAMMER_INODE_BUFS);

        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
#if 0
        /*
         * XXX
         *
         * If the write was not part of an integrated flush operation then
         * signal a flush.
         */
        if (ip->flush_state != HAMMER_FST_FLUSH ||
            (ip->flags & HAMMER_INODE_WRITE_ALT)) {
                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        }
#endif
        return(0);
}

/*
 * Backend code which actually performs the write to the media.  This
 * routine is typically called from the flusher.  The bio will be disposed
 * of (biodone'd) by this routine.
 *
 * Iterate the related records and mark them for deletion.  If existing edge
 * records (left and right side) overlap our write they have to be marked
 * deleted and new records created, usually referencing a portion of the
 * original data.  Then add a record to represent the buffer.
 */
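/*
 * Illustrative example (added; an instantiation of the description above,
 * not taken from the original source): if an existing record covers file
 * offsets 0..32767 and the buffer being written covers 16384..32767, the
 * old record is marked deleted and a replacement covering only 0..16383 is
 * created, referencing the front portion of the original data, before the
 * new buffer's record is added.
 */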
int
hammer_dowrite(hammer_cursor_t cursor, hammer_inode_t ip, struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        int error;

        KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

        /*
         * If the inode is going or gone, just throw away any frontend
         * buffers.
         */
        if (ip->flags & HAMMER_INODE_DELETED) {
                bp->b_resid = 0;
                biodone(bio);
                --hammer_bio_count;
                return(0);
        }

        /*
         * Delete any records overlapping our range.  This function will
         * (eventually) properly truncate partial overlaps.
         */
        if (ip->sync_ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                error = hammer_ip_delete_range(cursor, ip, bio->bio_offset,
                                               bio->bio_offset);
        } else {
                error = hammer_ip_delete_range(cursor, ip, bio->bio_offset,
                                               bio->bio_offset +
                                               bp->b_bufsize - 1);
        }
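        /*
         * Descriptive note (added): the deletion range is expressed in
         * file offsets; the regular-file case passes bio_offset through
         * bio_offset + b_bufsize - 1, which suggests an inclusive end,
         * while a DB-file record is keyed by a single offset.
         */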

        /*
         * Add a single record to cover the write.  We can write a record
         * with only the actual file data - for example, a small 200 byte
         * file does not have to write out a 16K record.
         *
         * While the data size does not have to be aligned, we still do it
         * to reduce fragmentation in a future allocation model.
         */
        if (error == 0) {
                int limit_size;

                if (ip->sync_ino_data.size - bio->bio_offset >
                    bp->b_bufsize) {
                        limit_size = bp->b_bufsize;
                } else {
                        limit_size = (int)(ip->sync_ino_data.size -
                                           bio->bio_offset);
                        KKASSERT(limit_size >= 0);
                        limit_size = (limit_size + 63) & ~63;
                }
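                /*
                 * Example (added for clarity): the expression
                 * (limit_size + 63) & ~63 rounds up to the next multiple
                 * of 64, so the 200 byte file mentioned above would write
                 * a 256 byte data record rather than a full 16K buffer.
                 */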
                if (limit_size) {
                        error = hammer_ip_sync_data(cursor, ip, bio->bio_offset,
                                                    bp->b_data, limit_size);
                }
        }
        if (error)
                Debugger("hammer_dowrite: error");

        if (error) {
                bp->b_resid = bp->b_bufsize;
                bp->b_error = error;
                bp->b_flags |= B_ERROR;
        } else {
                bp->b_resid = 0;
        }
        biodone(bio);
        --hammer_bio_count;
        return(error);
}

/*
 * dounlink - disconnect a directory entry
 *
 * XXX whiteout support not really in yet
 */
static int
hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
                struct vnode *dvp, struct ucred *cred, int flags)
{
        struct namecache *ncp;
        hammer_inode_t dip;
        hammer_inode_t ip;
        struct hammer_cursor cursor;
        int64_t namekey;
        int nlen, error;

        /*
         * Calculate the namekey and set up the key range for the scan.
         * This works kinda like a chained hash table where the lower
         * 32 bits of the namekey synthesize the chain.
         *
         * The key range is inclusive of both key_beg and key_end.
         */
        dip = VTOI(dvp);
        ncp = nch->ncp;

        if (dip->flags & HAMMER_INODE_RO)
                return (EROFS);

        namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
retry:
        hammer_init_cursor(trans, &cursor, &dip->cache[0], dip);
        cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = dip->obj_id;
        cursor.key_beg.key = namekey;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
        cursor.key_beg.obj_type = 0;

        cursor.key_end = cursor.key_beg;
        cursor.key_end.key |= 0xFFFFFFFFULL;
        cursor.asof = dip->obj_asof;
        cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
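        /*
         * Descriptive note (added, based on the comment above):
         * hammer_directory_namekey() derives the upper bits of the 64 bit
         * key from a hash of the name, so every entry whose name hashes
         * identically shares those bits.  ORing 0xFFFFFFFF into key_end
         * makes the scan cover the whole collision chain encoded in the
         * low 32 bits; the loop below then compares the actual names to
         * pick the right entry.
         */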

        /*
         * Scan all matching records (the chain) and locate the one
         * matching the requested path component.  The error code on
         * search termination could be 0, ENOENT, or something else.
         *
         * The hammer_ip_*() functions merge in-memory records with on-disk
         * records for the purposes of the search.
         */
        error = hammer_ip_first(&cursor);

        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
                KKASSERT(nlen > 0);
                if (ncp->nc_nlen == nlen &&
                    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
                        break;
                }
                error = hammer_ip_next(&cursor);
        }

        /*
         * If all is ok we have to get the inode so we can adjust nlinks.
         *
         * If the target is a directory, it must be empty.
         */
        if (error == 0) {
                ip = hammer_get_inode(trans, &dip->cache[1],
                                      cursor.data->entry.obj_id,
                                      dip->hmp->asof, 0, &error);
                if (error == ENOENT) {
                        kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
                        Debugger("ENOENT unlinking object that should exist");
                }

                /*
                 * If we are trying to remove a directory the directory must
                 * be empty.
                 *
                 * WARNING: hammer_ip_check_directory_empty() may have to
                 * terminate the cursor to avoid a deadlock.  It is ok to
                 * call hammer_done_cursor() twice.
                 */
                if (error == 0 && ip->ino_data.obj_type ==
                                  HAMMER_OBJTYPE_DIRECTORY) {
                        error = hammer_ip_check_directory_empty(trans, ip);
                }

                /*
                 * Delete the directory entry.
                 *
                 * WARNING: hammer_ip_del_directory() may have to terminate
                 * the cursor to avoid a deadlock.  It is ok to call
                 * hammer_done_cursor() twice.
                 */
                if (error == 0) {
                        error = hammer_ip_del_directory(trans, &cursor,
                                                        dip, ip);
                }
                if (error == 0) {
                        cache_setunresolved(nch);
                        cache_setvp(nch, NULL);
                        /* XXX locking */
                        if (ip->vp)
                                cache_inval_vp(ip->vp, CINV_DESTROY);
                }
                hammer_rel_inode(ip, 0);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
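
        /*
         * Descriptive note (added): a cursor operation that had to back
         * out of a deadlock returns EDEADLK after unwinding its locks; the
         * whole lookup/delete sequence is then restarted from the retry
         * label with a freshly initialized cursor.
         */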

        return (error);
}

/************************************************************************
 *                        FIFO AND SPECFS OPS                           *
 ************************************************************************
 *
 */
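
/*
 * Descriptive note (added): the wrappers below simply forward their
 * arguments to the generic fifo and specfs vnode operation vectors via
 * VOCALL(); HAMMER hooks them only so it can (eventually) update the
 * relevant inode times, as the XXX comments indicate.
 */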

static int
hammer_vop_fifoclose (struct vop_close_args *ap)
{
        /* XXX update itimes */
        return (VOCALL(&fifo_vnode_vops, &ap->a_head));
}

static int
hammer_vop_fiforead (struct vop_read_args *ap)
{
        int error;

        error = VOCALL(&fifo_vnode_vops, &ap->a_head);
        /* XXX update access time */
        return (error);
}

static int
hammer_vop_fifowrite (struct vop_write_args *ap)
{
        int error;

        error = VOCALL(&fifo_vnode_vops, &ap->a_head);
        /* XXX update access time */
        return (error);
}

static int
hammer_vop_specclose (struct vop_close_args *ap)
{
        /* XXX update itimes */
        return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specread (struct vop_read_args *ap)
{
        /* XXX update access time */
        return (VOCALL(&spec_vnode_vops, &ap->a_head));
}

static int
hammer_vop_specwrite (struct vop_write_args *ap)
{
        /* XXX update last change time */
        return (VOCALL(&spec_vnode_vops, &ap->a_head));
}