HAMMER Utilities: Critical bug in newfs_hammer
[dragonfly.git] / sys / vfs / hammer / hammer_vnops.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.59 2008/06/07 07:41:51 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namecache.h>
#include <sys/vnode.h>
#include <sys/lockf.h>
#include <sys/event.h>
#include <sys/stat.h>
#include <sys/dirent.h>
#include <vm/vm_extern.h>
#include <vfs/fifofs/fifo.h>
#include "hammer.h"

/*
 * USERFS VNOPS
 */
/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
static int hammer_vop_fsync(struct vop_fsync_args *);
static int hammer_vop_read(struct vop_read_args *);
static int hammer_vop_write(struct vop_write_args *);
static int hammer_vop_access(struct vop_access_args *);
static int hammer_vop_advlock(struct vop_advlock_args *);
static int hammer_vop_close(struct vop_close_args *);
static int hammer_vop_ncreate(struct vop_ncreate_args *);
static int hammer_vop_getattr(struct vop_getattr_args *);
static int hammer_vop_nresolve(struct vop_nresolve_args *);
static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int hammer_vop_nlink(struct vop_nlink_args *);
static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
static int hammer_vop_nmknod(struct vop_nmknod_args *);
static int hammer_vop_open(struct vop_open_args *);
static int hammer_vop_pathconf(struct vop_pathconf_args *);
static int hammer_vop_print(struct vop_print_args *);
static int hammer_vop_readdir(struct vop_readdir_args *);
static int hammer_vop_readlink(struct vop_readlink_args *);
static int hammer_vop_nremove(struct vop_nremove_args *);
static int hammer_vop_nrename(struct vop_nrename_args *);
static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
static int hammer_vop_setattr(struct vop_setattr_args *);
static int hammer_vop_strategy(struct vop_strategy_args *);
static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
static int hammer_vop_ioctl(struct vop_ioctl_args *);
static int hammer_vop_mountctl(struct vop_mountctl_args *);

static int hammer_vop_fifoclose (struct vop_close_args *);
static int hammer_vop_fiforead (struct vop_read_args *);
static int hammer_vop_fifowrite (struct vop_write_args *);

static int hammer_vop_specclose (struct vop_close_args *);
static int hammer_vop_specread (struct vop_read_args *);
static int hammer_vop_specwrite (struct vop_write_args *);

struct vop_ops hammer_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer_vop_fsync,
	.vop_getpages = vop_stdgetpages,
	.vop_putpages = vop_stdputpages,
	.vop_read = hammer_vop_read,
	.vop_write = hammer_vop_write,
	.vop_access = hammer_vop_access,
	.vop_advlock = hammer_vop_advlock,
	.vop_close = hammer_vop_close,
	.vop_ncreate = hammer_vop_ncreate,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_nresolve = hammer_vop_nresolve,
	.vop_nlookupdotdot = hammer_vop_nlookupdotdot,
	.vop_nlink = hammer_vop_nlink,
	.vop_nmkdir = hammer_vop_nmkdir,
	.vop_nmknod = hammer_vop_nmknod,
	.vop_open = hammer_vop_open,
	.vop_pathconf = hammer_vop_pathconf,
	.vop_print = hammer_vop_print,
	.vop_readdir = hammer_vop_readdir,
	.vop_readlink = hammer_vop_readlink,
	.vop_nremove = hammer_vop_nremove,
	.vop_nrename = hammer_vop_nrename,
	.vop_nrmdir = hammer_vop_nrmdir,
	.vop_setattr = hammer_vop_setattr,
	.vop_strategy = hammer_vop_strategy,
	.vop_nsymlink = hammer_vop_nsymlink,
	.vop_nwhiteout = hammer_vop_nwhiteout,
	.vop_ioctl = hammer_vop_ioctl,
	.vop_mountctl = hammer_vop_mountctl
};

struct vop_ops hammer_spec_vops = {
	.vop_default = spec_vnoperate,
	.vop_fsync = hammer_vop_fsync,
	.vop_read = hammer_vop_specread,
	.vop_write = hammer_vop_specwrite,
	.vop_access = hammer_vop_access,
	.vop_close = hammer_vop_specclose,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_setattr = hammer_vop_setattr
};

struct vop_ops hammer_fifo_vops = {
	.vop_default = fifo_vnoperate,
	.vop_fsync = hammer_vop_fsync,
	.vop_read = hammer_vop_fiforead,
	.vop_write = hammer_vop_fifowrite,
	.vop_access = hammer_vop_access,
	.vop_close = hammer_vop_fifoclose,
	.vop_getattr = hammer_vop_getattr,
	.vop_inactive = hammer_vop_inactive,
	.vop_reclaim = hammer_vop_reclaim,
	.vop_setattr = hammer_vop_setattr
};

static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
			   struct vnode *dvp, struct ucred *cred, int flags);
static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
static int hammer_vop_strategy_write(struct vop_strategy_args *ap);

#if 0
static
int
hammer_vop_vnoperate(struct vop_generic_args *)
{
	return (VOCALL(&hammer_vnode_vops, ap));
}
#endif

/*
 * hammer_vop_fsync { vp, waitfor }
 */
static
int
hammer_vop_fsync(struct vop_fsync_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);

	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
	if (ap->a_waitfor == MNT_WAIT)
		hammer_wait_inode(ip);
	return (ip->error);
}

/*
 * hammer_vop_read { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_read(struct vop_read_args *ap)
{
	struct hammer_transaction trans;
	hammer_inode_t ip;
	off_t offset;
	struct buf *bp;
	struct uio *uio;
	int error;
	int n;
	int seqcount;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;
	seqcount = ap->a_ioflag >> 16;

	hammer_start_transaction(&trans, ip->hmp);

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	uio = ap->a_uio;
	while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
		offset = uio->uio_offset & HAMMER_BUFMASK;
#if 0
		error = cluster_read(ap->a_vp, ip->ino_data.size,
				     uio->uio_offset - offset, HAMMER_BUFSIZE,
				     MAXBSIZE, seqcount, &bp);
#endif
		error = bread(ap->a_vp, uio->uio_offset - offset,
			      HAMMER_BUFSIZE, &bp);
		if (error) {
			brelse(bp);
			break;
		}
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		n = HAMMER_BUFSIZE - offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > ip->ino_data.size - uio->uio_offset)
			n = (int)(ip->ino_data.size - uio->uio_offset);
		error = uiomove((char *)bp->b_data + offset, n, uio);
		if (error) {
			bqrelse(bp);
			break;
		}
		bqrelse(bp);
	}
	if ((ip->flags & HAMMER_INODE_RO) == 0 &&
	    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
		ip->ino_leaf.atime = trans.time;
		hammer_modify_inode(ip, HAMMER_INODE_ITIMES);
	}
	hammer_done_transaction(&trans);
	return (error);
}

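/*
 * Illustrative note on the read loop above (numbers are an example only,
 * assuming a 16KB HAMMER_BUFSIZE): a read at uio_offset 20000 computes
 * offset = 20000 & HAMMER_BUFMASK = 3616, breads the buffer starting at
 * file offset 16384, and copies at most n = HAMMER_BUFSIZE - offset bytes,
 * further clamped by uio_resid and by the file size in ip->ino_data.size.
 */
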
/*
 * hammer_vop_write { vp, uio, ioflag, cred }
 */
static
int
hammer_vop_write(struct vop_write_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	struct uio *uio;
	int rel_offset;
	off_t base_offset;
	struct buf *bp;
	int error;
	int n;
	int flags;
	int count;

	if (ap->a_vp->v_type != VREG)
		return (EINVAL);
	ip = VTOI(ap->a_vp);
	error = 0;

	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, ip->hmp);
	uio = ap->a_uio;

	/*
	 * Check append mode
	 */
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = ip->ino_data.size;

	/*
	 * Check for illegal write offsets.  Valid range is 0...2^63-1
	 */
	if (uio->uio_offset < 0 || uio->uio_offset + uio->uio_resid <= 0) {
		hammer_done_transaction(&trans);
		return (EFBIG);
	}

	/*
	 * Access the data in HAMMER_BUFSIZE blocks via the buffer cache.
	 */
	count = 0;
	while (uio->uio_resid > 0) {
		int fixsize = 0;

		if ((error = hammer_checkspace(trans.hmp)) != 0)
			break;

		/*
		 * Do not allow HAMMER to blow out the buffer cache.
		 *
		 * Do not allow HAMMER to blow out system memory by
		 * accumulating too many records.  Records are decoupled
		 * from the buffer cache.
		 *
		 * Always check at the beginning so separate writes are
		 * not able to bypass this code.
		 */
		if ((count++ & 15) == 0) {
			vn_unlock(ap->a_vp);
			if ((ap->a_ioflag & IO_NOBWILL) == 0)
				bwillwrite();
			if (ip->rsv_recs > hammer_limit_irecs) {
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
				hammer_wait_inode(ip);
			}
			vn_lock(ap->a_vp, LK_EXCLUSIVE|LK_RETRY);
		}

		rel_offset = (int)(uio->uio_offset & HAMMER_BUFMASK);
		base_offset = uio->uio_offset & ~HAMMER_BUFMASK64;
		n = HAMMER_BUFSIZE - rel_offset;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (uio->uio_offset + n > ip->ino_data.size) {
			vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
			fixsize = 1;
		}

		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ap->a_vp, base_offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ap->a_vp, base_offset,
					      HAMMER_BUFSIZE, &bp);
			}
		} else if (rel_offset == 0 && uio->uio_resid >= HAMMER_BUFSIZE) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ap->a_vp, base_offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else if (base_offset >= ip->ino_data.size) {
			/*
			 * If the base offset of the buffer is beyond the
			 * file EOF, we don't have to issue a read.
			 */
			bp = getblk(ap->a_vp, base_offset,
				    HAMMER_BUFSIZE, GETBLK_BHEAVY, 0);
			vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 */
			error = bread(ap->a_vp, base_offset,
				      HAMMER_BUFSIZE, &bp);
			if (error == 0)
				bheavy(bp);
		}
		if (error == 0) {
			error = uiomove((char *)bp->b_data + rel_offset,
					n, uio);
		}

		/*
		 * If we screwed up we have to undo any VM size changes we
		 * made.
		 */
		if (error) {
			brelse(bp);
			if (fixsize) {
				vtruncbuf(ap->a_vp, ip->ino_data.size,
					  HAMMER_BUFSIZE);
			}
			break;
		}
		/* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
		if (ip->ino_data.size < uio->uio_offset) {
			ip->ino_data.size = uio->uio_offset;
			flags = HAMMER_INODE_DDIRTY;
			vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
		} else {
			flags = 0;
		}
		ip->ino_data.mtime = trans.time;
		flags |= HAMMER_INODE_ITIMES | HAMMER_INODE_BUFS;
		flags |= HAMMER_INODE_DDIRTY;	/* XXX mtime */
		hammer_modify_inode(ip, flags);

		if ((bp->b_flags & B_DIRTY) == 0) {
			++ip->rsv_databufs;
			++ip->hmp->rsv_databufs;
		}

		/*
		 * Final buffer disposition.
		 */
		if (ap->a_ioflag & IO_SYNC) {
			bwrite(bp);
		} else if (ap->a_ioflag & IO_DIRECT) {
			bawrite(bp);
#if 1
		} else if ((ap->a_ioflag >> 16) == IO_SEQMAX &&
			   (uio->uio_offset & HAMMER_BUFMASK) == 0) {
			/*
			 * If seqcount indicates sequential operation and
			 * we just finished filling a buffer, push it out
			 * now to prevent the buffer cache from becoming
			 * too full, which would trigger non-optimal
			 * flushes.
			 */
			bp->b_flags |= B_NOCACHE;
			bawrite(bp);
#endif
		} else {
			bdwrite(bp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

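/*
 * Summary of the buffer acquisition cases in the write loop above (this
 * restates the code, it adds no new behavior): UIO_NOCOPY writes getblk()
 * the buffer and only bread() it if it was not cached (vop_stdputpages()
 * rewriting existing data); a full-buffer overwrite getblk()s and clears
 * the buffer to avoid mmap/write visibility problems; a write entirely
 * beyond EOF getblk()s and clears without reading; a partial overwrite
 * bread()s to pick up the missing bits before uiomove() copies the data.
 */
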
/*
 * hammer_vop_access { vp, mode, cred }
 */
static
int
hammer_vop_access(struct vop_access_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	uid = hammer_to_unix_xid(&ip->ino_data.uid);
	gid = hammer_to_unix_xid(&ip->ino_data.gid);

	error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
				  ip->ino_data.uflags);
	return (error);
}

/*
 * hammer_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer_vop_advlock(struct vop_advlock_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
}

/*
 * hammer_vop_close { vp, fflag }
 */
static
int
hammer_vop_close(struct vop_close_args *ap)
{
	return (vop_stdclose(ap));
}

/*
 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_ncreate(struct vop_ncreate_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced and shared-locked to prevent
	 * it from being moved to the flusher.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hkprintf("hammer_create_inode error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error)
		hkprintf("hammer_ip_add_directory error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_done_transaction(&trans);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	return (error);
}

/*
 * hammer_vop_getattr { vp, vap }
 *
 * Retrieve an inode's attribute information.  When accessing inodes
 * historically we fake the atime field to ensure consistent results.
 * The atime field is stored in the B-Tree element and allowed to be
 * updated without cycling the element.
 */
static
int
hammer_vop_getattr(struct vop_getattr_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	struct vattr *vap = ap->a_vap;

#if 0
	if (cache_check_fsmid_vp(ap->a_vp, &ip->fsmid) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0 &&
	    ip->obj_asof == XXX
	) {
		/* LAZYMOD XXX */
	}
	hammer_itimes(ap->a_vp);
#endif

	vap->va_fsid = ip->hmp->fsid_udev;
	vap->va_fileid = ip->ino_leaf.base.obj_id;
	vap->va_mode = ip->ino_data.mode;
	vap->va_nlink = ip->ino_data.nlinks;
	vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
	vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->ino_data.size;
	if (ip->flags & HAMMER_INODE_RO)
		hammer_to_timespec(ip->ino_data.mtime, &vap->va_atime);
	else
		hammer_to_timespec(ip->ino_leaf.atime, &vap->va_atime);
	hammer_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
	hammer_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
	vap->va_flags = ip->ino_data.uflags;
	vap->va_gen = 1;	/* hammer inums are unique for all time */
	vap->va_blocksize = HAMMER_BUFSIZE;
	vap->va_bytes = (ip->ino_data.size + 63) & ~63;
	vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
	vap->va_filerev = 0;	/* XXX */
	/* mtime uniquely identifies any adjustments made to the file */
	vap->va_fsmid = ip->ino_data.mtime;
	vap->va_uid_uuid = ip->ino_data.uid;
	vap->va_gid_uuid = ip->ino_data.gid;
	vap->va_fsid_uuid = ip->hmp->fsid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	switch (ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		vap->va_rmajor = ip->ino_data.rmajor;
		vap->va_rminor = ip->ino_data.rminor;
		break;
	default:
		break;
	}

	return(0);
}

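/*
 * Note on the atime handling above: read-only inodes (historical / as-of
 * accesses) report mtime as atime so repeated snapshot reads return
 * consistent results, while normal inodes report the B-Tree resident
 * ino_leaf.atime, which may be updated without cycling the element.
 */
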
/*
 * hammer_vop_nresolve { nch, dvp, cred }
 *
 * Locate the requested directory entry.
 */
static
int
hammer_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *ncp;
	hammer_inode_t dip;
	hammer_inode_t ip;
	hammer_tid_t asof;
	struct hammer_cursor cursor;
	struct vnode *vp;
	int64_t namekey;
	int error;
	int i;
	int nlen;
	int flags;
	u_int64_t obj_id;

	/*
	 * Misc initialization, plus handle as-of name extensions.  Look for
	 * the '@@' extension.  Note that as-of files and directories cannot
	 * be modified.
	 */
	dip = VTOI(ap->a_dvp);
	ncp = ap->a_nch->ncp;
	asof = dip->obj_asof;
	nlen = ncp->nc_nlen;
	flags = dip->flags;

	hammer_simple_transaction(&trans, dip->hmp);

	for (i = 0; i < nlen; ++i) {
		if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
			asof = hammer_str_to_tid(ncp->nc_name + i + 2);
			flags |= HAMMER_INODE_RO;
			break;
		}
	}
	nlen = i;

	/*
	 * If there is no path component the time extension is relative to
	 * dip.
	 */
	if (nlen == 0) {
		ip = hammer_get_inode(&trans, &dip->cache[1], dip->obj_id,
				      asof, flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
		goto done;
	}

	/*
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(ncp->nc_name, nlen);

	error = hammer_init_cursor(&trans, &cursor, &dip->cache[0], dip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = dip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	obj_id = 0;

	if (error == 0) {
		error = hammer_ip_first(&cursor);
		while (error == 0) {
			error = hammer_ip_resolve_data(&cursor);
			if (error)
				break;
			if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
			    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
				obj_id = cursor.data->entry.obj_id;
				break;
			}
			error = hammer_ip_next(&cursor);
		}
	}
	hammer_done_cursor(&cursor);
	if (error == 0) {
		ip = hammer_get_inode(&trans, &dip->cache[1],
				      obj_id, asof, flags, &error);
		if (error == 0) {
			error = hammer_get_vnode(ip, &vp);
			hammer_rel_inode(ip, 0);
		} else {
			vp = NULL;
		}
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			vrele(vp);
		}
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);
	}
done:
	hammer_done_transaction(&trans);
	return (error);
}

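/*
 * Illustrative example for the as-of handling above (the transaction id
 * shown is hypothetical): a path component such as "foo@@0x00000001061a8ba0"
 * is split at the "@@", the trailing portion is parsed by
 * hammer_str_to_tid(), and the lookup proceeds with that asof while the
 * resulting inode is forced read-only via HAMMER_INODE_RO.
 */
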
/*
 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
 *
 * Locate the parent directory of a directory vnode.
 *
 * dvp is referenced but not locked.  *vpp must be returned referenced and
 * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
 * at the root, instead it could indicate that the directory we were in was
 * removed.
 *
 * NOTE: as-of sequences are not linked into the directory structure.  If
 * we are at the root with a different asof than the mount point, reload
 * the same directory with the mount point's asof.  I'm not sure what this
 * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
 * get confused, but it hasn't been tested.
 */
static
int
hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	int64_t parent_obj_id;
	hammer_tid_t asof;
	int error;

	dip = VTOI(ap->a_dvp);
	asof = dip->obj_asof;
	parent_obj_id = dip->ino_data.parent_obj_id;

	if (parent_obj_id == 0) {
		if (dip->obj_id == HAMMER_OBJID_ROOT &&
		    asof != dip->hmp->asof) {
			parent_obj_id = dip->obj_id;
			asof = dip->hmp->asof;
			*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
			ksnprintf(*ap->a_fakename, 19, "0x%016llx",
				  dip->obj_asof);
		} else {
			*ap->a_vpp = NULL;
			return ENOENT;
		}
	}

	hammer_simple_transaction(&trans, dip->hmp);

	ip = hammer_get_inode(&trans, &dip->cache[1], parent_obj_id,
			      asof, dip->flags, &error);
	if (ip) {
		error = hammer_get_vnode(ip, ap->a_vpp);
		hammer_rel_inode(ip, 0);
	} else {
		*ap->a_vpp = NULL;
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nlink { nch, dvp, vp, cred }
 */
static
int
hammer_vop_nlink(struct vop_nlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *ip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);
	ip = VTOI(ap->a_vp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Add the filesystem object to the directory.  Note that neither
	 * dip nor ip are referenced or locked, but their vnodes are
	 * referenced.  This function will bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, ip);

	/*
	 * Finish up.
	 */
	if (error == 0) {
		cache_setunresolved(nch);
		cache_setvp(nch, ap->a_vp);
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hkprintf("hammer_mkdir error %d\n", error);
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}
	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
	if (error)
		hkprintf("hammer_mkdir (add) error %d\n", error);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer_vop_nmknod(struct vop_nmknod_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	int error;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add the new filesystem object to the directory.  This will also
	 * bump the inode's link count.
	 */
	error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_open { vp, mode, cred, fp }
 */
static
int
hammer_vop_open(struct vop_open_args *ap)
{
	if ((ap->a_mode & FWRITE) && (VTOI(ap->a_vp)->flags & HAMMER_INODE_RO))
		return (EROFS);

	return(vop_stdopen(ap));
}

/*
 * hammer_vop_pathconf { vp, name, retval }
 */
static
int
hammer_vop_pathconf(struct vop_pathconf_args *ap)
{
	return EOPNOTSUPP;
}

/*
 * hammer_vop_print { vp }
 */
static
int
hammer_vop_print(struct vop_print_args *ap)
{
	return EOPNOTSUPP;
}

/*
 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
 */
static
int
hammer_vop_readdir(struct vop_readdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	struct uio *uio;
	hammer_base_elm_t base;
	int error;
	int cookie_index;
	int ncookies;
	off_t *cookies;
	off_t saveoff;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	hammer_simple_transaction(&trans, ip->hmp);

	/*
	 * Handle artificial entries
	 */
	error = 0;
	if (saveoff == 0) {
		r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}
	if (saveoff == 1) {
		if (ip->ino_data.parent_obj_id) {
			r = vop_write_dirent(&error, uio,
					     ip->ino_data.parent_obj_id,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, uio,
					     ip->obj_id, DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = saveoff;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);

	while (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error)
			break;
		base = &cursor.leaf->base;
		saveoff = base->key;
		KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);

		if (base->obj_id != ip->obj_id)
			panic("readdir: bad record at %p", cursor.node);

		r = vop_write_dirent(
			     &error, uio, cursor.data->entry.obj_id,
			     hammer_get_dtype(cursor.leaf->base.obj_type),
			     cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
			     (void *)cursor.data->entry.name);
		if (r)
			break;
		++saveoff;
		if (cookies)
			cookies[cookie_index] = base->key;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
		error = hammer_ip_next(&cursor);
	}
	hammer_done_cursor(&cursor);

done:
	hammer_done_transaction(&trans);

	if (ap->a_eofflag)
		*ap->a_eofflag = (error == ENOENT);
	uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (error == ENOENT)
			error = 0;
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return(error);
}

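/*
 * Note on the readdir cookies above: offsets 0 and 1 are reserved for the
 * artificial "." and ".." entries; real entries use the directory record's
 * 64 bit B-Tree key as the seek position, so saveoff and the cookies are
 * simply base->key values that can be handed back later via uio_offset to
 * resume the scan.
 */
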
/*
 * hammer_vop_readlink { vp, uio, cred }
 */
static
int
hammer_vop_readlink(struct vop_readlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;
	int error;

	ip = VTOI(ap->a_vp);

	/*
	 * Shortcut if the symlink data was stuffed into ino_data.
	 */
	if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
		error = uiomove(ip->ino_data.ext.symlink,
				ip->ino_data.size, ap->a_uio);
		return(error);
	}

	/*
	 * Long version
	 */
	hammer_simple_transaction(&trans, ip->hmp);
	hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);

	/*
	 * Key range (begin and end inclusive) to scan.  Directory keys
	 * directly translate to a 64 bit 'seek' position.
	 */
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;	/* XXX */
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0) {
		error = hammer_ip_resolve_data(&cursor);
		if (error == 0) {
			KKASSERT(cursor.leaf->data_len >=
				 HAMMER_SYMLINK_NAME_OFF);
			error = uiomove(cursor.data->symlink.name,
					cursor.leaf->data_len -
					HAMMER_SYMLINK_NAME_OFF,
					ap->a_uio);
		}
	}
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

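/*
 * Note on the two symlink paths above: targets short enough to fit in
 * ino_data.ext.symlink (HAMMER_INODE_BASESYMLEN bytes) are returned
 * directly from the inode without a transaction; longer targets require
 * a cursor lookup of the HAMMER_RECTYPE_FIX record keyed by
 * HAMMER_FIXKEY_SYMLINK, matching how hammer_vop_nsymlink() stores them.
 */
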
/*
 * hammer_vop_nremove { nch, dvp, cred }
 */
static
int
hammer_vop_nremove(struct vop_nremove_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer_vop_nrename(struct vop_nrename_args *ap)
{
	struct hammer_transaction trans;
	struct namecache *fncp;
	struct namecache *tncp;
	struct hammer_inode *fdip;
	struct hammer_inode *tdip;
	struct hammer_inode *ip;
	struct hammer_cursor cursor;
	int64_t namekey;
	int nlen, error;

	fdip = VTOI(ap->a_fdvp);
	tdip = VTOI(ap->a_tdvp);
	fncp = ap->a_fnch->ncp;
	tncp = ap->a_tnch->ncp;
	ip = VTOI(fncp->nc_vp);
	KKASSERT(ip != NULL);

	if (fdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (tdip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(fdip->hmp)) != 0)
		return (error);

	hammer_start_transaction(&trans, fdip->hmp);

	/*
	 * Remove tncp from the target directory and then link ip as
	 * tncp.  XXX pass trans to dounlink
	 *
	 * Force the inode sync-time to match the transaction so it is
	 * in-sync with the creation of the target directory entry.
	 */
	error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
	if (error == 0 || error == ENOENT) {
		error = hammer_ip_add_directory(&trans, tdip, tncp, ip);
		if (error == 0) {
			ip->ino_data.parent_obj_id = tdip->obj_id;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error)
		goto failed; /* XXX */

	/*
	 * Locate the record in the originating directory and remove it.
	 *
	 * Calculate the namekey and setup the key range for the scan.  This
	 * works kinda like a chained hash table where the lower 32 bits
	 * of the namekey synthesize the chain.
	 *
	 * The key range is inclusive of both key_beg and key_end.
	 */
	namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
retry:
	hammer_init_cursor(&trans, &cursor, &fdip->cache[0], fdip);
	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = fdip->obj_id;
	cursor.key_beg.key = namekey;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
	cursor.key_beg.obj_type = 0;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key |= 0xFFFFFFFFULL;
	cursor.asof = fdip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	/*
	 * Scan all matching records (the chain), locate the one matching
	 * the requested path component.
	 *
	 * The hammer_ip_*() functions merge in-memory records with on-disk
	 * records for the purposes of the search.
	 */
	error = hammer_ip_first(&cursor);
	while (error == 0) {
		if (hammer_ip_resolve_data(&cursor) != 0)
			break;
		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
		KKASSERT(nlen > 0);
		if (fncp->nc_nlen == nlen &&
		    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
			break;
		}
		error = hammer_ip_next(&cursor);
	}

	/*
	 * If all is ok we have to get the inode so we can adjust nlinks.
	 *
	 * WARNING: hammer_ip_del_directory() may have to terminate the
	 * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
	 * twice.
	 */
	if (error == 0)
		error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);

	/*
	 * XXX A deadlock here will break rename's atomicity for the purposes
	 * of crash recovery.
	 */
	if (error == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * Cleanup and tell the kernel that the rename succeeded.
	 */
	hammer_done_cursor(&cursor);
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

failed:
	hammer_done_transaction(&trans);
	return (error);
}

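/*
 * Note on the rename sequence above: the target name is unlinked first
 * (ENOENT is tolerated), the inode is then linked under the new name, and
 * only afterwards is the old directory record located by namekey and
 * deleted.  An EDEADLK from the B-Tree cursor restarts the removal from
 * the "retry" label; as the XXX comment notes, a crash inside this window
 * can leave both names linked until recovery.
 */
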
/*
 * hammer_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_setattr { vp, vap, cred }
 */
static
int
hammer_vop_setattr(struct vop_setattr_args *ap)
{
	struct hammer_transaction trans;
	struct vattr *vap;
	struct hammer_inode *ip;
	int modflags;
	int error;
	int truncating;
	off_t aligned_size;
	u_int32_t flags;

	vap = ap->a_vap;
	ip = ap->a_vp->v_data;
	modflags = 0;

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return(EROFS);
	if (ip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if (hammer_nohistory(ip) == 0 &&
	    (error = hammer_checkspace(ip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, ip->hmp);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		flags = ip->ino_data.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					 hammer_to_unix_xid(&ip->ino_data.uid),
					 ap->a_cred);
		if (error == 0) {
			if (ip->ino_data.uflags != flags) {
				ip->ino_data.uflags = flags;
				modflags |= HAMMER_INODE_DDIRTY;
			}
			if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer_guid_to_uuid(&uuid_uid, cur_uid);
			hammer_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->ino_data.uid,
				 sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->ino_data.gid,
				 sizeof(uuid_gid)) ||
			    ip->ino_data.mode != cur_mode
			) {
				ip->ino_data.uid = uuid_uid;
				ip->ino_data.gid = uuid_gid;
				ip->ino_data.mode = cur_mode;
			}
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
	while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
		switch(ap->a_vp->v_type) {
		case VREG:
			if (vap->va_size == ip->ino_data.size)
				break;
			/*
			 * XXX break atomicy, we can deadlock the backend
			 * if we do not release the lock.  Probably not a
			 * big deal here.
			 */
			if (vap->va_size < ip->ino_data.size) {
				vtruncbuf(ap->a_vp, vap->va_size,
					  HAMMER_BUFSIZE);
				truncating = 1;
			} else {
				vnode_pager_setsize(ap->a_vp, vap->va_size);
				truncating = 0;
			}
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;
			aligned_size = (vap->va_size + HAMMER_BUFMASK) &
				       ~HAMMER_BUFMASK64;

			/*
			 * on-media truncation is cached in the inode until
			 * the inode is synchronized.
			 */
			if (truncating) {
				hammer_ip_frontend_trunc(ip, vap->va_size);
				if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
					ip->flags |= HAMMER_INODE_TRUNCATED;
					ip->trunc_off = vap->va_size;
				} else if (ip->trunc_off > vap->va_size) {
					ip->trunc_off = vap->va_size;
				}
			}

			/*
			 * If truncating we have to clean out a portion of
			 * the last block on-disk.  We do this in the
			 * front-end buffer cache.
			 */
			if (truncating && vap->va_size < aligned_size) {
				struct buf *bp;
				int offset;

				aligned_size -= HAMMER_BUFSIZE;

				offset = vap->va_size & HAMMER_BUFMASK;
				error = bread(ap->a_vp, aligned_size,
					      HAMMER_BUFSIZE, &bp);
				hammer_ip_frontend_trunc(ip, aligned_size);
				if (error == 0) {
					bzero(bp->b_data + offset,
					      HAMMER_BUFSIZE - offset);
					bdwrite(bp);
				} else {
					kprintf("ERROR %d\n", error);
					brelse(bp);
				}
			}
			break;
		case VDATABASE:
			if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
				ip->flags |= HAMMER_INODE_TRUNCATED;
				ip->trunc_off = vap->va_size;
			} else if (ip->trunc_off > vap->va_size) {
				ip->trunc_off = vap->va_size;
			}
			hammer_ip_frontend_trunc(ip, vap->va_size);
			ip->ino_data.size = vap->va_size;
			modflags |= HAMMER_INODE_DDIRTY;
			break;
		default:
			error = EINVAL;
			goto done;
		}
		break;
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		ip->ino_leaf.atime =
			hammer_timespec_to_transid(&vap->va_atime);
		modflags |= HAMMER_INODE_ITIMES;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		ip->ino_data.mtime =
			hammer_timespec_to_transid(&vap->va_mtime);
		modflags |= HAMMER_INODE_ITIMES;
		modflags |= HAMMER_INODE_DDIRTY;	/* XXX mtime */
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->ino_data.mode;
		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->ino_data.mode != cur_mode) {
			ip->ino_data.mode = cur_mode;
			modflags |= HAMMER_INODE_DDIRTY;
		}
	}
done:
	if (error == 0)
		hammer_modify_inode(ip, modflags);
	hammer_done_transaction(&trans);
	return (error);
}

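/*
 * Note on the truncation path above: shrinking a regular file only trims
 * the front-end buffer cache and records the new size in ip->trunc_off
 * with HAMMER_INODE_TRUNCATED set; the media-level truncation is deferred
 * until the inode is synchronized by the flusher.  The partial last
 * buffer is zeroed from the truncation point so stale data cannot leak
 * back in if the file grows again before the flush.
 */
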
/*
 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	struct hammer_inode *nip;
	struct nchandle *nch;
	hammer_record_t record;
	int error;
	int bytes;

	ap->a_vap->va_type = VLNK;

	nch = ap->a_nch;
	dip = VTOI(ap->a_dvp);

	if (dip->flags & HAMMER_INODE_RO)
		return (EROFS);
	if ((error = hammer_checkspace(dip->hmp)) != 0)
		return (error);

	/*
	 * Create a transaction to cover the operations we perform.
	 */
	hammer_start_transaction(&trans, dip->hmp);

	/*
	 * Create a new filesystem object of the requested type.  The
	 * returned inode will be referenced but not locked.
	 */
	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred, dip, &nip);
	if (error) {
		hammer_done_transaction(&trans);
		*ap->a_vpp = NULL;
		return (error);
	}

	/*
	 * Add a record representing the symlink.  symlink stores the link
	 * as pure data, not a string, and is not \0-terminated.
	 */
	if (error == 0) {
		bytes = strlen(ap->a_target);

		if (bytes <= HAMMER_INODE_BASESYMLEN) {
			bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
		} else {
			record = hammer_alloc_mem_record(nip, bytes);
			record->type = HAMMER_MEM_RECORD_GENERAL;

			record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
			record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
			record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
			record->leaf.data_len = bytes;
			KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
			bcopy(ap->a_target, record->data->symlink.name, bytes);
			error = hammer_ip_add_record(&trans, record);
		}

		/*
		 * Set the file size to the length of the link.
		 */
		if (error == 0) {
			nip->ino_data.size = bytes;
			hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
		}
	}
	if (error == 0)
		error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);

	/*
	 * Finish up.
	 */
	if (error) {
		hammer_rel_inode(nip, 0);
		*ap->a_vpp = NULL;
	} else {
		error = hammer_get_vnode(nip, ap->a_vpp);
		hammer_rel_inode(nip, 0);
		if (error == 0) {
			cache_setunresolved(ap->a_nch);
			cache_setvp(ap->a_nch, *ap->a_vpp);
		}
	}
	hammer_done_transaction(&trans);
	return (error);
}

/*
 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
 */
static
int
hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
{
	struct hammer_transaction trans;
	struct hammer_inode *dip;
	int error;

	dip = VTOI(ap->a_dvp);

	if (hammer_nohistory(dip) == 0 &&
	    (error = hammer_checkspace(dip->hmp)) != 0) {
		return (error);
	}

	hammer_start_transaction(&trans, dip->hmp);
	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
				ap->a_cred, ap->a_flags);
	hammer_done_transaction(&trans);

	return (error);
}

/*
 * hammer_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer_vop_ioctl(struct vop_ioctl_args *ap)
{
	struct hammer_inode *ip = ap->a_vp->v_data;

	return(hammer_ioctl(ip, ap->a_command, ap->a_data,
			    ap->a_fflag, ap->a_cred));
}

static
int
hammer_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	int error;

	mp = ap->a_head.a_ops->head.vv_mount;

	switch(ap->a_op) {
	case MOUNTCTL_SET_EXPORT:
		/*
		 * Reject a malformed export_args buffer instead of falling
		 * through to hammer_vfs_export() with a bogus size (the
		 * original assignment of EINVAL was immediately overwritten).
		 */
		if (ap->a_ctllen != sizeof(struct export_args)) {
			error = EINVAL;
			break;
		}
		error = hammer_vfs_export(mp, ap->a_op,
			    (const struct export_args *)ap->a_ctl);
		break;
	default:
		error = journal_mountctl(ap);
		break;
	}
	return(error);
}

/*
 * hammer_vop_strategy { vp, bio }
 *
 * Strategy call, used for regular file read & write only.  Note that the
 * bp may represent a cluster.
 *
 * To simplify operation and allow better optimizations in the future,
 * this code does not make any assumptions with regards to buffer alignment
 * or size.
 */
static
int
hammer_vop_strategy(struct vop_strategy_args *ap)
{
	struct buf *bp;
	int error;

	bp = ap->a_bio->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer_vop_strategy_read(ap);
		break;
	case BUF_CMD_WRITE:
		error = hammer_vop_strategy_write(ap);
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(ap->a_bio);
		break;
	}
	return (error);
}

8cd0a023
MD
1782/*
1783 * Read from a regular file. Iterate the related records and fill in the
1784 * BIO/BUF. Gaps are zero-filled.
1785 *
1786 * The support code in hammer_object.c should be used to deal with mixed
1787 * in-memory and on-disk records.
1788 *
1789 * XXX atime update
1790 */
1791static
1792int
1793hammer_vop_strategy_read(struct vop_strategy_args *ap)
1794{
36f82b23
MD
1795 struct hammer_transaction trans;
1796 struct hammer_inode *ip;
8cd0a023 1797 struct hammer_cursor cursor;
8cd0a023
MD
1798 hammer_base_elm_t base;
1799 struct bio *bio;
1800 struct buf *bp;
1801 int64_t rec_offset;
a89aec1b 1802 int64_t ran_end;
195c19a1 1803 int64_t tmp64;
8cd0a023
MD
1804 int error;
1805 int boff;
1806 int roff;
1807 int n;
1808
1809 bio = ap->a_bio;
1810 bp = bio->bio_buf;
36f82b23 1811 ip = ap->a_vp->v_data;
8cd0a023 1812
36f82b23 1813 hammer_simple_transaction(&trans, ip->hmp);
47637bff 1814 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
8cd0a023
MD
1815
1816 /*
 1817 * Key range (begin and end inclusive) to scan. Note that the keys
c0ade690
MD
1818 * stored in the actual records represent BASE+LEN, not BASE. The
1819 * first record containing bio_offset will have a key > bio_offset.
8cd0a023 1820 */
2f85fa4d 1821 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
8cd0a023 1822 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1823 cursor.key_beg.create_tid = 0;
8cd0a023 1824 cursor.key_beg.delete_tid = 0;
8cd0a023 1825 cursor.key_beg.obj_type = 0;
c0ade690 1826 cursor.key_beg.key = bio->bio_offset + 1;
d5530d22 1827 cursor.asof = ip->obj_asof;
47197d71 1828 cursor.flags |= HAMMER_CURSOR_ASOF | HAMMER_CURSOR_DATAEXTOK;
8cd0a023
MD
1829
1830 cursor.key_end = cursor.key_beg;
11ad5ade 1831 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
b84de5af 1832#if 0
11ad5ade 1833 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
a89aec1b
MD
1834 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
1835 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
1836 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
b84de5af
MD
1837 } else
1838#endif
1839 {
c0ade690 1840 ran_end = bio->bio_offset + bp->b_bufsize;
a89aec1b
MD
1841 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
1842 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
195c19a1
MD
1843 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
1844 if (tmp64 < ran_end)
a89aec1b
MD
1845 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1846 else
7f7c1f84 1847 cursor.key_end.key = ran_end + MAXPHYS + 1;
a89aec1b 1848 }
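/*
 * The tmp64 test above detects 64-bit overflow of ran_end + MAXPHYS + 1.
 * If the addition wraps, key_end is clamped to the largest positive key
 * instead of becoming a bogus negative value.
 */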
d26d0ae9 1849 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
8cd0a023 1850
4e17f465 1851 error = hammer_ip_first(&cursor);
8cd0a023
MD
1852 boff = 0;
1853
a89aec1b 1854 while (error == 0) {
47637bff
MD
1855 /*
1856 * Get the base file offset of the record. The key for
 1857 * data records is (base + bytes) rather than (base).
1858 */
11ad5ade 1859 base = &cursor.leaf->base;
11ad5ade 1860 rec_offset = base->key - cursor.leaf->data_len;
8cd0a023 1861
66325755 1862 /*
a89aec1b 1863 * Calculate the gap, if any, and zero-fill it.
1fef775e
MD
1864 *
 1865 * n is the offset of the start of the record versus our
1866 * current seek offset in the bio.
66325755 1867 */
8cd0a023
MD
1868 n = (int)(rec_offset - (bio->bio_offset + boff));
1869 if (n > 0) {
a89aec1b
MD
1870 if (n > bp->b_bufsize - boff)
1871 n = bp->b_bufsize - boff;
8cd0a023
MD
1872 bzero((char *)bp->b_data + boff, n);
1873 boff += n;
1874 n = 0;
66325755 1875 }
8cd0a023
MD
1876
1877 /*
1878 * Calculate the data offset in the record and the number
1879 * of bytes we can copy.
a89aec1b 1880 *
1fef775e
MD
1881 * There are two degenerate cases. First, boff may already
 1882 * be at bp->b_bufsize. Second, the data offset within
1883 * the record may exceed the record's size.
8cd0a023
MD
1884 */
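/*
 * roff = -n converts a negative gap into an offset within the record:
 * e.g. if our seek position is already 2048 bytes past the record's
 * base (n == -2048), the copy starts at roff == 2048 inside the
 * record's data and at most data_len - 2048 bytes are taken.
 */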
1885 roff = -n;
b84de5af 1886 rec_offset += roff;
11ad5ade 1887 n = cursor.leaf->data_len - roff;
1fef775e
MD
1888 if (n <= 0) {
1889 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
1890 n = 0;
1891 } else if (n > bp->b_bufsize - boff) {
8cd0a023 1892 n = bp->b_bufsize - boff;
1fef775e 1893 }
059819e3 1894
b84de5af 1895 /*
47637bff
MD
1896 * Deal with cached truncations. This cool bit of code
1897 * allows truncate()/ftruncate() to avoid having to sync
1898 * the file.
1899 *
1900 * If the frontend is truncated then all backend records are
1901 * subject to the frontend's truncation.
1902 *
1903 * If the backend is truncated then backend records on-disk
1904 * (but not in-memory) are subject to the backend's
1905 * truncation. In-memory records owned by the backend
1906 * represent data written after the truncation point on the
1907 * backend and must not be truncated.
1908 *
1909 * Truncate operations deal with frontend buffer cache
1910 * buffers and frontend-owned in-memory records synchronously.
b84de5af 1911 */
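/*
 * Illustration: with a cached trunc_off of 8192, a record based at
 * rec_offset 4096 has its copy length clamped to 4096 bytes, while a
 * record based at or beyond 8192 contributes nothing (n is forced to
 * 0), so data past the truncation point reads back as zeroes.
 */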
47637bff
MD
1912 if (ip->flags & HAMMER_INODE_TRUNCATED) {
1913 if (hammer_cursor_ondisk(&cursor) ||
1914 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
1915 if (ip->trunc_off <= rec_offset)
1916 n = 0;
1917 else if (ip->trunc_off < rec_offset + n)
1918 n = (int)(ip->trunc_off - rec_offset);
1919 }
1920 }
1921 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1922 if (hammer_cursor_ondisk(&cursor)) {
1923 if (ip->sync_trunc_off <= rec_offset)
1924 n = 0;
1925 else if (ip->sync_trunc_off < rec_offset + n)
1926 n = (int)(ip->sync_trunc_off - rec_offset);
1927 }
1928 }
b84de5af
MD
1929
1930 /*
47637bff
MD
1931 * Try to issue a direct read into our bio if possible,
1932 * otherwise resolve the element data into a hammer_buffer
1933 * and copy.
1934 *
1935 * WARNING: If we hit the else clause.
b84de5af 1936 */
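/*
 * The direct read is only attempted when the record exactly covers the
 * buffer: no offset into the record's data (roff == 0), the record
 * supplies the entire buffer (n == bp->b_bufsize), and the record's
 * base offset is HAMMER buffer aligned.  Anything else goes through
 * hammer_ip_resolve_data() and a bcopy.
 */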
47637bff
MD
1937 if (roff == 0 && n == bp->b_bufsize &&
1938 (rec_offset & HAMMER_BUFMASK) == 0) {
1939 error = hammer_io_direct_read(trans.hmp, cursor.leaf,
1940 bio);
1941 goto done;
1942 } else if (n) {
1943 error = hammer_ip_resolve_data(&cursor);
1944 if (error == 0) {
1945 bcopy((char *)cursor.data + roff,
1946 (char *)bp->b_data + boff, n);
1947 }
b84de5af 1948 }
47637bff
MD
1949 if (error)
1950 break;
1951
1952 /*
1953 * Iterate until we have filled the request.
1954 */
1955 boff += n;
8cd0a023 1956 if (boff == bp->b_bufsize)
66325755 1957 break;
a89aec1b 1958 error = hammer_ip_next(&cursor);
66325755
MD
1959 }
1960
1961 /*
8cd0a023 1962 * There may have been a gap after the last record
66325755 1963 */
8cd0a023
MD
1964 if (error == ENOENT)
1965 error = 0;
1966 if (error == 0 && boff != bp->b_bufsize) {
7f7c1f84 1967 KKASSERT(boff < bp->b_bufsize);
8cd0a023
MD
1968 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
1969 /* boff = bp->b_bufsize; */
1970 }
1971 bp->b_resid = 0;
059819e3
MD
1972 bp->b_error = error;
1973 if (error)
1974 bp->b_flags |= B_ERROR;
1975 biodone(ap->a_bio);
47637bff
MD
1976
1977done:
1978 if (cursor.node)
1979 hammer_cache_node(cursor.node, &ip->cache[1]);
1980 hammer_done_cursor(&cursor);
1981 hammer_done_transaction(&trans);
8cd0a023
MD
1982 return(error);
1983}
1984
1985/*
059819e3
MD
1986 * Write to a regular file. Because this is a strategy call the OS is
1987 * trying to actually sync data to the media. HAMMER can only flush
1988 * the entire inode (so the TID remains properly synchronized).
8cd0a023 1989 *
059819e3
MD
1990 * Basically all we do here is place the bio on the inode's flush queue
1991 * and activate the flusher.
8cd0a023
MD
1992 */
1993static
1994int
1995hammer_vop_strategy_write(struct vop_strategy_args *ap)
1996{
47637bff 1997 hammer_record_t record;
8cd0a023
MD
1998 hammer_inode_t ip;
1999 struct bio *bio;
2000 struct buf *bp;
47637bff 2001 int force_alt = 0;
8cd0a023
MD
2002
2003 bio = ap->a_bio;
2004 bp = bio->bio_buf;
2005 ip = ap->a_vp->v_data;
d113fda1 2006
059819e3
MD
2007 if (ip->flags & HAMMER_INODE_RO) {
2008 bp->b_error = EROFS;
2009 bp->b_flags |= B_ERROR;
2010 biodone(ap->a_bio);
e63644f0 2011 hammer_cleanup_write_io(ip);
059819e3
MD
2012 return(EROFS);
2013 }
b84de5af 2014
29ce0677
MD
2015 /*
2016 * Interlock with inode destruction (no in-kernel or directory
2017 * topology visibility). If we queue new IO while trying to
2018 * destroy the inode we can deadlock the vtrunc call in
2019 * hammer_inode_unloadable_check().
2020 */
2021 if (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
2022 bp->b_resid = 0;
2023 biodone(ap->a_bio);
e63644f0 2024 hammer_cleanup_write_io(ip);
29ce0677
MD
2025 return(0);
2026 }
2027
b84de5af 2028 /*
47637bff
MD
 2029 * Attempt to reserve space and issue a direct-write from the
 2030 * front-end. If we can't, we queue the BIO to the flusher.
 2031 *
 2032 * If we can, the I/O is issued and an in-memory record is
 2033 * installed to reference the storage until the flusher can get
 2034 * to it.
2035 *
2036 * Since we own the high level bio the front-end will not try to
2037 * do a read until the write completes.
2038 */
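/*
 * The direct-write path is only attempted when the buffer is an even
 * multiple of a HAMMER buffer and the write does not extend the file
 * (bio_offset + b_bufsize <= ino_data.size); everything else falls
 * through to the flusher queues below.
 */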
2039 if ((bp->b_bufsize & HAMMER_BUFMASK) == 0 &&
2040 bio->bio_offset + bp->b_bufsize <= ip->ino_data.size) {
2041 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
2042 bp->b_bufsize, &force_alt);
2043 if (record) {
2044 hammer_io_direct_write(ip->hmp, &record->leaf, bio);
2045 hammer_rel_mem_record(record);
2046 if (ip->rsv_recs > hammer_limit_irecs / 2)
2047 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2048 else
2049 hammer_flush_inode(ip, 0);
2050 return(0);
2051 }
2052 }
2053
2054 /*
2055 * Queue the bio to the flusher and let it deal with it.
2056 *
b84de5af
MD
2057 * If the inode is being flushed we cannot re-queue buffers
 2058 * it may have already flushed; doing so could result in duplicate
2059 * records in the database.
2060 */
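/*
 * Buffers queued while a flush is in progress (or when the bulk add
 * requested it via force_alt) go on bio_alt_list so they are not
 * mixed into the flush cycle that is already running.
 */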
059819e3 2061 BUF_KERNPROC(bp);
47637bff 2062 if (ip->flush_state == HAMMER_FST_FLUSH || force_alt)
b84de5af
MD
2063 TAILQ_INSERT_TAIL(&ip->bio_alt_list, bio, bio_act);
2064 else
2065 TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
1f07f686 2066 ++hammer_bio_count;
47637bff 2067 hammer_modify_inode(ip, HAMMER_INODE_BUFS);
4e17f465 2068 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
47637bff 2069
059819e3
MD
2070 return(0);
2071}
2072
2073/*
47637bff
MD
2074 * Clean-up after disposing of a dirty frontend buffer's data.
 2075 * This is somewhat heuristic, so try to be robust.
059819e3 2076 */
47637bff 2077void
e63644f0
MD
2078hammer_cleanup_write_io(hammer_inode_t ip)
2079{
2080 if (ip->rsv_databufs) {
2081 --ip->rsv_databufs;
2082 --ip->hmp->rsv_databufs;
2083 }
2084}
2085
8cd0a023
MD
2086/*
2087 * dounlink - disconnect a directory entry
2088 *
2089 * XXX whiteout support not really in yet
2090 */
2091static int
b84de5af
MD
2092hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
2093 struct vnode *dvp, struct ucred *cred, int flags)
8cd0a023 2094{
8cd0a023
MD
2095 struct namecache *ncp;
2096 hammer_inode_t dip;
2097 hammer_inode_t ip;
8cd0a023 2098 struct hammer_cursor cursor;
8cd0a023 2099 int64_t namekey;
11ad5ade 2100 int nlen, error;
8cd0a023
MD
2101
2102 /*
2103 * Calculate the namekey and setup the key range for the scan. This
2104 * works kinda like a chained hash table where the lower 32 bits
2105 * of the namekey synthesize the chain.
2106 *
2107 * The key range is inclusive of both key_beg and key_end.
2108 */
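/*
 * The scan range is [namekey, namekey | 0xFFFFFFFF]: every directory
 * entry whose upper key bits match the name hash, i.e. one hash chain.
 * Hash collisions are resolved in the loop below by comparing the
 * stored name with bcmp().
 */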
2109 dip = VTOI(dvp);
2110 ncp = nch->ncp;
d113fda1
MD
2111
2112 if (dip->flags & HAMMER_INODE_RO)
2113 return (EROFS);
2114
6a37e7e4
MD
2115 namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
2116retry:
4e17f465 2117 hammer_init_cursor(trans, &cursor, &dip->cache[0], dip);
2f85fa4d 2118 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
8cd0a023
MD
2119 cursor.key_beg.obj_id = dip->obj_id;
2120 cursor.key_beg.key = namekey;
d5530d22 2121 cursor.key_beg.create_tid = 0;
8cd0a023
MD
2122 cursor.key_beg.delete_tid = 0;
2123 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2124 cursor.key_beg.obj_type = 0;
2125
2126 cursor.key_end = cursor.key_beg;
2127 cursor.key_end.key |= 0xFFFFFFFFULL;
d5530d22
MD
2128 cursor.asof = dip->obj_asof;
2129 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
8cd0a023 2130
8cd0a023
MD
2131 /*
2132 * Scan all matching records (the chain), locate the one matching
 2133 * the requested path component. The error code returned on search
 2134 * termination by hammer_ip_first()/hammer_ip_next() could be 0,
 2135 * ENOENT, or something else.
2136 *
2137 * The hammer_ip_*() functions merge in-memory records with on-disk
2138 * records for the purposes of the search.
2139 */
4e17f465
MD
2140 error = hammer_ip_first(&cursor);
2141
a89aec1b
MD
2142 while (error == 0) {
2143 error = hammer_ip_resolve_data(&cursor);
2144 if (error)
66325755 2145 break;
11ad5ade
MD
2146 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2147 KKASSERT(nlen > 0);
2148 if (ncp->nc_nlen == nlen &&
2149 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
66325755
MD
2150 break;
2151 }
a89aec1b 2152 error = hammer_ip_next(&cursor);
66325755 2153 }
8cd0a023
MD
2154
2155 /*
2156 * If all is ok we have to get the inode so we can adjust nlinks.
b3deaf57
MD
2157 *
2158 * If the target is a directory, it must be empty.
8cd0a023 2159 */
66325755 2160 if (error == 0) {
b84de5af 2161 ip = hammer_get_inode(trans, &dip->cache[1],
11ad5ade 2162 cursor.data->entry.obj_id,
d113fda1 2163 dip->hmp->asof, 0, &error);
46fe7ae1 2164 if (error == ENOENT) {
11ad5ade 2165 kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
10a5d1ba 2166 Debugger("ENOENT unlinking object that should exist");
46fe7ae1 2167 }
1f07f686
MD
2168
2169 /*
2170 * If we are trying to remove a directory the directory must
2171 * be empty.
2172 *
2173 * WARNING: hammer_ip_check_directory_empty() may have to
2174 * terminate the cursor to avoid a deadlock. It is ok to
2175 * call hammer_done_cursor() twice.
2176 */
11ad5ade 2177 if (error == 0 && ip->ino_data.obj_type ==
b3deaf57 2178 HAMMER_OBJTYPE_DIRECTORY) {
98f7132d 2179 error = hammer_ip_check_directory_empty(trans, ip);
b3deaf57 2180 }
1f07f686 2181
6a37e7e4 2182 /*
1f07f686
MD
2183 * Delete the directory entry.
2184 *
6a37e7e4 2185 * WARNING: hammer_ip_del_directory() may have to terminate
1f07f686 2186 * the cursor to avoid a deadlock. It is ok to call
6a37e7e4
MD
2187 * hammer_done_cursor() twice.
2188 */
b84de5af 2189 if (error == 0) {
b84de5af
MD
2190 error = hammer_ip_del_directory(trans, &cursor,
2191 dip, ip);
b84de5af 2192 }
8cd0a023
MD
2193 if (error == 0) {
2194 cache_setunresolved(nch);
2195 cache_setvp(nch, NULL);
2196 /* XXX locking */
2197 if (ip->vp)
2198 cache_inval_vp(ip->vp, CINV_DESTROY);
2199 }
a89aec1b 2200 hammer_rel_inode(ip, 0);
66325755 2201 }
6a37e7e4
MD
2202 hammer_done_cursor(&cursor);
2203 if (error == EDEADLK)
2204 goto retry;
9c448776 2205
66325755 2206 return (error);
66325755
MD
2207}
2208
7a04d74f
MD
2209/************************************************************************
2210 * FIFO AND SPECFS OPS *
2211 ************************************************************************
2212 *
2213 */
2214
2215static int
2216hammer_vop_fifoclose (struct vop_close_args *ap)
2217{
2218 /* XXX update itimes */
2219 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
2220}
2221
2222static int
2223hammer_vop_fiforead (struct vop_read_args *ap)
2224{
2225 int error;
2226
2227 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2228 /* XXX update access time */
2229 return (error);
2230}
2231
2232static int
2233hammer_vop_fifowrite (struct vop_write_args *ap)
2234{
2235 int error;
2236
2237 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2238 /* XXX update access time */
2239 return (error);
2240}
2241
2242static int
2243hammer_vop_specclose (struct vop_close_args *ap)
2244{
2245 /* XXX update itimes */
2246 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2247}
2248
2249static int
2250hammer_vop_specread (struct vop_read_args *ap)
2251{
2252 /* XXX update access time */
2253 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2254}
2255
2256static int
2257hammer_vop_specwrite (struct vop_write_args *ap)
2258{
2259 /* XXX update last change time */
2260 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2261}
2262