HAMMER VFS - Handle critical I/O errors without panicking
[dragonfly.git] / sys / vfs / hammer / hammer_vnops.c
427e5fc6 1/*
b84de5af 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
427e5fc6
MD
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
fbb84158 34 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.102 2008/10/16 17:24:16 dillon Exp $
427e5fc6
MD
35 */
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/fcntl.h>
41#include <sys/namecache.h>
42#include <sys/vnode.h>
43#include <sys/lockf.h>
44#include <sys/event.h>
45#include <sys/stat.h>
b3deaf57 46#include <sys/dirent.h>
fbb84158 47#include <sys/file.h>
c0ade690 48#include <vm/vm_extern.h>
7a04d74f 49#include <vfs/fifofs/fifo.h>
427e5fc6
MD
50#include "hammer.h"
51
52/*
53 * USERFS VNOPS
54 */
55/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
66325755
MD
56static int hammer_vop_fsync(struct vop_fsync_args *);
57static int hammer_vop_read(struct vop_read_args *);
58static int hammer_vop_write(struct vop_write_args *);
59static int hammer_vop_access(struct vop_access_args *);
60static int hammer_vop_advlock(struct vop_advlock_args *);
61static int hammer_vop_close(struct vop_close_args *);
62static int hammer_vop_ncreate(struct vop_ncreate_args *);
63static int hammer_vop_getattr(struct vop_getattr_args *);
64static int hammer_vop_nresolve(struct vop_nresolve_args *);
65static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
66static int hammer_vop_nlink(struct vop_nlink_args *);
67static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
68static int hammer_vop_nmknod(struct vop_nmknod_args *);
69static int hammer_vop_open(struct vop_open_args *);
66325755
MD
70static int hammer_vop_print(struct vop_print_args *);
71static int hammer_vop_readdir(struct vop_readdir_args *);
72static int hammer_vop_readlink(struct vop_readlink_args *);
73static int hammer_vop_nremove(struct vop_nremove_args *);
74static int hammer_vop_nrename(struct vop_nrename_args *);
75static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
349433c9 76static int hammer_vop_markatime(struct vop_markatime_args *);
66325755
MD
77static int hammer_vop_setattr(struct vop_setattr_args *);
78static int hammer_vop_strategy(struct vop_strategy_args *);
a99b9ea2 79static int hammer_vop_bmap(struct vop_bmap_args *ap);
66325755
MD
80static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
81static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
7dc57964 82static int hammer_vop_ioctl(struct vop_ioctl_args *);
513ca7d7 83static int hammer_vop_mountctl(struct vop_mountctl_args *);
fbb84158 84static int hammer_vop_kqfilter (struct vop_kqfilter_args *);
427e5fc6 85
7a04d74f
MD
86static int hammer_vop_fifoclose (struct vop_close_args *);
87static int hammer_vop_fiforead (struct vop_read_args *);
88static int hammer_vop_fifowrite (struct vop_write_args *);
fbb84158 89static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
7a04d74f 90
427e5fc6
MD
91struct vop_ops hammer_vnode_vops = {
92 .vop_default = vop_defaultop,
93 .vop_fsync = hammer_vop_fsync,
c0ade690
MD
94 .vop_getpages = vop_stdgetpages,
95 .vop_putpages = vop_stdputpages,
427e5fc6
MD
96 .vop_read = hammer_vop_read,
97 .vop_write = hammer_vop_write,
98 .vop_access = hammer_vop_access,
99 .vop_advlock = hammer_vop_advlock,
100 .vop_close = hammer_vop_close,
101 .vop_ncreate = hammer_vop_ncreate,
102 .vop_getattr = hammer_vop_getattr,
103 .vop_inactive = hammer_vop_inactive,
104 .vop_reclaim = hammer_vop_reclaim,
105 .vop_nresolve = hammer_vop_nresolve,
106 .vop_nlookupdotdot = hammer_vop_nlookupdotdot,
107 .vop_nlink = hammer_vop_nlink,
108 .vop_nmkdir = hammer_vop_nmkdir,
109 .vop_nmknod = hammer_vop_nmknod,
110 .vop_open = hammer_vop_open,
64950f31 111 .vop_pathconf = vop_stdpathconf,
427e5fc6
MD
112 .vop_print = hammer_vop_print,
113 .vop_readdir = hammer_vop_readdir,
114 .vop_readlink = hammer_vop_readlink,
115 .vop_nremove = hammer_vop_nremove,
116 .vop_nrename = hammer_vop_nrename,
117 .vop_nrmdir = hammer_vop_nrmdir,
349433c9 118 .vop_markatime = hammer_vop_markatime,
427e5fc6 119 .vop_setattr = hammer_vop_setattr,
a99b9ea2 120 .vop_bmap = hammer_vop_bmap,
427e5fc6
MD
121 .vop_strategy = hammer_vop_strategy,
122 .vop_nsymlink = hammer_vop_nsymlink,
7dc57964 123 .vop_nwhiteout = hammer_vop_nwhiteout,
513ca7d7 124 .vop_ioctl = hammer_vop_ioctl,
fbb84158
MD
125 .vop_mountctl = hammer_vop_mountctl,
126 .vop_kqfilter = hammer_vop_kqfilter
427e5fc6
MD
127};
128
7a04d74f 129struct vop_ops hammer_spec_vops = {
8be7edad 130 .vop_default = vop_defaultop,
7a04d74f 131 .vop_fsync = hammer_vop_fsync,
8be7edad
MD
132 .vop_read = vop_stdnoread,
133 .vop_write = vop_stdnowrite,
7a04d74f 134 .vop_access = hammer_vop_access,
8be7edad 135 .vop_close = hammer_vop_close,
349433c9 136 .vop_markatime = hammer_vop_markatime,
8be7edad 137 .vop_getattr = hammer_vop_getattr,
7a04d74f
MD
138 .vop_inactive = hammer_vop_inactive,
139 .vop_reclaim = hammer_vop_reclaim,
140 .vop_setattr = hammer_vop_setattr
141};
142
143struct vop_ops hammer_fifo_vops = {
144 .vop_default = fifo_vnoperate,
145 .vop_fsync = hammer_vop_fsync,
146 .vop_read = hammer_vop_fiforead,
147 .vop_write = hammer_vop_fifowrite,
148 .vop_access = hammer_vop_access,
149 .vop_close = hammer_vop_fifoclose,
349433c9 150 .vop_markatime = hammer_vop_markatime,
7a04d74f
MD
151 .vop_getattr = hammer_vop_getattr,
152 .vop_inactive = hammer_vop_inactive,
153 .vop_reclaim = hammer_vop_reclaim,
fbb84158
MD
154 .vop_setattr = hammer_vop_setattr,
155 .vop_kqfilter = hammer_vop_fifokqfilter
7a04d74f
MD
156};
157
fbb84158
MD
158static __inline
159void
160hammer_knote(struct vnode *vp, int flags)
161{
162 if (flags)
163 KNOTE(&vp->v_pollinfo.vpi_selinfo.si_note, flags);
164}
165
0832c9bb
MD
166#ifdef DEBUG_TRUNCATE
167struct hammer_inode *HammerTruncIp;
168#endif
169
b84de5af 170static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
d7e278bb
MD
171 struct vnode *dvp, struct ucred *cred,
172 int flags, int isdir);
8cd0a023
MD
173static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
174static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
175
427e5fc6
MD
176#if 0
177static
178int
179hammer_vop_vnoperate(struct vop_generic_args *)
180{
181 return (VOCALL(&hammer_vnode_vops, ap));
182}
183#endif
184
66325755
MD
185/*
186 * hammer_vop_fsync { vp, waitfor }
ddfdf542
MD
187 *
188 * fsync() an inode to disk and wait for it to be completely committed
189 * such that the information would not be undone if a crash occurred after
190 * return.
191 *
192 * NOTE: HAMMER's fsync()'s are going to remain expensive until we implement
193 * a REDO log. A sysctl is provided to relax HAMMER's fsync()
194 * operation.
195 *
196 * Ultimately the combination of a REDO log and use of fast storage
197 * to front-end cluster caches will make fsync fast, but it ain't
198 * here yet. And, in any case, we need real transactional
199 * all-or-nothing features which are not restricted to a single file.
66325755 200 */
427e5fc6
MD
201static
202int
66325755 203hammer_vop_fsync(struct vop_fsync_args *ap)
427e5fc6 204{
b84de5af 205 hammer_inode_t ip = VTOI(ap->a_vp);
6f3d87c0
MD
206 int waitfor = ap->a_waitfor;
207
208 /*
209 * Fsync rule relaxation (default disabled)
210 */
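	/*
	 * hammer_fsync_mode summary (mirrors the cases below): 0 gives
	 * full fsync semantics, 1 downgrades MNT_WAIT to MNT_NOWAIT,
	 * 2 and 3 defer a synchronous or asynchronous flush until the
	 * file is closed, and any other value ignores fsync() entirely
	 * for the syscall path.
	 */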
211 if (ap->a_flags & VOP_FSYNC_SYSCALL) {
212 switch(hammer_fsync_mode) {
213 case 0:
214 /* full semantics */
215 break;
216 case 1:
217 /* asynchronous */
218 if (waitfor == MNT_WAIT)
219 waitfor = MNT_NOWAIT;
220 break;
221 case 2:
222 /* synchronous fsync on close */
223 ip->flags |= HAMMER_INODE_CLOSESYNC;
224 return(0);
225 case 3:
226 /* asynchronous fsync on close */
227 ip->flags |= HAMMER_INODE_CLOSEASYNC;
228 return(0);
229 default:
230 /* ignore the fsync() system call */
231 return(0);
232 }
233 }
c0ade690 234
6f3d87c0
MD
235 /*
236 * Go do it
237 */
7a61b85d 238 ++hammer_count_fsyncs;
6f3d87c0 239 vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
af209b0f 240 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
6f3d87c0 241 if (waitfor == MNT_WAIT) {
b424ca30 242 vn_unlock(ap->a_vp);
b84de5af 243 hammer_wait_inode(ip);
b424ca30
MD
244 vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
245 }
059819e3 246 return (ip->error);
427e5fc6
MD
247}
248
66325755
MD
249/*
250 * hammer_vop_read { vp, uio, ioflag, cred }
42cd5131
MD
251 *
252 * MPALMOSTSAFE
66325755 253 */
427e5fc6
MD
254static
255int
66325755 256hammer_vop_read(struct vop_read_args *ap)
427e5fc6 257{
66325755 258 struct hammer_transaction trans;
c0ade690 259 hammer_inode_t ip;
66325755
MD
260 off_t offset;
261 struct buf *bp;
262 struct uio *uio;
263 int error;
264 int n;
8cd0a023 265 int seqcount;
4a2796f3
MD
266 int ioseqcount;
267 int blksize;
899eb297 268 int got_mplock;
f864373f 269 int bigread;
66325755
MD
270
271 if (ap->a_vp->v_type != VREG)
272 return (EINVAL);
273 ip = VTOI(ap->a_vp);
274 error = 0;
4a2796f3
MD
275 uio = ap->a_uio;
276
277 /*
278 * Allow the UIO's size to override the sequential heuristic.
279 */
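	/*
	 * (ioseqcount is the read-ahead heuristic passed in through the
	 * upper bits of a_ioflag; whichever of the two counts is larger
	 * is used.)
	 */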
280 blksize = hammer_blocksize(uio->uio_offset);
281 seqcount = (uio->uio_resid + (blksize - 1)) / blksize;
282 ioseqcount = ap->a_ioflag >> 16;
283 if (seqcount < ioseqcount)
284 seqcount = ioseqcount;
66325755 285
7ff770b4
MD
286 /*
287 * Temporary hack until more of HAMMER can be made MPSAFE.
288 */
289#ifdef SMP
899eb297
MD
290 if (curthread->td_mpcount) {
291 got_mplock = -1;
292 hammer_start_transaction(&trans, ip->hmp);
293 } else {
294 got_mplock = 0;
295 }
7ff770b4
MD
296#else
297 hammer_start_transaction(&trans, ip->hmp);
298 got_mplock = -1;
299#endif
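	/*
	 * got_mplock state: -1 means the MP lock is already held (or this
	 * is a non-SMP build) and a transaction is already open, 0 means
	 * we are still running MPSAFE without a transaction, and 1 is set
	 * once we acquire the MP lock inside the loop below.
	 */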
899eb297 300
f864373f
MD
301 /*
302 * If reading or writing a huge amount of data we have to break
303 * atomicity and allow the operation to be interrupted by a signal
304 * or it can DOS the machine.
305 */
306 bigread = (uio->uio_resid > 100 * 1024 * 1024);
307
66325755 308 /*
4a2796f3
MD
309 * Access the data typically in HAMMER_BUFSIZE blocks via the
310 * buffer cache, but HAMMER may use a variable block size based
311 * on the offset.
42cd5131
MD
312 *
313 * XXX Temporary hack, delay the start transaction while we remain
314 * MPSAFE. NOTE: ino_data.size cannot change while vnode is
315 * locked-shared.
66325755 316 */
11ad5ade 317 while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
4a2796f3
MD
318 int64_t base_offset;
319 int64_t file_limit;
320
321 blksize = hammer_blocksize(uio->uio_offset);
322 offset = (int)uio->uio_offset & (blksize - 1);
323 base_offset = uio->uio_offset - offset;
324
f864373f
MD
325 if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
326 break;
327
42cd5131
MD
328 /*
329 * MPSAFE
330 */
331 bp = getcacheblk(ap->a_vp, base_offset);
332 if (bp) {
333 error = 0;
334 goto skip;
335 }
336
337 /*
338 * MPUNSAFE
339 */
340 if (got_mplock == 0) {
341 got_mplock = 1;
342 get_mplock();
343 hammer_start_transaction(&trans, ip->hmp);
344 }
345
1b0ab2c3 346 if (hammer_cluster_enable) {
4a2796f3
MD
347 /*
348 * Use file_limit to prevent cluster_read() from
349 * creating buffers of the wrong block size past
350 * the demarc.
351 */
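			/*
			 * (HAMMER switches from small HAMMER_BUFSIZE
			 * buffers to its large block size at
			 * HAMMER_XDEMARC, so read-ahead must not be
			 * allowed to cross that boundary using the
			 * small block size.)
			 */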
352 file_limit = ip->ino_data.size;
353 if (base_offset < HAMMER_XDEMARC &&
354 file_limit > HAMMER_XDEMARC) {
355 file_limit = HAMMER_XDEMARC;
356 }
357 error = cluster_read(ap->a_vp,
358 file_limit, base_offset,
359 blksize, MAXPHYS,
360 seqcount, &bp);
a99b9ea2 361 } else {
4a2796f3 362 error = bread(ap->a_vp, base_offset, blksize, &bp);
a99b9ea2 363 }
66325755
MD
364 if (error) {
365 brelse(bp);
366 break;
367 }
42cd5131 368skip:
7bc5b8c2 369
c0ade690 370 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
4a2796f3 371 n = blksize - offset;
66325755
MD
372 if (n > uio->uio_resid)
373 n = uio->uio_resid;
11ad5ade
MD
374 if (n > ip->ino_data.size - uio->uio_offset)
375 n = (int)(ip->ino_data.size - uio->uio_offset);
66325755 376 error = uiomove((char *)bp->b_data + offset, n, uio);
7bc5b8c2
MD
377
378 /* data has a lower priority than meta-data */
379 bp->b_flags |= B_AGE;
66325755 380 bqrelse(bp);
af209b0f
MD
381 if (error)
382 break;
ce0138a6 383 hammer_stats_file_read += n;
66325755 384 }
42cd5131
MD
385
386 /*
387 * XXX only update the atime if we had to get the MP lock.
388 * XXX hack hack hack, fixme.
389 */
390 if (got_mplock) {
391 if ((ip->flags & HAMMER_INODE_RO) == 0 &&
392 (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
393 ip->ino_data.atime = trans.time;
394 hammer_modify_inode(ip, HAMMER_INODE_ATIME);
395 }
396 hammer_done_transaction(&trans);
899eb297
MD
397 if (got_mplock > 0)
398 rel_mplock();
b84de5af 399 }
66325755 400 return (error);
427e5fc6
MD
401}
402
66325755
MD
403/*
404 * hammer_vop_write { vp, uio, ioflag, cred }
405 */
427e5fc6
MD
406static
407int
66325755 408hammer_vop_write(struct vop_write_args *ap)
427e5fc6 409{
66325755
MD
410 struct hammer_transaction trans;
411 struct hammer_inode *ip;
4a2796f3 412 hammer_mount_t hmp;
66325755 413 struct uio *uio;
4a2796f3 414 int offset;
47637bff 415 off_t base_offset;
66325755 416 struct buf *bp;
fbb84158 417 int kflags;
66325755
MD
418 int error;
419 int n;
c0ade690 420 int flags;
cb51be26 421 int seqcount;
f864373f 422 int bigwrite;
66325755
MD
423
424 if (ap->a_vp->v_type != VREG)
425 return (EINVAL);
426 ip = VTOI(ap->a_vp);
4a2796f3 427 hmp = ip->hmp;
66325755 428 error = 0;
fbb84158 429 kflags = 0;
cb51be26 430 seqcount = ap->a_ioflag >> 16;
66325755 431
d113fda1
MD
432 if (ip->flags & HAMMER_INODE_RO)
433 return (EROFS);
434
66325755
MD
435 /*
436 * Create a transaction to cover the operations we perform.
437 */
4a2796f3 438 hammer_start_transaction(&trans, hmp);
66325755
MD
439 uio = ap->a_uio;
440
441 /*
442 * Check append mode
443 */
444 if (ap->a_ioflag & IO_APPEND)
11ad5ade 445 uio->uio_offset = ip->ino_data.size;
66325755
MD
446
447 /*
af209b0f
MD
448 * Check for illegal write offsets. Valid range is 0...2^63-1.
449 *
450 * NOTE: the base_off assignment is required to work around what
451 * I consider to be a GCC-4 optimization bug.
66325755 452 */
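	/*
	 * (The wrap test below catches 63-bit overflow: if offset + resid
	 * overflows, base_offset comes out <= the starting offset.
	 * Routing the sum through a separate variable appears to be what
	 * keeps gcc-4 from mis-optimizing the comparison.)
	 */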
af209b0f
MD
453 if (uio->uio_offset < 0) {
454 hammer_done_transaction(&trans);
455 return (EFBIG);
456 }
457 base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
e54488bb 458 if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
b84de5af 459 hammer_done_transaction(&trans);
66325755 460 return (EFBIG);
9c448776 461 }
66325755 462
f864373f
MD
463 /*
464 * If reading or writing a huge amount of data we have to break
465 * atomicity and allow the operation to be interrupted by a signal
466 * or it can DOS the machine.
467 */
468 bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
469
66325755 470 /*
4a2796f3
MD
471 * Access the data typically in HAMMER_BUFSIZE blocks via the
472 * buffer cache, but HAMMER may use a variable block size based
473 * on the offset.
66325755
MD
474 */
475 while (uio->uio_resid > 0) {
d5ef456e 476 int fixsize = 0;
4a2796f3
MD
477 int blksize;
478 int blkmask;
d5ef456e 479
93291532 480 if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
e63644f0 481 break;
f864373f
MD
482 if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
483 break;
e63644f0 484
a9d52b76
MD
485 blksize = hammer_blocksize(uio->uio_offset);
486
059819e3 487 /*
4a2796f3
MD
488 * Do not allow HAMMER to blow out the buffer cache. Very
489 * large UIOs can lockout other processes due to bwillwrite()
490 * mechanics.
47637bff 491 *
df301614
MD
492 * The hammer inode is not locked during these operations.
493 * The vnode is locked which can interfere with the pageout
494 * daemon for non-UIO_NOCOPY writes but should not interfere
495 * with the buffer cache. Even so, we cannot afford to
496 * allow the pageout daemon to build up too many dirty buffer
497 * cache buffers.
cb63d1bc
MD
498 *
499 * Only call this if we aren't being recursively called from
500 * a virtual disk device (vn), else we may deadlock.
df301614 501 */
cb63d1bc
MD
502 if ((ap->a_ioflag & IO_RECURSE) == 0)
503 bwillwrite(blksize);
df301614 504
de996e86
MD
505 /*
506 * Control the number of pending records associated with
507 * this inode. If too many have accumulated start a
508 * flush. Try to maintain a pipeline with the flusher.
509 */
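		/*
		 * (At hammer_limit_inode_recs we signal an asynchronous
		 * flush; at twice that limit we sleep in "hmrwww" until
		 * the backlog drains back below the limit, then signal
		 * the flusher again.)
		 */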
510 if (ip->rsv_recs >= hammer_limit_inode_recs) {
511 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
512 }
513 if (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
514 while (ip->rsv_recs >= hammer_limit_inode_recs) {
515 tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
516 }
517 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
518 }
519
520#if 0
df301614 521 /*
e4a5ff06
MD
522 * Do not allow HAMMER to blow out system memory by
523 * accumulating too many records. Records are so well
524 * decoupled from the buffer cache that it is possible
525 * for userland to push data out to the media via
526 * direct-write, but build up the records queued to the
527 * backend faster than the backend can flush them out.
528 * HAMMER has hit its write limit but the frontend has
529 * no pushback to slow it down.
059819e3 530 */
df301614 531 if (hmp->rsv_recs > hammer_limit_recs / 2) {
4a2796f3 532 /*
df301614 533 * Get the inode on the flush list
4a2796f3 534 */
df301614
MD
535 if (ip->rsv_recs >= 64)
536 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
537 else if (ip->rsv_recs >= 16)
538 hammer_flush_inode(ip, 0);
4a2796f3
MD
539
540 /*
df301614
MD
541 * Keep the flusher going if the system keeps
542 * queueing records.
4a2796f3 543 */
df301614
MD
544 delta = hmp->count_newrecords -
545 hmp->last_newrecords;
546 if (delta < 0 || delta > hammer_limit_recs / 2) {
547 hmp->last_newrecords = hmp->count_newrecords;
548 hammer_sync_hmp(hmp, MNT_NOWAIT);
4a2796f3
MD
549 }
550
df301614
MD
551 /*
552 * If we have gotten behind start slowing
553 * down the writers.
554 */
555 delta = (hmp->rsv_recs - hammer_limit_recs) *
556 hz / hammer_limit_recs;
557 if (delta > 0)
558 tsleep(&trans, 0, "hmrslo", delta);
059819e3 559 }
de996e86 560#endif
059819e3 561
4a2796f3
MD
562 /*
563 * Calculate the blocksize at the current offset and figure
564 * out how much we can actually write.
565 */
4a2796f3
MD
566 blkmask = blksize - 1;
567 offset = (int)uio->uio_offset & blkmask;
568 base_offset = uio->uio_offset & ~(int64_t)blkmask;
569 n = blksize - offset;
d5ef456e
MD
570 if (n > uio->uio_resid)
571 n = uio->uio_resid;
11ad5ade 572 if (uio->uio_offset + n > ip->ino_data.size) {
d5ef456e
MD
573 vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
574 fixsize = 1;
fbb84158 575 kflags |= NOTE_EXTEND;
d5ef456e
MD
576 }
577
c0ade690
MD
578 if (uio->uio_segflg == UIO_NOCOPY) {
579 /*
580 * Issuing a write with the same data backing the
581 * buffer. Instantiate the buffer to collect the
582 * backing vm pages, then read-in any missing bits.
583 *
584 * This case is used by vop_stdputpages().
585 */
47637bff 586 bp = getblk(ap->a_vp, base_offset,
4a2796f3 587 blksize, GETBLK_BHEAVY, 0);
c0ade690
MD
588 if ((bp->b_flags & B_CACHE) == 0) {
589 bqrelse(bp);
47637bff 590 error = bread(ap->a_vp, base_offset,
4a2796f3 591 blksize, &bp);
c0ade690 592 }
4a2796f3 593 } else if (offset == 0 && uio->uio_resid >= blksize) {
c0ade690 594 /*
a5fddc16
MD
595 * Even though we are entirely overwriting the buffer
596 * we may still have to zero it out to avoid a
597 * mmap/write visibility issue.
c0ade690 598 */
4a2796f3 599 bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
a5fddc16
MD
600 if ((bp->b_flags & B_CACHE) == 0)
601 vfs_bio_clrbuf(bp);
47637bff 602 } else if (base_offset >= ip->ino_data.size) {
c0ade690 603 /*
a5fddc16
MD
604 * If the base offset of the buffer is beyond the
605 * file EOF, we don't have to issue a read.
c0ade690 606 */
47637bff 607 bp = getblk(ap->a_vp, base_offset,
4a2796f3 608 blksize, GETBLK_BHEAVY, 0);
66325755
MD
609 vfs_bio_clrbuf(bp);
610 } else {
c0ade690
MD
611 /*
612 * Partial overwrite, read in any missing bits then
613 * replace the portion being written.
614 */
4a2796f3 615 error = bread(ap->a_vp, base_offset, blksize, &bp);
d5ef456e
MD
616 if (error == 0)
617 bheavy(bp);
66325755 618 }
47637bff 619 if (error == 0) {
4a2796f3 620 error = uiomove((char *)bp->b_data + offset,
47637bff
MD
621 n, uio);
622 }
d5ef456e
MD
623
624 /*
625 * If we screwed up we have to undo any VM size changes we
626 * made.
627 */
66325755
MD
628 if (error) {
629 brelse(bp);
d5ef456e 630 if (fixsize) {
11ad5ade 631 vtruncbuf(ap->a_vp, ip->ino_data.size,
4a2796f3 632 hammer_blocksize(ip->ino_data.size));
d5ef456e 633 }
66325755
MD
634 break;
635 }
fbb84158 636 kflags |= NOTE_WRITE;
ce0138a6 637 hammer_stats_file_write += n;
c0ade690 638 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
11ad5ade
MD
639 if (ip->ino_data.size < uio->uio_offset) {
640 ip->ino_data.size = uio->uio_offset;
641 flags = HAMMER_INODE_DDIRTY;
642 vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
c0ade690 643 } else {
d113fda1 644 flags = 0;
66325755 645 }
11ad5ade 646 ip->ino_data.mtime = trans.time;
ddfdf542 647 flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
47637bff 648 hammer_modify_inode(ip, flags);
32c90105 649
1b0ab2c3
MD
650 /*
651 * Once we dirty the buffer any cached zone-X offset
652 * becomes invalid. HAMMER NOTE: no-history mode cannot
653 * allow overwriting over the same data sector unless
654 * we provide UNDOs for the old data, which we don't.
655 */
656 bp->b_bio2.bio_offset = NOOFFSET;
657
47637bff
MD
658 /*
659 * Final buffer disposition.
de996e86
MD
660 *
661 * Because meta-data updates are deferred, HAMMER is
662 * especially sensitive to excessive bdwrite()s because
663 * the I/O stream is not broken up by disk reads. So the
664 * buffer cache simply cannot keep up.
665 *
666 * WARNING! blksize is variable. cluster_write() is
667 * expected to not blow up if it encounters buffers that
668 * do not match the passed blksize.
710733a6
MD
669 *
670 * NOTE! Hammer shouldn't need to bawrite()/cluster_write().
671 * The ip->rsv_recs check should burst-flush the data.
672 * If we queue it immediately the buf could be left
673 * locked on the device queue for a very long time.
47637bff 674 */
cb51be26 675 bp->b_flags |= B_AGE;
66325755
MD
676 if (ap->a_ioflag & IO_SYNC) {
677 bwrite(bp);
678 } else if (ap->a_ioflag & IO_DIRECT) {
66325755 679 bawrite(bp);
710733a6
MD
680 } else {
681#if 0
682 if (offset + n == blksize) {
de996e86
MD
683 if (hammer_cluster_enable == 0 ||
684 (ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
685 bawrite(bp);
686 } else {
687 cluster_write(bp, ip->ino_data.size,
688 blksize, seqcount);
689 }
4a2796f3 690 } else {
710733a6 691#endif
4a2796f3
MD
692 bdwrite(bp);
693 }
66325755 694 }
b84de5af 695 hammer_done_transaction(&trans);
fbb84158 696 hammer_knote(ap->a_vp, kflags);
66325755 697 return (error);
427e5fc6
MD
698}
699
66325755
MD
700/*
701 * hammer_vop_access { vp, mode, cred }
702 */
427e5fc6
MD
703static
704int
66325755 705hammer_vop_access(struct vop_access_args *ap)
427e5fc6 706{
66325755
MD
707 struct hammer_inode *ip = VTOI(ap->a_vp);
708 uid_t uid;
709 gid_t gid;
710 int error;
711
ce0138a6 712 ++hammer_stats_file_iopsr;
66325755
MD
713 uid = hammer_to_unix_xid(&ip->ino_data.uid);
714 gid = hammer_to_unix_xid(&ip->ino_data.gid);
715
716 error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
717 ip->ino_data.uflags);
718 return (error);
427e5fc6
MD
719}
720
66325755
MD
721/*
722 * hammer_vop_advlock { vp, id, op, fl, flags }
723 */
427e5fc6
MD
724static
725int
66325755 726hammer_vop_advlock(struct vop_advlock_args *ap)
427e5fc6 727{
4a2796f3 728 hammer_inode_t ip = VTOI(ap->a_vp);
66325755 729
11ad5ade 730 return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
427e5fc6
MD
731}
732
66325755
MD
733/*
734 * hammer_vop_close { vp, fflag }
6f3d87c0
MD
735 *
736 * We can only sync-on-close for normal closes.
66325755 737 */
427e5fc6
MD
738static
739int
66325755 740hammer_vop_close(struct vop_close_args *ap)
427e5fc6 741{
6f3d87c0
MD
742 struct vnode *vp = ap->a_vp;
743 hammer_inode_t ip = VTOI(vp);
744 int waitfor;
745
746 if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
747 if (vn_islocked(vp) == LK_EXCLUSIVE &&
748 (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
749 if (ip->flags & HAMMER_INODE_CLOSESYNC)
750 waitfor = MNT_WAIT;
751 else
752 waitfor = MNT_NOWAIT;
753 ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
754 HAMMER_INODE_CLOSEASYNC);
755 VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
756 }
757 }
a89aec1b 758 return (vop_stdclose(ap));
427e5fc6
MD
759}
760
66325755
MD
761/*
762 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
763 *
764 * The operating system has already ensured that the directory entry
765 * does not exist and done all appropriate namespace locking.
766 */
427e5fc6
MD
767static
768int
66325755 769hammer_vop_ncreate(struct vop_ncreate_args *ap)
427e5fc6 770{
66325755
MD
771 struct hammer_transaction trans;
772 struct hammer_inode *dip;
773 struct hammer_inode *nip;
774 struct nchandle *nch;
775 int error;
776
777 nch = ap->a_nch;
778 dip = VTOI(ap->a_dvp);
779
d113fda1
MD
780 if (dip->flags & HAMMER_INODE_RO)
781 return (EROFS);
93291532 782 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
e63644f0 783 return (error);
d113fda1 784
66325755
MD
785 /*
786 * Create a transaction to cover the operations we perform.
787 */
8cd0a023 788 hammer_start_transaction(&trans, dip->hmp);
ce0138a6 789 ++hammer_stats_file_iopsw;
66325755
MD
790
791 /*
792 * Create a new filesystem object of the requested type. The
b84de5af
MD
793 * returned inode will be referenced and shared-locked to prevent
794 * it from being moved to the flusher.
66325755 795 */
5a930e66 796 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
5a64efa1
MD
797 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
798 NULL, &nip);
66325755 799 if (error) {
77062c8a 800 hkprintf("hammer_create_inode error %d\n", error);
b84de5af 801 hammer_done_transaction(&trans);
66325755
MD
802 *ap->a_vpp = NULL;
803 return (error);
804 }
66325755
MD
805
806 /*
807 * Add the new filesystem object to the directory. This will also
808 * bump the inode's link count.
809 */
5a930e66
MD
810 error = hammer_ip_add_directory(&trans, dip,
811 nch->ncp->nc_name, nch->ncp->nc_nlen,
812 nip);
0b075555 813 if (error)
77062c8a 814 hkprintf("hammer_ip_add_directory error %d\n", error);
66325755
MD
815
816 /*
817 * Finish up.
818 */
819 if (error) {
a89aec1b 820 hammer_rel_inode(nip, 0);
b84de5af 821 hammer_done_transaction(&trans);
66325755
MD
822 *ap->a_vpp = NULL;
823 } else {
e8599db1 824 error = hammer_get_vnode(nip, ap->a_vpp);
b84de5af 825 hammer_done_transaction(&trans);
a89aec1b
MD
826 hammer_rel_inode(nip, 0);
827 if (error == 0) {
828 cache_setunresolved(ap->a_nch);
829 cache_setvp(ap->a_nch, *ap->a_vpp);
830 }
fbb84158 831 hammer_knote(ap->a_dvp, NOTE_WRITE);
66325755
MD
832 }
833 return (error);
427e5fc6
MD
834}
835
66325755
MD
836/*
837 * hammer_vop_getattr { vp, vap }
98f7132d
MD
838 *
839 * Retrieve an inode's attribute information. When accessing inodes
840 * historically we fake the atime field to ensure consistent results.
841 * The atime field is stored in the B-Tree element and allowed to be
842 * updated without cycling the element.
899eb297
MD
843 *
844 * MPSAFE
66325755 845 */
427e5fc6
MD
846static
847int
66325755 848hammer_vop_getattr(struct vop_getattr_args *ap)
427e5fc6 849{
66325755
MD
850 struct hammer_inode *ip = VTOI(ap->a_vp);
851 struct vattr *vap = ap->a_vap;
852
a56cb012
MD
853 /*
854 * We want the fsid to be different when accessing a filesystem
855 * with different as-of's so programs like diff don't think
856 * the files are the same.
857 *
858 * We also want the fsid to be the same when comparing snapshots,
859 * or when comparing mirrors (which might be backed by different
860 * physical devices). HAMMER fsids are based on the PFS's
861 * shared_uuid field.
862 *
863 * XXX there is a chance of collision here. The va_fsid reported
864 * by stat is different from the more involved fsid used in the
865 * mount structure.
c82af904 866 */
ce0138a6 867 ++hammer_stats_file_iopsr;
899eb297 868 hammer_lock_sh(&ip->lock);
a56cb012
MD
869 vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
870 (u_int32_t)(ip->obj_asof >> 32);
871
11ad5ade 872 vap->va_fileid = ip->ino_leaf.base.obj_id;
66325755 873 vap->va_mode = ip->ino_data.mode;
11ad5ade 874 vap->va_nlink = ip->ino_data.nlinks;
66325755
MD
875 vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
876 vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
877 vap->va_rmajor = 0;
878 vap->va_rminor = 0;
11ad5ade 879 vap->va_size = ip->ino_data.size;
bcac4bbb 880
f437a2ab
MD
881 /*
882 * Special case for @@PFS softlinks. The actual size of the
883 * expanded softlink is "@@0x%016llx:%05d" == 26 bytes,
cb3c760c 884 * or for MAX_TID is "@@-1:%05d" == 10 bytes.
885 */
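	/*
	 * (For example, a slave PFS softlink expands to something like
	 * "@@0x00000001061a8c10:00001" -- 26 bytes -- while a master
	 * expands to "@@-1:00001" -- 10 bytes. The transaction id shown
	 * here is only an illustrative value.)
	 */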
886 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
887 ip->ino_data.size == 10 &&
888 ip->obj_asof == HAMMER_MAX_TID &&
889 ip->obj_localization == 0 &&
890 strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
cb3c760c
MD
891 if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
892 vap->va_size = 26;
893 else
894 vap->va_size = 10;
f437a2ab
MD
895 }
896
bcac4bbb
MD
897 /*
898 * We must provide a consistent atime and mtime for snapshots
899 * so people can do a 'tar cf - ... | md5' on them and get
900 * consistent results.
901 */
902 if (ip->flags & HAMMER_INODE_RO) {
ddfdf542
MD
903 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
904 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
bcac4bbb 905 } else {
ddfdf542
MD
906 hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
907 hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
bcac4bbb 908 }
ddfdf542 909 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
66325755
MD
910 vap->va_flags = ip->ino_data.uflags;
911 vap->va_gen = 1; /* hammer inums are unique for all time */
bf686dbe 912 vap->va_blocksize = HAMMER_BUFSIZE;
4a2796f3
MD
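	/*
	 * Round the space estimate to the allocation unit in effect for
	 * this file: the large-block boundary at or beyond HAMMER_XDEMARC,
	 * the HAMMER_BUFSIZE boundary for mid-sized files, and 16-byte
	 * alignment for files no larger than half a buffer.
	 */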
913 if (ip->ino_data.size >= HAMMER_XDEMARC) {
914 vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
915 ~HAMMER_XBUFMASK64;
916 } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
917 vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
918 ~HAMMER_BUFMASK64;
919 } else {
920 vap->va_bytes = (ip->ino_data.size + 15) & ~15;
921 }
64950f31 922
11ad5ade 923 vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
66325755 924 vap->va_filerev = 0; /* XXX */
4a2796f3 925 /* mtime uniquely identifies any adjustments made to the file XXX */
11ad5ade 926 vap->va_fsmid = ip->ino_data.mtime;
66325755
MD
927 vap->va_uid_uuid = ip->ino_data.uid;
928 vap->va_gid_uuid = ip->ino_data.gid;
929 vap->va_fsid_uuid = ip->hmp->fsid;
930 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
931 VA_FSID_UUID_VALID;
7a04d74f 932
11ad5ade 933 switch (ip->ino_data.obj_type) {
7a04d74f
MD
934 case HAMMER_OBJTYPE_CDEV:
935 case HAMMER_OBJTYPE_BDEV:
936 vap->va_rmajor = ip->ino_data.rmajor;
937 vap->va_rminor = ip->ino_data.rminor;
938 break;
939 default:
940 break;
941 }
899eb297 942 hammer_unlock(&ip->lock);
66325755 943 return(0);
427e5fc6
MD
944}
945
66325755
MD
946/*
947 * hammer_vop_nresolve { nch, dvp, cred }
948 *
949 * Locate the requested directory entry.
950 */
427e5fc6
MD
951static
952int
66325755 953hammer_vop_nresolve(struct vop_nresolve_args *ap)
427e5fc6 954{
36f82b23 955 struct hammer_transaction trans;
66325755 956 struct namecache *ncp;
7f7c1f84
MD
957 hammer_inode_t dip;
958 hammer_inode_t ip;
959 hammer_tid_t asof;
8cd0a023 960 struct hammer_cursor cursor;
66325755
MD
961 struct vnode *vp;
962 int64_t namekey;
963 int error;
7f7c1f84
MD
964 int i;
965 int nlen;
d113fda1 966 int flags;
a56cb012 967 int ispfs;
adf01747 968 int64_t obj_id;
ddfdf542 969 u_int32_t localization;
5e435c92 970 u_int32_t max_iterations;
7f7c1f84
MD
971
972 /*
973 * Misc initialization, plus handle as-of name extensions. Look for
974 * the '@@' extension. Note that as-of files and directories cannot
975 * be modified.
7f7c1f84
MD
976 */
977 dip = VTOI(ap->a_dvp);
978 ncp = ap->a_nch->ncp;
979 asof = dip->obj_asof;
bc6c1f13 980 localization = dip->obj_localization; /* for code consistency */
7f7c1f84 981 nlen = ncp->nc_nlen;
ea434b6f 982 flags = dip->flags & HAMMER_INODE_RO;
a56cb012 983 ispfs = 0;
7f7c1f84 984
36f82b23 985 hammer_simple_transaction(&trans, dip->hmp);
ce0138a6 986 ++hammer_stats_file_iopsr;
36f82b23 987
7f7c1f84
MD
988 for (i = 0; i < nlen; ++i) {
989 if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
bc6c1f13
MD
990 error = hammer_str_to_tid(ncp->nc_name + i + 2,
991 &ispfs, &asof, &localization);
992 if (error != 0) {
993 i = nlen;
994 break;
995 }
ea434b6f
MD
996 if (asof != HAMMER_MAX_TID)
997 flags |= HAMMER_INODE_RO;
7f7c1f84
MD
998 break;
999 }
1000 }
1001 nlen = i;
66325755 1002
ea434b6f
MD
1003 /*
1004 * If this is a PFS softlink we dive into the PFS
1005 */
1006 if (ispfs && nlen == 0) {
1007 ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
1008 asof, localization,
1009 flags, &error);
1010 if (error == 0) {
1011 error = hammer_get_vnode(ip, &vp);
1012 hammer_rel_inode(ip, 0);
1013 } else {
1014 vp = NULL;
1015 }
1016 if (error == 0) {
1017 vn_unlock(vp);
1018 cache_setvp(ap->a_nch, vp);
1019 vrele(vp);
1020 }
1021 goto done;
1022 }
1023
d113fda1 1024 /*
294aec9f
MD
1025 * If there is no path component the time extension is relative to dip.
1026 * e.g. "fubar/@@<snapshot>"
1027 *
1028 * "." is handled by the kernel, but ".@@<snapshot>" is not.
1029 * e.g. "fubar/.@@<snapshot>"
1030 *
1031 * ".." is handled by the kernel. We do not currently handle
1032 * "..@<snapshot>".
d113fda1 1033 */
294aec9f 1034 if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
bcac4bbb 1035 ip = hammer_get_inode(&trans, dip, dip->obj_id,
ddfdf542
MD
1036 asof, dip->obj_localization,
1037 flags, &error);
d113fda1 1038 if (error == 0) {
e8599db1 1039 error = hammer_get_vnode(ip, &vp);
d113fda1
MD
1040 hammer_rel_inode(ip, 0);
1041 } else {
1042 vp = NULL;
1043 }
1044 if (error == 0) {
1045 vn_unlock(vp);
1046 cache_setvp(ap->a_nch, vp);
1047 vrele(vp);
1048 }
36f82b23 1049 goto done;
d113fda1
MD
1050 }
1051
8cd0a023
MD
1052 /*
1053 * Calculate the namekey and setup the key range for the scan. This
1054 * works kinda like a chained hash table where the lower 32 bits
1055 * of the namekey synthesize the chain.
1056 *
1057 * The key range is inclusive of both key_beg and key_end.
1058 */
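	/*
	 * (hammer_directory_namekey() hashes the name into the upper bits
	 * of the key and returns max_iterations, which bounds how far
	 * key_end may extend past key_beg to cover collisions within the
	 * same hash chain.)
	 */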
5e435c92
MD
1059 namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
1060 &max_iterations);
66325755 1061
bcac4bbb 1062 error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
5a930e66 1063 cursor.key_beg.localization = dip->obj_localization +
beec5dc4 1064 hammer_dir_localization(dip);
8cd0a023
MD
1065 cursor.key_beg.obj_id = dip->obj_id;
1066 cursor.key_beg.key = namekey;
d5530d22 1067 cursor.key_beg.create_tid = 0;
8cd0a023
MD
1068 cursor.key_beg.delete_tid = 0;
1069 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1070 cursor.key_beg.obj_type = 0;
66325755 1071
8cd0a023 1072 cursor.key_end = cursor.key_beg;
5e435c92 1073 cursor.key_end.key += max_iterations;
d5530d22
MD
1074 cursor.asof = asof;
1075 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
66325755
MD
1076
1077 /*
8cd0a023 1078 * Scan all matching records (the chain), locate the one matching
a89aec1b 1079 * the requested path component.
8cd0a023
MD
1080 *
1081 * The hammer_ip_*() functions merge in-memory records with on-disk
1082 * records for the purposes of the search.
66325755 1083 */
6a37e7e4 1084 obj_id = 0;
43c665ae 1085 localization = HAMMER_DEF_LOCALIZATION;
6a37e7e4 1086
4e17f465 1087 if (error == 0) {
4e17f465
MD
1088 error = hammer_ip_first(&cursor);
1089 while (error == 0) {
1090 error = hammer_ip_resolve_data(&cursor);
1091 if (error)
1092 break;
11ad5ade
MD
1093 if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
1094 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
1095 obj_id = cursor.data->entry.obj_id;
ddfdf542 1096 localization = cursor.data->entry.localization;
4e17f465
MD
1097 break;
1098 }
1099 error = hammer_ip_next(&cursor);
66325755
MD
1100 }
1101 }
6a37e7e4 1102 hammer_done_cursor(&cursor);
4c286c36
MD
1103
1104 /*
1105 * Lookup the obj_id. This should always succeed. If it does not
1106 * the filesystem may be damaged and we return a dummy inode.
1107 */
66325755 1108 if (error == 0) {
bcac4bbb 1109 ip = hammer_get_inode(&trans, dip, obj_id,
ddfdf542
MD
1110 asof, localization,
1111 flags, &error);
4c286c36
MD
1112 if (error == ENOENT) {
1113 kprintf("HAMMER: WARNING: Missing "
1114 "inode for dirent \"%s\"\n"
3d30bff3
MD
1115 "\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
1116 ncp->nc_name,
1117 (long long)obj_id, (long long)asof,
1118 localization);
4c286c36
MD
1119 error = 0;
1120 ip = hammer_get_dummy_inode(&trans, dip, obj_id,
1121 asof, localization,
1122 flags, &error);
1123 }
7f7c1f84 1124 if (error == 0) {
e8599db1 1125 error = hammer_get_vnode(ip, &vp);
7f7c1f84
MD
1126 hammer_rel_inode(ip, 0);
1127 } else {
1128 vp = NULL;
1129 }
66325755
MD
1130 if (error == 0) {
1131 vn_unlock(vp);
1132 cache_setvp(ap->a_nch, vp);
1133 vrele(vp);
1134 }
1135 } else if (error == ENOENT) {
1136 cache_setvp(ap->a_nch, NULL);
1137 }
36f82b23 1138done:
b84de5af 1139 hammer_done_transaction(&trans);
66325755 1140 return (error);
427e5fc6
MD
1141}
1142
66325755
MD
1143/*
1144 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
1145 *
1146 * Locate the parent directory of a directory vnode.
1147 *
1148 * dvp is referenced but not locked. *vpp must be returned referenced and
1149 * locked. A parent_obj_id of 0 does not necessarily indicate that we are
1150 * at the root; instead it could indicate that the directory we were in was
1151 * removed.
42c7d26b
MD
1152 *
1153 * NOTE: as-of sequences are not linked into the directory structure. If
1154 * we are at the root with a different asof than the mount point, reload
1155 * the same directory with the mount point's asof. I'm not sure what this
1156 * will do to NFS. We encode ASOF stamps in NFS file handles so it might not
1157 * get confused, but it hasn't been tested.
66325755 1158 */
427e5fc6
MD
1159static
1160int
66325755 1161hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
427e5fc6 1162{
36f82b23 1163 struct hammer_transaction trans;
66325755 1164 struct hammer_inode *dip;
d113fda1 1165 struct hammer_inode *ip;
42c7d26b 1166 int64_t parent_obj_id;
5a930e66 1167 u_int32_t parent_obj_localization;
42c7d26b 1168 hammer_tid_t asof;
d113fda1 1169 int error;
66325755
MD
1170
1171 dip = VTOI(ap->a_dvp);
42c7d26b 1172 asof = dip->obj_asof;
5a930e66
MD
1173
1174 /*
1175 * Who is our parent? This could be the root of a pseudo-filesystem
1176 * whose parent is in another localization domain.
1177 */
42c7d26b 1178 parent_obj_id = dip->ino_data.parent_obj_id;
5a930e66
MD
1179 if (dip->obj_id == HAMMER_OBJID_ROOT)
1180 parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
1181 else
1182 parent_obj_localization = dip->obj_localization;
42c7d26b
MD
1183
1184 if (parent_obj_id == 0) {
1185 if (dip->obj_id == HAMMER_OBJID_ROOT &&
1186 asof != dip->hmp->asof) {
1187 parent_obj_id = dip->obj_id;
1188 asof = dip->hmp->asof;
1189 *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
1190 ksnprintf(*ap->a_fakename, 19, "0x%016llx",
973c11b9 1191 (long long)dip->obj_asof);
42c7d26b
MD
1192 } else {
1193 *ap->a_vpp = NULL;
1194 return ENOENT;
1195 }
66325755 1196 }
d113fda1 1197
36f82b23 1198 hammer_simple_transaction(&trans, dip->hmp);
ce0138a6 1199 ++hammer_stats_file_iopsr;
36f82b23 1200
bcac4bbb 1201 ip = hammer_get_inode(&trans, dip, parent_obj_id,
5a930e66 1202 asof, parent_obj_localization,
ddfdf542 1203 dip->flags, &error);
36f82b23 1204 if (ip) {
e8599db1 1205 error = hammer_get_vnode(ip, ap->a_vpp);
36f82b23
MD
1206 hammer_rel_inode(ip, 0);
1207 } else {
d113fda1 1208 *ap->a_vpp = NULL;
d113fda1 1209 }
b84de5af 1210 hammer_done_transaction(&trans);
d113fda1 1211 return (error);
427e5fc6
MD
1212}
1213
66325755
MD
1214/*
1215 * hammer_vop_nlink { nch, dvp, vp, cred }
1216 */
427e5fc6
MD
1217static
1218int
66325755 1219hammer_vop_nlink(struct vop_nlink_args *ap)
427e5fc6 1220{
66325755
MD
1221 struct hammer_transaction trans;
1222 struct hammer_inode *dip;
1223 struct hammer_inode *ip;
1224 struct nchandle *nch;
1225 int error;
1226
f437a2ab
MD
1227 if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1228 return(EXDEV);
1229
66325755
MD
1230 nch = ap->a_nch;
1231 dip = VTOI(ap->a_dvp);
1232 ip = VTOI(ap->a_vp);
1233
f437a2ab
MD
1234 if (dip->obj_localization != ip->obj_localization)
1235 return(EXDEV);
1236
d113fda1
MD
1237 if (dip->flags & HAMMER_INODE_RO)
1238 return (EROFS);
1239 if (ip->flags & HAMMER_INODE_RO)
1240 return (EROFS);
93291532 1241 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
e63644f0 1242 return (error);
d113fda1 1243
66325755
MD
1244 /*
1245 * Create a transaction to cover the operations we perform.
1246 */
8cd0a023 1247 hammer_start_transaction(&trans, dip->hmp);
ce0138a6 1248 ++hammer_stats_file_iopsw;
66325755
MD
1249
1250 /*
1251 * Add the filesystem object to the directory. Note that neither
1252 * dip nor ip are referenced or locked, but their vnodes are
1253 * referenced. This function will bump the inode's link count.
1254 */
5a930e66
MD
1255 error = hammer_ip_add_directory(&trans, dip,
1256 nch->ncp->nc_name, nch->ncp->nc_nlen,
1257 ip);
66325755
MD
1258
1259 /*
1260 * Finish up.
1261 */
b84de5af 1262 if (error == 0) {
6b4f890b
MD
1263 cache_setunresolved(nch);
1264 cache_setvp(nch, ap->a_vp);
66325755 1265 }
b84de5af 1266 hammer_done_transaction(&trans);
fbb84158
MD
1267 hammer_knote(ap->a_vp, NOTE_LINK);
1268 hammer_knote(ap->a_dvp, NOTE_WRITE);
66325755 1269 return (error);
427e5fc6
MD
1270}
1271
66325755
MD
1272/*
1273 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1274 *
1275 * The operating system has already ensured that the directory entry
1276 * does not exist and done all appropriate namespace locking.
1277 */
427e5fc6
MD
1278static
1279int
66325755 1280hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
427e5fc6 1281{
66325755
MD
1282 struct hammer_transaction trans;
1283 struct hammer_inode *dip;
1284 struct hammer_inode *nip;
1285 struct nchandle *nch;
1286 int error;
1287
1288 nch = ap->a_nch;
1289 dip = VTOI(ap->a_dvp);
1290
d113fda1
MD
1291 if (dip->flags & HAMMER_INODE_RO)
1292 return (EROFS);
93291532 1293 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
e63644f0 1294 return (error);
d113fda1 1295
66325755
MD
1296 /*
1297 * Create a transaction to cover the operations we perform.
1298 */
8cd0a023 1299 hammer_start_transaction(&trans, dip->hmp);
ce0138a6 1300 ++hammer_stats_file_iopsw;
66325755
MD
1301
1302 /*
1303 * Create a new filesystem object of the requested type. The
8cd0a023 1304 * returned inode will be referenced but not locked.
66325755 1305 */
5a930e66 1306 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
5a64efa1
MD
1307 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1308 NULL, &nip);
66325755 1309 if (error) {
77062c8a 1310 hkprintf("hammer_mkdir error %d\n", error);
b84de5af 1311 hammer_done_transaction(&trans);
66325755
MD
1312 *ap->a_vpp = NULL;
1313 return (error);
1314 }
66325755
MD
1315 /*
1316 * Add the new filesystem object to the directory. This will also
1317 * bump the inode's link count.
1318 */
5a930e66
MD
1319 error = hammer_ip_add_directory(&trans, dip,
1320 nch->ncp->nc_name, nch->ncp->nc_nlen,
1321 nip);
0b075555 1322 if (error)
77062c8a 1323 hkprintf("hammer_mkdir (add) error %d\n", error);
66325755
MD
1324
1325 /*
1326 * Finish up.
1327 */
1328 if (error) {
a89aec1b 1329 hammer_rel_inode(nip, 0);
66325755
MD
1330 *ap->a_vpp = NULL;
1331 } else {
e8599db1 1332 error = hammer_get_vnode(nip, ap->a_vpp);
a89aec1b
MD
1333 hammer_rel_inode(nip, 0);
1334 if (error == 0) {
1335 cache_setunresolved(ap->a_nch);
1336 cache_setvp(ap->a_nch, *ap->a_vpp);
1337 }
66325755 1338 }
b84de5af 1339 hammer_done_transaction(&trans);
fbb84158
MD
1340 if (error == 0)
1341 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
66325755 1342 return (error);
427e5fc6
MD
1343}
1344
66325755
MD
1345/*
1346 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1347 *
1348 * The operating system has already ensured that the directory entry
1349 * does not exist and done all appropriate namespace locking.
1350 */
427e5fc6
MD
1351static
1352int
66325755 1353hammer_vop_nmknod(struct vop_nmknod_args *ap)
427e5fc6 1354{
66325755
MD
1355 struct hammer_transaction trans;
1356 struct hammer_inode *dip;
1357 struct hammer_inode *nip;
1358 struct nchandle *nch;
1359 int error;
1360
1361 nch = ap->a_nch;
1362 dip = VTOI(ap->a_dvp);
1363
d113fda1
MD
1364 if (dip->flags & HAMMER_INODE_RO)
1365 return (EROFS);
93291532 1366 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
e63644f0 1367 return (error);
d113fda1 1368
66325755
MD
1369 /*
1370 * Create a transaction to cover the operations we perform.
1371 */
8cd0a023 1372 hammer_start_transaction(&trans, dip->hmp);
ce0138a6 1373 ++hammer_stats_file_iopsw;
66325755
MD
1374
1375 /*
1376 * Create a new filesystem object of the requested type. The
8cd0a023 1377 * returned inode will be referenced but not locked.
5a930e66
MD
1378 *
1379 * If mknod specifies a directory a pseudo-fs is created.
66325755 1380 */
5a930e66 1381 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
5a64efa1
MD
1382 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1383 NULL, &nip);
66325755 1384 if (error) {
b84de5af 1385 hammer_done_transaction(&trans);
66325755
MD
1386 *ap->a_vpp = NULL;
1387 return (error);
1388 }
66325755
MD
1389
1390 /*
1391 * Add the new filesystem object to the directory. This will also
1392 * bump the inode's link count.
1393 */
5a930e66
MD
1394 error = hammer_ip_add_directory(&trans, dip,
1395 nch->ncp->nc_name, nch->ncp->nc_nlen,
1396 nip);
66325755
MD
1397
1398 /*
1399 * Finish up.
1400 */
1401 if (error) {
a89aec1b 1402 hammer_rel_inode(nip, 0);
66325755
MD
1403 *ap->a_vpp = NULL;
1404 } else {
e8599db1 1405 error = hammer_get_vnode(nip, ap->a_vpp);
a89aec1b
MD
1406 hammer_rel_inode(nip, 0);
1407 if (error == 0) {
1408 cache_setunresolved(ap->a_nch);
1409 cache_setvp(ap->a_nch, *ap->a_vpp);
1410 }
66325755 1411 }
b84de5af 1412 hammer_done_transaction(&trans);
fbb84158
MD
1413 if (error == 0)
1414 hammer_knote(ap->a_dvp, NOTE_WRITE);
66325755 1415 return (error);
427e5fc6
MD
1416}
1417
66325755
MD
1418/*
1419 * hammer_vop_open { vp, mode, cred, fp }
1420 */
427e5fc6
MD
1421static
1422int
66325755 1423hammer_vop_open(struct vop_open_args *ap)
427e5fc6 1424{
9f5097dc
MD
1425 hammer_inode_t ip;
1426
ce0138a6 1427 ++hammer_stats_file_iopsr;
9f5097dc
MD
1428 ip = VTOI(ap->a_vp);
1429
1430 if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
d113fda1 1431 return (EROFS);
a89aec1b 1432 return(vop_stdopen(ap));
427e5fc6
MD
1433}
1434
66325755
MD
1435/*
1436 * hammer_vop_print { vp }
1437 */
427e5fc6
MD
1438static
1439int
66325755 1440hammer_vop_print(struct vop_print_args *ap)
427e5fc6
MD
1441{
1442 return EOPNOTSUPP;
1443}
1444
66325755 1445/*
6b4f890b 1446 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
66325755 1447 */
427e5fc6
MD
1448static
1449int
66325755 1450hammer_vop_readdir(struct vop_readdir_args *ap)
427e5fc6 1451{
36f82b23 1452 struct hammer_transaction trans;
6b4f890b
MD
1453 struct hammer_cursor cursor;
1454 struct hammer_inode *ip;
1455 struct uio *uio;
6b4f890b
MD
1456 hammer_base_elm_t base;
1457 int error;
1458 int cookie_index;
1459 int ncookies;
1460 off_t *cookies;
1461 off_t saveoff;
1462 int r;
ea434b6f 1463 int dtype;
6b4f890b 1464
ce0138a6 1465 ++hammer_stats_file_iopsr;
6b4f890b
MD
1466 ip = VTOI(ap->a_vp);
1467 uio = ap->a_uio;
b3deaf57
MD
1468 saveoff = uio->uio_offset;
1469
1470 if (ap->a_ncookies) {
1471 ncookies = uio->uio_resid / 16 + 1;
1472 if (ncookies > 1024)
1473 ncookies = 1024;
1474 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1475 cookie_index = 0;
1476 } else {
1477 ncookies = -1;
1478 cookies = NULL;
1479 cookie_index = 0;
1480 }
1481
36f82b23
MD
1482 hammer_simple_transaction(&trans, ip->hmp);
1483
b3deaf57
MD
1484 /*
1485 * Handle artificial entries
4c286c36
MD
1486 *
1487 * It should be noted that the minimum value for a directory
1488 * hash key on-media is 0x0000000100000000, so we can use anything
1489 * less than that to represent our 'special' key space.
1490 */
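	/*
	 * (saveoff values 0 and 1 are reserved for the synthesized "."
	 * and ".." entries; real directory entries resume at the
	 * on-media key space at or above 0x0000000100000000.)
	 */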
1491 error = 0;
1492 if (saveoff == 0) {
1493 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1494 if (r)
1495 goto done;
1496 if (cookies)
1497 cookies[cookie_index] = saveoff;
1498 ++saveoff;
1499 ++cookie_index;
1500 if (cookie_index == ncookies)
1501 goto done;
1502 }
1503 if (saveoff == 1) {
1504 if (ip->ino_data.parent_obj_id) {
1505 r = vop_write_dirent(&error, uio,
1506 ip->ino_data.parent_obj_id,
1507 DT_DIR, 2, "..");
1508 } else {
1509 r = vop_write_dirent(&error, uio,
1510 ip->obj_id, DT_DIR, 2, "..");
1511 }
1512 if (r)
1513 goto done;
1514 if (cookies)
1515 cookies[cookie_index] = saveoff;
1516 ++saveoff;
1517 ++cookie_index;
1518 if (cookie_index == ncookies)
1519 goto done;
1520 }
6b4f890b
MD
1521
1522 /*
1523 * Key range (begin and end inclusive) to scan. Directory keys
1524 * directly translate to a 64 bit 'seek' position.
1525 */
bcac4bbb 1526 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
5a930e66 1527 cursor.key_beg.localization = ip->obj_localization +
beec5dc4 1528 hammer_dir_localization(ip);
6b4f890b 1529 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1530 cursor.key_beg.create_tid = 0;
6b4f890b
MD
1531 cursor.key_beg.delete_tid = 0;
1532 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1533 cursor.key_beg.obj_type = 0;
b3deaf57 1534 cursor.key_beg.key = saveoff;
6b4f890b
MD
1535
1536 cursor.key_end = cursor.key_beg;
1537 cursor.key_end.key = HAMMER_MAX_KEY;
d5530d22
MD
1538 cursor.asof = ip->obj_asof;
1539 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
6b4f890b 1540
4e17f465 1541 error = hammer_ip_first(&cursor);
6b4f890b
MD
1542
1543 while (error == 0) {
11ad5ade 1544 error = hammer_ip_resolve_data(&cursor);
6b4f890b
MD
1545 if (error)
1546 break;
11ad5ade 1547 base = &cursor.leaf->base;
6b4f890b 1548 saveoff = base->key;
11ad5ade 1549 KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
6b4f890b 1550
7a04d74f
MD
1551 if (base->obj_id != ip->obj_id)
1552 panic("readdir: bad record at %p", cursor.node);
1553
ea434b6f
MD
1554 /*
1555 * Convert pseudo-filesystems into softlinks
1556 */
1557 dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
6b4f890b 1558 r = vop_write_dirent(
11ad5ade 1559 &error, uio, cursor.data->entry.obj_id,
ea434b6f 1560 dtype,
11ad5ade
MD
1561 cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF ,
1562 (void *)cursor.data->entry.name);
6b4f890b
MD
1563 if (r)
1564 break;
1565 ++saveoff;
1566 if (cookies)
1567 cookies[cookie_index] = base->key;
1568 ++cookie_index;
1569 if (cookie_index == ncookies)
1570 break;
1571 error = hammer_ip_next(&cursor);
1572 }
1573 hammer_done_cursor(&cursor);
1574
b3deaf57 1575done:
b84de5af 1576 hammer_done_transaction(&trans);
36f82b23 1577
6b4f890b
MD
1578 if (ap->a_eofflag)
1579 *ap->a_eofflag = (error == ENOENT);
6b4f890b
MD
1580 uio->uio_offset = saveoff;
1581 if (error && cookie_index == 0) {
b3deaf57
MD
1582 if (error == ENOENT)
1583 error = 0;
6b4f890b
MD
1584 if (cookies) {
1585 kfree(cookies, M_TEMP);
1586 *ap->a_ncookies = 0;
1587 *ap->a_cookies = NULL;
1588 }
1589 } else {
7a04d74f
MD
1590 if (error == ENOENT)
1591 error = 0;
6b4f890b
MD
1592 if (cookies) {
1593 *ap->a_ncookies = cookie_index;
1594 *ap->a_cookies = cookies;
1595 }
1596 }
1597 return(error);
427e5fc6
MD
1598}
1599
66325755
MD
1600/*
1601 * hammer_vop_readlink { vp, uio, cred }
1602 */
427e5fc6
MD
1603static
1604int
66325755 1605hammer_vop_readlink(struct vop_readlink_args *ap)
427e5fc6 1606{
36f82b23 1607 struct hammer_transaction trans;
7a04d74f
MD
1608 struct hammer_cursor cursor;
1609 struct hammer_inode *ip;
ea434b6f
MD
1610 char buf[32];
1611 u_int32_t localization;
1612 hammer_pseudofs_inmem_t pfsm;
7a04d74f
MD
1613 int error;
1614
1615 ip = VTOI(ap->a_vp);
36f82b23 1616
2f85fa4d
MD
1617 /*
1618 * Shortcut if the symlink data was stuffed into ino_data.
ea434b6f 1619 *
842e7a70
MD
1620 * Also expand special "@@PFS%05d" softlinks (expansion only
1621 * occurs for non-historical (current) accesses made from the
1622 * primary filesystem).
2f85fa4d
MD
1623 */
1624 if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
ea434b6f
MD
1625 char *ptr;
1626 int bytes;
1627
1628 ptr = ip->ino_data.ext.symlink;
1629 bytes = (int)ip->ino_data.size;
842e7a70
MD
1630 if (bytes == 10 &&
1631 ip->obj_asof == HAMMER_MAX_TID &&
1632 ip->obj_localization == 0 &&
1633 strncmp(ptr, "@@PFS", 5) == 0) {
ea434b6f
MD
1634 hammer_simple_transaction(&trans, ip->hmp);
1635 bcopy(ptr + 5, buf, 5);
1636 buf[5] = 0;
1637 localization = strtoul(buf, NULL, 10) << 16;
1638 pfsm = hammer_load_pseudofs(&trans, localization,
1639 &error);
1640 if (error == 0) {
4c038e17
MD
1641 if (pfsm->pfsd.mirror_flags &
1642 HAMMER_PFSD_SLAVE) {
cb3c760c 1643 /* vap->va_size == 26 */
4c038e17
MD
1644 ksnprintf(buf, sizeof(buf),
1645 "@@0x%016llx:%05d",
973c11b9 1646 (long long)pfsm->pfsd.sync_end_tid,
4c038e17
MD
1647 localization >> 16);
1648 } else {
cb3c760c
MD
1649 /* vap->va_size == 10 */
1650 ksnprintf(buf, sizeof(buf),
1651 "@@-1:%05d",
1652 localization >> 16);
1653#if 0
4c038e17
MD
1654 ksnprintf(buf, sizeof(buf),
1655 "@@0x%016llx:%05d",
973c11b9 1656 (long long)HAMMER_MAX_TID,
4c038e17 1657 localization >> 16);
cb3c760c 1658#endif
4c038e17 1659 }
ea434b6f
MD
1660 ptr = buf;
1661 bytes = strlen(buf);
1662 }
1663 if (pfsm)
1664 hammer_rel_pseudofs(trans.hmp, pfsm);
1665 hammer_done_transaction(&trans);
1666 }
1667 error = uiomove(ptr, bytes, ap->a_uio);
2f85fa4d
MD
1668 return(error);
1669 }
36f82b23 1670
2f85fa4d
MD
1671 /*
1672 * Long version
1673 */
1674 hammer_simple_transaction(&trans, ip->hmp);
ce0138a6 1675 ++hammer_stats_file_iopsr;
bcac4bbb 1676 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
7a04d74f
MD
1677
1678 /*
1679 * Key range (begin and end inclusive) to scan. Directory keys
1680 * directly translate to a 64 bit 'seek' position.
1681 */
5a930e66
MD
1682 cursor.key_beg.localization = ip->obj_localization +
1683 HAMMER_LOCALIZE_MISC;
7a04d74f 1684 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1685 cursor.key_beg.create_tid = 0;
7a04d74f
MD
1686 cursor.key_beg.delete_tid = 0;
1687 cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1688 cursor.key_beg.obj_type = 0;
1689 cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
d5530d22
MD
1690 cursor.asof = ip->obj_asof;
1691 cursor.flags |= HAMMER_CURSOR_ASOF;
7a04d74f 1692
45a014dc 1693 error = hammer_ip_lookup(&cursor);
7a04d74f
MD
1694 if (error == 0) {
1695 error = hammer_ip_resolve_data(&cursor);
1696 if (error == 0) {
11ad5ade
MD
1697 KKASSERT(cursor.leaf->data_len >=
1698 HAMMER_SYMLINK_NAME_OFF);
1699 error = uiomove(cursor.data->symlink.name,
1700 cursor.leaf->data_len -
1701 HAMMER_SYMLINK_NAME_OFF,
7a04d74f
MD
1702 ap->a_uio);
1703 }
1704 }
1705 hammer_done_cursor(&cursor);
b84de5af 1706 hammer_done_transaction(&trans);
7a04d74f 1707 return(error);
427e5fc6
MD
1708}
1709
66325755
MD
1710/*
1711 * hammer_vop_nremove { nch, dvp, cred }
1712 */
427e5fc6
MD
1713static
1714int
66325755 1715hammer_vop_nremove(struct vop_nremove_args *ap)
427e5fc6 1716{
b84de5af 1717 struct hammer_transaction trans;
e63644f0 1718 struct hammer_inode *dip;
b84de5af
MD
1719 int error;
1720
e63644f0
MD
1721 dip = VTOI(ap->a_dvp);
1722
1723 if (hammer_nohistory(dip) == 0 &&
93291532 1724 (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
e63644f0
MD
1725 return (error);
1726 }
1727
1728 hammer_start_transaction(&trans, dip->hmp);
ce0138a6 1729 ++hammer_stats_file_iopsw;
d7e278bb 1730 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
b84de5af 1731 hammer_done_transaction(&trans);
fbb84158
MD
1732 if (error == 0)
1733 hammer_knote(ap->a_dvp, NOTE_WRITE);
b84de5af 1734 return (error);
427e5fc6
MD
1735}
1736
66325755
MD
1737/*
1738 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1739 */
427e5fc6
MD
1740static
1741int
66325755 1742hammer_vop_nrename(struct vop_nrename_args *ap)
427e5fc6 1743{
8cd0a023
MD
1744 struct hammer_transaction trans;
1745 struct namecache *fncp;
1746 struct namecache *tncp;
1747 struct hammer_inode *fdip;
1748 struct hammer_inode *tdip;
1749 struct hammer_inode *ip;
1750 struct hammer_cursor cursor;
8cd0a023 1751 int64_t namekey;
5e435c92 1752 u_int32_t max_iterations;
11ad5ade 1753 int nlen, error;
8cd0a023 1754
f437a2ab
MD
1755 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1756 return(EXDEV);
1757 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1758 return(EXDEV);
1759
8cd0a023
MD
1760 fdip = VTOI(ap->a_fdvp);
1761 tdip = VTOI(ap->a_tdvp);
1762 fncp = ap->a_fnch->ncp;
1763 tncp = ap->a_tnch->ncp;
b3deaf57
MD
1764 ip = VTOI(fncp->nc_vp);
1765 KKASSERT(ip != NULL);
d113fda1 1766
f437a2ab
MD
1767 if (fdip->obj_localization != tdip->obj_localization)
1768 return(EXDEV);
1769 if (fdip->obj_localization != ip->obj_localization)
1770 return(EXDEV);
1771
d113fda1
MD
1772 if (fdip->flags & HAMMER_INODE_RO)
1773 return (EROFS);
1774 if (tdip->flags & HAMMER_INODE_RO)
1775 return (EROFS);
1776 if (ip->flags & HAMMER_INODE_RO)
1777 return (EROFS);
93291532 1778 if ((error = hammer_checkspace(fdip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
e63644f0 1779 return (error);
d113fda1 1780
8cd0a023 1781 hammer_start_transaction(&trans, fdip->hmp);
ce0138a6 1782 ++hammer_stats_file_iopsw;
8cd0a023
MD
1783
1784 /*
b3deaf57
MD
1785 * Remove tncp from the target directory and then link ip as
1786 * tncp. XXX pass trans to dounlink
42c7d26b
MD
1787 *
1788 * Force the inode sync-time to match the transaction so it is
1789 * in-sync with the creation of the target directory entry.
8cd0a023 1790 */
d7e278bb
MD
1791 error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
1792 ap->a_cred, 0, -1);
42c7d26b 1793 if (error == 0 || error == ENOENT) {
5a930e66
MD
1794 error = hammer_ip_add_directory(&trans, tdip,
1795 tncp->nc_name, tncp->nc_nlen,
1796 ip);
42c7d26b
MD
1797 if (error == 0) {
1798 ip->ino_data.parent_obj_id = tdip->obj_id;
cc0758d0 1799 ip->ino_data.ctime = trans.time;
47637bff 1800 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
42c7d26b
MD
1801 }
1802 }
b3deaf57
MD
1803 if (error)
1804 goto failed; /* XXX */
8cd0a023
MD
1805
1806 /*
1807 * Locate the record in the originating directory and remove it.
1808 *
1809 * Calculate the namekey and setup the key range for the scan. This
1810 * works kinda like a chained hash table where the lower 32 bits
1811 * of the namekey synthesize the chain.
1812 *
1813 * The key range is inclusive of both key_beg and key_end.
1814 */
5e435c92
MD
1815 namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
1816 &max_iterations);
6a37e7e4 1817retry:
bcac4bbb 1818 hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
5a930e66 1819 cursor.key_beg.localization = fdip->obj_localization +
beec5dc4 1820 hammer_dir_localization(fdip);
8cd0a023
MD
1821 cursor.key_beg.obj_id = fdip->obj_id;
1822 cursor.key_beg.key = namekey;
d5530d22 1823 cursor.key_beg.create_tid = 0;
8cd0a023
MD
1824 cursor.key_beg.delete_tid = 0;
1825 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1826 cursor.key_beg.obj_type = 0;
1827
1828 cursor.key_end = cursor.key_beg;
5e435c92 1829 cursor.key_end.key += max_iterations;
d5530d22
MD
1830 cursor.asof = fdip->obj_asof;
1831 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
8cd0a023
MD
1832
1833 /*
1834 * Scan all matching records (the chain), locate the one matching
a89aec1b 1835 * the requested path component.
8cd0a023
MD
1836 *
1837 * The hammer_ip_*() functions merge in-memory records with on-disk
1838 * records for the purposes of the search.
1839 */
4e17f465 1840 error = hammer_ip_first(&cursor);
a89aec1b 1841 while (error == 0) {
8cd0a023
MD
1842 if (hammer_ip_resolve_data(&cursor) != 0)
1843 break;
11ad5ade
MD
1844 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
1845 KKASSERT(nlen > 0);
1846 if (fncp->nc_nlen == nlen &&
1847 bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
8cd0a023
MD
1848 break;
1849 }
a89aec1b 1850 error = hammer_ip_next(&cursor);
8cd0a023 1851 }
8cd0a023
MD
1852
1853 /*
1854 * If all is ok we have to get the inode so we can adjust nlinks.
6a37e7e4
MD
1855 *
1856 * WARNING: hammer_ip_del_directory() may have to terminate the
1857 * cursor to avoid a recursion. It's ok to call hammer_done_cursor()
1858 * twice.
8cd0a023 1859 */
9944ae54 1860 if (error == 0)
6a37e7e4 1861 error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
b84de5af
MD
1862
1863 /*
1864 * XXX A deadlock here will break rename's atomicity for the purposes
1865 * of crash recovery.
1866 */
1867 if (error == EDEADLK) {
b84de5af 1868 hammer_done_cursor(&cursor);
b84de5af
MD
1869 goto retry;
1870 }
1871
1872 /*
1873 * Cleanup and tell the kernel that the rename succeeded.
1874 */
c0ade690 1875 hammer_done_cursor(&cursor);
fbb84158 1876 if (error == 0) {
6a37e7e4 1877 cache_rename(ap->a_fnch, ap->a_tnch);
fbb84158
MD
1878 hammer_knote(ap->a_fdvp, NOTE_WRITE);
1879 hammer_knote(ap->a_tdvp, NOTE_WRITE);
1880 if (ip->vp)
1881 hammer_knote(ip->vp, NOTE_RENAME);
1882 }
b84de5af 1883
b3deaf57 1884failed:
b84de5af 1885 hammer_done_transaction(&trans);
8cd0a023 1886 return (error);
427e5fc6
MD
1887}
1888
66325755
MD
1889/*
1890 * hammer_vop_nrmdir { nch, dvp, cred }
1891 */
427e5fc6
MD
1892static
1893int
66325755 1894hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
427e5fc6 1895{
b84de5af 1896 struct hammer_transaction trans;
e63644f0 1897 struct hammer_inode *dip;
b84de5af
MD
1898 int error;
1899
e63644f0
MD
1900 dip = VTOI(ap->a_dvp);
1901
1902 if (hammer_nohistory(dip) == 0 &&
93291532 1903 (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
e63644f0
MD
1904 return (error);
1905 }
1906
1907 hammer_start_transaction(&trans, dip->hmp);
ce0138a6 1908 ++hammer_stats_file_iopsw;
d7e278bb 1909 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
b84de5af 1910 hammer_done_transaction(&trans);
fbb84158
MD
1911 if (error == 0)
1912 hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
b84de5af 1913 return (error);
427e5fc6
MD
1914}
1915
349433c9
MD
1916/*
1917 * hammer_vop_markatime { vp, cred }
1918 */
1919static
1920int
1921hammer_vop_markatime(struct vop_markatime_args *ap)
1922{
1923 struct hammer_transaction trans;
1924 struct hammer_inode *ip;
1925
1926 ip = VTOI(ap->a_vp);
1927 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1928 return (EROFS);
1929 if (ip->flags & HAMMER_INODE_RO)
1930 return (EROFS);
1931 if (ip->hmp->mp->mnt_flag & MNT_NOATIME)
1932 return (0);
1933 hammer_start_transaction(&trans, ip->hmp);
1934 ++hammer_stats_file_iopsw;
1935
1936 ip->ino_data.atime = trans.time;
1937 hammer_modify_inode(ip, HAMMER_INODE_ATIME);
1938 hammer_done_transaction(&trans);
1939 hammer_knote(ap->a_vp, NOTE_ATTRIB);
1940 return (0);
1941}
1942
66325755
MD
1943/*
1944 * hammer_vop_setattr { vp, vap, cred }
1945 */
427e5fc6
MD
1946static
1947int
66325755 1948hammer_vop_setattr(struct vop_setattr_args *ap)
427e5fc6 1949{
8cd0a023
MD
1950 struct hammer_transaction trans;
1951 struct vattr *vap;
1952 struct hammer_inode *ip;
1953 int modflags;
1954 int error;
d5ef456e 1955 int truncating;
4a2796f3 1956 int blksize;
fbb84158 1957 int kflags;
4a2796f3 1958 int64_t aligned_size;
8cd0a023 1959 u_int32_t flags;
8cd0a023
MD
1960
1961 vap = ap->a_vap;
1962 ip = ap->a_vp->v_data;
1963 modflags = 0;
fbb84158 1964 kflags = 0;
8cd0a023
MD
1965
1966 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1967 return(EROFS);
d113fda1
MD
1968 if (ip->flags & HAMMER_INODE_RO)
1969 return (EROFS);
e63644f0 1970 if (hammer_nohistory(ip) == 0 &&
93291532 1971 (error = hammer_checkspace(ip->hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
e63644f0
MD
1972 return (error);
1973 }
8cd0a023
MD
1974
1975 hammer_start_transaction(&trans, ip->hmp);
ce0138a6 1976 ++hammer_stats_file_iopsw;
8cd0a023
MD
1977 error = 0;
1978
1979 if (vap->va_flags != VNOVAL) {
1980 flags = ip->ino_data.uflags;
1981 error = vop_helper_setattr_flags(&flags, vap->va_flags,
1982 hammer_to_unix_xid(&ip->ino_data.uid),
1983 ap->a_cred);
1984 if (error == 0) {
1985 if (ip->ino_data.uflags != flags) {
1986 ip->ino_data.uflags = flags;
cc0758d0 1987 ip->ino_data.ctime = trans.time;
8cd0a023 1988 modflags |= HAMMER_INODE_DDIRTY;
fbb84158 1989 kflags |= NOTE_ATTRIB;
8cd0a023
MD
1990 }
1991 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
1992 error = 0;
1993 goto done;
1994 }
1995 }
1996 goto done;
1997 }
1998 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
1999 error = EPERM;
2000 goto done;
2001 }
7538695e
MD
2002 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
2003 mode_t cur_mode = ip->ino_data.mode;
2004 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2005 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2006 uuid_t uuid_uid;
2007 uuid_t uuid_gid;
2008
2009 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
2010 ap->a_cred,
2011 &cur_uid, &cur_gid, &cur_mode);
2012 if (error == 0) {
2013 hammer_guid_to_uuid(&uuid_uid, cur_uid);
2014 hammer_guid_to_uuid(&uuid_gid, cur_gid);
2015 if (bcmp(&uuid_uid, &ip->ino_data.uid,
2016 sizeof(uuid_uid)) ||
2017 bcmp(&uuid_gid, &ip->ino_data.gid,
2018 sizeof(uuid_gid)) ||
2019 ip->ino_data.mode != cur_mode
2020 ) {
2021 ip->ino_data.uid = uuid_uid;
2022 ip->ino_data.gid = uuid_gid;
2023 ip->ino_data.mode = cur_mode;
cc0758d0
MD
2024 ip->ino_data.ctime = trans.time;
2025 modflags |= HAMMER_INODE_DDIRTY;
7538695e 2026 }
fbb84158 2027 kflags |= NOTE_ATTRIB;
8cd0a023
MD
2028 }
2029 }
11ad5ade 2030 while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
8cd0a023
MD
2031 switch(ap->a_vp->v_type) {
2032 case VREG:
11ad5ade 2033 if (vap->va_size == ip->ino_data.size)
d5ef456e 2034 break;
b84de5af
MD
2035 /*
2036 * XXX break atomicity; we can deadlock the backend
2037 * if we do not release the lock. Probably not a
2038 * big deal here.
2039 */
4a2796f3 2040 blksize = hammer_blocksize(vap->va_size);
11ad5ade 2041 if (vap->va_size < ip->ino_data.size) {
4a2796f3 2042 vtruncbuf(ap->a_vp, vap->va_size, blksize);
d5ef456e 2043 truncating = 1;
fbb84158 2044 kflags |= NOTE_WRITE;
d5ef456e 2045 } else {
c0ade690 2046 vnode_pager_setsize(ap->a_vp, vap->va_size);
d5ef456e 2047 truncating = 0;
fbb84158 2048 kflags |= NOTE_WRITE | NOTE_EXTEND;
c0ade690 2049 }
11ad5ade 2050 ip->ino_data.size = vap->va_size;
cc0758d0
MD
2051 ip->ino_data.mtime = trans.time;
2052 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
d5ef456e 2053
b84de5af
MD
2054 /*
2055 * on-media truncation is cached in the inode until
2056 * the inode is synchronized.
2057 */
d5ef456e 2058 if (truncating) {
47637bff 2059 hammer_ip_frontend_trunc(ip, vap->va_size);
0832c9bb
MD
2060#ifdef DEBUG_TRUNCATE
2061 if (HammerTruncIp == NULL)
2062 HammerTruncIp = ip;
2063#endif
b84de5af
MD
2064 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2065 ip->flags |= HAMMER_INODE_TRUNCATED;
2066 ip->trunc_off = vap->va_size;
0832c9bb
MD
2067#ifdef DEBUG_TRUNCATE
2068 if (ip == HammerTruncIp)
973c11b9
MD
2069 kprintf("truncate1 %016llx\n",
2070 (long long)ip->trunc_off);
0832c9bb 2071#endif
b84de5af
MD
2072 } else if (ip->trunc_off > vap->va_size) {
2073 ip->trunc_off = vap->va_size;
0832c9bb
MD
2074#ifdef DEBUG_TRUNCATE
2075 if (ip == HammerTruncIp)
973c11b9
MD
2076 kprintf("truncate2 %016llx\n",
2077 (long long)ip->trunc_off);
0832c9bb
MD
2078#endif
2079 } else {
2080#ifdef DEBUG_TRUNCATE
2081 if (ip == HammerTruncIp)
973c11b9
MD
2082 kprintf("truncate3 %016llx (ignored)\n",
2083 (long long)vap->va_size);
0832c9bb 2084#endif
b84de5af 2085 }
d5ef456e 2086 }
b84de5af 2087
d5ef456e
MD
2088 /*
2089 * If truncating we have to clean out a portion of
b84de5af
MD
2090 * the last block on-disk. We do this in the
2091 * front-end buffer cache.
d5ef456e 2092 */
4a2796f3
MD
2093 aligned_size = (vap->va_size + (blksize - 1)) &
2094 ~(int64_t)(blksize - 1);
b84de5af 2095 if (truncating && vap->va_size < aligned_size) {
d5ef456e
MD
2096 struct buf *bp;
2097 int offset;
2098
4a2796f3 2099 aligned_size -= blksize;
47637bff 2100
4a2796f3 2101 offset = (int)vap->va_size & (blksize - 1);
47637bff 2102 error = bread(ap->a_vp, aligned_size,
4a2796f3 2103 blksize, &bp);
47637bff 2104 hammer_ip_frontend_trunc(ip, aligned_size);
d5ef456e
MD
2105 if (error == 0) {
2106 bzero(bp->b_data + offset,
4a2796f3 2107 blksize - offset);
1b0ab2c3
MD
2108 /* must de-cache direct-io offset */
2109 bp->b_bio2.bio_offset = NOOFFSET;
d5ef456e
MD
2110 bdwrite(bp);
2111 } else {
47637bff 2112 kprintf("ERROR %d\n", error);
d5ef456e
MD
2113 brelse(bp);
2114 }
2115 }
76376933 2116 break;
8cd0a023 2117 case VDATABASE:
b84de5af
MD
2118 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2119 ip->flags |= HAMMER_INODE_TRUNCATED;
2120 ip->trunc_off = vap->va_size;
2121 } else if (ip->trunc_off > vap->va_size) {
2122 ip->trunc_off = vap->va_size;
2123 }
47637bff 2124 hammer_ip_frontend_trunc(ip, vap->va_size);
11ad5ade 2125 ip->ino_data.size = vap->va_size;
cc0758d0
MD
2126 ip->ino_data.mtime = trans.time;
2127 modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
fbb84158 2128 kflags |= NOTE_ATTRIB;
8cd0a023
MD
2129 break;
2130 default:
2131 error = EINVAL;
2132 goto done;
2133 }
d26d0ae9 2134 break;
8cd0a023
MD
2135 }
2136 if (vap->va_atime.tv_sec != VNOVAL) {
cc0758d0 2137 ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
ddfdf542 2138 modflags |= HAMMER_INODE_ATIME;
fbb84158 2139 kflags |= NOTE_ATTRIB;
8cd0a023
MD
2140 }
2141 if (vap->va_mtime.tv_sec != VNOVAL) {
cc0758d0 2142 ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
ddfdf542 2143 modflags |= HAMMER_INODE_MTIME;
fbb84158 2144 kflags |= NOTE_ATTRIB;
8cd0a023
MD
2145 }
2146 if (vap->va_mode != (mode_t)VNOVAL) {
7538695e
MD
2147 mode_t cur_mode = ip->ino_data.mode;
2148 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2149 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2150
2151 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2152 cur_uid, cur_gid, &cur_mode);
2153 if (error == 0 && ip->ino_data.mode != cur_mode) {
2154 ip->ino_data.mode = cur_mode;
cc0758d0 2155 ip->ino_data.ctime = trans.time;
8cd0a023 2156 modflags |= HAMMER_INODE_DDIRTY;
fbb84158 2157 kflags |= NOTE_ATTRIB;
8cd0a023
MD
2158 }
2159 }
2160done:
b84de5af 2161 if (error == 0)
47637bff 2162 hammer_modify_inode(ip, modflags);
b84de5af 2163 hammer_done_transaction(&trans);
fbb84158 2164 hammer_knote(ap->a_vp, kflags);
8cd0a023 2165 return (error);
427e5fc6
MD
2166}
2167
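/*
 * Editor's sketch (not part of the original file): the partial-block
 * arithmetic used by the truncation case above.  For example, truncating
 * to 5000 bytes with a 16384-byte block zeroes bytes 5000..16383 of the
 * buffer starting at offset 0.  The helper name is hypothetical.
 */
static void
last_block_zero_range(int64_t new_size, int blksize,
		      int64_t *block_offp, int *zero_offp, int *zero_lenp)
{
	int64_t aligned_size;

	aligned_size = (new_size + (blksize - 1)) & ~(int64_t)(blksize - 1);
	if (new_size < aligned_size) {
		*block_offp = aligned_size - blksize;	/* block to bread() */
		*zero_offp = (int)new_size & (blksize - 1);
		*zero_lenp = blksize - *zero_offp;	/* bytes to bzero() */
	} else {
		*block_offp = aligned_size;		/* already aligned */
		*zero_offp = 0;
		*zero_lenp = 0;				/* nothing to zero */
	}
}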
66325755
MD
2168/*
2169 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2170 */
427e5fc6
MD
2171static
2172int
66325755 2173hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
427e5fc6 2174{
7a04d74f
MD
2175 struct hammer_transaction trans;
2176 struct hammer_inode *dip;
2177 struct hammer_inode *nip;
2178 struct nchandle *nch;
2179 hammer_record_t record;
2180 int error;
2181 int bytes;
2182
2183 ap->a_vap->va_type = VLNK;
2184
2185 nch = ap->a_nch;
2186 dip = VTOI(ap->a_dvp);
2187
d113fda1
MD
2188 if (dip->flags & HAMMER_INODE_RO)
2189 return (EROFS);
93291532 2190 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0)
e63644f0 2191 return (error);
d113fda1 2192
7a04d74f
MD
2193 /*
2194 * Create a transaction to cover the operations we perform.
2195 */
2196 hammer_start_transaction(&trans, dip->hmp);
ce0138a6 2197 ++hammer_stats_file_iopsw;
7a04d74f
MD
2198
2199 /*
2200 * Create a new filesystem object of the requested type. The
2201 * returned inode will be referenced but not locked.
2202 */
2203
5a930e66 2204 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
5a64efa1
MD
2205 dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2206 NULL, &nip);
7a04d74f 2207 if (error) {
b84de5af 2208 hammer_done_transaction(&trans);
7a04d74f
MD
2209 *ap->a_vpp = NULL;
2210 return (error);
2211 }
2212
7a04d74f
MD
2213 /*
2214 * Add a record representing the symlink. symlink stores the link
2215 * as pure data, not a string, and is not \0 terminated.
2216 */
2217 if (error == 0) {
7a04d74f
MD
2218 bytes = strlen(ap->a_target);
2219
2f85fa4d
MD
2220 if (bytes <= HAMMER_INODE_BASESYMLEN) {
2221 bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2222 } else {
2223 record = hammer_alloc_mem_record(nip, bytes);
2224 record->type = HAMMER_MEM_RECORD_GENERAL;
2225
5a930e66
MD
2226 record->leaf.base.localization = nip->obj_localization +
2227 HAMMER_LOCALIZE_MISC;
2f85fa4d
MD
2228 record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2229 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2230 record->leaf.data_len = bytes;
2231 KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2232 bcopy(ap->a_target, record->data->symlink.name, bytes);
2233 error = hammer_ip_add_record(&trans, record);
2234 }
42c7d26b
MD
2235
2236 /*
2237 * Set the file size to the length of the link.
2238 */
2239 if (error == 0) {
11ad5ade 2240 nip->ino_data.size = bytes;
47637bff 2241 hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
42c7d26b 2242 }
7a04d74f 2243 }
1f07f686 2244 if (error == 0)
5a930e66
MD
2245 error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
2246 nch->ncp->nc_nlen, nip);
7a04d74f
MD
2247
2248 /*
2249 * Finish up.
2250 */
2251 if (error) {
2252 hammer_rel_inode(nip, 0);
7a04d74f
MD
2253 *ap->a_vpp = NULL;
2254 } else {
e8599db1 2255 error = hammer_get_vnode(nip, ap->a_vpp);
7a04d74f
MD
2256 hammer_rel_inode(nip, 0);
2257 if (error == 0) {
2258 cache_setunresolved(ap->a_nch);
2259 cache_setvp(ap->a_nch, *ap->a_vpp);
fbb84158 2260 hammer_knote(ap->a_dvp, NOTE_WRITE);
7a04d74f
MD
2261 }
2262 }
b84de5af 2263 hammer_done_transaction(&trans);
7a04d74f 2264 return (error);
427e5fc6
MD
2265}
2266
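/*
 * Editor's sketch (not part of the original file): the storage decision
 * made above.  Short targets are copied into ino_data.ext.symlink and
 * read back via the readlink shortcut; longer targets are stored as a
 * HAMMER_RECTYPE_FIX record keyed by HAMMER_FIXKEY_SYMLINK.  The helper
 * name is hypothetical.
 */
static __inline int
symlink_stored_inline(int target_len)
{
	return (target_len <= HAMMER_INODE_BASESYMLEN);
}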
66325755
MD
2267/*
2268 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2269 */
427e5fc6
MD
2270static
2271int
66325755 2272hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
427e5fc6 2273{
b84de5af 2274 struct hammer_transaction trans;
e63644f0 2275 struct hammer_inode *dip;
b84de5af
MD
2276 int error;
2277
e63644f0
MD
2278 dip = VTOI(ap->a_dvp);
2279
2280 if (hammer_nohistory(dip) == 0 &&
93291532 2281 (error = hammer_checkspace(dip->hmp, HAMMER_CHKSPC_CREATE)) != 0) {
e63644f0
MD
2282 return (error);
2283 }
2284
2285 hammer_start_transaction(&trans, dip->hmp);
ce0138a6 2286 ++hammer_stats_file_iopsw;
b84de5af 2287 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
d7e278bb 2288 ap->a_cred, ap->a_flags, -1);
b84de5af
MD
2289 hammer_done_transaction(&trans);
2290
2291 return (error);
427e5fc6
MD
2292}
2293
7dc57964
MD
2294/*
2295 * hammer_vop_ioctl { vp, command, data, fflag, cred }
2296 */
2297static
2298int
2299hammer_vop_ioctl(struct vop_ioctl_args *ap)
2300{
2301 struct hammer_inode *ip = ap->a_vp->v_data;
2302
ce0138a6 2303 ++hammer_stats_file_iopsr;
7dc57964
MD
2304 return(hammer_ioctl(ip, ap->a_command, ap->a_data,
2305 ap->a_fflag, ap->a_cred));
2306}
2307
513ca7d7
MD
2308static
2309int
2310hammer_vop_mountctl(struct vop_mountctl_args *ap)
2311{
dad088a5
MD
2312 static const struct mountctl_opt extraopt[] = {
2313 { HMNT_NOHISTORY, "nohistory" },
2314 { HMNT_MASTERID, "master" },
2315 { 0, NULL}
2316
2317 };
2318 struct hammer_mount *hmp;
513ca7d7 2319 struct mount *mp;
dad088a5 2320 int usedbytes;
513ca7d7
MD
2321 int error;
2322
dad088a5
MD
2323 error = 0;
2324 usedbytes = 0;
513ca7d7 2325 mp = ap->a_head.a_ops->head.vv_mount;
dad088a5
MD
2326 KKASSERT(mp->mnt_data != NULL);
2327 hmp = (struct hammer_mount *)mp->mnt_data;
513ca7d7
MD
2328
2329 switch(ap->a_op) {
dad088a5 2330
513ca7d7
MD
2331 case MOUNTCTL_SET_EXPORT:
2332 if (ap->a_ctllen != sizeof(struct export_args))
2333 error = EINVAL;
b424ca30
MD
2334 else
2335 error = hammer_vfs_export(mp, ap->a_op,
513ca7d7
MD
2336 (const struct export_args *)ap->a_ctl);
2337 break;
dad088a5
MD
2338 case MOUNTCTL_MOUNTFLAGS:
2339 {
2340 /*
2341 * Call standard mountctl VOP function
2342 * so we get user mount flags.
2343 */
2344 error = vop_stdmountctl(ap);
2345 if (error)
2346 break;
2347
2348 usedbytes = *ap->a_res;
2349
eac446c5 2350 if (usedbytes > 0 && usedbytes < ap->a_buflen) {
dad088a5
MD
2351 usedbytes += vfs_flagstostr(hmp->hflags, extraopt, ap->a_buf,
2352 ap->a_buflen - usedbytes,
2353 &error);
dad088a5
MD
2354 }
2355
2356 *ap->a_res += usedbytes;
2357 break;
2358 }
513ca7d7 2359 default:
726e0641 2360 error = vop_stdmountctl(ap);
513ca7d7
MD
2361 break;
2362 }
2363 return(error);
2364}
2365
66325755
MD
2366/*
2367 * hammer_vop_strategy { vp, bio }
8cd0a023
MD
2368 *
2369 * Strategy call, used for regular file read & write only. Note that the
2370 * bp may represent a cluster.
2371 *
2372 * To simplify operation and allow better optimizations in the future,
2373 * this code does not make any assumptions with regards to buffer alignment
2374 * or size.
66325755 2375 */
427e5fc6
MD
2376static
2377int
66325755 2378hammer_vop_strategy(struct vop_strategy_args *ap)
427e5fc6 2379{
8cd0a023
MD
2380 struct buf *bp;
2381 int error;
2382
2383 bp = ap->a_bio->bio_buf;
2384
2385 switch(bp->b_cmd) {
2386 case BUF_CMD_READ:
2387 error = hammer_vop_strategy_read(ap);
2388 break;
2389 case BUF_CMD_WRITE:
2390 error = hammer_vop_strategy_write(ap);
2391 break;
2392 default:
059819e3
MD
2393 bp->b_error = error = EINVAL;
2394 bp->b_flags |= B_ERROR;
2395 biodone(ap->a_bio);
8cd0a023
MD
2396 break;
2397 }
8cd0a023 2398 return (error);
427e5fc6
MD
2399}
2400
8cd0a023
MD
2401/*
2402 * Read from a regular file. Iterate the related records and fill in the
2403 * BIO/BUF. Gaps are zero-filled.
2404 *
2405 * The support code in hammer_object.c should be used to deal with mixed
2406 * in-memory and on-disk records.
2407 *
4a2796f3
MD
2408 * NOTE: Can be called from the cluster code with an oversized buf.
2409 *
8cd0a023
MD
2410 * XXX atime update
2411 */
2412static
2413int
2414hammer_vop_strategy_read(struct vop_strategy_args *ap)
2415{
36f82b23
MD
2416 struct hammer_transaction trans;
2417 struct hammer_inode *ip;
39d8fd63 2418 struct hammer_inode *dip;
8cd0a023 2419 struct hammer_cursor cursor;
8cd0a023 2420 hammer_base_elm_t base;
4a2796f3 2421 hammer_off_t disk_offset;
8cd0a023 2422 struct bio *bio;
a99b9ea2 2423 struct bio *nbio;
8cd0a023
MD
2424 struct buf *bp;
2425 int64_t rec_offset;
a89aec1b 2426 int64_t ran_end;
195c19a1 2427 int64_t tmp64;
8cd0a023
MD
2428 int error;
2429 int boff;
2430 int roff;
2431 int n;
2432
2433 bio = ap->a_bio;
2434 bp = bio->bio_buf;
36f82b23 2435 ip = ap->a_vp->v_data;
8cd0a023 2436
a99b9ea2
MD
2437 /*
2438 * The zone-2 disk offset may have been set by the cluster code via
4a2796f3 2439 * a BMAP operation, or else should be NOOFFSET.
a99b9ea2 2440 *
4a2796f3 2441 * Checking the high bits for a match against zone-2 should suffice.
a99b9ea2
MD
2442 */
2443 nbio = push_bio(bio);
6aeaa7bd 2444 if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
1b0ab2c3
MD
2445 HAMMER_ZONE_LARGE_DATA) {
2446 error = hammer_io_direct_read(ip->hmp, nbio, NULL);
a99b9ea2
MD
2447 return (error);
2448 }
2449
2450 /*
4a2796f3
MD
2451 * Well, that sucked. Do it the hard way. If all the stars are
2452 * aligned we may still be able to issue a direct-read.
a99b9ea2 2453 */
36f82b23 2454 hammer_simple_transaction(&trans, ip->hmp);
47637bff 2455 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
8cd0a023
MD
2456
2457 /*
2458 * Key range (begin and end inclusive) to scan. Note that the keys
c0ade690
MD
2459 * stored in the actual records represent BASE+LEN, not BASE. The
2460 * first record containing bio_offset will have a key > bio_offset.
8cd0a023 2461 */
5a930e66
MD
2462 cursor.key_beg.localization = ip->obj_localization +
2463 HAMMER_LOCALIZE_MISC;
8cd0a023 2464 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 2465 cursor.key_beg.create_tid = 0;
8cd0a023 2466 cursor.key_beg.delete_tid = 0;
8cd0a023 2467 cursor.key_beg.obj_type = 0;
c0ade690 2468 cursor.key_beg.key = bio->bio_offset + 1;
d5530d22 2469 cursor.asof = ip->obj_asof;
bf3b416b 2470 cursor.flags |= HAMMER_CURSOR_ASOF;
8cd0a023
MD
2471
2472 cursor.key_end = cursor.key_beg;
11ad5ade 2473 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
b84de5af 2474#if 0
11ad5ade 2475 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
a89aec1b
MD
2476 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2477 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2478 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
b84de5af
MD
2479 } else
2480#endif
2481 {
c0ade690 2482 ran_end = bio->bio_offset + bp->b_bufsize;
a89aec1b
MD
2483 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2484 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
195c19a1
MD
2485 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2486 if (tmp64 < ran_end)
a89aec1b
MD
2487 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2488 else
7f7c1f84 2489 cursor.key_end.key = ran_end + MAXPHYS + 1;
a89aec1b 2490 }
d26d0ae9 2491 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
8cd0a023 2492
4e17f465 2493 error = hammer_ip_first(&cursor);
8cd0a023
MD
2494 boff = 0;
2495
a89aec1b 2496 while (error == 0) {
47637bff
MD
2497 /*
2498 * Get the base file offset of the record. The key for
2499 * data records is (base + bytes) rather than (base).
2500 */
11ad5ade 2501 base = &cursor.leaf->base;
11ad5ade 2502 rec_offset = base->key - cursor.leaf->data_len;
8cd0a023 2503
66325755 2504 /*
a89aec1b 2505 * Calculate the gap, if any, and zero-fill it.
1fef775e
MD
2506 *
2507 * n is the offset of the start of the record versus our
2508 * current seek offset in the bio.
66325755 2509 */
8cd0a023
MD
2510 n = (int)(rec_offset - (bio->bio_offset + boff));
2511 if (n > 0) {
a89aec1b
MD
2512 if (n > bp->b_bufsize - boff)
2513 n = bp->b_bufsize - boff;
8cd0a023
MD
2514 bzero((char *)bp->b_data + boff, n);
2515 boff += n;
2516 n = 0;
66325755 2517 }
8cd0a023
MD
2518
2519 /*
2520 * Calculate the data offset in the record and the number
2521 * of bytes we can copy.
a89aec1b 2522 *
1fef775e
MD
2523 * There are two degenerate cases. First, boff may already
2524 * be at bp->b_bufsize. Secondly, the data offset within
2525 * the record may exceed the record's size.
8cd0a023
MD
2526 */
2527 roff = -n;
b84de5af 2528 rec_offset += roff;
11ad5ade 2529 n = cursor.leaf->data_len - roff;
1fef775e
MD
2530 if (n <= 0) {
2531 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
2532 n = 0;
2533 } else if (n > bp->b_bufsize - boff) {
8cd0a023 2534 n = bp->b_bufsize - boff;
1fef775e 2535 }
059819e3 2536
b84de5af 2537 /*
47637bff
MD
2538 * Deal with cached truncations. This cool bit of code
2539 * allows truncate()/ftruncate() to avoid having to sync
2540 * the file.
2541 *
2542 * If the frontend is truncated then all backend records are
2543 * subject to the frontend's truncation.
2544 *
2545 * If the backend is truncated then backend records on-disk
2546 * (but not in-memory) are subject to the backend's
2547 * truncation. In-memory records owned by the backend
2548 * represent data written after the truncation point on the
2549 * backend and must not be truncated.
2550 *
2551 * Truncate operations deal with frontend buffer cache
2552 * buffers and frontend-owned in-memory records synchronously.
b84de5af 2553 */
47637bff
MD
2554 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2555 if (hammer_cursor_ondisk(&cursor) ||
2556 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
2557 if (ip->trunc_off <= rec_offset)
2558 n = 0;
2559 else if (ip->trunc_off < rec_offset + n)
2560 n = (int)(ip->trunc_off - rec_offset);
2561 }
2562 }
2563 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2564 if (hammer_cursor_ondisk(&cursor)) {
2565 if (ip->sync_trunc_off <= rec_offset)
2566 n = 0;
2567 else if (ip->sync_trunc_off < rec_offset + n)
2568 n = (int)(ip->sync_trunc_off - rec_offset);
2569 }
2570 }
b84de5af
MD
2571
2572 /*
47637bff
MD
2573 * Try to issue a direct read into our bio if possible,
2574 * otherwise resolve the element data into a hammer_buffer
2575 * and copy.
4a2796f3
MD
2576 *
2577 * The buffer on-disk should be zeroed past any real
2578 * truncation point, but may not be for any synthesized
2579 * truncation point from above.
b84de5af 2580 */
1b0ab2c3 2581 disk_offset = cursor.leaf->data_offset + roff;
4a2796f3 2582 if (boff == 0 && n == bp->b_bufsize &&
1b0ab2c3
MD
2583 hammer_cursor_ondisk(&cursor) &&
2584 (disk_offset & HAMMER_BUFMASK) == 0) {
2585 KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2586 HAMMER_ZONE_LARGE_DATA);
4a2796f3 2587 nbio->bio_offset = disk_offset;
1b0ab2c3
MD
2588 error = hammer_io_direct_read(trans.hmp, nbio,
2589 cursor.leaf);
47637bff
MD
2590 goto done;
2591 } else if (n) {
2592 error = hammer_ip_resolve_data(&cursor);
2593 if (error == 0) {
2594 bcopy((char *)cursor.data + roff,
2595 (char *)bp->b_data + boff, n);
2596 }
b84de5af 2597 }
47637bff
MD
2598 if (error)
2599 break;
2600
2601 /*
2602 * Iterate until we have filled the request.
2603 */
2604 boff += n;
8cd0a023 2605 if (boff == bp->b_bufsize)
66325755 2606 break;
a89aec1b 2607 error = hammer_ip_next(&cursor);
66325755
MD
2608 }
2609
2610 /*
8cd0a023 2611 * There may have been a gap after the last record
66325755 2612 */
8cd0a023
MD
2613 if (error == ENOENT)
2614 error = 0;
2615 if (error == 0 && boff != bp->b_bufsize) {
7f7c1f84 2616 KKASSERT(boff < bp->b_bufsize);
8cd0a023
MD
2617 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2618 /* boff = bp->b_bufsize; */
2619 }
2620 bp->b_resid = 0;
059819e3
MD
2621 bp->b_error = error;
2622 if (error)
2623 bp->b_flags |= B_ERROR;
2624 biodone(ap->a_bio);
47637bff
MD
2625
2626done:
39d8fd63
MD
2627 /*
2628 * Cache the b-tree node for the last data read in cache[1].
2629 *
2630 * If we hit the file EOF then also cache the node in the
2631 * governing directory's cache[3]; it will be used to initialize
2632 * the inode's cache[1] for any inodes looked up via the directory.
2633 *
2634 * This doesn't reduce disk accesses since the B-Tree chain is
2635 * likely cached, but it does reduce cpu overhead when looking
2636 * up file offsets for cpdup/tar/cpio style iterations.
2637 */
47637bff 2638 if (cursor.node)
bcac4bbb 2639 hammer_cache_node(&ip->cache[1], cursor.node);
39d8fd63
MD
2640 if (ran_end >= ip->ino_data.size) {
2641 dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
2642 ip->obj_asof, ip->obj_localization);
2643 if (dip) {
2644 hammer_cache_node(&dip->cache[3], cursor.node);
2645 hammer_rel_inode(dip, 0);
2646 }
2647 }
47637bff
MD
2648 hammer_done_cursor(&cursor);
2649 hammer_done_transaction(&trans);
8cd0a023
MD
2650 return(error);
2651}
2652
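/*
 * Editor's sketch (not part of the original file): the cached-truncation
 * clamp applied twice above.  Given a record starting at rec_offset with
 * n usable bytes, a cached truncation at trunc_off reduces n as follows.
 * The helper name is hypothetical.
 */
static int
clamp_to_cached_trunc(int64_t rec_offset, int n, int64_t trunc_off)
{
	if (trunc_off <= rec_offset)
		return (0);
	if (trunc_off < rec_offset + n)
		return ((int)(trunc_off - rec_offset));
	return (n);
}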
a99b9ea2
MD
2653/*
2654 * BMAP operation - used to support cluster_read() only.
2655 *
2656 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
2657 *
2658 * This routine may return EOPNOTSUPP if the operation is not supported for
2659 * the specified offset. The contents of the pointer arguments do not
2660 * need to be initialized in that case.
2661 *
2662 * If a disk address is available and properly aligned return 0 with
2663 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
2664 * to the run-length relative to that offset. Callers may assume that
2665 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
2666 * large, so return EOPNOTSUPP if it is not sufficiently large.
2667 */
2668static
2669int
2670hammer_vop_bmap(struct vop_bmap_args *ap)
2671{
2672 struct hammer_transaction trans;
2673 struct hammer_inode *ip;
2674 struct hammer_cursor cursor;
2675 hammer_base_elm_t base;
2676 int64_t rec_offset;
2677 int64_t ran_end;
2678 int64_t tmp64;
2679 int64_t base_offset;
2680 int64_t base_disk_offset;
2681 int64_t last_offset;
2682 hammer_off_t last_disk_offset;
2683 hammer_off_t disk_offset;
2684 int rec_len;
2685 int error;
4a2796f3 2686 int blksize;
a99b9ea2 2687
ce0138a6 2688 ++hammer_stats_file_iopsr;
a99b9ea2
MD
2689 ip = ap->a_vp->v_data;
2690
2691 /*
2692 * We can only BMAP regular files. We can't BMAP database files,
2693 * directories, etc.
2694 */
2695 if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
2696 return(EOPNOTSUPP);
2697
2698 /*
2699 * bmap is typically called with runp/runb both NULL when used
2700 * for writing. We do not support BMAP for writing atm.
2701 */
4a2796f3 2702 if (ap->a_cmd != BUF_CMD_READ)
a99b9ea2
MD
2703 return(EOPNOTSUPP);
2704
2705 /*
2706 * Scan the B-Tree to acquire blockmap addresses, then translate
2707 * to raw addresses.
2708 */
2709 hammer_simple_transaction(&trans, ip->hmp);
cb51be26 2710#if 0
973c11b9
MD
2711 kprintf("bmap_beg %016llx ip->cache %p\n",
2712 (long long)ap->a_loffset, ip->cache[1]);
cb51be26 2713#endif
a99b9ea2
MD
2714 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2715
2716 /*
2717 * Key range (begin and end inclusive) to scan. Note that the keys
2718 * stored in the actual records represent BASE+LEN, not BASE. The
2719 * first record containing bio_offset will have a key > bio_offset.
2720 */
5a930e66
MD
2721 cursor.key_beg.localization = ip->obj_localization +
2722 HAMMER_LOCALIZE_MISC;
a99b9ea2
MD
2723 cursor.key_beg.obj_id = ip->obj_id;
2724 cursor.key_beg.create_tid = 0;
2725 cursor.key_beg.delete_tid = 0;
2726 cursor.key_beg.obj_type = 0;
2727 if (ap->a_runb)
2728 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
2729 else
2730 cursor.key_beg.key = ap->a_loffset + 1;
2731 if (cursor.key_beg.key < 0)
2732 cursor.key_beg.key = 0;
2733 cursor.asof = ip->obj_asof;
bf3b416b 2734 cursor.flags |= HAMMER_CURSOR_ASOF;
a99b9ea2
MD
2735
2736 cursor.key_end = cursor.key_beg;
2737 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2738
2739 ran_end = ap->a_loffset + MAXPHYS;
2740 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2741 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2742 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2743 if (tmp64 < ran_end)
2744 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2745 else
2746 cursor.key_end.key = ran_end + MAXPHYS + 1;
2747
2748 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2749
2750 error = hammer_ip_first(&cursor);
2751 base_offset = last_offset = 0;
2752 base_disk_offset = last_disk_offset = 0;
2753
2754 while (error == 0) {
2755 /*
2756 * Get the base file offset of the record. The key for
2757 * data records is (base + bytes) rather than (base).
4a2796f3
MD
2758 *
2759 * NOTE: rec_offset + rec_len may exceed the end-of-file.
2760 * The extra bytes should be zero on-disk and the BMAP op
2761 * should still be ok.
a99b9ea2
MD
2762 */
2763 base = &cursor.leaf->base;
2764 rec_offset = base->key - cursor.leaf->data_len;
2765 rec_len = cursor.leaf->data_len;
2766
2767 /*
4a2796f3
MD
2768 * Incorporate any cached truncation.
2769 *
2770 * NOTE: Modifications to rec_len based on synthesized
2771 * truncation points remove the guarantee that any extended
2772 * data on disk is zero (since the truncations may not have
2773 * taken place on-media yet).
a99b9ea2
MD
2774 */
2775 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2776 if (hammer_cursor_ondisk(&cursor) ||
2777 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
2778 if (ip->trunc_off <= rec_offset)
2779 rec_len = 0;
2780 else if (ip->trunc_off < rec_offset + rec_len)
2781 rec_len = (int)(ip->trunc_off - rec_offset);
2782 }
2783 }
2784 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2785 if (hammer_cursor_ondisk(&cursor)) {
2786 if (ip->sync_trunc_off <= rec_offset)
2787 rec_len = 0;
2788 else if (ip->sync_trunc_off < rec_offset + rec_len)
2789 rec_len = (int)(ip->sync_trunc_off - rec_offset);
2790 }
2791 }
2792
2793 /*
2794 * Accumulate information. If we have hit a discontiguous
2795 * block reset base_offset unless we are already beyond the
2796 * requested offset. If we are, that's it, we stop.
2797 */
a99b9ea2
MD
2798 if (error)
2799 break;
1b0ab2c3
MD
2800 if (hammer_cursor_ondisk(&cursor)) {
2801 disk_offset = cursor.leaf->data_offset;
2802 if (rec_offset != last_offset ||
2803 disk_offset != last_disk_offset) {
2804 if (rec_offset > ap->a_loffset)
2805 break;
2806 base_offset = rec_offset;
2807 base_disk_offset = disk_offset;
2808 }
2809 last_offset = rec_offset + rec_len;
2810 last_disk_offset = disk_offset + rec_len;
a99b9ea2 2811 }
a99b9ea2
MD
2812 error = hammer_ip_next(&cursor);
2813 }
2814
2815#if 0
2816 kprintf("BMAP %016llx: %016llx - %016llx\n",
973c11b9
MD
2817 (long long)ap->a_loffset,
2818 (long long)base_offset,
2819 (long long)last_offset);
2820 kprintf("BMAP %16s: %016llx - %016llx\n", "",
2821 (long long)base_disk_offset,
2822 (long long)last_disk_offset);
a99b9ea2
MD
2823#endif
2824
cb51be26 2825 if (cursor.node) {
bcac4bbb 2826 hammer_cache_node(&ip->cache[1], cursor.node);
cb51be26 2827#if 0
973c11b9
MD
2828 kprintf("bmap_end2 %016llx ip->cache %p\n",
2829 (long long)ap->a_loffset, ip->cache[1]);
cb51be26
MD
2830#endif
2831 }
a99b9ea2
MD
2832 hammer_done_cursor(&cursor);
2833 hammer_done_transaction(&trans);
2834
4a2796f3
MD
2835 /*
2836 * If we couldn't find any records or the records we did find were
2837 * all behind the requested offset, return failure. A forward
2838 * truncation can leave a hole w/ no on-disk records.
2839 */
2840 if (last_offset == 0 || last_offset < ap->a_loffset)
2841 return (EOPNOTSUPP);
2842
2843 /*
2844 * Figure out the block size at the requested offset and adjust
2845 * our limits so the cluster_read() does not create inappropriately
2846 * sized buffer cache buffers.
2847 */
2848 blksize = hammer_blocksize(ap->a_loffset);
2849 if (hammer_blocksize(base_offset) != blksize) {
2850 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
2851 }
2852 if (last_offset != ap->a_loffset &&
2853 hammer_blocksize(last_offset - 1) != blksize) {
2854 last_offset = hammer_blockdemarc(ap->a_loffset,
2855 last_offset - 1);
2856 }
2857
2858 /*
2859 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
2860 * from occurring.
2861 */
2862 disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
2863
1b0ab2c3
MD
2864 if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
2865 /*
2866 * Only large-data zones can be direct-IOd
2867 */
2868 error = EOPNOTSUPP;
2869 } else if ((disk_offset & HAMMER_BUFMASK) ||
2870 (last_offset - ap->a_loffset) < blksize) {
2871 /*
2872 * doffsetp is not aligned or the forward run size does
2873 * not cover a whole buffer, disallow the direct I/O.
2874 */
a99b9ea2
MD
2875 error = EOPNOTSUPP;
2876 } else {
1b0ab2c3
MD
2877 /*
2878 * We're good.
2879 */
4a2796f3
MD
2880 *ap->a_doffsetp = disk_offset;
2881 if (ap->a_runb) {
2882 *ap->a_runb = ap->a_loffset - base_offset;
2883 KKASSERT(*ap->a_runb >= 0);
a99b9ea2 2884 }
4a2796f3
MD
2885 if (ap->a_runp) {
2886 *ap->a_runp = last_offset - ap->a_loffset;
2887 KKASSERT(*ap->a_runp >= 0);
2888 }
2889 error = 0;
a99b9ea2
MD
2890 }
2891 return(error);
2892}
2893
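/*
 * Editor's sketch (not part of the original file): how the BMAP results
 * above relate to the contiguous extent [base_offset, last_offset) that
 * the scan accumulated.  The structure and function are hypothetical and
 * only restate the *a_doffsetp / *a_runb / *a_runp assignments.
 */
struct bmap_run_sketch {
	hammer_off_t doffset;	/* zone-2 address answering a_loffset */
	int64_t runb;		/* contiguous bytes preceding a_loffset */
	int64_t runp;		/* contiguous bytes from a_loffset onward */
};

static struct bmap_run_sketch
bmap_run_for(hammer_off_t base_disk_offset, int64_t base_offset,
	     int64_t last_offset, int64_t loffset)
{
	struct bmap_run_sketch run;

	run.doffset = base_disk_offset + (loffset - base_offset);
	run.runb = loffset - base_offset;
	run.runp = last_offset - loffset;
	return (run);
}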
8cd0a023 2894/*
059819e3 2895 * Write to a regular file. Because this is a strategy call the OS is
bcac4bbb 2896 * trying to actually get data onto the media.
8cd0a023
MD
2897 */
2898static
2899int
2900hammer_vop_strategy_write(struct vop_strategy_args *ap)
2901{
47637bff 2902 hammer_record_t record;
af209b0f 2903 hammer_mount_t hmp;
8cd0a023
MD
2904 hammer_inode_t ip;
2905 struct bio *bio;
2906 struct buf *bp;
a7e9bef1 2907 int blksize;
0832c9bb
MD
2908 int bytes;
2909 int error;
8cd0a023
MD
2910
2911 bio = ap->a_bio;
2912 bp = bio->bio_buf;
2913 ip = ap->a_vp->v_data;
af209b0f 2914 hmp = ip->hmp;
d113fda1 2915
a7e9bef1
MD
2916 blksize = hammer_blocksize(bio->bio_offset);
2917 KKASSERT(bp->b_bufsize == blksize);
4a2796f3 2918
059819e3
MD
2919 if (ip->flags & HAMMER_INODE_RO) {
2920 bp->b_error = EROFS;
2921 bp->b_flags |= B_ERROR;
2922 biodone(ap->a_bio);
2923 return(EROFS);
2924 }
b84de5af 2925
29ce0677
MD
2926 /*
2927 * Interlock with inode destruction (no in-kernel or directory
2928 * topology visibility). If we queue new IO while trying to
2929 * destroy the inode we can deadlock the vtrunc call in
2930 * hammer_inode_unloadable_check().
35a49944
MD
2931 *
2932 * Besides, there's no point flushing a bp associated with an
2933 * inode that is being destroyed on-media and has no kernel
2934 * references.
29ce0677 2935 */
35a49944
MD
2936 if ((ip->flags | ip->sync_flags) &
2937 (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
29ce0677
MD
2938 bp->b_resid = 0;
2939 biodone(ap->a_bio);
2940 return(0);
2941 }
2942
b84de5af 2943 /*
a99b9ea2
MD
2944 * Reserve space and issue a direct-write from the front-end.
2945 * NOTE: The direct_io code will hammer_bread/bcopy smaller
2946 * allocations.
47637bff 2947 *
a99b9ea2
MD
2948 * An in-memory record will be installed to reference the storage
2949 * until the flusher can get to it.
47637bff
MD
2950 *
2951 * Since we own the high level bio the front-end will not try to
0832c9bb 2952 * do a direct-read until the write completes.
a99b9ea2
MD
2953 *
2954 * NOTE: The only time we do not reserve a full-sized buffer's
2955 * worth of data is if the file is small. We do not try to
2956 * allocate a fragment (from the small-data zone) at the end of
2957 * an otherwise large file as this can lead to wildly separated
2958 * data.
47637bff 2959 */
0832c9bb
MD
2960 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
2961 KKASSERT(bio->bio_offset < ip->ino_data.size);
a99b9ea2 2962 if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
4a2796f3 2963 bytes = bp->b_bufsize;
b84de5af 2964 else
a99b9ea2 2965 bytes = ((int)ip->ino_data.size + 15) & ~15;
0832c9bb
MD
2966
2967 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
2968 bytes, &error);
2969 if (record) {
1b0ab2c3 2970 hammer_io_direct_write(hmp, record, bio);
4a2796f3
MD
2971 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
2972 hammer_flush_inode(ip, 0);
0832c9bb 2973 } else {
a99b9ea2 2974 bp->b_bio2.bio_offset = NOOFFSET;
0832c9bb
MD
2975 bp->b_error = error;
2976 bp->b_flags |= B_ERROR;
2977 biodone(ap->a_bio);
2978 }
0832c9bb 2979 return(error);
059819e3
MD
2980}
2981
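/*
 * Editor's sketch (not part of the original file): the reservation sizing
 * used above.  Only a small file written at offset 0 reserves less than a
 * full buffer; e.g. a 100 byte file reserves 112 bytes ((100 + 15) & ~15),
 * while anything larger than HAMMER_BUFSIZE / 2 reserves bp->b_bufsize.
 * The helper name is hypothetical.
 */
static int
bulk_reserve_bytes(off_t bio_offset, int64_t file_size, int bufsize)
{
	if (bio_offset == 0 && file_size <= HAMMER_BUFSIZE / 2)
		return (((int)file_size + 15) & ~15);
	return (bufsize);
}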
8cd0a023
MD
2982/*
2983 * dounlink - disconnect a directory entry
2984 *
2985 * XXX whiteout support not really in yet
2986 */
2987static int
b84de5af 2988hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
d7e278bb
MD
2989 struct vnode *dvp, struct ucred *cred,
2990 int flags, int isdir)
8cd0a023 2991{
8cd0a023
MD
2992 struct namecache *ncp;
2993 hammer_inode_t dip;
2994 hammer_inode_t ip;
8cd0a023 2995 struct hammer_cursor cursor;
8cd0a023 2996 int64_t namekey;
5e435c92 2997 u_int32_t max_iterations;
11ad5ade 2998 int nlen, error;
8cd0a023
MD
2999
3000 /*
3001 * Calculate the namekey and setup the key range for the scan. This
3002 * works kinda like a chained hash table where the lower 32 bits
3003 * of the namekey synthesize the chain.
3004 *
3005 * The key range is inclusive of both key_beg and key_end.
3006 */
3007 dip = VTOI(dvp);
3008 ncp = nch->ncp;
d113fda1
MD
3009
3010 if (dip->flags & HAMMER_INODE_RO)
3011 return (EROFS);
3012
5e435c92
MD
3013 namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
3014 &max_iterations);
6a37e7e4 3015retry:
bcac4bbb 3016 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
5a930e66 3017 cursor.key_beg.localization = dip->obj_localization +
beec5dc4 3018 hammer_dir_localization(dip);
8cd0a023
MD
3019 cursor.key_beg.obj_id = dip->obj_id;
3020 cursor.key_beg.key = namekey;
d5530d22 3021 cursor.key_beg.create_tid = 0;
8cd0a023
MD
3022 cursor.key_beg.delete_tid = 0;
3023 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
3024 cursor.key_beg.obj_type = 0;
3025
3026 cursor.key_end = cursor.key_beg;
5e435c92 3027 cursor.key_end.key += max_iterations;
d5530d22
MD
3028 cursor.asof = dip->obj_asof;
3029 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
8cd0a023 3030
8cd0a023
MD
3031 /*
3032 * Scan all matching records (the chain), locate the one matching
3033 * the requested path component. info->last_error contains the
3034 * error code on search termination and could be 0, ENOENT, or
3035 * something else.
3036 *
3037 * The hammer_ip_*() functions merge in-memory records with on-disk
3038 * records for the purposes of the search.
3039 */
4e17f465
MD
3040 error = hammer_ip_first(&cursor);
3041
a89aec1b
MD
3042 while (error == 0) {
3043 error = hammer_ip_resolve_data(&cursor);
3044 if (error)
66325755 3045 break;
11ad5ade
MD
3046 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
3047 KKASSERT(nlen > 0);
3048 if (ncp->nc_nlen == nlen &&
3049 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
66325755
MD
3050 break;
3051 }
a89aec1b 3052 error = hammer_ip_next(&cursor);
66325755 3053 }
8cd0a023
MD
3054
3055 /*
3056 * If all is ok we have to get the inode so we can adjust nlinks.
269c5eab
MD
3057 * To avoid a deadlock with the flusher we must release the inode
3058 * lock on the directory when acquiring the inode for the entry.
b3deaf57
MD
3059 *
3060 * If the target is a directory, it must be empty.
8cd0a023 3061 */
66325755 3062 if (error == 0) {
269c5eab 3063 hammer_unlock(&cursor.ip->lock);
bcac4bbb 3064 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
ddfdf542
MD
3065 dip->hmp->asof,
3066 cursor.data->entry.localization,
3067 0, &error);
269c5eab 3068 hammer_lock_sh(&cursor.ip->lock);
46fe7ae1 3069 if (error == ENOENT) {
4c286c36
MD
3070 kprintf("HAMMER: WARNING: Removing "
3071 "dirent w/missing inode \"%s\"\n"
3072 "\tobj_id = %016llx\n",
3073 ncp->nc_name,
3074 (long long)cursor.data->entry.obj_id);
3075 error = 0;
46fe7ae1 3076 }
1f07f686 3077
d7e278bb
MD
3078 /*
3079 * If isdir >= 0 we validate that the entry is or is not a
3080 * directory. If isdir < 0 we don't care.
3081 */
4c286c36 3082 if (error == 0 && isdir >= 0 && ip) {
d7e278bb
MD
3083 if (isdir &&
3084 ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3085 error = ENOTDIR;
3086 } else if (isdir == 0 &&
3087 ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3088 error = EISDIR;
3089 }
3090 }
3091
1f07f686
MD
3092 /*
3093 * If we are trying to remove a directory the directory must
3094 * be empty.
3095 *
3f9b4cfa
MD
3096 * The check directory code can loop and deadlock/retry. Our
3097 * own cursor's node locks must be released to avoid a 3-way
3098 * deadlock with the flusher if the check directory code
3099 * blocks.
3100 *
3101 * If any changes whatsoever have been made to the cursor
3102 * set EDEADLK and retry.
c9ce54d6
MD
3103 *
3104 * WARNING: See warnings in hammer_unlock_cursor()
3105 * function.
1f07f686 3106 */
4c286c36
MD
3107 if (error == 0 && ip && ip->ino_data.obj_type ==
3108 HAMMER_OBJTYPE_DIRECTORY) {
3f9b4cfa 3109 hammer_unlock_cursor(&cursor);
98f7132d 3110 error = hammer_ip_check_directory_empty(trans, ip);
3f9b4cfa
MD
3111 hammer_lock_cursor(&cursor);
3112 if (cursor.flags & HAMMER_CURSOR_RETEST) {
3113 kprintf("HAMMER: Warning: avoided deadlock "
3114 "on rmdir '%s'\n",
3115 ncp->nc_name);
3116 error = EDEADLK;
3117 }
b3deaf57 3118 }
1f07f686 3119
6a37e7e4 3120 /*
1f07f686
MD
3121 * Delete the directory entry.
3122 *
6a37e7e4 3123 * WARNING: hammer_ip_del_directory() may have to terminate
1f07f686 3124 * the cursor to avoid a deadlock. It is ok to call
6a37e7e4
MD
3125 * hammer_done_cursor() twice.
3126 */
b84de5af 3127 if (error == 0) {
b84de5af
MD
3128 error = hammer_ip_del_directory(trans, &cursor,
3129 dip, ip);
b84de5af 3130 }
269c5eab 3131 hammer_done_cursor(&cursor);
8cd0a023
MD
3132 if (error == 0) {
3133 cache_setunresolved(nch);
3134 cache_setvp(nch, NULL);
3135 /* XXX locking */
4c286c36 3136 if (ip && ip->vp) {
fbb84158 3137 hammer_knote(ip->vp, NOTE_DELETE);
8cd0a023 3138 cache_inval_vp(ip->vp, CINV_DESTROY);
fbb84158 3139 }
8cd0a023 3140 }
af209b0f
MD
3141 if (ip)
3142 hammer_rel_inode(ip, 0);
269c5eab
MD
3143 } else {
3144 hammer_done_cursor(&cursor);
66325755 3145 }
6a37e7e4
MD
3146 if (error == EDEADLK)
3147 goto retry;
9c448776 3148
66325755 3149 return (error);
66325755
MD
3150}
3151
7a04d74f
MD
3152/************************************************************************
3153 * FIFO AND SPECFS OPS *
3154 ************************************************************************
3155 *
3156 */
3157
3158static int
3159hammer_vop_fifoclose (struct vop_close_args *ap)
3160{
3161 /* XXX update itimes */
3162 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3163}
3164
3165static int
3166hammer_vop_fiforead (struct vop_read_args *ap)
3167{
3168 int error;
3169
3170 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3171 /* XXX update access time */
3172 return (error);
3173}
3174
3175static int
3176hammer_vop_fifowrite (struct vop_write_args *ap)
3177{
3178 int error;
3179
3180 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3181 /* XXX update access time */
3182 return (error);
3183}
3184
fbb84158
MD
3185static
3186int
3187hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3188{
3189 int error;
3190
3191 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3192 if (error)
3193 error = hammer_vop_kqfilter(ap);
3194 return(error);
3195}
3196
fbb84158
MD
3197/************************************************************************
3198 * KQFILTER OPS *
3199 ************************************************************************
3200 *
3201 */
3202static void filt_hammerdetach(struct knote *kn);
3203static int filt_hammerread(struct knote *kn, long hint);
3204static int filt_hammerwrite(struct knote *kn, long hint);
3205static int filt_hammervnode(struct knote *kn, long hint);
3206
3207static struct filterops hammerread_filtops =
3208 { 1, NULL, filt_hammerdetach, filt_hammerread };
3209static struct filterops hammerwrite_filtops =
3210 { 1, NULL, filt_hammerdetach, filt_hammerwrite };
3211static struct filterops hammervnode_filtops =
3212 { 1, NULL, filt_hammerdetach, filt_hammervnode };
3213
3214static
3215int
3216hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3217{
3218 struct vnode *vp = ap->a_vp;
3219 struct knote *kn = ap->a_kn;
0202303b 3220 lwkt_tokref vlock;
fbb84158
MD
3221
3222 switch (kn->kn_filter) {
3223 case EVFILT_READ:
3224 kn->kn_fop = &hammerread_filtops;
3225 break;
3226 case EVFILT_WRITE:
3227 kn->kn_fop = &hammerwrite_filtops;
3228 break;
3229 case EVFILT_VNODE:
3230 kn->kn_fop = &hammervnode_filtops;
3231 break;
3232 default:
3233 return (1);
3234 }
3235
3236 kn->kn_hook = (caddr_t)vp;
3237
0202303b 3238 lwkt_gettoken(&vlock, &vp->v_token);
fbb84158 3239 SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
0202303b 3240 lwkt_reltoken(&vlock);
fbb84158
MD
3241
3242 return(0);
3243}
3244
3245static void
3246filt_hammerdetach(struct knote *kn)
3247{
3248 struct vnode *vp = (void *)kn->kn_hook;
0202303b 3249 lwkt_tokref vlock;
fbb84158 3250
0202303b 3251 lwkt_gettoken(&vlock, &vp->v_token);
fbb84158
MD
3252 SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
3253 kn, knote, kn_selnext);
0202303b 3254 lwkt_reltoken(&vlock);
fbb84158
MD
3255}
3256
3257static int
3258filt_hammerread(struct knote *kn, long hint)
3259{
3260 struct vnode *vp = (void *)kn->kn_hook;
3261 hammer_inode_t ip = VTOI(vp);
3262
3263 if (hint == NOTE_REVOKE) {
3264 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3265 return(1);
3266 }
3267 kn->kn_data = ip->ino_data.size - kn->kn_fp->f_offset;
3268 return (kn->kn_data != 0);
3269}
3270
3271static int
3272filt_hammerwrite(struct knote *kn, long hint)
3273{
3274 if (hint == NOTE_REVOKE)
3275 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3276 kn->kn_data = 0;
3277 return (1);
3278}