Introduce experimental MPLS over ethernet support. Add 'options MPLS'
[dragonfly.git] / sys / vfs / hammer / hammer_vnops.c
CommitLineData
427e5fc6 1/*
b84de5af 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
427e5fc6
MD
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
a56cb012 34 * $DragonFly: src/sys/vfs/hammer/hammer_vnops.c,v 1.82 2008/07/07 03:49:51 dillon Exp $
427e5fc6
MD
35 */
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/fcntl.h>
41#include <sys/namecache.h>
42#include <sys/vnode.h>
43#include <sys/lockf.h>
44#include <sys/event.h>
45#include <sys/stat.h>
b3deaf57 46#include <sys/dirent.h>
c0ade690 47#include <vm/vm_extern.h>
7a04d74f 48#include <vfs/fifofs/fifo.h>
427e5fc6
MD
49#include "hammer.h"
50
51/*
52 * USERFS VNOPS
53 */
54/*static int hammer_vop_vnoperate(struct vop_generic_args *);*/
66325755
MD
55static int hammer_vop_fsync(struct vop_fsync_args *);
56static int hammer_vop_read(struct vop_read_args *);
57static int hammer_vop_write(struct vop_write_args *);
58static int hammer_vop_access(struct vop_access_args *);
59static int hammer_vop_advlock(struct vop_advlock_args *);
60static int hammer_vop_close(struct vop_close_args *);
61static int hammer_vop_ncreate(struct vop_ncreate_args *);
62static int hammer_vop_getattr(struct vop_getattr_args *);
63static int hammer_vop_nresolve(struct vop_nresolve_args *);
64static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
65static int hammer_vop_nlink(struct vop_nlink_args *);
66static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
67static int hammer_vop_nmknod(struct vop_nmknod_args *);
68static int hammer_vop_open(struct vop_open_args *);
69static int hammer_vop_pathconf(struct vop_pathconf_args *);
70static int hammer_vop_print(struct vop_print_args *);
71static int hammer_vop_readdir(struct vop_readdir_args *);
72static int hammer_vop_readlink(struct vop_readlink_args *);
73static int hammer_vop_nremove(struct vop_nremove_args *);
74static int hammer_vop_nrename(struct vop_nrename_args *);
75static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
76static int hammer_vop_setattr(struct vop_setattr_args *);
77static int hammer_vop_strategy(struct vop_strategy_args *);
a99b9ea2 78static int hammer_vop_bmap(struct vop_bmap_args *ap);
66325755
MD
79static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
80static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
7dc57964 81static int hammer_vop_ioctl(struct vop_ioctl_args *);
513ca7d7 82static int hammer_vop_mountctl(struct vop_mountctl_args *);
427e5fc6 83
7a04d74f
MD
84static int hammer_vop_fifoclose (struct vop_close_args *);
85static int hammer_vop_fiforead (struct vop_read_args *);
86static int hammer_vop_fifowrite (struct vop_write_args *);
87
88static int hammer_vop_specclose (struct vop_close_args *);
89static int hammer_vop_specread (struct vop_read_args *);
90static int hammer_vop_specwrite (struct vop_write_args *);
91
427e5fc6
MD
92struct vop_ops hammer_vnode_vops = {
93 .vop_default = vop_defaultop,
94 .vop_fsync = hammer_vop_fsync,
c0ade690
MD
95 .vop_getpages = vop_stdgetpages,
96 .vop_putpages = vop_stdputpages,
427e5fc6
MD
97 .vop_read = hammer_vop_read,
98 .vop_write = hammer_vop_write,
99 .vop_access = hammer_vop_access,
100 .vop_advlock = hammer_vop_advlock,
101 .vop_close = hammer_vop_close,
102 .vop_ncreate = hammer_vop_ncreate,
103 .vop_getattr = hammer_vop_getattr,
104 .vop_inactive = hammer_vop_inactive,
105 .vop_reclaim = hammer_vop_reclaim,
106 .vop_nresolve = hammer_vop_nresolve,
107 .vop_nlookupdotdot = hammer_vop_nlookupdotdot,
108 .vop_nlink = hammer_vop_nlink,
109 .vop_nmkdir = hammer_vop_nmkdir,
110 .vop_nmknod = hammer_vop_nmknod,
111 .vop_open = hammer_vop_open,
112 .vop_pathconf = hammer_vop_pathconf,
113 .vop_print = hammer_vop_print,
114 .vop_readdir = hammer_vop_readdir,
115 .vop_readlink = hammer_vop_readlink,
116 .vop_nremove = hammer_vop_nremove,
117 .vop_nrename = hammer_vop_nrename,
118 .vop_nrmdir = hammer_vop_nrmdir,
119 .vop_setattr = hammer_vop_setattr,
a99b9ea2 120 .vop_bmap = hammer_vop_bmap,
427e5fc6
MD
121 .vop_strategy = hammer_vop_strategy,
122 .vop_nsymlink = hammer_vop_nsymlink,
7dc57964 123 .vop_nwhiteout = hammer_vop_nwhiteout,
513ca7d7
MD
124 .vop_ioctl = hammer_vop_ioctl,
125 .vop_mountctl = hammer_vop_mountctl
427e5fc6
MD
126};
127
7a04d74f
MD
128struct vop_ops hammer_spec_vops = {
129 .vop_default = spec_vnoperate,
130 .vop_fsync = hammer_vop_fsync,
131 .vop_read = hammer_vop_specread,
132 .vop_write = hammer_vop_specwrite,
133 .vop_access = hammer_vop_access,
134 .vop_close = hammer_vop_specclose,
135 .vop_getattr = hammer_vop_getattr,
136 .vop_inactive = hammer_vop_inactive,
137 .vop_reclaim = hammer_vop_reclaim,
138 .vop_setattr = hammer_vop_setattr
139};
140
141struct vop_ops hammer_fifo_vops = {
142 .vop_default = fifo_vnoperate,
143 .vop_fsync = hammer_vop_fsync,
144 .vop_read = hammer_vop_fiforead,
145 .vop_write = hammer_vop_fifowrite,
146 .vop_access = hammer_vop_access,
147 .vop_close = hammer_vop_fifoclose,
148 .vop_getattr = hammer_vop_getattr,
149 .vop_inactive = hammer_vop_inactive,
150 .vop_reclaim = hammer_vop_reclaim,
151 .vop_setattr = hammer_vop_setattr
152};
153
0832c9bb
MD
154#ifdef DEBUG_TRUNCATE
155struct hammer_inode *HammerTruncIp;
156#endif
157
b84de5af
MD
158static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
159 struct vnode *dvp, struct ucred *cred, int flags);
8cd0a023
MD
160static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
161static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
162
427e5fc6
MD
163#if 0
164static
165int
166hammer_vop_vnoperate(struct vop_generic_args *)
167{
168 return (VOCALL(&hammer_vnode_vops, ap));
169}
170#endif
171
66325755
MD
172/*
173 * hammer_vop_fsync { vp, waitfor }
ddfdf542
MD
174 *
175 * fsync() an inode to disk and wait for it to be completely committed
176 * such that the information would not be undone if a crash occured after
177 * return.
66325755 178 */
427e5fc6
MD
179static
180int
66325755 181hammer_vop_fsync(struct vop_fsync_args *ap)
427e5fc6 182{
b84de5af 183 hammer_inode_t ip = VTOI(ap->a_vp);
c0ade690 184
e8599db1 185 vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL);
af209b0f 186 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
b84de5af
MD
187 if (ap->a_waitfor == MNT_WAIT)
188 hammer_wait_inode(ip);
059819e3 189 return (ip->error);
427e5fc6
MD
190}
191
66325755
MD
192/*
193 * hammer_vop_read { vp, uio, ioflag, cred }
194 */
427e5fc6
MD
195static
196int
66325755 197hammer_vop_read(struct vop_read_args *ap)
427e5fc6 198{
66325755 199 struct hammer_transaction trans;
c0ade690 200 hammer_inode_t ip;
66325755
MD
201 off_t offset;
202 struct buf *bp;
203 struct uio *uio;
204 int error;
205 int n;
8cd0a023 206 int seqcount;
4a2796f3
MD
207 int ioseqcount;
208 int blksize;
66325755
MD
209
210 if (ap->a_vp->v_type != VREG)
211 return (EINVAL);
212 ip = VTOI(ap->a_vp);
213 error = 0;
4a2796f3
MD
214 uio = ap->a_uio;
215
216 /*
217 * Allow the UIO's size to override the sequential heuristic.
218 */
219 blksize = hammer_blocksize(uio->uio_offset);
220 seqcount = (uio->uio_resid + (blksize - 1)) / blksize;
221 ioseqcount = ap->a_ioflag >> 16;
222 if (seqcount < ioseqcount)
223 seqcount = ioseqcount;
66325755 224
8cd0a023 225 hammer_start_transaction(&trans, ip->hmp);
66325755
MD
226
227 /*
4a2796f3
MD
228 * Access the data typically in HAMMER_BUFSIZE blocks via the
229 * buffer cache, but HAMMER may use a variable block size based
230 * on the offset.
66325755 231 */
11ad5ade 232 while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
4a2796f3
MD
233 int64_t base_offset;
234 int64_t file_limit;
235
236 blksize = hammer_blocksize(uio->uio_offset);
237 offset = (int)uio->uio_offset & (blksize - 1);
238 base_offset = uio->uio_offset - offset;
239
a99b9ea2 240 if (hammer_debug_cluster_enable) {
4a2796f3
MD
241 /*
242 * Use file_limit to prevent cluster_read() from
243 * creating buffers of the wrong block size past
244 * the demarc.
245 */
246 file_limit = ip->ino_data.size;
247 if (base_offset < HAMMER_XDEMARC &&
248 file_limit > HAMMER_XDEMARC) {
249 file_limit = HAMMER_XDEMARC;
250 }
251 error = cluster_read(ap->a_vp,
252 file_limit, base_offset,
253 blksize, MAXPHYS,
254 seqcount, &bp);
a99b9ea2 255 } else {
4a2796f3 256 error = bread(ap->a_vp, base_offset, blksize, &bp);
a99b9ea2 257 }
66325755 258 if (error) {
4a2796f3 259 kprintf("error %d\n", error);
66325755
MD
260 brelse(bp);
261 break;
262 }
7bc5b8c2 263
c0ade690 264 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
4a2796f3 265 n = blksize - offset;
66325755
MD
266 if (n > uio->uio_resid)
267 n = uio->uio_resid;
11ad5ade
MD
268 if (n > ip->ino_data.size - uio->uio_offset)
269 n = (int)(ip->ino_data.size - uio->uio_offset);
66325755 270 error = uiomove((char *)bp->b_data + offset, n, uio);
7bc5b8c2
MD
271
272 /* data has a lower priority then meta-data */
273 bp->b_flags |= B_AGE;
66325755 274 bqrelse(bp);
af209b0f
MD
275 if (error)
276 break;
66325755 277 }
b84de5af
MD
278 if ((ip->flags & HAMMER_INODE_RO) == 0 &&
279 (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
bcac4bbb 280 ip->ino_data.atime = trans.time;
ddfdf542 281 hammer_modify_inode(ip, HAMMER_INODE_ATIME);
b84de5af
MD
282 }
283 hammer_done_transaction(&trans);
66325755 284 return (error);
427e5fc6
MD
285}
286
66325755
MD
287/*
288 * hammer_vop_write { vp, uio, ioflag, cred }
289 */
427e5fc6
MD
290static
291int
66325755 292hammer_vop_write(struct vop_write_args *ap)
427e5fc6 293{
66325755
MD
294 struct hammer_transaction trans;
295 struct hammer_inode *ip;
4a2796f3 296 hammer_mount_t hmp;
66325755 297 struct uio *uio;
4a2796f3 298 int offset;
47637bff 299 off_t base_offset;
66325755
MD
300 struct buf *bp;
301 int error;
302 int n;
c0ade690 303 int flags;
4a2796f3 304 int delta;
cb51be26 305 int seqcount;
66325755
MD
306
307 if (ap->a_vp->v_type != VREG)
308 return (EINVAL);
309 ip = VTOI(ap->a_vp);
4a2796f3 310 hmp = ip->hmp;
66325755 311 error = 0;
cb51be26 312 seqcount = ap->a_ioflag >> 16;
66325755 313
d113fda1
MD
314 if (ip->flags & HAMMER_INODE_RO)
315 return (EROFS);
316
66325755
MD
317 /*
318 * Create a transaction to cover the operations we perform.
319 */
4a2796f3 320 hammer_start_transaction(&trans, hmp);
66325755
MD
321 uio = ap->a_uio;
322
323 /*
324 * Check append mode
325 */
326 if (ap->a_ioflag & IO_APPEND)
11ad5ade 327 uio->uio_offset = ip->ino_data.size;
66325755
MD
328
329 /*
af209b0f
MD
330 * Check for illegal write offsets. Valid range is 0...2^63-1.
331 *
332 * NOTE: the base_off assignment is required to work around what
333 * I consider to be a GCC-4 optimization bug.
66325755 334 */
af209b0f
MD
335 if (uio->uio_offset < 0) {
336 hammer_done_transaction(&trans);
337 return (EFBIG);
338 }
339 base_offset = uio->uio_offset + uio->uio_resid; /* work around gcc-4 */
340 if (uio->uio_resid > 0 && base_offset <= 0) {
b84de5af 341 hammer_done_transaction(&trans);
66325755 342 return (EFBIG);
9c448776 343 }
66325755
MD
344
345 /*
4a2796f3
MD
346 * Access the data typically in HAMMER_BUFSIZE blocks via the
347 * buffer cache, but HAMMER may use a variable block size based
348 * on the offset.
66325755
MD
349 */
350 while (uio->uio_resid > 0) {
d5ef456e 351 int fixsize = 0;
4a2796f3
MD
352 int blksize;
353 int blkmask;
d5ef456e 354
a7e9bef1 355 if ((error = hammer_checkspace(hmp, HAMMER_CHECKSPACE_SLOP_WRITE)) != 0)
e63644f0
MD
356 break;
357
a9d52b76
MD
358 blksize = hammer_blocksize(uio->uio_offset);
359
059819e3 360 /*
4a2796f3
MD
361 * Do not allow HAMMER to blow out the buffer cache. Very
362 * large UIOs can lockout other processes due to bwillwrite()
363 * mechanics.
47637bff
MD
364 *
365 * Do not allow HAMMER to blow out system memory by
4a2796f3
MD
366 * accumulating too many records. Records are so well
367 * decoupled from the buffer cache that it is possible
368 * for userland to push data out to the media via
369 * direct-write, but build up the records queued to the
370 * backend faster then the backend can flush them out.
371 * HAMMER has hit its write limit but the frontend has
372 * no pushback to slow it down.
47637bff 373 *
df301614
MD
374 * The hammer inode is not locked during these operations.
375 * The vnode is locked which can interfere with the pageout
376 * daemon for non-UIO_NOCOPY writes but should not interfere
377 * with the buffer cache. Even so, we cannot afford to
378 * allow the pageout daemon to build up too many dirty buffer
379 * cache buffers.
380 */
381 bwillwrite(blksize);
382
383 /*
384 * Pending record flush check.
059819e3 385 */
df301614 386 if (hmp->rsv_recs > hammer_limit_recs / 2) {
4a2796f3 387 /*
df301614 388 * Get the inode on the flush list
4a2796f3 389 */
df301614
MD
390 if (ip->rsv_recs >= 64)
391 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
392 else if (ip->rsv_recs >= 16)
393 hammer_flush_inode(ip, 0);
4a2796f3
MD
394
395 /*
df301614
MD
396 * Keep the flusher going if the system keeps
397 * queueing records.
4a2796f3 398 */
df301614
MD
399 delta = hmp->count_newrecords -
400 hmp->last_newrecords;
401 if (delta < 0 || delta > hammer_limit_recs / 2) {
402 hmp->last_newrecords = hmp->count_newrecords;
403 hammer_sync_hmp(hmp, MNT_NOWAIT);
4a2796f3
MD
404 }
405
df301614
MD
406 /*
407 * If we have gotten behind start slowing
408 * down the writers.
409 */
410 delta = (hmp->rsv_recs - hammer_limit_recs) *
411 hz / hammer_limit_recs;
412 if (delta > 0)
413 tsleep(&trans, 0, "hmrslo", delta);
059819e3
MD
414 }
415
4a2796f3
MD
416 /*
417 * Calculate the blocksize at the current offset and figure
418 * out how much we can actually write.
419 */
4a2796f3
MD
420 blkmask = blksize - 1;
421 offset = (int)uio->uio_offset & blkmask;
422 base_offset = uio->uio_offset & ~(int64_t)blkmask;
423 n = blksize - offset;
d5ef456e
MD
424 if (n > uio->uio_resid)
425 n = uio->uio_resid;
11ad5ade 426 if (uio->uio_offset + n > ip->ino_data.size) {
d5ef456e
MD
427 vnode_pager_setsize(ap->a_vp, uio->uio_offset + n);
428 fixsize = 1;
429 }
430
c0ade690
MD
431 if (uio->uio_segflg == UIO_NOCOPY) {
432 /*
433 * Issuing a write with the same data backing the
434 * buffer. Instantiate the buffer to collect the
435 * backing vm pages, then read-in any missing bits.
436 *
437 * This case is used by vop_stdputpages().
438 */
47637bff 439 bp = getblk(ap->a_vp, base_offset,
4a2796f3 440 blksize, GETBLK_BHEAVY, 0);
c0ade690
MD
441 if ((bp->b_flags & B_CACHE) == 0) {
442 bqrelse(bp);
47637bff 443 error = bread(ap->a_vp, base_offset,
4a2796f3 444 blksize, &bp);
c0ade690 445 }
4a2796f3 446 } else if (offset == 0 && uio->uio_resid >= blksize) {
c0ade690 447 /*
a5fddc16
MD
448 * Even though we are entirely overwriting the buffer
449 * we may still have to zero it out to avoid a
450 * mmap/write visibility issue.
c0ade690 451 */
4a2796f3 452 bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
a5fddc16
MD
453 if ((bp->b_flags & B_CACHE) == 0)
454 vfs_bio_clrbuf(bp);
47637bff 455 } else if (base_offset >= ip->ino_data.size) {
c0ade690 456 /*
a5fddc16
MD
457 * If the base offset of the buffer is beyond the
458 * file EOF, we don't have to issue a read.
c0ade690 459 */
47637bff 460 bp = getblk(ap->a_vp, base_offset,
4a2796f3 461 blksize, GETBLK_BHEAVY, 0);
66325755
MD
462 vfs_bio_clrbuf(bp);
463 } else {
c0ade690
MD
464 /*
465 * Partial overwrite, read in any missing bits then
466 * replace the portion being written.
467 */
4a2796f3 468 error = bread(ap->a_vp, base_offset, blksize, &bp);
d5ef456e
MD
469 if (error == 0)
470 bheavy(bp);
66325755 471 }
47637bff 472 if (error == 0) {
4a2796f3 473 error = uiomove((char *)bp->b_data + offset,
47637bff
MD
474 n, uio);
475 }
d5ef456e
MD
476
477 /*
478 * If we screwed up we have to undo any VM size changes we
479 * made.
480 */
66325755
MD
481 if (error) {
482 brelse(bp);
d5ef456e 483 if (fixsize) {
11ad5ade 484 vtruncbuf(ap->a_vp, ip->ino_data.size,
4a2796f3 485 hammer_blocksize(ip->ino_data.size));
d5ef456e 486 }
66325755
MD
487 break;
488 }
c0ade690 489 /* bp->b_flags |= B_CLUSTEROK; temporarily disabled */
11ad5ade
MD
490 if (ip->ino_data.size < uio->uio_offset) {
491 ip->ino_data.size = uio->uio_offset;
492 flags = HAMMER_INODE_DDIRTY;
493 vnode_pager_setsize(ap->a_vp, ip->ino_data.size);
c0ade690 494 } else {
d113fda1 495 flags = 0;
66325755 496 }
11ad5ade 497 ip->ino_data.mtime = trans.time;
ddfdf542 498 flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
47637bff 499 hammer_modify_inode(ip, flags);
32c90105 500
47637bff
MD
501 /*
502 * Final buffer disposition.
503 */
cb51be26 504 bp->b_flags |= B_AGE;
66325755
MD
505 if (ap->a_ioflag & IO_SYNC) {
506 bwrite(bp);
507 } else if (ap->a_ioflag & IO_DIRECT) {
66325755 508 bawrite(bp);
4a2796f3
MD
509 } else {
510 bdwrite(bp);
511 }
66325755 512 }
b84de5af 513 hammer_done_transaction(&trans);
66325755 514 return (error);
427e5fc6
MD
515}
516
66325755
MD
517/*
518 * hammer_vop_access { vp, mode, cred }
519 */
427e5fc6
MD
520static
521int
66325755 522hammer_vop_access(struct vop_access_args *ap)
427e5fc6 523{
66325755
MD
524 struct hammer_inode *ip = VTOI(ap->a_vp);
525 uid_t uid;
526 gid_t gid;
527 int error;
528
529 uid = hammer_to_unix_xid(&ip->ino_data.uid);
530 gid = hammer_to_unix_xid(&ip->ino_data.gid);
531
532 error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
533 ip->ino_data.uflags);
534 return (error);
427e5fc6
MD
535}
536
66325755
MD
537/*
538 * hammer_vop_advlock { vp, id, op, fl, flags }
539 */
427e5fc6
MD
540static
541int
66325755 542hammer_vop_advlock(struct vop_advlock_args *ap)
427e5fc6 543{
4a2796f3 544 hammer_inode_t ip = VTOI(ap->a_vp);
66325755 545
11ad5ade 546 return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
427e5fc6
MD
547}
548
66325755
MD
549/*
550 * hammer_vop_close { vp, fflag }
551 */
427e5fc6
MD
552static
553int
66325755 554hammer_vop_close(struct vop_close_args *ap)
427e5fc6 555{
4a2796f3
MD
556 hammer_inode_t ip = VTOI(ap->a_vp);
557
558 if ((ip->flags | ip->sync_flags) & HAMMER_INODE_MODMASK)
559 hammer_inode_waitreclaims(ip->hmp);
a89aec1b 560 return (vop_stdclose(ap));
427e5fc6
MD
561}
562
66325755
MD
563/*
564 * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
565 *
566 * The operating system has already ensured that the directory entry
567 * does not exist and done all appropriate namespace locking.
568 */
427e5fc6
MD
569static
570int
66325755 571hammer_vop_ncreate(struct vop_ncreate_args *ap)
427e5fc6 572{
66325755
MD
573 struct hammer_transaction trans;
574 struct hammer_inode *dip;
575 struct hammer_inode *nip;
576 struct nchandle *nch;
577 int error;
578
579 nch = ap->a_nch;
580 dip = VTOI(ap->a_dvp);
581
d113fda1
MD
582 if (dip->flags & HAMMER_INODE_RO)
583 return (EROFS);
a7e9bef1 584 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHECKSPACE_SLOP_CREATE)) != 0)
e63644f0 585 return (error);
d113fda1 586
66325755
MD
587 /*
588 * Create a transaction to cover the operations we perform.
589 */
8cd0a023 590 hammer_start_transaction(&trans, dip->hmp);
66325755
MD
591
592 /*
593 * Create a new filesystem object of the requested type. The
b84de5af
MD
594 * returned inode will be referenced and shared-locked to prevent
595 * it from being moved to the flusher.
66325755 596 */
8cd0a023 597
5a930e66
MD
598 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
599 dip, 0, &nip);
66325755 600 if (error) {
77062c8a 601 hkprintf("hammer_create_inode error %d\n", error);
b84de5af 602 hammer_done_transaction(&trans);
66325755
MD
603 *ap->a_vpp = NULL;
604 return (error);
605 }
66325755
MD
606
607 /*
608 * Add the new filesystem object to the directory. This will also
609 * bump the inode's link count.
610 */
5a930e66
MD
611 error = hammer_ip_add_directory(&trans, dip,
612 nch->ncp->nc_name, nch->ncp->nc_nlen,
613 nip);
0b075555 614 if (error)
77062c8a 615 hkprintf("hammer_ip_add_directory error %d\n", error);
66325755
MD
616
617 /*
618 * Finish up.
619 */
620 if (error) {
a89aec1b 621 hammer_rel_inode(nip, 0);
b84de5af 622 hammer_done_transaction(&trans);
66325755
MD
623 *ap->a_vpp = NULL;
624 } else {
e8599db1 625 error = hammer_get_vnode(nip, ap->a_vpp);
b84de5af 626 hammer_done_transaction(&trans);
a89aec1b
MD
627 hammer_rel_inode(nip, 0);
628 if (error == 0) {
629 cache_setunresolved(ap->a_nch);
630 cache_setvp(ap->a_nch, *ap->a_vpp);
631 }
66325755
MD
632 }
633 return (error);
427e5fc6
MD
634}
635
66325755
MD
636/*
637 * hammer_vop_getattr { vp, vap }
98f7132d
MD
638 *
639 * Retrieve an inode's attribute information. When accessing inodes
640 * historically we fake the atime field to ensure consistent results.
641 * The atime field is stored in the B-Tree element and allowed to be
642 * updated without cycling the element.
66325755 643 */
427e5fc6
MD
644static
645int
66325755 646hammer_vop_getattr(struct vop_getattr_args *ap)
427e5fc6 647{
66325755
MD
648 struct hammer_inode *ip = VTOI(ap->a_vp);
649 struct vattr *vap = ap->a_vap;
650
a56cb012
MD
651 /*
652 * We want the fsid to be different when accessing a filesystem
653 * with different as-of's so programs like diff don't think
654 * the files are the same.
655 *
656 * We also want the fsid to be the same when comparing snapshots,
657 * or when comparing mirrors (which might be backed by different
658 * physical devices). HAMMER fsids are based on the PFS's
659 * shared_uuid field.
660 *
661 * XXX there is a chance of collision here. The va_fsid reported
662 * by stat is different from the more involved fsid used in the
663 * mount structure.
c82af904 664 */
a56cb012
MD
665 vap->va_fsid = ip->pfsm->fsid_udev ^ (u_int32_t)ip->obj_asof ^
666 (u_int32_t)(ip->obj_asof >> 32);
667
11ad5ade 668 vap->va_fileid = ip->ino_leaf.base.obj_id;
66325755 669 vap->va_mode = ip->ino_data.mode;
11ad5ade 670 vap->va_nlink = ip->ino_data.nlinks;
66325755
MD
671 vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
672 vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
673 vap->va_rmajor = 0;
674 vap->va_rminor = 0;
11ad5ade 675 vap->va_size = ip->ino_data.size;
bcac4bbb
MD
676
677 /*
678 * We must provide a consistent atime and mtime for snapshots
679 * so people can do a 'tar cf - ... | md5' on them and get
680 * consistent results.
681 */
682 if (ip->flags & HAMMER_INODE_RO) {
ddfdf542
MD
683 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
684 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
bcac4bbb 685 } else {
ddfdf542
MD
686 hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
687 hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
bcac4bbb 688 }
ddfdf542 689 hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
66325755
MD
690 vap->va_flags = ip->ino_data.uflags;
691 vap->va_gen = 1; /* hammer inums are unique for all time */
bf686dbe 692 vap->va_blocksize = HAMMER_BUFSIZE;
4a2796f3
MD
693 if (ip->ino_data.size >= HAMMER_XDEMARC) {
694 vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
695 ~HAMMER_XBUFMASK64;
696 } else if (ip->ino_data.size > HAMMER_BUFSIZE / 2) {
697 vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
698 ~HAMMER_BUFMASK64;
699 } else {
700 vap->va_bytes = (ip->ino_data.size + 15) & ~15;
701 }
11ad5ade 702 vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
66325755 703 vap->va_filerev = 0; /* XXX */
4a2796f3 704 /* mtime uniquely identifies any adjustments made to the file XXX */
11ad5ade 705 vap->va_fsmid = ip->ino_data.mtime;
66325755
MD
706 vap->va_uid_uuid = ip->ino_data.uid;
707 vap->va_gid_uuid = ip->ino_data.gid;
708 vap->va_fsid_uuid = ip->hmp->fsid;
709 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
710 VA_FSID_UUID_VALID;
7a04d74f 711
11ad5ade 712 switch (ip->ino_data.obj_type) {
7a04d74f
MD
713 case HAMMER_OBJTYPE_CDEV:
714 case HAMMER_OBJTYPE_BDEV:
715 vap->va_rmajor = ip->ino_data.rmajor;
716 vap->va_rminor = ip->ino_data.rminor;
717 break;
718 default:
719 break;
720 }
721
66325755 722 return(0);
427e5fc6
MD
723}
724
66325755
MD
725/*
726 * hammer_vop_nresolve { nch, dvp, cred }
727 *
728 * Locate the requested directory entry.
729 */
427e5fc6
MD
730static
731int
66325755 732hammer_vop_nresolve(struct vop_nresolve_args *ap)
427e5fc6 733{
36f82b23 734 struct hammer_transaction trans;
66325755 735 struct namecache *ncp;
7f7c1f84
MD
736 hammer_inode_t dip;
737 hammer_inode_t ip;
738 hammer_tid_t asof;
8cd0a023 739 struct hammer_cursor cursor;
66325755
MD
740 struct vnode *vp;
741 int64_t namekey;
742 int error;
7f7c1f84
MD
743 int i;
744 int nlen;
d113fda1 745 int flags;
a56cb012 746 int ispfs;
adf01747 747 int64_t obj_id;
ddfdf542 748 u_int32_t localization;
7f7c1f84
MD
749
750 /*
751 * Misc initialization, plus handle as-of name extensions. Look for
752 * the '@@' extension. Note that as-of files and directories cannot
753 * be modified.
7f7c1f84
MD
754 */
755 dip = VTOI(ap->a_dvp);
756 ncp = ap->a_nch->ncp;
757 asof = dip->obj_asof;
758 nlen = ncp->nc_nlen;
d113fda1 759 flags = dip->flags;
a56cb012 760 ispfs = 0;
7f7c1f84 761
36f82b23
MD
762 hammer_simple_transaction(&trans, dip->hmp);
763
7f7c1f84
MD
764 for (i = 0; i < nlen; ++i) {
765 if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
d113fda1 766 asof = hammer_str_to_tid(ncp->nc_name + i + 2);
d113fda1 767 flags |= HAMMER_INODE_RO;
7f7c1f84
MD
768 break;
769 }
770 }
771 nlen = i;
66325755 772
d113fda1
MD
773 /*
774 * If there is no path component the time extension is relative to
775 * dip.
776 */
777 if (nlen == 0) {
bcac4bbb 778 ip = hammer_get_inode(&trans, dip, dip->obj_id,
ddfdf542
MD
779 asof, dip->obj_localization,
780 flags, &error);
d113fda1 781 if (error == 0) {
e8599db1 782 error = hammer_get_vnode(ip, &vp);
d113fda1
MD
783 hammer_rel_inode(ip, 0);
784 } else {
785 vp = NULL;
786 }
787 if (error == 0) {
788 vn_unlock(vp);
789 cache_setvp(ap->a_nch, vp);
790 vrele(vp);
791 }
36f82b23 792 goto done;
d113fda1
MD
793 }
794
8cd0a023
MD
795 /*
796 * Calculate the namekey and setup the key range for the scan. This
797 * works kinda like a chained hash table where the lower 32 bits
798 * of the namekey synthesize the chain.
799 *
800 * The key range is inclusive of both key_beg and key_end.
801 */
7f7c1f84 802 namekey = hammer_directory_namekey(ncp->nc_name, nlen);
66325755 803
bcac4bbb 804 error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
5a930e66
MD
805 cursor.key_beg.localization = dip->obj_localization +
806 HAMMER_LOCALIZE_MISC;
8cd0a023
MD
807 cursor.key_beg.obj_id = dip->obj_id;
808 cursor.key_beg.key = namekey;
d5530d22 809 cursor.key_beg.create_tid = 0;
8cd0a023
MD
810 cursor.key_beg.delete_tid = 0;
811 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
812 cursor.key_beg.obj_type = 0;
66325755 813
8cd0a023
MD
814 cursor.key_end = cursor.key_beg;
815 cursor.key_end.key |= 0xFFFFFFFFULL;
d5530d22
MD
816 cursor.asof = asof;
817 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
66325755
MD
818
819 /*
8cd0a023 820 * Scan all matching records (the chain), locate the one matching
a89aec1b 821 * the requested path component.
8cd0a023
MD
822 *
823 * The hammer_ip_*() functions merge in-memory records with on-disk
824 * records for the purposes of the search.
66325755 825 */
6a37e7e4 826 obj_id = 0;
43c665ae 827 localization = HAMMER_DEF_LOCALIZATION;
6a37e7e4 828
4e17f465 829 if (error == 0) {
4e17f465
MD
830 error = hammer_ip_first(&cursor);
831 while (error == 0) {
832 error = hammer_ip_resolve_data(&cursor);
833 if (error)
834 break;
11ad5ade
MD
835 if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
836 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
837 obj_id = cursor.data->entry.obj_id;
a56cb012
MD
838
839 /*
840 * Force relookups whenever a PFS root is
841 * accessed.
842 */
843 if (obj_id == HAMMER_OBJID_ROOT)
844 ispfs = 1;
ddfdf542 845 localization = cursor.data->entry.localization;
4e17f465
MD
846 break;
847 }
848 error = hammer_ip_next(&cursor);
66325755
MD
849 }
850 }
6a37e7e4 851 hammer_done_cursor(&cursor);
66325755 852 if (error == 0) {
bcac4bbb 853 ip = hammer_get_inode(&trans, dip, obj_id,
ddfdf542
MD
854 asof, localization,
855 flags, &error);
a56cb012
MD
856 if (ispfs && asof > ip->pfsm->pfsd.sync_end_tid) {
857 asof = ip->pfsm->pfsd.sync_end_tid;
858 hammer_rel_inode(ip, 0);
859 ip = hammer_get_inode(&trans, dip, obj_id,
860 asof, localization,
861 flags, &error);
862 }
863
864
7f7c1f84 865 if (error == 0) {
e8599db1 866 error = hammer_get_vnode(ip, &vp);
7f7c1f84
MD
867 hammer_rel_inode(ip, 0);
868 } else {
869 vp = NULL;
870 }
66325755
MD
871 if (error == 0) {
872 vn_unlock(vp);
873 cache_setvp(ap->a_nch, vp);
a56cb012
MD
874 if (ispfs)
875 cache_settimeout(ap->a_nch, 0);
66325755
MD
876 vrele(vp);
877 }
878 } else if (error == ENOENT) {
879 cache_setvp(ap->a_nch, NULL);
880 }
36f82b23 881done:
b84de5af 882 hammer_done_transaction(&trans);
66325755 883 return (error);
427e5fc6
MD
884}
885
66325755
MD
886/*
887 * hammer_vop_nlookupdotdot { dvp, vpp, cred }
888 *
889 * Locate the parent directory of a directory vnode.
890 *
891 * dvp is referenced but not locked. *vpp must be returned referenced and
892 * locked. A parent_obj_id of 0 does not necessarily indicate that we are
893 * at the root, instead it could indicate that the directory we were in was
894 * removed.
42c7d26b
MD
895 *
896 * NOTE: as-of sequences are not linked into the directory structure. If
897 * we are at the root with a different asof then the mount point, reload
898 * the same directory with the mount point's asof. I'm not sure what this
899 * will do to NFS. We encode ASOF stamps in NFS file handles so it might not
900 * get confused, but it hasn't been tested.
66325755 901 */
427e5fc6
MD
902static
903int
66325755 904hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
427e5fc6 905{
36f82b23 906 struct hammer_transaction trans;
66325755 907 struct hammer_inode *dip;
d113fda1 908 struct hammer_inode *ip;
42c7d26b 909 int64_t parent_obj_id;
5a930e66 910 u_int32_t parent_obj_localization;
42c7d26b 911 hammer_tid_t asof;
d113fda1 912 int error;
66325755
MD
913
914 dip = VTOI(ap->a_dvp);
42c7d26b 915 asof = dip->obj_asof;
5a930e66
MD
916
917 /*
918 * Whos are parent? This could be the root of a pseudo-filesystem
919 * whos parent is in another localization domain.
920 */
42c7d26b 921 parent_obj_id = dip->ino_data.parent_obj_id;
5a930e66
MD
922 if (dip->obj_id == HAMMER_OBJID_ROOT)
923 parent_obj_localization = dip->ino_data.ext.obj.parent_obj_localization;
924 else
925 parent_obj_localization = dip->obj_localization;
42c7d26b
MD
926
927 if (parent_obj_id == 0) {
928 if (dip->obj_id == HAMMER_OBJID_ROOT &&
929 asof != dip->hmp->asof) {
930 parent_obj_id = dip->obj_id;
931 asof = dip->hmp->asof;
932 *ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
933 ksnprintf(*ap->a_fakename, 19, "0x%016llx",
934 dip->obj_asof);
935 } else {
936 *ap->a_vpp = NULL;
937 return ENOENT;
938 }
66325755 939 }
d113fda1 940
36f82b23
MD
941 hammer_simple_transaction(&trans, dip->hmp);
942
bcac4bbb 943 ip = hammer_get_inode(&trans, dip, parent_obj_id,
5a930e66 944 asof, parent_obj_localization,
ddfdf542 945 dip->flags, &error);
36f82b23 946 if (ip) {
e8599db1 947 error = hammer_get_vnode(ip, ap->a_vpp);
36f82b23
MD
948 hammer_rel_inode(ip, 0);
949 } else {
d113fda1 950 *ap->a_vpp = NULL;
d113fda1 951 }
b84de5af 952 hammer_done_transaction(&trans);
d113fda1 953 return (error);
427e5fc6
MD
954}
955
66325755
MD
956/*
957 * hammer_vop_nlink { nch, dvp, vp, cred }
958 */
427e5fc6
MD
959static
960int
66325755 961hammer_vop_nlink(struct vop_nlink_args *ap)
427e5fc6 962{
66325755
MD
963 struct hammer_transaction trans;
964 struct hammer_inode *dip;
965 struct hammer_inode *ip;
966 struct nchandle *nch;
967 int error;
968
969 nch = ap->a_nch;
970 dip = VTOI(ap->a_dvp);
971 ip = VTOI(ap->a_vp);
972
d113fda1
MD
973 if (dip->flags & HAMMER_INODE_RO)
974 return (EROFS);
975 if (ip->flags & HAMMER_INODE_RO)
976 return (EROFS);
a7e9bef1 977 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHECKSPACE_SLOP_CREATE)) != 0)
e63644f0 978 return (error);
d113fda1 979
66325755
MD
980 /*
981 * Create a transaction to cover the operations we perform.
982 */
8cd0a023 983 hammer_start_transaction(&trans, dip->hmp);
66325755
MD
984
985 /*
986 * Add the filesystem object to the directory. Note that neither
987 * dip nor ip are referenced or locked, but their vnodes are
988 * referenced. This function will bump the inode's link count.
989 */
5a930e66
MD
990 error = hammer_ip_add_directory(&trans, dip,
991 nch->ncp->nc_name, nch->ncp->nc_nlen,
992 ip);
66325755
MD
993
994 /*
995 * Finish up.
996 */
b84de5af 997 if (error == 0) {
6b4f890b
MD
998 cache_setunresolved(nch);
999 cache_setvp(nch, ap->a_vp);
66325755 1000 }
b84de5af 1001 hammer_done_transaction(&trans);
66325755 1002 return (error);
427e5fc6
MD
1003}
1004
66325755
MD
1005/*
1006 * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1007 *
1008 * The operating system has already ensured that the directory entry
1009 * does not exist and done all appropriate namespace locking.
1010 */
427e5fc6
MD
1011static
1012int
66325755 1013hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
427e5fc6 1014{
66325755
MD
1015 struct hammer_transaction trans;
1016 struct hammer_inode *dip;
1017 struct hammer_inode *nip;
1018 struct nchandle *nch;
1019 int error;
1020
1021 nch = ap->a_nch;
1022 dip = VTOI(ap->a_dvp);
1023
d113fda1
MD
1024 if (dip->flags & HAMMER_INODE_RO)
1025 return (EROFS);
a7e9bef1 1026 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHECKSPACE_SLOP_CREATE)) != 0)
e63644f0 1027 return (error);
d113fda1 1028
66325755
MD
1029 /*
1030 * Create a transaction to cover the operations we perform.
1031 */
8cd0a023 1032 hammer_start_transaction(&trans, dip->hmp);
66325755
MD
1033
1034 /*
1035 * Create a new filesystem object of the requested type. The
8cd0a023 1036 * returned inode will be referenced but not locked.
66325755 1037 */
5a930e66
MD
1038 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1039 dip, 0, &nip);
66325755 1040 if (error) {
77062c8a 1041 hkprintf("hammer_mkdir error %d\n", error);
b84de5af 1042 hammer_done_transaction(&trans);
66325755
MD
1043 *ap->a_vpp = NULL;
1044 return (error);
1045 }
66325755
MD
1046 /*
1047 * Add the new filesystem object to the directory. This will also
1048 * bump the inode's link count.
1049 */
5a930e66
MD
1050 error = hammer_ip_add_directory(&trans, dip,
1051 nch->ncp->nc_name, nch->ncp->nc_nlen,
1052 nip);
0b075555 1053 if (error)
77062c8a 1054 hkprintf("hammer_mkdir (add) error %d\n", error);
66325755
MD
1055
1056 /*
1057 * Finish up.
1058 */
1059 if (error) {
a89aec1b 1060 hammer_rel_inode(nip, 0);
66325755
MD
1061 *ap->a_vpp = NULL;
1062 } else {
e8599db1 1063 error = hammer_get_vnode(nip, ap->a_vpp);
a89aec1b
MD
1064 hammer_rel_inode(nip, 0);
1065 if (error == 0) {
1066 cache_setunresolved(ap->a_nch);
1067 cache_setvp(ap->a_nch, *ap->a_vpp);
1068 }
66325755 1069 }
b84de5af 1070 hammer_done_transaction(&trans);
66325755 1071 return (error);
427e5fc6
MD
1072}
1073
66325755
MD
1074/*
1075 * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1076 *
1077 * The operating system has already ensured that the directory entry
1078 * does not exist and done all appropriate namespace locking.
1079 */
427e5fc6
MD
1080static
1081int
66325755 1082hammer_vop_nmknod(struct vop_nmknod_args *ap)
427e5fc6 1083{
66325755
MD
1084 struct hammer_transaction trans;
1085 struct hammer_inode *dip;
1086 struct hammer_inode *nip;
1087 struct nchandle *nch;
1088 int error;
5a930e66 1089 int pseudofs;
66325755
MD
1090
1091 nch = ap->a_nch;
1092 dip = VTOI(ap->a_dvp);
1093
d113fda1
MD
1094 if (dip->flags & HAMMER_INODE_RO)
1095 return (EROFS);
a7e9bef1 1096 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHECKSPACE_SLOP_CREATE)) != 0)
e63644f0 1097 return (error);
d113fda1 1098
66325755
MD
1099 /*
1100 * Create a transaction to cover the operations we perform.
1101 */
8cd0a023 1102 hammer_start_transaction(&trans, dip->hmp);
66325755
MD
1103
1104 /*
1105 * Create a new filesystem object of the requested type. The
8cd0a023 1106 * returned inode will be referenced but not locked.
5a930e66
MD
1107 *
1108 * If mknod specifies a directory a pseudo-fs is created.
66325755 1109 */
5a930e66
MD
1110 pseudofs = (ap->a_vap->va_type == VDIR);
1111 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1112 dip, pseudofs, &nip);
66325755 1113 if (error) {
b84de5af 1114 hammer_done_transaction(&trans);
66325755
MD
1115 *ap->a_vpp = NULL;
1116 return (error);
1117 }
66325755
MD
1118
1119 /*
1120 * Add the new filesystem object to the directory. This will also
1121 * bump the inode's link count.
1122 */
5a930e66
MD
1123 error = hammer_ip_add_directory(&trans, dip,
1124 nch->ncp->nc_name, nch->ncp->nc_nlen,
1125 nip);
66325755
MD
1126
1127 /*
1128 * Finish up.
1129 */
1130 if (error) {
a89aec1b 1131 hammer_rel_inode(nip, 0);
66325755
MD
1132 *ap->a_vpp = NULL;
1133 } else {
e8599db1 1134 error = hammer_get_vnode(nip, ap->a_vpp);
a89aec1b
MD
1135 hammer_rel_inode(nip, 0);
1136 if (error == 0) {
1137 cache_setunresolved(ap->a_nch);
1138 cache_setvp(ap->a_nch, *ap->a_vpp);
1139 }
66325755 1140 }
b84de5af 1141 hammer_done_transaction(&trans);
66325755 1142 return (error);
427e5fc6
MD
1143}
1144
66325755
MD
1145/*
1146 * hammer_vop_open { vp, mode, cred, fp }
1147 */
427e5fc6
MD
1148static
1149int
66325755 1150hammer_vop_open(struct vop_open_args *ap)
427e5fc6 1151{
9f5097dc
MD
1152 hammer_inode_t ip;
1153
1154 ip = VTOI(ap->a_vp);
1155
1156 if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
d113fda1 1157 return (EROFS);
a89aec1b 1158 return(vop_stdopen(ap));
427e5fc6
MD
1159}
1160
66325755
MD
1161/*
1162 * hammer_vop_pathconf { vp, name, retval }
1163 */
427e5fc6
MD
1164static
1165int
66325755 1166hammer_vop_pathconf(struct vop_pathconf_args *ap)
427e5fc6
MD
1167{
1168 return EOPNOTSUPP;
1169}
1170
66325755
MD
1171/*
1172 * hammer_vop_print { vp }
1173 */
427e5fc6
MD
1174static
1175int
66325755 1176hammer_vop_print(struct vop_print_args *ap)
427e5fc6
MD
1177{
1178 return EOPNOTSUPP;
1179}
1180
66325755 1181/*
6b4f890b 1182 * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
66325755 1183 */
427e5fc6
MD
1184static
1185int
66325755 1186hammer_vop_readdir(struct vop_readdir_args *ap)
427e5fc6 1187{
36f82b23 1188 struct hammer_transaction trans;
6b4f890b
MD
1189 struct hammer_cursor cursor;
1190 struct hammer_inode *ip;
1191 struct uio *uio;
6b4f890b
MD
1192 hammer_base_elm_t base;
1193 int error;
1194 int cookie_index;
1195 int ncookies;
1196 off_t *cookies;
1197 off_t saveoff;
1198 int r;
1199
1200 ip = VTOI(ap->a_vp);
1201 uio = ap->a_uio;
b3deaf57
MD
1202 saveoff = uio->uio_offset;
1203
1204 if (ap->a_ncookies) {
1205 ncookies = uio->uio_resid / 16 + 1;
1206 if (ncookies > 1024)
1207 ncookies = 1024;
1208 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1209 cookie_index = 0;
1210 } else {
1211 ncookies = -1;
1212 cookies = NULL;
1213 cookie_index = 0;
1214 }
1215
36f82b23
MD
1216 hammer_simple_transaction(&trans, ip->hmp);
1217
b3deaf57
MD
1218 /*
1219 * Handle artificial entries
1220 */
1221 error = 0;
1222 if (saveoff == 0) {
1223 r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1224 if (r)
1225 goto done;
1226 if (cookies)
1227 cookies[cookie_index] = saveoff;
1228 ++saveoff;
1229 ++cookie_index;
1230 if (cookie_index == ncookies)
1231 goto done;
1232 }
1233 if (saveoff == 1) {
1234 if (ip->ino_data.parent_obj_id) {
1235 r = vop_write_dirent(&error, uio,
1236 ip->ino_data.parent_obj_id,
1237 DT_DIR, 2, "..");
1238 } else {
1239 r = vop_write_dirent(&error, uio,
1240 ip->obj_id, DT_DIR, 2, "..");
1241 }
1242 if (r)
1243 goto done;
1244 if (cookies)
1245 cookies[cookie_index] = saveoff;
1246 ++saveoff;
1247 ++cookie_index;
1248 if (cookie_index == ncookies)
1249 goto done;
1250 }
6b4f890b
MD
1251
1252 /*
1253 * Key range (begin and end inclusive) to scan. Directory keys
1254 * directly translate to a 64 bit 'seek' position.
1255 */
bcac4bbb 1256 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
5a930e66
MD
1257 cursor.key_beg.localization = ip->obj_localization +
1258 HAMMER_LOCALIZE_MISC;
6b4f890b 1259 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1260 cursor.key_beg.create_tid = 0;
6b4f890b
MD
1261 cursor.key_beg.delete_tid = 0;
1262 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1263 cursor.key_beg.obj_type = 0;
b3deaf57 1264 cursor.key_beg.key = saveoff;
6b4f890b
MD
1265
1266 cursor.key_end = cursor.key_beg;
1267 cursor.key_end.key = HAMMER_MAX_KEY;
d5530d22
MD
1268 cursor.asof = ip->obj_asof;
1269 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
6b4f890b 1270
4e17f465 1271 error = hammer_ip_first(&cursor);
6b4f890b
MD
1272
1273 while (error == 0) {
11ad5ade 1274 error = hammer_ip_resolve_data(&cursor);
6b4f890b
MD
1275 if (error)
1276 break;
11ad5ade 1277 base = &cursor.leaf->base;
6b4f890b 1278 saveoff = base->key;
11ad5ade 1279 KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
6b4f890b 1280
7a04d74f
MD
1281 if (base->obj_id != ip->obj_id)
1282 panic("readdir: bad record at %p", cursor.node);
1283
6b4f890b 1284 r = vop_write_dirent(
11ad5ade
MD
1285 &error, uio, cursor.data->entry.obj_id,
1286 hammer_get_dtype(cursor.leaf->base.obj_type),
1287 cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF ,
1288 (void *)cursor.data->entry.name);
6b4f890b
MD
1289 if (r)
1290 break;
1291 ++saveoff;
1292 if (cookies)
1293 cookies[cookie_index] = base->key;
1294 ++cookie_index;
1295 if (cookie_index == ncookies)
1296 break;
1297 error = hammer_ip_next(&cursor);
1298 }
1299 hammer_done_cursor(&cursor);
1300
b3deaf57 1301done:
b84de5af 1302 hammer_done_transaction(&trans);
36f82b23 1303
6b4f890b
MD
1304 if (ap->a_eofflag)
1305 *ap->a_eofflag = (error == ENOENT);
6b4f890b
MD
1306 uio->uio_offset = saveoff;
1307 if (error && cookie_index == 0) {
b3deaf57
MD
1308 if (error == ENOENT)
1309 error = 0;
6b4f890b
MD
1310 if (cookies) {
1311 kfree(cookies, M_TEMP);
1312 *ap->a_ncookies = 0;
1313 *ap->a_cookies = NULL;
1314 }
1315 } else {
7a04d74f
MD
1316 if (error == ENOENT)
1317 error = 0;
6b4f890b
MD
1318 if (cookies) {
1319 *ap->a_ncookies = cookie_index;
1320 *ap->a_cookies = cookies;
1321 }
1322 }
1323 return(error);
427e5fc6
MD
1324}
1325
66325755
MD
1326/*
1327 * hammer_vop_readlink { vp, uio, cred }
1328 */
427e5fc6
MD
1329static
1330int
66325755 1331hammer_vop_readlink(struct vop_readlink_args *ap)
427e5fc6 1332{
36f82b23 1333 struct hammer_transaction trans;
7a04d74f
MD
1334 struct hammer_cursor cursor;
1335 struct hammer_inode *ip;
1336 int error;
1337
1338 ip = VTOI(ap->a_vp);
36f82b23 1339
2f85fa4d
MD
1340 /*
1341 * Shortcut if the symlink data was stuffed into ino_data.
1342 */
1343 if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
1344 error = uiomove(ip->ino_data.ext.symlink,
1345 ip->ino_data.size, ap->a_uio);
1346 return(error);
1347 }
36f82b23 1348
2f85fa4d
MD
1349 /*
1350 * Long version
1351 */
1352 hammer_simple_transaction(&trans, ip->hmp);
bcac4bbb 1353 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
7a04d74f
MD
1354
1355 /*
1356 * Key range (begin and end inclusive) to scan. Directory keys
1357 * directly translate to a 64 bit 'seek' position.
1358 */
5a930e66
MD
1359 cursor.key_beg.localization = ip->obj_localization +
1360 HAMMER_LOCALIZE_MISC;
7a04d74f 1361 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 1362 cursor.key_beg.create_tid = 0;
7a04d74f
MD
1363 cursor.key_beg.delete_tid = 0;
1364 cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1365 cursor.key_beg.obj_type = 0;
1366 cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
d5530d22
MD
1367 cursor.asof = ip->obj_asof;
1368 cursor.flags |= HAMMER_CURSOR_ASOF;
7a04d74f 1369
45a014dc 1370 error = hammer_ip_lookup(&cursor);
7a04d74f
MD
1371 if (error == 0) {
1372 error = hammer_ip_resolve_data(&cursor);
1373 if (error == 0) {
11ad5ade
MD
1374 KKASSERT(cursor.leaf->data_len >=
1375 HAMMER_SYMLINK_NAME_OFF);
1376 error = uiomove(cursor.data->symlink.name,
1377 cursor.leaf->data_len -
1378 HAMMER_SYMLINK_NAME_OFF,
7a04d74f
MD
1379 ap->a_uio);
1380 }
1381 }
1382 hammer_done_cursor(&cursor);
b84de5af 1383 hammer_done_transaction(&trans);
7a04d74f 1384 return(error);
427e5fc6
MD
1385}
1386
66325755
MD
1387/*
1388 * hammer_vop_nremove { nch, dvp, cred }
1389 */
427e5fc6
MD
1390static
1391int
66325755 1392hammer_vop_nremove(struct vop_nremove_args *ap)
427e5fc6 1393{
b84de5af 1394 struct hammer_transaction trans;
e63644f0 1395 struct hammer_inode *dip;
b84de5af
MD
1396 int error;
1397
e63644f0
MD
1398 dip = VTOI(ap->a_dvp);
1399
1400 if (hammer_nohistory(dip) == 0 &&
a7e9bef1 1401 (error = hammer_checkspace(dip->hmp, HAMMER_CHECKSPACE_SLOP_REMOVE)) != 0) {
e63644f0
MD
1402 return (error);
1403 }
1404
1405 hammer_start_transaction(&trans, dip->hmp);
b84de5af
MD
1406 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
1407 hammer_done_transaction(&trans);
1408
1409 return (error);
427e5fc6
MD
1410}
1411
66325755
MD
1412/*
1413 * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1414 */
427e5fc6
MD
1415static
1416int
66325755 1417hammer_vop_nrename(struct vop_nrename_args *ap)
427e5fc6 1418{
8cd0a023
MD
1419 struct hammer_transaction trans;
1420 struct namecache *fncp;
1421 struct namecache *tncp;
1422 struct hammer_inode *fdip;
1423 struct hammer_inode *tdip;
1424 struct hammer_inode *ip;
1425 struct hammer_cursor cursor;
8cd0a023 1426 int64_t namekey;
11ad5ade 1427 int nlen, error;
8cd0a023
MD
1428
1429 fdip = VTOI(ap->a_fdvp);
1430 tdip = VTOI(ap->a_tdvp);
1431 fncp = ap->a_fnch->ncp;
1432 tncp = ap->a_tnch->ncp;
b3deaf57
MD
1433 ip = VTOI(fncp->nc_vp);
1434 KKASSERT(ip != NULL);
d113fda1
MD
1435
1436 if (fdip->flags & HAMMER_INODE_RO)
1437 return (EROFS);
1438 if (tdip->flags & HAMMER_INODE_RO)
1439 return (EROFS);
1440 if (ip->flags & HAMMER_INODE_RO)
1441 return (EROFS);
a7e9bef1 1442 if ((error = hammer_checkspace(fdip->hmp, HAMMER_CHECKSPACE_SLOP_CREATE)) != 0)
e63644f0 1443 return (error);
d113fda1 1444
8cd0a023
MD
1445 hammer_start_transaction(&trans, fdip->hmp);
1446
1447 /*
b3deaf57
MD
1448 * Remove tncp from the target directory and then link ip as
1449 * tncp. XXX pass trans to dounlink
42c7d26b
MD
1450 *
1451 * Force the inode sync-time to match the transaction so it is
1452 * in-sync with the creation of the target directory entry.
8cd0a023 1453 */
b84de5af 1454 error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp, ap->a_cred, 0);
42c7d26b 1455 if (error == 0 || error == ENOENT) {
5a930e66
MD
1456 error = hammer_ip_add_directory(&trans, tdip,
1457 tncp->nc_name, tncp->nc_nlen,
1458 ip);
42c7d26b
MD
1459 if (error == 0) {
1460 ip->ino_data.parent_obj_id = tdip->obj_id;
47637bff 1461 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
42c7d26b
MD
1462 }
1463 }
b3deaf57
MD
1464 if (error)
1465 goto failed; /* XXX */
8cd0a023
MD
1466
1467 /*
1468 * Locate the record in the originating directory and remove it.
1469 *
1470 * Calculate the namekey and setup the key range for the scan. This
1471 * works kinda like a chained hash table where the lower 32 bits
1472 * of the namekey synthesize the chain.
1473 *
1474 * The key range is inclusive of both key_beg and key_end.
1475 */
1476 namekey = hammer_directory_namekey(fncp->nc_name, fncp->nc_nlen);
6a37e7e4 1477retry:
bcac4bbb 1478 hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
5a930e66
MD
1479 cursor.key_beg.localization = fdip->obj_localization +
1480 HAMMER_LOCALIZE_MISC;
8cd0a023
MD
1481 cursor.key_beg.obj_id = fdip->obj_id;
1482 cursor.key_beg.key = namekey;
d5530d22 1483 cursor.key_beg.create_tid = 0;
8cd0a023
MD
1484 cursor.key_beg.delete_tid = 0;
1485 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1486 cursor.key_beg.obj_type = 0;
1487
1488 cursor.key_end = cursor.key_beg;
1489 cursor.key_end.key |= 0xFFFFFFFFULL;
d5530d22
MD
1490 cursor.asof = fdip->obj_asof;
1491 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
8cd0a023
MD
1492
1493 /*
1494 * Scan all matching records (the chain), locate the one matching
a89aec1b 1495 * the requested path component.
8cd0a023
MD
1496 *
1497 * The hammer_ip_*() functions merge in-memory records with on-disk
1498 * records for the purposes of the search.
1499 */
4e17f465 1500 error = hammer_ip_first(&cursor);
a89aec1b 1501 while (error == 0) {
8cd0a023
MD
1502 if (hammer_ip_resolve_data(&cursor) != 0)
1503 break;
11ad5ade
MD
1504 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
1505 KKASSERT(nlen > 0);
1506 if (fncp->nc_nlen == nlen &&
1507 bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
8cd0a023
MD
1508 break;
1509 }
a89aec1b 1510 error = hammer_ip_next(&cursor);
8cd0a023 1511 }
8cd0a023
MD
1512
1513 /*
1514 * If all is ok we have to get the inode so we can adjust nlinks.
6a37e7e4
MD
1515 *
1516 * WARNING: hammer_ip_del_directory() may have to terminate the
1517 * cursor to avoid a recursion. It's ok to call hammer_done_cursor()
1518 * twice.
8cd0a023 1519 */
9944ae54 1520 if (error == 0)
6a37e7e4 1521 error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
b84de5af
MD
1522
1523 /*
1524 * XXX A deadlock here will break rename's atomicy for the purposes
1525 * of crash recovery.
1526 */
1527 if (error == EDEADLK) {
b84de5af 1528 hammer_done_cursor(&cursor);
b84de5af
MD
1529 goto retry;
1530 }
1531
1532 /*
1533 * Cleanup and tell the kernel that the rename succeeded.
1534 */
c0ade690 1535 hammer_done_cursor(&cursor);
6a37e7e4
MD
1536 if (error == 0)
1537 cache_rename(ap->a_fnch, ap->a_tnch);
b84de5af 1538
b3deaf57 1539failed:
b84de5af 1540 hammer_done_transaction(&trans);
8cd0a023 1541 return (error);
427e5fc6
MD
1542}
1543
66325755
MD
1544/*
1545 * hammer_vop_nrmdir { nch, dvp, cred }
1546 */
427e5fc6
MD
1547static
1548int
66325755 1549hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
427e5fc6 1550{
b84de5af 1551 struct hammer_transaction trans;
e63644f0 1552 struct hammer_inode *dip;
b84de5af
MD
1553 int error;
1554
e63644f0
MD
1555 dip = VTOI(ap->a_dvp);
1556
1557 if (hammer_nohistory(dip) == 0 &&
a7e9bef1 1558 (error = hammer_checkspace(dip->hmp, HAMMER_CHECKSPACE_SLOP_REMOVE)) != 0) {
e63644f0
MD
1559 return (error);
1560 }
1561
1562 hammer_start_transaction(&trans, dip->hmp);
b84de5af
MD
1563 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0);
1564 hammer_done_transaction(&trans);
1565
1566 return (error);
427e5fc6
MD
1567}
1568
66325755
MD
1569/*
1570 * hammer_vop_setattr { vp, vap, cred }
1571 */
427e5fc6
MD
1572static
1573int
66325755 1574hammer_vop_setattr(struct vop_setattr_args *ap)
427e5fc6 1575{
8cd0a023
MD
1576 struct hammer_transaction trans;
1577 struct vattr *vap;
1578 struct hammer_inode *ip;
1579 int modflags;
1580 int error;
d5ef456e 1581 int truncating;
4a2796f3
MD
1582 int blksize;
1583 int64_t aligned_size;
8cd0a023 1584 u_int32_t flags;
8cd0a023
MD
1585
1586 vap = ap->a_vap;
1587 ip = ap->a_vp->v_data;
1588 modflags = 0;
1589
1590 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1591 return(EROFS);
d113fda1
MD
1592 if (ip->flags & HAMMER_INODE_RO)
1593 return (EROFS);
e63644f0 1594 if (hammer_nohistory(ip) == 0 &&
a7e9bef1 1595 (error = hammer_checkspace(ip->hmp, HAMMER_CHECKSPACE_SLOP_REMOVE)) != 0) {
e63644f0
MD
1596 return (error);
1597 }
8cd0a023
MD
1598
1599 hammer_start_transaction(&trans, ip->hmp);
1600 error = 0;
1601
1602 if (vap->va_flags != VNOVAL) {
1603 flags = ip->ino_data.uflags;
1604 error = vop_helper_setattr_flags(&flags, vap->va_flags,
1605 hammer_to_unix_xid(&ip->ino_data.uid),
1606 ap->a_cred);
1607 if (error == 0) {
1608 if (ip->ino_data.uflags != flags) {
1609 ip->ino_data.uflags = flags;
1610 modflags |= HAMMER_INODE_DDIRTY;
1611 }
1612 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
1613 error = 0;
1614 goto done;
1615 }
1616 }
1617 goto done;
1618 }
1619 if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
1620 error = EPERM;
1621 goto done;
1622 }
7538695e
MD
1623 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
1624 mode_t cur_mode = ip->ino_data.mode;
1625 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
1626 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
1627 uuid_t uuid_uid;
1628 uuid_t uuid_gid;
1629
1630 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
1631 ap->a_cred,
1632 &cur_uid, &cur_gid, &cur_mode);
1633 if (error == 0) {
1634 hammer_guid_to_uuid(&uuid_uid, cur_uid);
1635 hammer_guid_to_uuid(&uuid_gid, cur_gid);
1636 if (bcmp(&uuid_uid, &ip->ino_data.uid,
1637 sizeof(uuid_uid)) ||
1638 bcmp(&uuid_gid, &ip->ino_data.gid,
1639 sizeof(uuid_gid)) ||
1640 ip->ino_data.mode != cur_mode
1641 ) {
1642 ip->ino_data.uid = uuid_uid;
1643 ip->ino_data.gid = uuid_gid;
1644 ip->ino_data.mode = cur_mode;
1645 }
8cd0a023
MD
1646 modflags |= HAMMER_INODE_DDIRTY;
1647 }
1648 }
11ad5ade 1649 while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
8cd0a023
MD
1650 switch(ap->a_vp->v_type) {
1651 case VREG:
11ad5ade 1652 if (vap->va_size == ip->ino_data.size)
d5ef456e 1653 break;
b84de5af
MD
1654 /*
1655 * XXX break atomicy, we can deadlock the backend
1656 * if we do not release the lock. Probably not a
1657 * big deal here.
1658 */
4a2796f3 1659 blksize = hammer_blocksize(vap->va_size);
11ad5ade 1660 if (vap->va_size < ip->ino_data.size) {
4a2796f3 1661 vtruncbuf(ap->a_vp, vap->va_size, blksize);
d5ef456e
MD
1662 truncating = 1;
1663 } else {
c0ade690 1664 vnode_pager_setsize(ap->a_vp, vap->va_size);
d5ef456e 1665 truncating = 0;
c0ade690 1666 }
11ad5ade
MD
1667 ip->ino_data.size = vap->va_size;
1668 modflags |= HAMMER_INODE_DDIRTY;
d5ef456e 1669
b84de5af
MD
1670 /*
1671 * on-media truncation is cached in the inode until
1672 * the inode is synchronized.
1673 */
d5ef456e 1674 if (truncating) {
47637bff 1675 hammer_ip_frontend_trunc(ip, vap->va_size);
0832c9bb
MD
1676#ifdef DEBUG_TRUNCATE
1677 if (HammerTruncIp == NULL)
1678 HammerTruncIp = ip;
1679#endif
b84de5af
MD
1680 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
1681 ip->flags |= HAMMER_INODE_TRUNCATED;
1682 ip->trunc_off = vap->va_size;
0832c9bb
MD
1683#ifdef DEBUG_TRUNCATE
1684 if (ip == HammerTruncIp)
1685 kprintf("truncate1 %016llx\n", ip->trunc_off);
1686#endif
b84de5af
MD
1687 } else if (ip->trunc_off > vap->va_size) {
1688 ip->trunc_off = vap->va_size;
0832c9bb
MD
1689#ifdef DEBUG_TRUNCATE
1690 if (ip == HammerTruncIp)
1691 kprintf("truncate2 %016llx\n", ip->trunc_off);
1692#endif
1693 } else {
1694#ifdef DEBUG_TRUNCATE
1695 if (ip == HammerTruncIp)
1696 kprintf("truncate3 %016llx (ignored)\n", vap->va_size);
1697#endif
b84de5af 1698 }
d5ef456e 1699 }
b84de5af 1700
d5ef456e
MD
1701 /*
1702 * If truncating we have to clean out a portion of
b84de5af
MD
1703 * the last block on-disk. We do this in the
1704 * front-end buffer cache.
d5ef456e 1705 */
4a2796f3
MD
1706 aligned_size = (vap->va_size + (blksize - 1)) &
1707 ~(int64_t)(blksize - 1);
b84de5af 1708 if (truncating && vap->va_size < aligned_size) {
d5ef456e
MD
1709 struct buf *bp;
1710 int offset;
1711
4a2796f3 1712 aligned_size -= blksize;
47637bff 1713
4a2796f3 1714 offset = (int)vap->va_size & (blksize - 1);
47637bff 1715 error = bread(ap->a_vp, aligned_size,
4a2796f3 1716 blksize, &bp);
47637bff 1717 hammer_ip_frontend_trunc(ip, aligned_size);
d5ef456e
MD
1718 if (error == 0) {
1719 bzero(bp->b_data + offset,
4a2796f3 1720 blksize - offset);
d5ef456e
MD
1721 bdwrite(bp);
1722 } else {
47637bff 1723 kprintf("ERROR %d\n", error);
d5ef456e
MD
1724 brelse(bp);
1725 }
1726 }
76376933 1727 break;
8cd0a023 1728 case VDATABASE:
b84de5af
MD
1729 if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
1730 ip->flags |= HAMMER_INODE_TRUNCATED;
1731 ip->trunc_off = vap->va_size;
1732 } else if (ip->trunc_off > vap->va_size) {
1733 ip->trunc_off = vap->va_size;
1734 }
47637bff 1735 hammer_ip_frontend_trunc(ip, vap->va_size);
11ad5ade
MD
1736 ip->ino_data.size = vap->va_size;
1737 modflags |= HAMMER_INODE_DDIRTY;
8cd0a023
MD
1738 break;
1739 default:
1740 error = EINVAL;
1741 goto done;
1742 }
d26d0ae9 1743 break;
8cd0a023
MD
1744 }
1745 if (vap->va_atime.tv_sec != VNOVAL) {
bcac4bbb 1746 ip->ino_data.atime =
ddfdf542
MD
1747 hammer_timespec_to_time(&vap->va_atime);
1748 modflags |= HAMMER_INODE_ATIME;
8cd0a023
MD
1749 }
1750 if (vap->va_mtime.tv_sec != VNOVAL) {
11ad5ade 1751 ip->ino_data.mtime =
ddfdf542
MD
1752 hammer_timespec_to_time(&vap->va_mtime);
1753 modflags |= HAMMER_INODE_MTIME;
8cd0a023
MD
1754 }
1755 if (vap->va_mode != (mode_t)VNOVAL) {
7538695e
MD
1756 mode_t cur_mode = ip->ino_data.mode;
1757 uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
1758 gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
1759
1760 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
1761 cur_uid, cur_gid, &cur_mode);
1762 if (error == 0 && ip->ino_data.mode != cur_mode) {
1763 ip->ino_data.mode = cur_mode;
8cd0a023
MD
1764 modflags |= HAMMER_INODE_DDIRTY;
1765 }
1766 }
1767done:
b84de5af 1768 if (error == 0)
47637bff 1769 hammer_modify_inode(ip, modflags);
b84de5af 1770 hammer_done_transaction(&trans);
8cd0a023 1771 return (error);
427e5fc6
MD
1772}
1773
66325755
MD
1774/*
1775 * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1776 */
427e5fc6
MD
1777static
1778int
66325755 1779hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
427e5fc6 1780{
7a04d74f
MD
1781 struct hammer_transaction trans;
1782 struct hammer_inode *dip;
1783 struct hammer_inode *nip;
1784 struct nchandle *nch;
1785 hammer_record_t record;
1786 int error;
1787 int bytes;
1788
1789 ap->a_vap->va_type = VLNK;
1790
1791 nch = ap->a_nch;
1792 dip = VTOI(ap->a_dvp);
1793
d113fda1
MD
1794 if (dip->flags & HAMMER_INODE_RO)
1795 return (EROFS);
a7e9bef1 1796 if ((error = hammer_checkspace(dip->hmp, HAMMER_CHECKSPACE_SLOP_CREATE)) != 0)
e63644f0 1797 return (error);
d113fda1 1798
7a04d74f
MD
1799 /*
1800 * Create a transaction to cover the operations we perform.
1801 */
1802 hammer_start_transaction(&trans, dip->hmp);
1803
1804 /*
1805 * Create a new filesystem object of the requested type. The
1806 * returned inode will be referenced but not locked.
1807 */
1808
5a930e66
MD
1809 error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1810 dip, 0, &nip);
7a04d74f 1811 if (error) {
b84de5af 1812 hammer_done_transaction(&trans);
7a04d74f
MD
1813 *ap->a_vpp = NULL;
1814 return (error);
1815 }
1816
7a04d74f
MD
1817 /*
1818 * Add a record representing the symlink. The symlink target is
1819 * stored as pure data, not as a string, and is not \0 terminated.
1820 */
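/*
 * Targets of up to HAMMER_INODE_BASESYMLEN bytes are copied inline
 * into the inode's extension area below; longer targets get a
 * separate fixed-key data record instead.
 */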
1821 if (error == 0) {
7a04d74f
MD
1822 bytes = strlen(ap->a_target);
1823
2f85fa4d
MD
1824 if (bytes <= HAMMER_INODE_BASESYMLEN) {
1825 bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
1826 } else {
1827 record = hammer_alloc_mem_record(nip, bytes);
1828 record->type = HAMMER_MEM_RECORD_GENERAL;
1829
5a930e66
MD
1830 record->leaf.base.localization = nip->obj_localization +
1831 HAMMER_LOCALIZE_MISC;
2f85fa4d
MD
1832 record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
1833 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
1834 record->leaf.data_len = bytes;
1835 KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
1836 bcopy(ap->a_target, record->data->symlink.name, bytes);
1837 error = hammer_ip_add_record(&trans, record);
1838 }
42c7d26b
MD
1839
1840 /*
1841 * Set the file size to the length of the link.
1842 */
1843 if (error == 0) {
11ad5ade 1844 nip->ino_data.size = bytes;
47637bff 1845 hammer_modify_inode(nip, HAMMER_INODE_DDIRTY);
42c7d26b 1846 }
7a04d74f 1847 }
1f07f686 1848 if (error == 0)
5a930e66
MD
1849 error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
1850 nch->ncp->nc_nlen, nip);
7a04d74f
MD
1851
1852 /*
1853 * Finish up.
1854 */
1855 if (error) {
1856 hammer_rel_inode(nip, 0);
7a04d74f
MD
1857 *ap->a_vpp = NULL;
1858 } else {
e8599db1 1859 error = hammer_get_vnode(nip, ap->a_vpp);
7a04d74f
MD
1860 hammer_rel_inode(nip, 0);
1861 if (error == 0) {
1862 cache_setunresolved(ap->a_nch);
1863 cache_setvp(ap->a_nch, *ap->a_vpp);
1864 }
1865 }
b84de5af 1866 hammer_done_transaction(&trans);
7a04d74f 1867 return (error);
427e5fc6
MD
1868}
1869
66325755
MD
1870/*
1871 * hammer_vop_nwhiteout { nch, dvp, cred, flags }
1872 */
427e5fc6
MD
1873static
1874int
66325755 1875hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
427e5fc6 1876{
b84de5af 1877 struct hammer_transaction trans;
e63644f0 1878 struct hammer_inode *dip;
b84de5af
MD
1879 int error;
1880
e63644f0
MD
1881 dip = VTOI(ap->a_dvp);
1882
1883 if (hammer_nohistory(dip) == 0 &&
a7e9bef1 1884 (error = hammer_checkspace(dip->hmp, HAMMER_CHECKSPACE_SLOP_CREATE)) != 0) {
e63644f0
MD
1885 return (error);
1886 }
1887
1888 hammer_start_transaction(&trans, dip->hmp);
b84de5af
MD
1889 error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
1890 ap->a_cred, ap->a_flags);
1891 hammer_done_transaction(&trans);
1892
1893 return (error);
427e5fc6
MD
1894}
1895
7dc57964
MD
1896/*
1897 * hammer_vop_ioctl { vp, command, data, fflag, cred }
1898 */
1899static
1900int
1901hammer_vop_ioctl(struct vop_ioctl_args *ap)
1902{
1903 struct hammer_inode *ip = ap->a_vp->v_data;
1904
1905 return(hammer_ioctl(ip, ap->a_command, ap->a_data,
1906 ap->a_fflag, ap->a_cred));
1907}
1908
513ca7d7
MD
1909static
1910int
1911hammer_vop_mountctl(struct vop_mountctl_args *ap)
1912{
1913 struct mount *mp;
1914 int error;
1915
1916 mp = ap->a_head.a_ops->head.vv_mount;
1917
1918 switch(ap->a_op) {
1919 case MOUNTCTL_SET_EXPORT:
1920 if (ap->a_ctllen != sizeof(struct export_args))
1921 error = EINVAL;
1922 else error = hammer_vfs_export(mp, ap->a_op,
1923 (const struct export_args *)ap->a_ctl);
1924 break;
1925 default:
1926 error = journal_mountctl(ap);
1927 break;
1928 }
1929 return(error);
1930}
1931
66325755
MD
1932/*
1933 * hammer_vop_strategy { vp, bio }
8cd0a023
MD
1934 *
1935 * Strategy call, used for regular file read & write only. Note that the
1936 * bp may represent a cluster.
1937 *
1938 * To simplify operation and allow better optimizations in the future,
1939 * this code does not make any assumptions with regards to buffer alignment
1940 * or size.
66325755 1941 */
427e5fc6
MD
1942static
1943int
66325755 1944hammer_vop_strategy(struct vop_strategy_args *ap)
427e5fc6 1945{
8cd0a023
MD
1946 struct buf *bp;
1947 int error;
1948
1949 bp = ap->a_bio->bio_buf;
1950
1951 switch(bp->b_cmd) {
1952 case BUF_CMD_READ:
1953 error = hammer_vop_strategy_read(ap);
1954 break;
1955 case BUF_CMD_WRITE:
1956 error = hammer_vop_strategy_write(ap);
1957 break;
1958 default:
059819e3
MD
1959 bp->b_error = error = EINVAL;
1960 bp->b_flags |= B_ERROR;
1961 biodone(ap->a_bio);
8cd0a023
MD
1962 break;
1963 }
8cd0a023 1964 return (error);
427e5fc6
MD
1965}
1966
8cd0a023
MD
1967/*
1968 * Read from a regular file. Iterate the related records and fill in the
1969 * BIO/BUF. Gaps are zero-filled.
1970 *
1971 * The support code in hammer_object.c should be used to deal with mixed
1972 * in-memory and on-disk records.
1973 *
4a2796f3
MD
1974 * NOTE: Can be called from the cluster code with an oversized buf.
1975 *
8cd0a023
MD
1976 * XXX atime update
1977 */
1978static
1979int
1980hammer_vop_strategy_read(struct vop_strategy_args *ap)
1981{
36f82b23
MD
1982 struct hammer_transaction trans;
1983 struct hammer_inode *ip;
8cd0a023 1984 struct hammer_cursor cursor;
8cd0a023 1985 hammer_base_elm_t base;
4a2796f3 1986 hammer_off_t disk_offset;
8cd0a023 1987 struct bio *bio;
a99b9ea2 1988 struct bio *nbio;
8cd0a023
MD
1989 struct buf *bp;
1990 int64_t rec_offset;
a89aec1b 1991 int64_t ran_end;
195c19a1 1992 int64_t tmp64;
8cd0a023
MD
1993 int error;
1994 int boff;
1995 int roff;
1996 int n;
1997
1998 bio = ap->a_bio;
1999 bp = bio->bio_buf;
36f82b23 2000 ip = ap->a_vp->v_data;
8cd0a023 2001
a99b9ea2
MD
2002 /*
2003 * The zone-2 disk offset may have been set by the cluster code via
4a2796f3 2004 * a BMAP operation, or else should be NOOFFSET.
a99b9ea2 2005 *
4a2796f3 2006 * Checking the high bits for a match against zone-2 should suffice.
a99b9ea2
MD
2007 */
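/*
 * If the cluster code already resolved a raw zone-2 offset the bio
 * can be handed straight to the direct-read path below, bypassing
 * the B-Tree scan entirely.
 */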
2008 nbio = push_bio(bio);
6aeaa7bd
MD
2009 if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
2010 HAMMER_ZONE_RAW_BUFFER) {
4a2796f3 2011 error = hammer_io_direct_read(ip->hmp, nbio);
a99b9ea2
MD
2012 return (error);
2013 }
2014
2015 /*
4a2796f3
MD
2016 * Well, that sucked. Do it the hard way. If all the stars are
2017 * aligned we may still be able to issue a direct-read.
a99b9ea2 2018 */
36f82b23 2019 hammer_simple_transaction(&trans, ip->hmp);
47637bff 2020 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
8cd0a023
MD
2021
2022 /*
2023 * Key range (begin and end inclusive) to scan. Note that the keys
c0ade690
MD
2024 * stored in the actual records represent BASE+LEN, not BASE. The
2025 * first record containing bio_offset will have a key > bio_offset.
8cd0a023 2026 */
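/*
 * For example, a data record covering file offsets [0, 16384) is
 * keyed at 16384, so a bio at offset 0 searches from bio_offset + 1
 * to land on that record.
 */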
5a930e66
MD
2027 cursor.key_beg.localization = ip->obj_localization +
2028 HAMMER_LOCALIZE_MISC;
8cd0a023 2029 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 2030 cursor.key_beg.create_tid = 0;
8cd0a023 2031 cursor.key_beg.delete_tid = 0;
8cd0a023 2032 cursor.key_beg.obj_type = 0;
c0ade690 2033 cursor.key_beg.key = bio->bio_offset + 1;
d5530d22 2034 cursor.asof = ip->obj_asof;
bf3b416b 2035 cursor.flags |= HAMMER_CURSOR_ASOF;
8cd0a023
MD
2036
2037 cursor.key_end = cursor.key_beg;
11ad5ade 2038 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
b84de5af 2039#if 0
11ad5ade 2040 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
a89aec1b
MD
2041 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2042 cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2043 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
b84de5af
MD
2044 } else
2045#endif
2046 {
c0ade690 2047 ran_end = bio->bio_offset + bp->b_bufsize;
a89aec1b
MD
2048 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2049 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
195c19a1
MD
2050 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2051 if (tmp64 < ran_end)
a89aec1b
MD
2052 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2053 else
7f7c1f84 2054 cursor.key_end.key = ran_end + MAXPHYS + 1;
a89aec1b 2055 }
d26d0ae9 2056 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
8cd0a023 2057
4e17f465 2058 error = hammer_ip_first(&cursor);
8cd0a023
MD
2059 boff = 0;
2060
a89aec1b 2061 while (error == 0) {
47637bff
MD
2062 /*
2063 * Get the base file offset of the record. The key for
2064 * data records is (base + bytes) rather than (base).
2065 */
11ad5ade 2066 base = &cursor.leaf->base;
11ad5ade 2067 rec_offset = base->key - cursor.leaf->data_len;
8cd0a023 2068
66325755 2069 /*
a89aec1b 2070 * Calculate the gap, if any, and zero-fill it.
1fef775e
MD
2071 *
2072 * n is the offset of the start of the record versus our
2073 * current seek offset in the bio.
66325755 2074 */
8cd0a023
MD
2075 n = (int)(rec_offset - (bio->bio_offset + boff));
2076 if (n > 0) {
a89aec1b
MD
2077 if (n > bp->b_bufsize - boff)
2078 n = bp->b_bufsize - boff;
8cd0a023
MD
2079 bzero((char *)bp->b_data + boff, n);
2080 boff += n;
2081 n = 0;
66325755 2082 }
8cd0a023
MD
2083
2084 /*
2085 * Calculate the data offset in the record and the number
2086 * of bytes we can copy.
a89aec1b 2087 *
1fef775e
MD
2088 * There are two degenerate cases. First, boff may already
2089 * be at bp->b_bufsize. Second, the data offset within
2090 * the record may exceed the record's size.
8cd0a023
MD
2091 */
2092 roff = -n;
b84de5af 2093 rec_offset += roff;
11ad5ade 2094 n = cursor.leaf->data_len - roff;
1fef775e
MD
2095 if (n <= 0) {
2096 kprintf("strategy_read: bad n=%d roff=%d\n", n, roff);
2097 n = 0;
2098 } else if (n > bp->b_bufsize - boff) {
8cd0a023 2099 n = bp->b_bufsize - boff;
1fef775e 2100 }
059819e3 2101
b84de5af 2102 /*
47637bff
MD
2103 * Deal with cached truncations. This cool bit of code
2104 * allows truncate()/ftruncate() to avoid having to sync
2105 * the file.
2106 *
2107 * If the frontend is truncated then all backend records are
2108 * subject to the frontend's truncation.
2109 *
2110 * If the backend is truncated then backend records on-disk
2111 * (but not in-memory) are subject to the backend's
2112 * truncation. In-memory records owned by the backend
2113 * represent data written after the truncation point on the
2114 * backend and must not be truncated.
2115 *
2116 * Truncate operations deal with frontend buffer cache
2117 * buffers and frontend-owned in-memory records synchronously.
b84de5af 2118 */
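/*
 * The two checks below clip n so the copy never returns data
 * lying beyond a pending frontend or backend truncation point.
 */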
47637bff
MD
2119 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2120 if (hammer_cursor_ondisk(&cursor) ||
2121 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
2122 if (ip->trunc_off <= rec_offset)
2123 n = 0;
2124 else if (ip->trunc_off < rec_offset + n)
2125 n = (int)(ip->trunc_off - rec_offset);
2126 }
2127 }
2128 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2129 if (hammer_cursor_ondisk(&cursor)) {
2130 if (ip->sync_trunc_off <= rec_offset)
2131 n = 0;
2132 else if (ip->sync_trunc_off < rec_offset + n)
2133 n = (int)(ip->sync_trunc_off - rec_offset);
2134 }
2135 }
b84de5af
MD
2136
2137 /*
47637bff
MD
2138 * Try to issue a direct read into our bio if possible,
2139 * otherwise resolve the element data into a hammer_buffer
2140 * and copy.
4a2796f3
MD
2141 *
2142 * The buffer on-disk should be zeroed past any real
2143 * truncation point, but may not be for any synthesized
2144 * truncation point from above.
b84de5af 2145 */
4a2796f3 2146 if (boff == 0 && n == bp->b_bufsize &&
a99b9ea2 2147 ((cursor.leaf->data_offset + roff) & HAMMER_BUFMASK) == 0) {
4a2796f3
MD
2148 disk_offset = hammer_blockmap_lookup(
2149 trans.hmp,
2150 cursor.leaf->data_offset + roff,
2151 &error);
2152 if (error)
2153 break;
2154 nbio->bio_offset = disk_offset;
2155 error = hammer_io_direct_read(trans.hmp, nbio);
47637bff
MD
2156 goto done;
2157 } else if (n) {
2158 error = hammer_ip_resolve_data(&cursor);
2159 if (error == 0) {
2160 bcopy((char *)cursor.data + roff,
2161 (char *)bp->b_data + boff, n);
2162 }
b84de5af 2163 }
47637bff
MD
2164 if (error)
2165 break;
2166
2167 /*
2168 * Iterate until we have filled the request.
2169 */
2170 boff += n;
8cd0a023 2171 if (boff == bp->b_bufsize)
66325755 2172 break;
a89aec1b 2173 error = hammer_ip_next(&cursor);
66325755
MD
2174 }
2175
2176 /*
8cd0a023 2177 * There may have been a gap after the last record
66325755 2178 */
8cd0a023
MD
2179 if (error == ENOENT)
2180 error = 0;
2181 if (error == 0 && boff != bp->b_bufsize) {
7f7c1f84 2182 KKASSERT(boff < bp->b_bufsize);
8cd0a023
MD
2183 bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2184 /* boff = bp->b_bufsize; */
2185 }
2186 bp->b_resid = 0;
059819e3
MD
2187 bp->b_error = error;
2188 if (error)
2189 bp->b_flags |= B_ERROR;
2190 biodone(ap->a_bio);
47637bff
MD
2191
2192done:
2193 if (cursor.node)
bcac4bbb 2194 hammer_cache_node(&ip->cache[1], cursor.node);
47637bff
MD
2195 hammer_done_cursor(&cursor);
2196 hammer_done_transaction(&trans);
8cd0a023
MD
2197 return(error);
2198}
2199
a99b9ea2
MD
2200/*
2201 * BMAP operation - used to support cluster_read() only.
2202 *
2203 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
2204 *
2205 * This routine may return EOPNOTSUPP if the operation is not supported for
2206 * the specified offset. The contents of the pointer arguments do not
2207 * need to be initialized in that case.
2208 *
2209 * If a disk address is available and properly aligned return 0 with
2210 * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
2211 * to the run-length relative to that offset. Callers may assume that
2212 * *doffsetp is valid if 0 is returned, even if *runp is not sufficiently
2213 * large, so return EOPNOTSUPP if it is not sufficiently large.
2214 */
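/*
 * In terms of the code below: *doffsetp is base_disk_offset plus the offset
 * of a_loffset within the accumulated run, *a_runb is how far the run
 * extends backwards from a_loffset and *a_runp how far it extends forwards,
 * all in bytes.
 */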
2215static
2216int
2217hammer_vop_bmap(struct vop_bmap_args *ap)
2218{
2219 struct hammer_transaction trans;
2220 struct hammer_inode *ip;
2221 struct hammer_cursor cursor;
2222 hammer_base_elm_t base;
2223 int64_t rec_offset;
2224 int64_t ran_end;
2225 int64_t tmp64;
2226 int64_t base_offset;
2227 int64_t base_disk_offset;
2228 int64_t last_offset;
2229 hammer_off_t last_disk_offset;
2230 hammer_off_t disk_offset;
2231 int rec_len;
2232 int error;
4a2796f3 2233 int blksize;
a99b9ea2
MD
2234
2235 ip = ap->a_vp->v_data;
2236
2237 /*
2238 * We can only BMAP regular files. We can't BMAP database files,
2239 * directories, etc.
2240 */
2241 if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
2242 return(EOPNOTSUPP);
2243
2244 /*
2245 * bmap is typically called with runp/runb both NULL when used
2246 * for writing. We do not support BMAP for writing atm.
2247 */
4a2796f3 2248 if (ap->a_cmd != BUF_CMD_READ)
a99b9ea2
MD
2249 return(EOPNOTSUPP);
2250
2251 /*
2252 * Scan the B-Tree to acquire blockmap addresses, then translate
2253 * to raw addresses.
2254 */
2255 hammer_simple_transaction(&trans, ip->hmp);
cb51be26
MD
2256#if 0
2257 kprintf("bmap_beg %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
2258#endif
a99b9ea2
MD
2259 hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2260
2261 /*
2262 * Key range (begin and end inclusive) to scan. Note that the keys
2263 * stored in the actual records represent BASE+LEN, not BASE. The
2264 * first record containing bio_offset will have a key > bio_offset.
2265 */
5a930e66
MD
2266 cursor.key_beg.localization = ip->obj_localization +
2267 HAMMER_LOCALIZE_MISC;
a99b9ea2
MD
2268 cursor.key_beg.obj_id = ip->obj_id;
2269 cursor.key_beg.create_tid = 0;
2270 cursor.key_beg.delete_tid = 0;
2271 cursor.key_beg.obj_type = 0;
2272 if (ap->a_runb)
2273 cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
2274 else
2275 cursor.key_beg.key = ap->a_loffset + 1;
2276 if (cursor.key_beg.key < 0)
2277 cursor.key_beg.key = 0;
2278 cursor.asof = ip->obj_asof;
bf3b416b 2279 cursor.flags |= HAMMER_CURSOR_ASOF;
a99b9ea2
MD
2280
2281 cursor.key_end = cursor.key_beg;
2282 KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2283
2284 ran_end = ap->a_loffset + MAXPHYS;
2285 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2286 cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2287 tmp64 = ran_end + MAXPHYS + 1; /* work-around GCC-4 bug */
2288 if (tmp64 < ran_end)
2289 cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2290 else
2291 cursor.key_end.key = ran_end + MAXPHYS + 1;
2292
2293 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2294
2295 error = hammer_ip_first(&cursor);
2296 base_offset = last_offset = 0;
2297 base_disk_offset = last_disk_offset = 0;
2298
2299 while (error == 0) {
2300 /*
2301 * Get the base file offset of the record. The key for
2302 * data records is (base + bytes) rather than (base).
4a2796f3
MD
2303 *
2304 * NOTE: rec_offset + rec_len may exceed the end-of-file.
2305 * The extra bytes should be zero on-disk and the BMAP op
2306 * should still be ok.
a99b9ea2
MD
2307 */
2308 base = &cursor.leaf->base;
2309 rec_offset = base->key - cursor.leaf->data_len;
2310 rec_len = cursor.leaf->data_len;
2311
2312 /*
4a2796f3
MD
2313 * Incorporate any cached truncation.
2314 *
2315 * NOTE: Modifications to rec_len based on synthesized
2316 * truncation points remove the guarantee that any extended
2317 * data on disk is zero (since the truncations may not have
2318 * taken place on-media yet).
a99b9ea2
MD
2319 */
2320 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2321 if (hammer_cursor_ondisk(&cursor) ||
2322 cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
2323 if (ip->trunc_off <= rec_offset)
2324 rec_len = 0;
2325 else if (ip->trunc_off < rec_offset + rec_len)
2326 rec_len = (int)(ip->trunc_off - rec_offset);
2327 }
2328 }
2329 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2330 if (hammer_cursor_ondisk(&cursor)) {
2331 if (ip->sync_trunc_off <= rec_offset)
2332 rec_len = 0;
2333 else if (ip->sync_trunc_off < rec_offset + rec_len)
2334 rec_len = (int)(ip->sync_trunc_off - rec_offset);
2335 }
2336 }
2337
2338 /*
2339 * Accumulate information. If we have hit a discontiguous
2340 * block, reset base_offset unless we are already beyond the
2341 * requested offset. If we are, that's it, we stop.
2342 */
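/*
 * Contiguity here means both the logical offset and the
 * blockmap-translated disk offset continue exactly where the
 * previous record ended (last_offset/last_disk_offset).
 */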
2343 disk_offset = hammer_blockmap_lookup(trans.hmp,
2344 cursor.leaf->data_offset,
2345 &error);
2346 if (error)
2347 break;
2348 if (rec_offset != last_offset ||
2349 disk_offset != last_disk_offset) {
2350 if (rec_offset > ap->a_loffset)
2351 break;
2352 base_offset = rec_offset;
2353 base_disk_offset = disk_offset;
2354 }
2355 last_offset = rec_offset + rec_len;
2356 last_disk_offset = disk_offset + rec_len;
2357
2358 error = hammer_ip_next(&cursor);
2359 }
2360
2361#if 0
2362 kprintf("BMAP %016llx: %016llx - %016llx\n",
2363 ap->a_loffset, base_offset, last_offset);
2364 kprintf("BMAP %16s: %016llx - %016llx\n",
2365 "", base_disk_offset, last_disk_offset);
2366#endif
2367
cb51be26 2368 if (cursor.node) {
bcac4bbb 2369 hammer_cache_node(&ip->cache[1], cursor.node);
cb51be26
MD
2370#if 0
2371 kprintf("bmap_end2 %016llx ip->cache %p\n", ap->a_loffset, ip->cache[1]);
2372#endif
2373 }
a99b9ea2
MD
2374 hammer_done_cursor(&cursor);
2375 hammer_done_transaction(&trans);
2376
4a2796f3
MD
2377 /*
2378 * If we couldn't find any records or the records we did find were
2379 * all behind the requested offset, return failure. A forward
2380 * truncation can leave a hole w/ no on-disk records.
2381 */
2382 if (last_offset == 0 || last_offset < ap->a_loffset)
2383 return (EOPNOTSUPP);
2384
2385 /*
2386 * Figure out the block size at the requested offset and adjust
2387 * our limits so the cluster_read() does not create inappropriately
2388 * sized buffer cache buffers.
2389 */
2390 blksize = hammer_blocksize(ap->a_loffset);
2391 if (hammer_blocksize(base_offset) != blksize) {
2392 base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
2393 }
2394 if (last_offset != ap->a_loffset &&
2395 hammer_blocksize(last_offset - 1) != blksize) {
2396 last_offset = hammer_blockdemarc(ap->a_loffset,
2397 last_offset - 1);
2398 }
2399
2400 /*
2401 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
2402 * from occurring.
2403 */
2404 disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
2405
2406 /*
2407 * If doffsetp is not aligned or the forward run size does
2408 * not cover a whole buffer, disallow the direct I/O.
2409 */
2410 if ((disk_offset & HAMMER_BUFMASK) ||
2411 (last_offset - ap->a_loffset) < blksize) {
a99b9ea2
MD
2412 error = EOPNOTSUPP;
2413 } else {
4a2796f3
MD
2414 *ap->a_doffsetp = disk_offset;
2415 if (ap->a_runb) {
2416 *ap->a_runb = ap->a_loffset - base_offset;
2417 KKASSERT(*ap->a_runb >= 0);
a99b9ea2 2418 }
4a2796f3
MD
2419 if (ap->a_runp) {
2420 *ap->a_runp = last_offset - ap->a_loffset;
2421 KKASSERT(*ap->a_runp >= 0);
2422 }
2423 error = 0;
a99b9ea2
MD
2424 }
2425 return(error);
2426}
2427
8cd0a023 2428/*
059819e3 2429 * Write to a regular file. Because this is a strategy call the OS is
bcac4bbb 2430 * trying to actually get data onto the media.
8cd0a023
MD
2431 */
2432static
2433int
2434hammer_vop_strategy_write(struct vop_strategy_args *ap)
2435{
47637bff 2436 hammer_record_t record;
af209b0f 2437 hammer_mount_t hmp;
8cd0a023
MD
2438 hammer_inode_t ip;
2439 struct bio *bio;
2440 struct buf *bp;
a7e9bef1 2441 int blksize;
0832c9bb
MD
2442 int bytes;
2443 int error;
8cd0a023
MD
2444
2445 bio = ap->a_bio;
2446 bp = bio->bio_buf;
2447 ip = ap->a_vp->v_data;
af209b0f 2448 hmp = ip->hmp;
d113fda1 2449
a7e9bef1
MD
2450 blksize = hammer_blocksize(bio->bio_offset);
2451 KKASSERT(bp->b_bufsize == blksize);
4a2796f3 2452
059819e3
MD
2453 if (ip->flags & HAMMER_INODE_RO) {
2454 bp->b_error = EROFS;
2455 bp->b_flags |= B_ERROR;
2456 biodone(ap->a_bio);
2457 return(EROFS);
2458 }
b84de5af 2459
29ce0677
MD
2460 /*
2461 * Interlock with inode destruction (no in-kernel or directory
2462 * topology visibility). If we queue new IO while trying to
2463 * destroy the inode we can deadlock the vtrunc call in
2464 * hammer_inode_unloadable_check().
2465 */
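/*
 * Writes against an inode being destroyed are quietly discarded;
 * the bio is completed with no residual so the caller sees success.
 */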
2466 if (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
2467 bp->b_resid = 0;
2468 biodone(ap->a_bio);
2469 return(0);
2470 }
2471
b84de5af 2472 /*
a99b9ea2
MD
2473 * Reserve space and issue a direct-write from the front-end.
2474 * NOTE: The direct_io code will hammer_bread/bcopy smaller
2475 * allocations.
47637bff 2476 *
a99b9ea2
MD
2477 * An in-memory record will be installed to reference the storage
2478 * until the flusher can get to it.
47637bff
MD
2479 *
2480 * Since we own the high level bio the front-end will not try to
0832c9bb 2481 * do a direct-read until the write completes.
a99b9ea2
MD
2482 *
2483 * NOTE: The only time we do not reserve a full-sized buffer's
2484 * worth of data is if the file is small. We do not try to
2485 * allocate a fragment (from the small-data zone) at the end of
2486 * an otherwise large file as this can lead to wildly separated
2487 * data.
47637bff 2488 */
0832c9bb
MD
2489 KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
2490 KKASSERT(bio->bio_offset < ip->ino_data.size);
a99b9ea2 2491 if (bio->bio_offset || ip->ino_data.size > HAMMER_BUFSIZE / 2)
4a2796f3 2492 bytes = bp->b_bufsize;
b84de5af 2493 else
a99b9ea2 2494 bytes = ((int)ip->ino_data.size + 15) & ~15;
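/*
 * e.g. a 100 byte file reserves 112 bytes here (the size rounded up
 * to a 16 byte boundary) rather than a full buffer.
 */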
0832c9bb
MD
2495
2496 record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
2497 bytes, &error);
2498 if (record) {
af209b0f 2499 hammer_io_direct_write(hmp, &record->leaf, bio);
0832c9bb 2500 hammer_rel_mem_record(record);
4a2796f3
MD
2501 if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
2502 hammer_flush_inode(ip, 0);
0832c9bb 2503 } else {
a99b9ea2 2504 bp->b_bio2.bio_offset = NOOFFSET;
0832c9bb
MD
2505 bp->b_error = error;
2506 bp->b_flags |= B_ERROR;
2507 biodone(ap->a_bio);
2508 }
0832c9bb 2509 return(error);
059819e3
MD
2510}
2511
8cd0a023
MD
2512/*
2513 * dounlink - disconnect a directory entry
2514 *
2515 * XXX whiteout support not really in yet
2516 */
2517static int
b84de5af
MD
2518hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
2519 struct vnode *dvp, struct ucred *cred, int flags)
8cd0a023 2520{
8cd0a023
MD
2521 struct namecache *ncp;
2522 hammer_inode_t dip;
2523 hammer_inode_t ip;
8cd0a023 2524 struct hammer_cursor cursor;
8cd0a023 2525 int64_t namekey;
11ad5ade 2526 int nlen, error;
8cd0a023
MD
2527
2528 /*
2529 * Calculate the namekey and setup the key range for the scan. This
2530 * works kinda like a chained hash table where the lower 32 bits
2531 * of the namekey synthesize the chain.
2532 *
2533 * The key range is inclusive of both key_beg and key_end.
2534 */
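/*
 * All directory entries whose names hash to the same upper 32 bits
 * fall within [namekey, namekey | 0xFFFFFFFF]; the scan below walks
 * that range and compares names to resolve hash collisions.
 */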
2535 dip = VTOI(dvp);
2536 ncp = nch->ncp;
d113fda1
MD
2537
2538 if (dip->flags & HAMMER_INODE_RO)
2539 return (EROFS);
2540
6a37e7e4
MD
2541 namekey = hammer_directory_namekey(ncp->nc_name, ncp->nc_nlen);
2542retry:
bcac4bbb 2543 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
5a930e66
MD
2544 cursor.key_beg.localization = dip->obj_localization +
2545 HAMMER_LOCALIZE_MISC;
8cd0a023
MD
2546 cursor.key_beg.obj_id = dip->obj_id;
2547 cursor.key_beg.key = namekey;
d5530d22 2548 cursor.key_beg.create_tid = 0;
8cd0a023
MD
2549 cursor.key_beg.delete_tid = 0;
2550 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2551 cursor.key_beg.obj_type = 0;
2552
2553 cursor.key_end = cursor.key_beg;
2554 cursor.key_end.key |= 0xFFFFFFFFULL;
d5530d22
MD
2555 cursor.asof = dip->obj_asof;
2556 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
8cd0a023 2557
8cd0a023
MD
2558 /*
2559 * Scan all matching records (the chain) and locate the one matching
2560 * the requested path component. info->last_error contains the
2561 * error code on search termination and could be 0, ENOENT, or
2562 * something else.
2563 *
2564 * The hammer_ip_*() functions merge in-memory records with on-disk
2565 * records for the purposes of the search.
2566 */
4e17f465
MD
2567 error = hammer_ip_first(&cursor);
2568
a89aec1b
MD
2569 while (error == 0) {
2570 error = hammer_ip_resolve_data(&cursor);
2571 if (error)
66325755 2572 break;
11ad5ade
MD
2573 nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2574 KKASSERT(nlen > 0);
2575 if (ncp->nc_nlen == nlen &&
2576 bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
66325755
MD
2577 break;
2578 }
a89aec1b 2579 error = hammer_ip_next(&cursor);
66325755 2580 }
8cd0a023
MD
2581
2582 /*
2583 * If all is ok we have to get the inode so we can adjust nlinks.
269c5eab
MD
2584 * To avoid a deadlock with the flusher we must release the inode
2585 * lock on the directory when acquiring the inode for the entry.
b3deaf57
MD
2586 *
2587 * If the target is a directory, it must be empty.
8cd0a023 2588 */
66325755 2589 if (error == 0) {
269c5eab 2590 hammer_unlock(&cursor.ip->lock);
bcac4bbb 2591 ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
ddfdf542
MD
2592 dip->hmp->asof,
2593 cursor.data->entry.localization,
2594 0, &error);
269c5eab 2595 hammer_lock_sh(&cursor.ip->lock);
46fe7ae1 2596 if (error == ENOENT) {
11ad5ade 2597 kprintf("obj_id %016llx\n", cursor.data->entry.obj_id);
10a5d1ba 2598 Debugger("ENOENT unlinking object that should exist");
46fe7ae1 2599 }
1f07f686
MD
2600
2601 /*
2602 * If we are trying to remove a directory the directory must
2603 * be empty.
2604 *
2605 * WARNING: hammer_ip_check_directory_empty() may have to
2606 * terminate the cursor to avoid a deadlock. It is ok to
2607 * call hammer_done_cursor() twice.
2608 */
11ad5ade 2609 if (error == 0 && ip->ino_data.obj_type ==
b3deaf57 2610 HAMMER_OBJTYPE_DIRECTORY) {
98f7132d 2611 error = hammer_ip_check_directory_empty(trans, ip);
b3deaf57 2612 }
1f07f686 2613
6a37e7e4 2614 /*
1f07f686
MD
2615 * Delete the directory entry.
2616 *
6a37e7e4 2617 * WARNING: hammer_ip_del_directory() may have to terminate
1f07f686 2618 * the cursor to avoid a deadlock. It is ok to call
6a37e7e4
MD
2619 * hammer_done_cursor() twice.
2620 */
b84de5af 2621 if (error == 0) {
b84de5af
MD
2622 error = hammer_ip_del_directory(trans, &cursor,
2623 dip, ip);
b84de5af 2624 }
269c5eab 2625 hammer_done_cursor(&cursor);
8cd0a023
MD
2626 if (error == 0) {
2627 cache_setunresolved(nch);
2628 cache_setvp(nch, NULL);
2629 /* XXX locking */
2630 if (ip->vp)
2631 cache_inval_vp(ip->vp, CINV_DESTROY);
2632 }
af209b0f
MD
2633 if (ip)
2634 hammer_rel_inode(ip, 0);
269c5eab
MD
2635 } else {
2636 hammer_done_cursor(&cursor);
66325755 2637 }
6a37e7e4
MD
2638 if (error == EDEADLK)
2639 goto retry;
9c448776 2640
66325755 2641 return (error);
66325755
MD
2642}
2643
7a04d74f
MD
2644/************************************************************************
2645 * FIFO AND SPECFS OPS *
2646 ************************************************************************
2647 *
2648 */
2649
2650static int
2651hammer_vop_fifoclose (struct vop_close_args *ap)
2652{
2653 /* XXX update itimes */
2654 return (VOCALL(&fifo_vnode_vops, &ap->a_head));
2655}
2656
2657static int
2658hammer_vop_fiforead (struct vop_read_args *ap)
2659{
2660 int error;
2661
2662 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2663 /* XXX update access time */
2664 return (error);
2665}
2666
2667static int
2668hammer_vop_fifowrite (struct vop_write_args *ap)
2669{
2670 int error;
2671
2672 error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2673 /* XXX update access time */
2674 return (error);
2675}
2676
2677static int
2678hammer_vop_specclose (struct vop_close_args *ap)
2679{
2680 /* XXX update itimes */
2681 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2682}
2683
2684static int
2685hammer_vop_specread (struct vop_read_args *ap)
2686{
2687 /* XXX update access time */
2688 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2689}
2690
2691static int
2692hammer_vop_specwrite (struct vop_write_args *ap)
2693{
2694 /* XXX update last change time */
2695 return (VOCALL(&spec_vnode_vops, &ap->a_head));
2696}
2697