/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_vfsops.c,v 1.74 2008/11/13 02:18:43 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/nlookup.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/buf.h>
#include <sys/buf2.h>
#include "hammer.h"

/*
 * NOTE!  Global statistics may not be MPSAFE so HAMMER never uses them
 *        in conditionals.
 */
int hammer_supported_version = HAMMER_VOL_VERSION_DEFAULT;
int hammer_debug_io;
int hammer_debug_general;
int hammer_debug_debug = 1;             /* medium-error panics */
int hammer_debug_inode;
int hammer_debug_locks;
int hammer_debug_btree;
int hammer_debug_tid;
int hammer_debug_recover;               /* -1 will disable, +1 will force */
int hammer_debug_recover_faults;
int hammer_debug_critical;              /* non-zero: enter debugger on error */
int hammer_cluster_enable = 1;          /* enable read clustering by default */
int hammer_count_fsyncs;
int hammer_count_inodes;
int hammer_count_iqueued;
int hammer_count_reclaiming;
int hammer_count_records;
int hammer_count_record_datas;
int hammer_count_volumes;
int hammer_count_buffers;
int hammer_count_nodes;
int64_t hammer_count_extra_space_used;
int64_t hammer_stats_btree_lookups;
int64_t hammer_stats_btree_searches;
int64_t hammer_stats_btree_inserts;
int64_t hammer_stats_btree_deletes;
int64_t hammer_stats_btree_elements;
int64_t hammer_stats_btree_splits;
int64_t hammer_stats_btree_iterations;
int64_t hammer_stats_btree_root_iterations;
int64_t hammer_stats_record_iterations;

int64_t hammer_stats_file_read;
int64_t hammer_stats_file_write;
int64_t hammer_stats_file_iopsr;
int64_t hammer_stats_file_iopsw;
int64_t hammer_stats_disk_read;
int64_t hammer_stats_disk_write;
int64_t hammer_stats_inode_flushes;
int64_t hammer_stats_commits;
int64_t hammer_stats_undo;
int64_t hammer_stats_redo;

int hammer_count_dirtybufspace;         /* global */
int hammer_count_refedbufs;             /* global */
int hammer_count_reservations;
int hammer_count_io_running_read;
int hammer_count_io_running_write;
int hammer_count_io_locked;
int hammer_limit_dirtybufspace;         /* per-mount */
int hammer_limit_recs;                  /* as a whole XXX */
int hammer_limit_inode_recs = 1024;     /* per inode */
int hammer_limit_reclaim = HAMMER_RECLAIM_WAIT;
int hammer_limit_redo = 4096 * 1024;    /* per inode */
int hammer_autoflush = 2000;            /* auto flush */
int hammer_bio_count;
int hammer_verify_zone;
int hammer_verify_data = 1;
int hammer_write_mode;
int hammer_yield_check = 16;
int hammer_fsync_mode;
int64_t hammer_contention_count;
int64_t hammer_zone_limit;
SYSCTL_NODE(_vfs, OID_AUTO, hammer, CTLFLAG_RW, 0, "HAMMER filesystem");
SYSCTL_INT(_vfs_hammer, OID_AUTO, supported_version, CTLFLAG_RD,
           &hammer_supported_version, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_general, CTLFLAG_RW,
           &hammer_debug_general, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_io, CTLFLAG_RW,
           &hammer_debug_io, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_debug, CTLFLAG_RW,
           &hammer_debug_debug, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_inode, CTLFLAG_RW,
           &hammer_debug_inode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_locks, CTLFLAG_RW,
           &hammer_debug_locks, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_btree, CTLFLAG_RW,
           &hammer_debug_btree, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_tid, CTLFLAG_RW,
           &hammer_debug_tid, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover, CTLFLAG_RW,
           &hammer_debug_recover, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_recover_faults, CTLFLAG_RW,
           &hammer_debug_recover_faults, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, debug_critical, CTLFLAG_RW,
           &hammer_debug_critical, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, cluster_enable, CTLFLAG_RW,
           &hammer_cluster_enable, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_dirtybufspace, CTLFLAG_RW,
           &hammer_limit_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_recs, CTLFLAG_RW,
           &hammer_limit_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_inode_recs, CTLFLAG_RW,
           &hammer_limit_inode_recs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_reclaim, CTLFLAG_RW,
           &hammer_limit_reclaim, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, limit_redo, CTLFLAG_RW,
           &hammer_limit_redo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_fsyncs, CTLFLAG_RD,
           &hammer_count_fsyncs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_inodes, CTLFLAG_RD,
           &hammer_count_inodes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_iqueued, CTLFLAG_RD,
           &hammer_count_iqueued, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reclaiming, CTLFLAG_RD,
           &hammer_count_reclaiming, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_records, CTLFLAG_RD,
           &hammer_count_records, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_record_datas, CTLFLAG_RD,
           &hammer_count_record_datas, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_volumes, CTLFLAG_RD,
           &hammer_count_volumes, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_buffers, CTLFLAG_RD,
           &hammer_count_buffers, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_nodes, CTLFLAG_RD,
           &hammer_count_nodes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, count_extra_space_used, CTLFLAG_RD,
           &hammer_count_extra_space_used, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_searches, CTLFLAG_RD,
           &hammer_stats_btree_searches, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_lookups, CTLFLAG_RD,
           &hammer_stats_btree_lookups, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_inserts, CTLFLAG_RD,
           &hammer_stats_btree_inserts, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_deletes, CTLFLAG_RD,
           &hammer_stats_btree_deletes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_elements, CTLFLAG_RD,
           &hammer_stats_btree_elements, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_splits, CTLFLAG_RD,
           &hammer_stats_btree_splits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_iterations, CTLFLAG_RD,
           &hammer_stats_btree_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_btree_root_iterations, CTLFLAG_RD,
           &hammer_stats_btree_root_iterations, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_record_iterations, CTLFLAG_RD,
           &hammer_stats_record_iterations, 0, "");

SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_read, CTLFLAG_RD,
           &hammer_stats_file_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_write, CTLFLAG_RD,
           &hammer_stats_file_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsr, CTLFLAG_RD,
           &hammer_stats_file_iopsr, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_file_iopsw, CTLFLAG_RD,
           &hammer_stats_file_iopsw, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_read, CTLFLAG_RD,
           &hammer_stats_disk_read, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_disk_write, CTLFLAG_RD,
           &hammer_stats_disk_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_inode_flushes, CTLFLAG_RD,
           &hammer_stats_inode_flushes, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_commits, CTLFLAG_RD,
           &hammer_stats_commits, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_undo, CTLFLAG_RD,
           &hammer_stats_undo, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, stats_redo, CTLFLAG_RD,
           &hammer_stats_redo, 0, "");

SYSCTL_INT(_vfs_hammer, OID_AUTO, count_dirtybufspace, CTLFLAG_RD,
           &hammer_count_dirtybufspace, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_refedbufs, CTLFLAG_RD,
           &hammer_count_refedbufs, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_reservations, CTLFLAG_RD,
           &hammer_count_reservations, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_read, CTLFLAG_RD,
           &hammer_count_io_running_read, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_locked, CTLFLAG_RD,
           &hammer_count_io_locked, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, count_io_running_write, CTLFLAG_RD,
           &hammer_count_io_running_write, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, zone_limit, CTLFLAG_RW,
           &hammer_zone_limit, 0, "");
SYSCTL_QUAD(_vfs_hammer, OID_AUTO, contention_count, CTLFLAG_RW,
           &hammer_contention_count, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, autoflush, CTLFLAG_RW,
           &hammer_autoflush, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_zone, CTLFLAG_RW,
           &hammer_verify_zone, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, verify_data, CTLFLAG_RW,
           &hammer_verify_data, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, write_mode, CTLFLAG_RW,
           &hammer_write_mode, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, yield_check, CTLFLAG_RW,
           &hammer_yield_check, 0, "");
SYSCTL_INT(_vfs_hammer, OID_AUTO, fsync_mode, CTLFLAG_RW,
           &hammer_fsync_mode, 0, "");

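/*
 * The knobs above live under the vfs.hammer sysctl tree and can be
 * inspected or tuned from userland with sysctl(8), e.g. (values
 * illustrative):
 *
 *	sysctl vfs.hammer.count_inodes
 *	sysctl vfs.hammer.limit_recs=250000
 *
 * CTLFLAG_RD entries are read-only counters; CTLFLAG_RW entries may be
 * changed at runtime.
 */
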
KTR_INFO_MASTER(hammer);

/*
 * VFS ABI
 */
static void     hammer_free_hmp(struct mount *mp);

static int      hammer_vfs_mount(struct mount *mp, char *path, caddr_t data,
                                struct ucred *cred);
static int      hammer_vfs_unmount(struct mount *mp, int mntflags);
static int      hammer_vfs_root(struct mount *mp, struct vnode **vpp);
static int      hammer_vfs_statfs(struct mount *mp, struct statfs *sbp,
                                struct ucred *cred);
static int      hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp,
                                struct ucred *cred);
static int      hammer_vfs_sync(struct mount *mp, int waitfor);
static int      hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
                                ino_t ino, struct vnode **vpp);
static int      hammer_vfs_init(struct vfsconf *conf);
static int      hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
                                struct fid *fhp, struct vnode **vpp);
static int      hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp);
static int      hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                                int *exflagsp, struct ucred **credanonp);


static struct vfsops hammer_vfsops = {
        .vfs_mount      = hammer_vfs_mount,
        .vfs_unmount    = hammer_vfs_unmount,
        .vfs_root       = hammer_vfs_root,
        .vfs_statfs     = hammer_vfs_statfs,
        .vfs_statvfs    = hammer_vfs_statvfs,
        .vfs_sync       = hammer_vfs_sync,
        .vfs_vget       = hammer_vfs_vget,
        .vfs_init       = hammer_vfs_init,
        .vfs_vptofh     = hammer_vfs_vptofh,
        .vfs_fhtovp     = hammer_vfs_fhtovp,
        .vfs_checkexp   = hammer_vfs_checkexp
};

MALLOC_DEFINE(M_HAMMER, "HAMMER-mount", "");

VFS_SET(hammer_vfsops, hammer, 0);
MODULE_VERSION(hammer, 1);

static int
hammer_vfs_init(struct vfsconf *conf)
{
        int n;

        if (hammer_limit_recs == 0) {
                hammer_limit_recs = nbuf * 25;
                n = kmalloc_limit(M_HAMMER) / 512;
                if (hammer_limit_recs > n)
                        hammer_limit_recs = n;
        }
        if (hammer_limit_dirtybufspace == 0) {
                hammer_limit_dirtybufspace = hidirtybufspace / 2;
                if (hammer_limit_dirtybufspace < 100)
                        hammer_limit_dirtybufspace = 100;
        }
        return(0);
}
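
/*
 * Illustrative arithmetic (example values, not part of the build): on a
 * system with nbuf = 10000 and a 256MB kmalloc limit for M_HAMMER, the
 * defaults above work out to
 *
 *	hammer_limit_recs = 10000 * 25 = 250000
 *	n = (256 * 1024 * 1024) / 512 = 524288
 *
 * so the record limit stands at 250000; the kmalloc-based clamp only
 * applies when nbuf * 25 would exceed what the M_HAMMER pool can back.
 */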

static int
hammer_vfs_mount(struct mount *mp, char *mntpt, caddr_t data,
                 struct ucred *cred)
{
        struct hammer_mount_info info;
        hammer_mount_t hmp;
        hammer_volume_t rootvol;
        struct vnode *rootvp;
        struct vnode *devvp = NULL;
        const char *upath;      /* volume name in userspace */
        char *path;             /* volume name in system space */
        int error;
        int i;
        int master_id;
        int maxinodes;
        char *next_volume_ptr = NULL;

        /*
         * Accept hammer_mount_info.  mntpt is NULL for root mounts at boot.
         */
        if (mntpt == NULL) {
                bzero(&info, sizeof(info));
                info.asof = 0;
                info.hflags = 0;
                info.nvolumes = 1;

                next_volume_ptr = mp->mnt_stat.f_mntfromname;

                /* Count number of volumes separated by ':' */
                for (char *p = next_volume_ptr; *p != '\0'; ++p) {
                        if (*p == ':') {
                                ++info.nvolumes;
                        }
                }

                mp->mnt_flag &= ~MNT_RDONLY; /* mount R/W */
        } else {
                if ((error = copyin(data, &info, sizeof(info))) != 0)
                        return (error);
        }
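
        /*
         * Illustrative example: for a root mount the kernel passes the
         * volume list in f_mntfromname, e.g. "da0s1a:da1s1a" (hypothetical
         * device names).  The loop above counts two volumes; the load loop
         * further down prefixes each relative name with "/dev/" before
         * resolving it with kgetdiskbyname().
         */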

        /*
         * updating or new mount
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                hmp = (void *)mp->mnt_data;
                KKASSERT(hmp != NULL);
        } else {
                if (info.nvolumes <= 0 || info.nvolumes >= 32768)
                        return (EINVAL);
                hmp = NULL;
        }

        /*
         * master-id validation.  The master id may not be changed by a
         * mount update.
         */
        if (info.hflags & HMNT_MASTERID) {
                if (hmp && hmp->master_id != info.master_id) {
                        kprintf("hammer: cannot change master id "
                                "with mount update\n");
                        return(EINVAL);
                }
                master_id = info.master_id;
                if (master_id < -1 || master_id >= HAMMER_MAX_MASTERS)
                        return (EINVAL);
        } else {
                if (hmp)
                        master_id = hmp->master_id;
                else
                        master_id = 0;
        }

        /*
         * Internal mount data structure
         */
        if (hmp == NULL) {
                hmp = kmalloc(sizeof(*hmp), M_HAMMER, M_WAITOK | M_ZERO);
                mp->mnt_data = (qaddr_t)hmp;
                hmp->mp = mp;
                /*TAILQ_INIT(&hmp->recycle_list);*/

                /*
                 * Make sure kmalloc type limits are set appropriately.  If root
                 * increases the vnode limit you may have to do a dummy remount
                 * to adjust the HAMMER inode limit.
                 */
                kmalloc_create(&hmp->m_misc, "HAMMER-others");
                kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");

                maxinodes = desiredvnodes + desiredvnodes / 5 +
                            hammer_limit_reclaim * 2;
                kmalloc_raise_limit(hmp->m_inodes,
                                    maxinodes * sizeof(struct hammer_inode));

                hmp->root_btree_beg.localization = 0x00000000U;
                hmp->root_btree_beg.obj_id = -0x8000000000000000LL;
                hmp->root_btree_beg.key = -0x8000000000000000LL;
                hmp->root_btree_beg.create_tid = 1;
                hmp->root_btree_beg.delete_tid = 1;
                hmp->root_btree_beg.rec_type = 0;
                hmp->root_btree_beg.obj_type = 0;

                hmp->root_btree_end.localization = 0xFFFFFFFFU;
                hmp->root_btree_end.obj_id = 0x7FFFFFFFFFFFFFFFLL;
                hmp->root_btree_end.key = 0x7FFFFFFFFFFFFFFFLL;
                hmp->root_btree_end.create_tid = 0xFFFFFFFFFFFFFFFFULL;
                hmp->root_btree_end.delete_tid = 0;   /* special case */
                hmp->root_btree_end.rec_type = 0xFFFFU;
                hmp->root_btree_end.obj_type = 0;

                hmp->krate.freq = 1;    /* maximum reporting rate (hz) */
                hmp->krate.count = -16; /* initial burst */

                hmp->sync_lock.refs = 1;
                hmp->free_lock.refs = 1;
                hmp->undo_lock.refs = 1;
                hmp->blkmap_lock.refs = 1;
                hmp->snapshot_lock.refs = 1;
                hmp->volume_lock.refs = 1;

                TAILQ_INIT(&hmp->delay_list);
                TAILQ_INIT(&hmp->flush_group_list);
                TAILQ_INIT(&hmp->objid_cache_list);
                TAILQ_INIT(&hmp->undo_lru_list);
                TAILQ_INIT(&hmp->reclaim_list);
        }
        hmp->hflags &= ~HMNT_USERFLAGS;
        hmp->hflags |= info.hflags & HMNT_USERFLAGS;

        hmp->master_id = master_id;

        if (info.asof) {
                mp->mnt_flag |= MNT_RDONLY;
                hmp->asof = info.asof;
        } else {
                hmp->asof = HAMMER_MAX_TID;
        }

        hmp->volume_to_remove = -1;

        /*
         * Re-open read-write if originally read-only, or vice versa.
         *
         * When going from read-only to read-write execute the stage2
         * recovery if it has not already been run.
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                error = 0;
                if (hmp->ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
                        kprintf("HAMMER read-only -> read-write\n");
                        hmp->ronly = 0;
                        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                                hammer_adjust_volume_mode, NULL);
                        rootvol = hammer_get_root_volume(hmp, &error);
                        if (rootvol) {
                                hammer_recover_flush_buffers(hmp, rootvol, 1);
                                error = hammer_recover_stage2(hmp, rootvol);
                                bcopy(rootvol->ondisk->vol0_blockmap,
                                      hmp->blockmap,
                                      sizeof(hmp->blockmap));
                                hammer_rel_volume(rootvol, 0);
                        }
                        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                                hammer_reload_inode, NULL);
                        /* kernel clears MNT_RDONLY */
                } else if (hmp->ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
                        kprintf("HAMMER read-write -> read-only\n");
                        hmp->ronly = 1; /* messy */
                        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                                hammer_reload_inode, NULL);
                        hmp->ronly = 0;
                        hammer_flusher_sync(hmp);
                        hammer_flusher_sync(hmp);
                        hammer_flusher_sync(hmp);
                        hmp->ronly = 1;
                        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                                hammer_adjust_volume_mode, NULL);
                }
                return(error);
        }

        RB_INIT(&hmp->rb_vols_root);
        RB_INIT(&hmp->rb_inos_root);
        RB_INIT(&hmp->rb_redo_root);
        RB_INIT(&hmp->rb_nods_root);
        RB_INIT(&hmp->rb_undo_root);
        RB_INIT(&hmp->rb_resv_root);
        RB_INIT(&hmp->rb_bufs_root);
        RB_INIT(&hmp->rb_pfsm_root);

        hmp->ronly = ((mp->mnt_flag & MNT_RDONLY) != 0);

        TAILQ_INIT(&hmp->volu_list);
        TAILQ_INIT(&hmp->undo_list);
        TAILQ_INIT(&hmp->data_list);
        TAILQ_INIT(&hmp->meta_list);
        TAILQ_INIT(&hmp->lose_list);
        TAILQ_INIT(&hmp->iorun_list);

        /*
         * Load volumes
         */
        path = objcache_get(namei_oc, M_WAITOK);
        hmp->nvolumes = -1;
        for (i = 0; i < info.nvolumes; ++i) {
                if (mntpt == NULL) {
                        /*
                         * Root mount.
                         */
                        KKASSERT(next_volume_ptr != NULL);
                        strcpy(path, "");
                        if (*next_volume_ptr != '/') {
                                /* relative path */
                                strcpy(path, "/dev/");
                        }
                        int k;
                        for (k = strlen(path); k < MAXPATHLEN-1; ++k) {
                                if (*next_volume_ptr == '\0') {
                                        break;
                                } else if (*next_volume_ptr == ':') {
                                        ++next_volume_ptr;
                                        break;
                                } else {
                                        path[k] = *next_volume_ptr;
                                        ++next_volume_ptr;
                                }
                        }
                        path[k] = '\0';

                        error = 0;
                        cdev_t dev = kgetdiskbyname(path);
                        error = bdevvp(dev, &devvp);
                        if (error) {
                                kprintf("hammer_mountroot: can't find devvp\n");
                        }
                } else {
                        error = copyin(&info.volumes[i], &upath,
                                       sizeof(char *));
                        if (error == 0)
                                error = copyinstr(upath, path,
                                                  MAXPATHLEN, NULL);
                }
                if (error == 0)
                        error = hammer_install_volume(hmp, path, devvp);
                if (error)
                        break;
        }
        objcache_put(namei_oc, path);

        /*
         * Make sure we found a root volume
         */
        if (error == 0 && hmp->rootvol == NULL) {
                kprintf("hammer_mount: No root volume found!\n");
                error = EINVAL;
        }

        /*
         * Check that all required volumes are available
         */
        if (error == 0 && hammer_mountcheck_volumes(hmp)) {
                kprintf("hammer_mount: Missing volumes, cannot mount!\n");
                error = EINVAL;
        }

        if (error) {
                hammer_free_hmp(mp);
                return (error);
        }

        /*
         * No errors, setup enough of the mount point so we can lookup the
         * root vnode.
         */
        mp->mnt_iosize_max = MAXPHYS;
        mp->mnt_kern_flag |= MNTK_FSMID;

        /*
         * MPSAFE code.  Note that VOPs and VFSops which are not MPSAFE
         * will acquire a per-mount token prior to entry and release it
         * on return, so even if we do not specify it we no longer get
         * the BGL regardless of how we are flagged.
         */
        mp->mnt_kern_flag |= MNTK_RD_MPSAFE | MNTK_GA_MPSAFE |
                             MNTK_IN_MPSAFE;

        /*
         * note: f_iosize is used by vnode_pager_haspage() when constructing
         * its VOP_BMAP call.
         */
        mp->mnt_stat.f_iosize = HAMMER_BUFSIZE;
        mp->mnt_stat.f_bsize = HAMMER_BUFSIZE;

        mp->mnt_vstat.f_frsize = HAMMER_BUFSIZE;
        mp->mnt_vstat.f_bsize = HAMMER_BUFSIZE;

        mp->mnt_maxsymlinklen = 255;
        mp->mnt_flag |= MNT_LOCAL;

        vfs_add_vnodeops(mp, &hammer_vnode_vops, &mp->mnt_vn_norm_ops);
        vfs_add_vnodeops(mp, &hammer_spec_vops, &mp->mnt_vn_spec_ops);
        vfs_add_vnodeops(mp, &hammer_fifo_vops, &mp->mnt_vn_fifo_ops);

        /*
         * The root volume's ondisk pointer is only valid if we hold a
         * reference to it.
         */
        rootvol = hammer_get_root_volume(hmp, &error);
        if (error)
                goto failed;

        /*
         * Perform any necessary UNDO operations.  The recovery code does
         * call hammer_undo_lookup() so we have to pre-cache the blockmap,
         * and then re-copy it again after recovery is complete.
         *
         * If this is a read-only mount the UNDO information is retained
         * in memory in the form of dirty buffer cache buffers, and not
         * written back to the media.
         */
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
              sizeof(hmp->blockmap));

        /*
         * Check filesystem version
         */
        hmp->version = rootvol->ondisk->vol_version;
        if (hmp->version < HAMMER_VOL_VERSION_MIN ||
            hmp->version > HAMMER_VOL_VERSION_MAX) {
                kprintf("HAMMER: mount unsupported fs version %d\n",
                        hmp->version);
                error = ERANGE;
                goto done;
        }

        /*
         * The undo_rec_limit limits the size of flush groups to avoid
         * blowing out the UNDO FIFO.  This calculation is typically in
         * the tens of thousands and matters primarily when small
         * HAMMER filesystems are created.
         */
        hmp->undo_rec_limit = hammer_undo_max(hmp) / 8192 + 100;
        if (hammer_debug_general & 0x0001)
                kprintf("HAMMER: undo_rec_limit %d\n", hmp->undo_rec_limit);
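
        /*
         * Worked example (illustrative, assuming hammer_undo_max()
         * reports the UNDO FIFO size in bytes): with a 512MB FIFO,
         *
         *	undo_rec_limit = 536870912 / 8192 + 100 = 65636
         *
         * which is in the tens of thousands as noted above.  A small
         * 64MB FIFO would instead yield 8192 + 100 = 8292.
         */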

        /*
         * NOTE: Recover stage1 not only handles meta-data recovery, it
         *       also sets hmp->undo_seqno for HAMMER VERSION 4+ filesystems.
         */
        error = hammer_recover_stage1(hmp, rootvol);
        if (error) {
                kprintf("Failed to recover HAMMER filesystem on mount\n");
                goto done;
        }

        /*
         * Finish setup now that we have a good root volume.
         *
         * The top 16 bits of fsid.val[1] are a pfs id.
         */
        ksnprintf(mp->mnt_stat.f_mntfromname,
                  sizeof(mp->mnt_stat.f_mntfromname), "%s",
                  rootvol->ondisk->vol_name);
        mp->mnt_stat.f_fsid.val[0] =
                crc32((char *)&rootvol->ondisk->vol_fsid + 0, 8);
        mp->mnt_stat.f_fsid.val[1] =
                crc32((char *)&rootvol->ondisk->vol_fsid + 8, 8);
        mp->mnt_stat.f_fsid.val[1] &= 0x0000FFFF;

        mp->mnt_vstat.f_fsid_uuid = rootvol->ondisk->vol_fsid;
        mp->mnt_vstat.f_fsid = crc32(&mp->mnt_vstat.f_fsid_uuid,
                                     sizeof(mp->mnt_vstat.f_fsid_uuid));

        /*
         * Certain often-modified fields in the root volume are cached in
         * the hammer_mount structure so we do not have to generate lots
         * of little UNDO structures for them.
         *
         * Recopy after recovery.  This also has the side effect of
         * setting our cached undo FIFO's first_offset, which serves to
         * placemark the FIFO start for the NEXT flush cycle while the
         * on-disk first_offset represents the LAST flush cycle.
         */
        hmp->next_tid = rootvol->ondisk->vol0_next_tid;
        hmp->flush_tid1 = hmp->next_tid;
        hmp->flush_tid2 = hmp->next_tid;
        bcopy(rootvol->ondisk->vol0_blockmap, hmp->blockmap,
              sizeof(hmp->blockmap));
        hmp->copy_stat_freebigblocks = rootvol->ondisk->vol0_stat_freebigblocks;

        hammer_flusher_create(hmp);

        /*
         * Locate the root directory using the root cluster's B-Tree as a
         * starting point.  The root directory uses an obj_id of 1.
         *
         * FUTURE: Leave the root directory cached referenced but unlocked
         * in hmp->rootvp (need to flush it on unmount).
         */
        error = hammer_vfs_vget(mp, NULL, 1, &rootvp);
        if (error)
                goto done;
        vput(rootvp);
        /*vn_unlock(hmp->rootvp);*/
        if (hmp->ronly == 0)
                error = hammer_recover_stage2(hmp, rootvol);

done:
        hammer_rel_volume(rootvol, 0);
failed:
        /*
         * Cleanup and return.
         */
        if (error)
                hammer_free_hmp(mp);
        return (error);
}

static int
hammer_vfs_unmount(struct mount *mp, int mntflags)
{
#if 0
        struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
        int flags;
        int error;

        /*
         * Clean out the vnodes
         */
        flags = 0;
        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;
        if ((error = vflush(mp, 0, flags)) != 0)
                return (error);

        /*
         * Clean up the internal mount structure and related entities.  This
         * may issue I/O.
         */
        hammer_free_hmp(mp);
        return(0);
}

/*
 * Clean up the internal mount structure and disassociate it from the mount.
 * This may issue I/O.
 */
static void
hammer_free_hmp(struct mount *mp)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        hammer_flush_group_t flg;
        int count;
        int dummy;

        /*
         * Flush anything dirty.  This won't even run if the
         * filesystem errored-out.
         */
        count = 0;
        while (hammer_flusher_haswork(hmp)) {
                hammer_flusher_sync(hmp);
                ++count;
                if (count >= 5) {
                        if (count == 5)
                                kprintf("HAMMER: umount flushing.");
                        else
                                kprintf(".");
                        tsleep(&dummy, 0, "hmrufl", hz);
                }
                if (count == 30) {
                        kprintf("giving up\n");
                        break;
                }
        }
        if (count >= 5 && count < 30)
                kprintf("\n");

        /*
         * If the mount had a critical error we have to destroy any
         * remaining inodes before we can finish cleaning up the flusher.
         */
        if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
                RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                        hammer_destroy_inode_callback, NULL);
        }

        /*
         * There shouldn't be any inodes left now and any left-over
         * flush groups should now be empty.
         */
        KKASSERT(RB_EMPTY(&hmp->rb_inos_root));
        while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
                TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
                KKASSERT(RB_EMPTY(&flg->flush_tree));
                if (flg->refs) {
                        kprintf("HAMMER: Warning, flush_group %p was "
                                "not empty on umount!\n", flg);
                }
                kfree(flg, hmp->m_misc);
        }

        /*
         * We can finally destroy the flusher
         */
        hammer_flusher_destroy(hmp);

        /*
         * We may have held recovered buffers due to a read-only mount.
         * These must be discarded.
         */
        if (hmp->ronly)
                hammer_recover_flush_buffers(hmp, NULL, -1);

        /*
         * Unload buffers and then volumes
         */
        RB_SCAN(hammer_buf_rb_tree, &hmp->rb_bufs_root, NULL,
                hammer_unload_buffer, NULL);
        RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL,
                hammer_unload_volume, NULL);

        mp->mnt_data = NULL;
        mp->mnt_flag &= ~MNT_LOCAL;
        hmp->mp = NULL;
        hammer_destroy_objid_cache(hmp);
        kmalloc_destroy(&hmp->m_misc);
        kmalloc_destroy(&hmp->m_inodes);
        kfree(hmp, M_HAMMER);
}

/*
 * Report critical errors.  ip may be NULL.
 */
void
hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip,
                      int error, const char *msg)
{
        hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR;

        krateprintf(&hmp->krate,
                    "HAMMER(%s): Critical error inode=%jd error=%d %s\n",
                    hmp->mp->mnt_stat.f_mntfromname,
                    (intmax_t)(ip ? ip->obj_id : -1),
                    error, msg);

        if (hmp->ronly == 0) {
                hmp->ronly = 2;         /* special errored read-only mode */
                hmp->mp->mnt_flag |= MNT_RDONLY;
                kprintf("HAMMER(%s): Forcing read-only mode\n",
                        hmp->mp->mnt_stat.f_mntfromname);
        }
        hmp->error = error;
        if (hammer_debug_critical)
                Debugger("Entering debugger");
}


/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
                ino_t ino, struct vnode **vpp)
{
        struct hammer_transaction trans;
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        struct hammer_inode *ip;
        int error;
        u_int32_t localization;

        hammer_simple_transaction(&trans, hmp);

        /*
         * If a directory vnode is supplied (mainly NFS) then we can acquire
         * the PFS domain from it.  Otherwise we would only be able to vget
         * inodes in the root PFS.
         */
        if (dvp) {
                localization = HAMMER_DEF_LOCALIZATION +
                                VTOI(dvp)->obj_localization;
        } else {
                localization = HAMMER_DEF_LOCALIZATION;
        }

        /*
         * Lookup the requested HAMMER inode.  The structure must be
         * left unlocked while we manipulate the related vnode to avoid
         * a deadlock.
         */
        ip = hammer_get_inode(&trans, NULL, ino,
                              hmp->asof, localization,
                              0, &error);
        if (ip == NULL) {
                *vpp = NULL;
                hammer_done_transaction(&trans);
                return(error);
        }
        error = hammer_get_vnode(ip, vpp);
        hammer_rel_inode(ip, 0);
        hammer_done_transaction(&trans);
        return (error);
}

/*
 * Return the root vnode for the filesystem.
 *
 * HAMMER stores the root vnode in the hammer_mount structure so
 * getting it is easy.
 */
static int
hammer_vfs_root(struct mount *mp, struct vnode **vpp)
{
#if 0
        struct hammer_mount *hmp = (void *)mp->mnt_data;
#endif
        int error;

        error = hammer_vfs_vget(mp, NULL, 1, vpp);
        return (error);
}

static int
hammer_vfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        hammer_volume_t volume;
        hammer_volume_ondisk_t ondisk;
        int error;
        int64_t bfree;
        int64_t breserved;

        volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return(error);
        ondisk = volume->ondisk;

        /*
         * Basic stats
         */
        _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
        mp->mnt_stat.f_files = ondisk->vol0_stat_inodes;
        bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
        hammer_rel_volume(volume, 0);

        mp->mnt_stat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
        mp->mnt_stat.f_bavail = mp->mnt_stat.f_bfree;
        if (mp->mnt_stat.f_files < 0)
                mp->mnt_stat.f_files = 0;

        *sbp = mp->mnt_stat;
        return(0);
}

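/*
 * Illustrative free-space arithmetic for the statfs/statvfs paths above,
 * assuming the usual HAMMER constants of an 8MB big-block
 * (HAMMER_LARGEBLOCK_SIZE) and a 16KB buffer (HAMMER_BUFSIZE): with
 * vol0_stat_freebigblocks = 100 and no writer reservations,
 *
 *	bfree   = 100 * 8MB = 800MB
 *	f_bfree = 800MB / 16KB = 51200 blocks
 *
 * i.e. each free big-block contributes 512 filesystem blocks.
 */
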
static int
hammer_vfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        hammer_volume_t volume;
        hammer_volume_ondisk_t ondisk;
        int error;
        int64_t bfree;
        int64_t breserved;

        volume = hammer_get_root_volume(hmp, &error);
        if (error)
                return(error);
        ondisk = volume->ondisk;

        /*
         * Basic stats
         */
        _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &breserved);
        mp->mnt_vstat.f_files = ondisk->vol0_stat_inodes;
        bfree = ondisk->vol0_stat_freebigblocks * HAMMER_LARGEBLOCK_SIZE;
        hammer_rel_volume(volume, 0);

        mp->mnt_vstat.f_bfree = (bfree - breserved) / HAMMER_BUFSIZE;
        mp->mnt_vstat.f_bavail = mp->mnt_vstat.f_bfree;
        if (mp->mnt_vstat.f_files < 0)
                mp->mnt_vstat.f_files = 0;
        *sbp = mp->mnt_vstat;
        return(0);
}

/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * pass advances the undo start index to the end index, so if a crash
 * occurs no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we did
 * we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
        struct hammer_mount *hmp = (void *)mp->mnt_data;
        int error;

        if (panicstr == NULL) {
                error = hammer_sync_hmp(hmp, waitfor);
        } else {
                error = EIO;
        }
        return (error);
}

/*
 * Convert a vnode to a file handle.
 */
static int
hammer_vfs_vptofh(struct vnode *vp, struct fid *fhp)
{
        hammer_inode_t ip;

        KKASSERT(MAXFIDSZ >= 16);
        ip = VTOI(vp);
        fhp->fid_len = offsetof(struct fid, fid_data[16]);
        fhp->fid_ext = ip->obj_localization >> 16;
        bcopy(&ip->obj_id, fhp->fid_data + 0, sizeof(ip->obj_id));
        bcopy(&ip->obj_asof, fhp->fid_data + 8, sizeof(ip->obj_asof));
        return(0);
}

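/*
 * For reference, the file handle layout produced above (and consumed by
 * hammer_vfs_fhtovp() below) is:
 *
 *	fid_ext        - top 16 bits of the inode's obj_localization
 *	fid_data[0-7]  - obj_id   (64 bits)
 *	fid_data[8-15] - obj_asof (64 bits)
 *
 * 16 bytes of fid_data total, hence the KKASSERT(MAXFIDSZ >= 16).
 */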

/*
 * Convert a file handle back to a vnode.
 *
 * Use rootvp to enforce PFS isolation when a PFS is exported via a
 * null mount.
 */
static int
hammer_vfs_fhtovp(struct mount *mp, struct vnode *rootvp,
                  struct fid *fhp, struct vnode **vpp)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        struct hammer_inode_info info;
        int error;
        u_int32_t localization;

        bcopy(fhp->fid_data + 0, &info.obj_id, sizeof(info.obj_id));
        bcopy(fhp->fid_data + 8, &info.obj_asof, sizeof(info.obj_asof));
        if (rootvp)
                localization = VTOI(rootvp)->obj_localization;
        else
                localization = (u_int32_t)fhp->fid_ext << 16;

        hammer_simple_transaction(&trans, (void *)mp->mnt_data);

        /*
         * Get/allocate the hammer_inode structure.  The structure must be
         * unlocked while we manipulate the related vnode to avoid a
         * deadlock.
         */
        ip = hammer_get_inode(&trans, NULL, info.obj_id,
                              info.obj_asof, localization, 0, &error);
        if (ip) {
                error = hammer_get_vnode(ip, vpp);
                hammer_rel_inode(ip, 0);
        } else {
                *vpp = NULL;
        }
        hammer_done_transaction(&trans);
        return (error);
}

static int
hammer_vfs_checkexp(struct mount *mp, struct sockaddr *nam,
                    int *exflagsp, struct ucred **credanonp)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;
        struct netcred *np;
        int error;

        np = vfs_export_lookup(mp, &hmp->export, nam);
        if (np) {
                *exflagsp = np->netc_exflags;
                *credanonp = &np->netc_anon;
                error = 0;
        } else {
                error = EACCES;
        }
        return (error);
}

int
hammer_vfs_export(struct mount *mp, int op, const struct export_args *export)
{
        hammer_mount_t hmp = (void *)mp->mnt_data;
        int error;

        switch(op) {
        case MOUNTCTL_SET_EXPORT:
                error = vfs_export(mp, &hmp->export, export);
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return(error);
}