HAMMER - new hammer_lock code, fix mplock bug in last commit, mpsafe getattr.
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_subs.c,v 1.35 2008/10/15 22:38:37 dillon Exp $
 */
/*
 * HAMMER structural locking
 */

#include "hammer.h"
#include <sys/dirent.h>
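
/*
 * Obtain an exclusive lock, sleeping with the supplied wchan ident
 * when the lock is contested.  The lock can be acquired recursively
 * by the owning thread.  The caller must already hold a ref on the
 * structure.
 */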
void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;

        KKASSERT(lock->refs > 0);
        for (;;) {
                lv = lock->lockval;

                if (lv == 0) {
                        nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                lock->owner = td;
                                break;
                        }
                } else if ((lv & HAMMER_LOCKF_EXCLUSIVE) && lock->owner == td) {
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv))
                                break;
                } else {
                        if (hammer_debug_locks) {
                                kprintf("hammer_lock_ex: held by %p\n",
                                        lock->owner);
                        }
                        nlv = lv | HAMMER_LOCKF_WANTED;
                        ++hammer_contention_count;
                        crit_enter();
                        tsleep_interlock(lock);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                tsleep(lock, 0, ident, 0);
                                if (hammer_debug_locks)
                                        kprintf("hammer_lock_ex: try again\n");
                        }
                        crit_exit();
                }
        }
}

/*
 * Try to obtain an exclusive lock.  Returns 0 on success, EAGAIN if the
 * lock is already held by someone else (this function never blocks).
 */
int
hammer_lock_ex_try(struct hammer_lock *lock)
{
        thread_t td = curthread;
        int error;
        u_int lv;
        u_int nlv;

        KKASSERT(lock->refs > 0);
        for (;;) {
                lv = lock->lockval;

                if (lv == 0) {
                        nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                lock->owner = td;
                                error = 0;
                                break;
                        }
                } else if ((lv & HAMMER_LOCKF_EXCLUSIVE) && lock->owner == td) {
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                error = 0;
                                break;
                        }
                } else {
                        error = EAGAIN;
                        break;
                }
        }
        return (error);
}

/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;

        KKASSERT(lock->refs > 0);
        for (;;) {
                lv = lock->lockval;

                if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv))
                                break;
                } else if (lock->owner == td) {
                        /*
                         * Disallowed case: the caller already holds the
                         * lock exclusively.  Drop into the kernel debugger
                         * for now; a 'cont' continues with the (recursed)
                         * exclusive lock.
                         */
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                Debugger("hammer_lock_sh: already hold ex");
                                break;
                        }
                } else {
                        nlv = lv | HAMMER_LOCKF_WANTED;
                        ++hammer_contention_count;
                        crit_enter();
                        tsleep_interlock(lock);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                tsleep(lock, 0, "hmrlck", 0);
                        }
                        crit_exit();
                }
        }
}
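
/*
 * Try to obtain a shared lock.  Returns 0 on success, EAGAIN if an
 * exclusive lock held by another thread would force us to block.
 */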
int
hammer_lock_sh_try(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;
        int error;

        KKASSERT(lock->refs > 0);
        for (;;) {
                lv = lock->lockval;

                if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                error = 0;
                                break;
                        }
                } else if (lock->owner == td) {
                        /*
                         * Disallowed case: the caller already holds the
                         * lock exclusively.  Drop into the kernel debugger
                         * for now; a 'cont' continues with the (recursed)
                         * exclusive lock.
                         */
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                Debugger("hammer_lock_sh: already hold ex");
                                error = 0;
                                break;
                        }
                } else {
                        error = EAGAIN;
                        break;
                }
        }
        return (error);
}

/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all or held exclusively
 * by someone else, this function will panic.
 */
int
hammer_lock_upgrade(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;
        int error;

        for (;;) {
                lv = lock->lockval;

                if ((lv & ~HAMMER_LOCKF_WANTED) == 1) {
                        nlv = lv | HAMMER_LOCKF_EXCLUSIVE;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                lock->owner = td;
                                error = 0;
                                break;
                        }
                } else if (lv & HAMMER_LOCKF_EXCLUSIVE) {
                        if (lock->owner != curthread)
                                panic("hammer_lock_upgrade: illegal state");
                        error = 0;
                        break;
                } else if ((lv & ~HAMMER_LOCKF_WANTED) == 0) {
                        panic("hammer_lock_upgrade: lock is not held");
                        /* NOT REACHED */
                        error = EDEADLK;
                        break;
                } else {
                        error = EDEADLK;
                        break;
                }
        }
        return (error);
}

/*
 * Downgrade an exclusively held lock to a shared lock.
 */
void
hammer_lock_downgrade(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;

        KKASSERT((lock->lockval & ~HAMMER_LOCKF_WANTED) ==
                 (HAMMER_LOCKF_EXCLUSIVE | 1));
        KKASSERT(lock->owner == td);

        /*
         * NOTE: Must clear owner before releasing exclusivity
         */
        lock->owner = NULL;

        for (;;) {
                lv = lock->lockval;
                nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
                if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                        if (lv & HAMMER_LOCKF_WANTED)
                                wakeup(lock);
                        break;
                }
        }
}
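
/*
 * Release a shared or exclusive lock.  The last release of an exclusive
 * lock clears the owner and wakes up any threads waiting on the lock.
 */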
void
hammer_unlock(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;

        lv = lock->lockval;
        KKASSERT(lv != 0);
        if (lv & HAMMER_LOCKF_EXCLUSIVE)
                KKASSERT(lock->owner == td);

        for (;;) {
                lv = lock->lockval;
                nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
                if (nlv > 1) {
                        nlv = lv - 1;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv))
                                break;
                } else if (nlv == 1) {
                        nlv = 0;
                        if (lv & HAMMER_LOCKF_EXCLUSIVE)
                                lock->owner = NULL;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                if (lv & HAMMER_LOCKF_WANTED)
                                        wakeup(lock);
                                break;
                        }
                } else {
                        panic("hammer_unlock: lock %p is not held", lock);
                }
        }
}

/*
 * The calling thread must be holding a shared or exclusive lock.
 * Returns < 0 if the lock is held shared, and > 0 if it is held
 * exclusively.
 */
int
hammer_lock_status(struct hammer_lock *lock)
{
        u_int lv = lock->lockval;

        if (lv & HAMMER_LOCKF_EXCLUSIVE)
                return(1);
        else if (lv)
                return(-1);
        panic("hammer_lock_status: lock must be held: %p", lock);
}
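
/*
 * Bump or drop the ref count on a lock structure.  Refs keep the
 * structure stable; they are independent of the shared/exclusive
 * lockval state, but the locking functions assert that a ref is held.
 */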
void
hammer_ref(struct hammer_lock *lock)
{
        KKASSERT(lock->refs >= 0);
        atomic_add_int(&lock->refs, 1);
}

void
hammer_unref(struct hammer_lock *lock)
{
        KKASSERT(lock->refs > 0);
        atomic_subtract_int(&lock->refs, 1);
}
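
/*
 * Typical usage of the ref/lock pairing (an illustrative sketch only;
 * real callers are spread throughout the HAMMER code and often go
 * through higher level helpers):
 *
 *	hammer_ref(&node->lock);	(stabilize the structure)
 *	hammer_lock_ex(&node->lock);	(may recurse for the owner)
 *	...modify the structure...
 *	hammer_unlock(&node->lock);
 *	hammer_unref(&node->lock);
 */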

/*
 * The sync_lock must be held when doing any modifying operations on
 * meta-data.  It does not have to be held when modifying non-meta-data buffers
 * (backend or frontend).
 *
 * The flusher holds the lock exclusively while all other consumers hold it
 * shared.  All modifying operations made while holding the lock are atomic
 * in that they will be made part of the same flush group.
 *
 * Due to the atomicity requirement deadlock recovery code CANNOT release the
 * sync lock, nor can we give pending exclusive sync locks priority over
 * a shared sync lock as this could lead to a 3-way deadlock.
 */
void
hammer_sync_lock_ex(hammer_transaction_t trans)
{
        ++trans->sync_lock_refs;
        hammer_lock_ex(&trans->hmp->sync_lock);
}

void
hammer_sync_lock_sh(hammer_transaction_t trans)
{
        ++trans->sync_lock_refs;
        hammer_lock_sh(&trans->hmp->sync_lock);
}

int
hammer_sync_lock_sh_try(hammer_transaction_t trans)
{
        int error;

        ++trans->sync_lock_refs;
        if ((error = hammer_lock_sh_try(&trans->hmp->sync_lock)) != 0)
                --trans->sync_lock_refs;
        return (error);
}

void
hammer_sync_unlock(hammer_transaction_t trans)
{
        --trans->sync_lock_refs;
        hammer_unlock(&trans->hmp->sync_lock);
}

/*
 * Misc
 */
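/*
 * HAMMER stores uid/gid as uuids.  The unix xid is embedded in bytes
 * 2-5 of the uuid's node field; these two helpers extract and install
 * it.
 */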
u_int32_t
hammer_to_unix_xid(uuid_t *uuid)
{
        return(*(u_int32_t *)&uuid->node[2]);
}

void
hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid)
{
        bzero(uuid, sizeof(*uuid));
        *(u_int32_t *)&uuid->node[2] = guid;
}
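
/*
 * HAMMER stores timestamps as 64 bit microseconds since the epoch.
 * Convert between that representation and a struct timespec.
 */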
void
hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts)
{
        ts->tv_sec = (unsigned long)(xtime / 1000000);
        ts->tv_nsec = (unsigned int)(xtime % 1000000) * 1000L;
}

u_int64_t
hammer_timespec_to_time(struct timespec *ts)
{
        u_int64_t xtime;

        xtime = (unsigned)(ts->tv_nsec / 1000) +
                (unsigned long)ts->tv_sec * 1000000ULL;
        return(xtime);
}

/*
 * Convert a HAMMER filesystem object type to a vnode type
 */
enum vtype
hammer_get_vnode_type(u_int8_t obj_type)
{
        switch(obj_type) {
        case HAMMER_OBJTYPE_DIRECTORY:
                return(VDIR);
        case HAMMER_OBJTYPE_REGFILE:
                return(VREG);
        case HAMMER_OBJTYPE_DBFILE:
                return(VDATABASE);
        case HAMMER_OBJTYPE_FIFO:
                return(VFIFO);
        case HAMMER_OBJTYPE_SOCKET:
                return(VSOCK);
        case HAMMER_OBJTYPE_CDEV:
                return(VCHR);
        case HAMMER_OBJTYPE_BDEV:
                return(VBLK);
        case HAMMER_OBJTYPE_SOFTLINK:
                return(VLNK);
        default:
                return(VBAD);
        }
        /* not reached */
}
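
/*
 * Convert a HAMMER filesystem object type to a dirent d_type
 */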
int
hammer_get_dtype(u_int8_t obj_type)
{
        switch(obj_type) {
        case HAMMER_OBJTYPE_DIRECTORY:
                return(DT_DIR);
        case HAMMER_OBJTYPE_REGFILE:
                return(DT_REG);
        case HAMMER_OBJTYPE_DBFILE:
                return(DT_DBF);
        case HAMMER_OBJTYPE_FIFO:
                return(DT_FIFO);
        case HAMMER_OBJTYPE_SOCKET:
                return(DT_SOCK);
        case HAMMER_OBJTYPE_CDEV:
                return(DT_CHR);
        case HAMMER_OBJTYPE_BDEV:
                return(DT_BLK);
        case HAMMER_OBJTYPE_SOFTLINK:
                return(DT_LNK);
        default:
                return(DT_UNKNOWN);
        }
        /* not reached */
}
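
/*
 * Convert a vnode type to a HAMMER filesystem object type
 */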
u_int8_t
hammer_get_obj_type(enum vtype vtype)
{
        switch(vtype) {
        case VDIR:
                return(HAMMER_OBJTYPE_DIRECTORY);
        case VREG:
                return(HAMMER_OBJTYPE_REGFILE);
        case VDATABASE:
                return(HAMMER_OBJTYPE_DBFILE);
        case VFIFO:
                return(HAMMER_OBJTYPE_FIFO);
        case VSOCK:
                return(HAMMER_OBJTYPE_SOCKET);
        case VCHR:
                return(HAMMER_OBJTYPE_CDEV);
        case VBLK:
                return(HAMMER_OBJTYPE_BDEV);
        case VLNK:
                return(HAMMER_OBJTYPE_SOFTLINK);
        default:
                return(HAMMER_OBJTYPE_UNKNOWN);
        }
        /* not reached */
}

/*
 * Return flags for hammer_delete_at_cursor()
 */
int
hammer_nohistory(hammer_inode_t ip)
{
        if (ip->hmp->hflags & HMNT_NOHISTORY)
                return(HAMMER_DELETE_DESTROY);
        if (ip->ino_data.uflags & (SF_NOHISTORY|UF_NOHISTORY))
                return(HAMMER_DELETE_DESTROY);
        return(0);
}

/*
 * ALGORITHM VERSION 1:
 *      Return a namekey hash.  The 64 bit namekey hash consists of a 32 bit
 *      crc in the upper 32 bits and 0 in the lower 32 bits.  The caller will
 *      use the low 32 bits to generate a unique key and will scan all
 *      entries with the same upper 32 bits when issuing a lookup.
 *
 *      0hhhhhhhhhhhhhhh hhhhhhhhhhhhhhhh 0000000000000000 0000000000000000
 *
 * ALGORITHM VERSION 2:
 *
 *      The 64 bit hash key is generated from the following components.  The
 *      first three characters are encoded as 5-bit quantities, the middle
 *      N characters are hashed into a 6 bit quantity, and the last two
 *      characters are encoded as 5-bit quantities.  A 32 bit hash of the
 *      entire filename is encoded in the low 32 bits.  Bit 0 is set to
 *      0 to guarantee us a 2^24 iteration space.
 *
 *      0aaaaabbbbbccccc mmmmmmyyyyyzzzzz hhhhhhhhhhhhhhhh hhhhhhhhhhhhhhh0
 *
 *      This gives us a domain sort for the first three characters, the last
 *      two characters, and breaks the middle space into 64 random domains.
 *      The domain sort folds the upper case, lower case, digit, and
 *      punctuation spaces together, the idea being that filenames tend not
 *      to be a mix of those domains.
 *
 *      The 64 random domains act as a sub-sort for the middle characters
 *      but may cause a random seek.  If the filesystem is being accessed
 *      in sorted order we should tend to get very good linearity for most
 *      filenames and devolve into more random seeks otherwise.
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
int64_t
hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
                         u_int32_t *max_iterationsp)
{
        int64_t key;
        int32_t crcx;
        const char *aname = name;

        switch (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIRHASH_MASK) {
        case HAMMER_INODE_CAP_DIRHASH_ALG0:
                key = (int64_t)(crc32(aname, len) & 0x7FFFFFFF) << 32;
                if (key == 0)
                        key |= 0x100000000LL;
                *max_iterationsp = 0xFFFFFFFFU;
                break;
        case HAMMER_INODE_CAP_DIRHASH_ALG1:
                key = (u_int32_t)crc32(aname, len) & 0xFFFFFFFEU;

                switch(len) {
                default:
                        crcx = crc32(aname + 3, len - 5);
                        crcx = crcx ^ (crcx >> 6) ^ (crcx >> 12);
                        key |= (int64_t)(crcx & 0x3F) << 42;
                        /* fall through */
                case 5:
                case 4:
                        /* fall through */
                case 3:
                        key |= ((int64_t)(aname[2] & 0x1F) << 48);
                        /* fall through */
                case 2:
                        key |= ((int64_t)(aname[1] & 0x1F) << 53) |
                               ((int64_t)(aname[len-2] & 0x1F) << 37);
                        /* fall through */
                case 1:
                        key |= ((int64_t)(aname[0] & 0x1F) << 58) |
                               ((int64_t)(aname[len-1] & 0x1F) << 32);
                        /* fall through */
                case 0:
                        break;
                }
                if ((key & 0xFFFFFFFF00000000LL) == 0)
                        key |= 0x100000000LL;
                if (hammer_debug_general & 0x0400) {
                        kprintf("namekey2: 0x%016llx %*.*s\n",
                                (long long)key, len, len, aname);
                }
                *max_iterationsp = 0x00FFFFFF;
                break;
        case HAMMER_INODE_CAP_DIRHASH_ALG2:
        case HAMMER_INODE_CAP_DIRHASH_ALG3:
        default:
                key = 0;                        /* compiler warning */
                *max_iterationsp = 1;           /* sanity */
                panic("hammer_directory_namekey: bad algorithm %p\n", dip);
                break;
        }
        return(key);
}
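
/*
 * Bit layout sketch for ALGORITHM VERSION 2 with a name of 6 or more
 * characters (this describes the code above; values are not computed
 * output):
 *
 *	bits 58-62	aname[0] & 0x1F
 *	bits 53-57	aname[1] & 0x1F
 *	bits 48-52	aname[2] & 0x1F
 *	bits 42-47	folded crc32 of the middle (aname + 3, len - 5)
 *	bits 37-41	aname[len-2] & 0x1F
 *	bits 32-36	aname[len-1] & 0x1F
 *	bits  1-31	crc32 of the whole name (bit 0 forced to 0)
 */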

/*
 * Convert string after @@ (@@ not included) to TID.  Returns 0 on success,
 * EINVAL on failure.
 *
 * If this function fails *ispfsp, *tidp, and *localizationp will not
 * be modified.
 */
int
hammer_str_to_tid(const char *str, int *ispfsp,
                  hammer_tid_t *tidp, u_int32_t *localizationp)
{
        hammer_tid_t tid;
        u_int32_t localization;
        char *ptr;
        int ispfs;
        int n;

        /*
         * Forms allowed for TID:  "0x%016llx"
         *                         "-1"
         */
        tid = strtouq(str, &ptr, 0);
        n = ptr - str;
        if (n == 2 && str[0] == '-' && str[1] == '1') {
                /* ok */
        } else if (n == 18 && str[0] == '0' && (str[1] | 0x20) == 'x') {
                /* ok */
        } else {
                return(EINVAL);
        }

        /*
         * Forms allowed for PFS:  ":%05d"  (i.e. "...:0" would be illegal).
         */
        str = ptr;
        if (*str == ':') {
                localization = strtoul(str + 1, &ptr, 10) << 16;
                if (ptr - str != 6)
                        return(EINVAL);
                str = ptr;
                ispfs = 1;
        } else {
                localization = *localizationp;
                ispfs = 0;
        }

        /*
         * Any trailing junk invalidates special extension handling.
         */
        if (*str)
                return(EINVAL);
        *tidp = tid;
        *localizationp = localization;
        *ispfsp = ispfs;
        return(0);
}
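
/*
 * Examples of accepted input (the TID values are illustrative):
 *
 *	"0x0000000106a8ba01"		TID only
 *	"0x0000000106a8ba01:00001"	TID localized to PFS #1
 *	"-1"				strtouq yields the maximum TID
 */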
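
/*
 * Set or test the CRC fields covering various on-disk structures.  The
 * volume CRC is computed over two ranges so the vol_crc field itself is
 * excluded from the coverage.
 */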
void
hammer_crc_set_blockmap(hammer_blockmap_t blockmap)
{
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

void
hammer_crc_set_volume(hammer_volume_ondisk_t ondisk)
{
        ondisk->vol_crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
                          crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
}

int
hammer_crc_test_blockmap(hammer_blockmap_t blockmap)
{
        hammer_crc_t crc;

        crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        return (blockmap->entry_crc == crc);
}

int
hammer_crc_test_volume(hammer_volume_ondisk_t ondisk)
{
        hammer_crc_t crc;

        crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
              crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
        return (ondisk->vol_crc == crc);
}

int
hammer_crc_test_btree(hammer_node_ondisk_t ondisk)
{
        hammer_crc_t crc;

        crc = crc32(&ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
        return (ondisk->crc == crc);
}

/*
 * Test or set the leaf->data_crc field.  Deal with any special cases given
 * a generic B-Tree leaf element and its data.
 *
 * NOTE: Inode-data: the atime and mtime fields are not CRCd, allowing them
 *       to be updated in-place.
 */
int
hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
        hammer_crc_t crc;

        if (leaf->data_len == 0) {
                crc = 0;
        } else {
                switch(leaf->base.rec_type) {
                case HAMMER_RECTYPE_INODE:
                        if (leaf->data_len != sizeof(struct hammer_inode_data))
                                return(0);
                        crc = crc32(data, HAMMER_INODE_CRCSIZE);
                        break;
                default:
                        crc = crc32(data, leaf->data_len);
                        break;
                }
        }
        return (leaf->data_crc == crc);
}

void
hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
        if (leaf->data_len == 0) {
                leaf->data_crc = 0;
        } else {
                switch(leaf->base.rec_type) {
                case HAMMER_RECTYPE_INODE:
                        KKASSERT(leaf->data_len ==
                                  sizeof(struct hammer_inode_data));
                        leaf->data_crc = crc32(data, HAMMER_INODE_CRCSIZE);
                        break;
                default:
                        leaf->data_crc = crc32(data, leaf->data_len);
                        break;
                }
        }
}
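
/*
 * kprintf() wrapper whose output is gated by the hammer_debug_debug
 * sysctl variable.
 */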
void
hkprintf(const char *ctl, ...)
{
        __va_list va;

        if (hammer_debug_debug) {
                __va_start(va, ctl);
                kvprintf(ctl, va);
                __va_end(va);
        }
}

/*
 * Return the block size at the specified file offset.
 */
int
hammer_blocksize(int64_t file_offset)
{
        if (file_offset < HAMMER_XDEMARC)
                return(HAMMER_BUFSIZE);
        else
                return(HAMMER_XBUFSIZE);
}

/*
 * Return the demarcation point between the two offsets where
 * the block size changes.
 */
int64_t
hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2)
{
        if (file_offset1 < HAMMER_XDEMARC) {
                if (file_offset2 <= HAMMER_XDEMARC)
                        return(file_offset2);
                return(HAMMER_XDEMARC);
        }
        panic("hammer_blockdemarc: illegal range %lld %lld\n",
              (long long)file_offset1, (long long)file_offset2);
}
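
/*
 * Reduce the filesystem uuid to a 32 bit pseudo device id by crc32ing
 * the uuid.
 */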
udev_t
hammer_fsid_to_udev(uuid_t *uuid)
{
        u_int32_t crc;

        crc = crc32(uuid, sizeof(*uuid));
        return((udev_t)crc);
}