/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_subs.c,v 1.35 2008/10/15 22:38:37 dillon Exp $
 */
/*
 * HAMMER structural locking
 */

#include "hammer.h"
#include <sys/dirent.h>

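/*
 * As the routines below imply, lock->lockval is manipulated atomically:
 * the low bits count lock holders (recursively, for the exclusive case)
 * while two flag bits track state:
 *
 *      HAMMER_LOCKF_EXCLUSIVE - held exclusively (lock->owner is valid)
 *      HAMMER_LOCKF_WANTED    - one or more threads are sleeping on it
 *
 * For example, a lockval of (HAMMER_LOCKF_EXCLUSIVE | 2) is an exclusive
 * lock acquired twice by lock->owner, while a lockval of 3 with no flags
 * set is a lock with three shared holders.
 */
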
void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;

        KKASSERT(lock->refs > 0);
        for (;;) {
                lv = lock->lockval;

                if (lv == 0) {
                        nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                lock->owner = td;
                                break;
                        }
                } else if ((lv & HAMMER_LOCKF_EXCLUSIVE) && lock->owner == td) {
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv))
                                break;
                } else {
                        if (hammer_debug_locks) {
                                kprintf("hammer_lock_ex: held by %p\n",
                                        lock->owner);
                        }
                        nlv = lv | HAMMER_LOCKF_WANTED;
                        ++hammer_contention_count;
                        tsleep_interlock(lock, 0);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                tsleep(lock, PINTERLOCKED, ident, 0);
                                if (hammer_debug_locks)
                                        kprintf("hammer_lock_ex: try again\n");
                        }
                }
        }
}

/*
 * Try to obtain an exclusive lock
 */
int
hammer_lock_ex_try(struct hammer_lock *lock)
{
        thread_t td = curthread;
        int error;
        u_int lv;
        u_int nlv;

        KKASSERT(lock->refs > 0);
        for (;;) {
                lv = lock->lockval;

                if (lv == 0) {
                        nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                lock->owner = td;
                                error = 0;
                                break;
                        }
                } else if ((lv & HAMMER_LOCKF_EXCLUSIVE) && lock->owner == td) {
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                error = 0;
                                break;
                        }
                } else {
                        error = EAGAIN;
                        break;
                }
        }
        return (error);
}

/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;

        KKASSERT(lock->refs > 0);
        for (;;) {
                lv = lock->lockval;

                if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv))
                                break;
                } else if (lock->owner == td) {
                        /*
                         * Disallowed case: the caller already holds the
                         * lock exclusively.  Drop into the kernel debugger
                         * for now; continuing ('cont') resumes with a
                         * recursively held exclusive lock.
                         */
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                Debugger("hammer_lock_sh: already hold ex");
                                break;
                        }
                } else {
                        nlv = lv | HAMMER_LOCKF_WANTED;
                        ++hammer_contention_count;
                        tsleep_interlock(lock, 0);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                tsleep(lock, PINTERLOCKED, "hmrlck", 0);
                        }
                }
        }
}

int
hammer_lock_sh_try(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;
        int error;

        KKASSERT(lock->refs > 0);
        for (;;) {
                lv = lock->lockval;

                if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                error = 0;
                                break;
                        }
                } else if (lock->owner == td) {
                        /*
                         * Disallowed case: the caller already holds the
                         * lock exclusively.  Drop into the kernel debugger
                         * for now; continuing ('cont') resumes with a
                         * recursively held exclusive lock.
                         */
                        nlv = (lv + 1);
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                Debugger("hammer_lock_sh: already hold ex");
                                error = 0;
                                break;
                        }
                } else {
                        error = EAGAIN;
                        break;
                }
        }
        return (error);
}

/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all, or is held exclusively
 * by someone else, this function will panic.
 */
int
hammer_lock_upgrade(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;
        int error;

        for (;;) {
                lv = lock->lockval;

                if ((lv & ~HAMMER_LOCKF_WANTED) == 1) {
                        nlv = lv | HAMMER_LOCKF_EXCLUSIVE;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                lock->owner = td;
                                error = 0;
                                break;
                        }
                } else if (lv & HAMMER_LOCKF_EXCLUSIVE) {
                        if (lock->owner != td)
                                panic("hammer_lock_upgrade: illegal state");
                        error = 0;
                        break;
                } else if ((lv & ~HAMMER_LOCKF_WANTED) == 0) {
                        panic("hammer_lock_upgrade: lock is not held");
                        /* NOT REACHED */
                        error = EDEADLK;
                        break;
                } else {
                        error = EDEADLK;
                        break;
                }
        }
        return (error);
}
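
/*
 * Illustrative caller pattern (a sketch, not code from this file; 'node'
 * is a hypothetical structure embedding a hammer_lock): a caller that can
 * tolerate losing lock continuity on EDEADLK typically falls back to a
 * blocking exclusive acquisition:
 *
 *      if (hammer_lock_upgrade(&node->lock) == EDEADLK) {
 *              hammer_unlock(&node->lock);
 *              hammer_lock_ex(&node->lock);
 *      }
 *
 * The drop-and-reacquire path is not atomic, so any state examined under
 * the shared lock must be revalidated afterwards.
 */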

/*
 * Downgrade an exclusively held lock to a shared lock.
 */
void
hammer_lock_downgrade(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;

        KKASSERT((lock->lockval & ~HAMMER_LOCKF_WANTED) ==
                 (HAMMER_LOCKF_EXCLUSIVE | 1));
        KKASSERT(lock->owner == td);

        /*
         * NOTE: Must clear owner before releasing exclusivity
         */
        lock->owner = NULL;

        for (;;) {
                lv = lock->lockval;
                nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
                if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                        if (lv & HAMMER_LOCKF_WANTED)
                                wakeup(lock);
                        break;
                }
        }
}

void
hammer_unlock(struct hammer_lock *lock)
{
        thread_t td = curthread;
        u_int lv;
        u_int nlv;

        lv = lock->lockval;
        KKASSERT(lv != 0);
        if (lv & HAMMER_LOCKF_EXCLUSIVE)
                KKASSERT(lock->owner == td);

        for (;;) {
                lv = lock->lockval;
                nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
                if (nlv > 1) {
                        nlv = lv - 1;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv))
                                break;
                } else if (nlv == 1) {
                        nlv = 0;
                        if (lv & HAMMER_LOCKF_EXCLUSIVE)
                                lock->owner = NULL;
                        if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
                                if (lv & HAMMER_LOCKF_WANTED)
                                        wakeup(lock);
                                break;
                        }
                } else {
                        panic("hammer_unlock: lock %p is not held", lock);
                }
        }
}

/*
 * The calling thread must be holding a shared or exclusive lock.
 * Returns < 0 if the lock is held shared, and > 0 if it is held exclusively.
 */
int
hammer_lock_status(struct hammer_lock *lock)
{
        u_int lv = lock->lockval;

        if (lv & HAMMER_LOCKF_EXCLUSIVE)
                return(1);
        else if (lv)
                return(-1);
        panic("hammer_lock_status: lock must be held: %p", lock);
}

void
hammer_ref(struct hammer_lock *lock)
{
        KKASSERT(lock->refs >= 0);
        atomic_add_int(&lock->refs, 1);
}

void
hammer_unref(struct hammer_lock *lock)
{
        KKASSERT(lock->refs > 0);
        atomic_subtract_int(&lock->refs, 1);
}

/*
 * The sync_lock must be held when doing any modifying operations on
 * meta-data.  It does not have to be held when modifying non-meta-data
 * buffers (backend or frontend).
 *
 * The flusher holds the lock exclusively while all other consumers hold it
 * shared.  All modifying operations made while holding the lock are atomic
 * in that they will be made part of the same flush group.
 *
 * Due to the atomicity requirement, deadlock recovery code CANNOT release
 * the sync lock, nor can we give pending exclusive sync locks priority over
 * a shared sync lock as this could lead to a 3-way deadlock.
 */
void
hammer_sync_lock_ex(hammer_transaction_t trans)
{
        ++trans->sync_lock_refs;
        hammer_lock_ex(&trans->hmp->sync_lock);
}

void
hammer_sync_lock_sh(hammer_transaction_t trans)
{
        ++trans->sync_lock_refs;
        hammer_lock_sh(&trans->hmp->sync_lock);
}

int
hammer_sync_lock_sh_try(hammer_transaction_t trans)
{
        int error;

        ++trans->sync_lock_refs;
        if ((error = hammer_lock_sh_try(&trans->hmp->sync_lock)) != 0)
                --trans->sync_lock_refs;
        return (error);
}

void
hammer_sync_unlock(hammer_transaction_t trans)
{
        --trans->sync_lock_refs;
        hammer_unlock(&trans->hmp->sync_lock);
}
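
/*
 * A minimal usage sketch (hypothetical caller): meta-data modifications
 * are bracketed by a shared sync lock so that they become part of a
 * single flush group:
 *
 *      hammer_sync_lock_sh(trans);
 *      ... modify B-Tree / blockmap / undo meta-data ...
 *      hammer_sync_unlock(trans);
 *
 * The flusher brackets its own work with hammer_sync_lock_ex() to
 * exclude all such modifications while it demarks a flush group.
 */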

/*
 * Misc
 */
u_int32_t
hammer_to_unix_xid(uuid_t *uuid)
{
        return(*(u_int32_t *)&uuid->node[2]);
}

void
hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid)
{
        bzero(uuid, sizeof(*uuid));
        *(u_int32_t *)&uuid->node[2] = guid;
}

void
hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts)
{
        ts->tv_sec = (unsigned long)(xtime / 1000000);
        ts->tv_nsec = (unsigned int)(xtime % 1000000) * 1000L;
}

u_int64_t
hammer_timespec_to_time(struct timespec *ts)
{
        u_int64_t xtime;

        xtime = (unsigned)(ts->tv_nsec / 1000) +
                (unsigned long)ts->tv_sec * 1000000ULL;
        return(xtime);
}
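
/*
 * Worked example for the conversions above (HAMMER stores times as a
 * 64 bit count of microseconds): an xtime of 1234567890123456 splits
 * into tv_sec = 1234567890 and tv_nsec = 123456 * 1000 = 123456000.
 * hammer_timespec_to_time() inverts this exactly as long as tv_nsec is
 * a multiple of 1000; sub-microsecond precision is dropped.
 */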

/*
 * Convert a HAMMER filesystem object type to a vnode type
 */
enum vtype
hammer_get_vnode_type(u_int8_t obj_type)
{
        switch(obj_type) {
        case HAMMER_OBJTYPE_DIRECTORY:
                return(VDIR);
        case HAMMER_OBJTYPE_REGFILE:
                return(VREG);
        case HAMMER_OBJTYPE_DBFILE:
                return(VDATABASE);
        case HAMMER_OBJTYPE_FIFO:
                return(VFIFO);
        case HAMMER_OBJTYPE_SOCKET:
                return(VSOCK);
        case HAMMER_OBJTYPE_CDEV:
                return(VCHR);
        case HAMMER_OBJTYPE_BDEV:
                return(VBLK);
        case HAMMER_OBJTYPE_SOFTLINK:
                return(VLNK);
        default:
                return(VBAD);
        }
        /* not reached */
}

int
hammer_get_dtype(u_int8_t obj_type)
{
        switch(obj_type) {
        case HAMMER_OBJTYPE_DIRECTORY:
                return(DT_DIR);
        case HAMMER_OBJTYPE_REGFILE:
                return(DT_REG);
        case HAMMER_OBJTYPE_DBFILE:
                return(DT_DBF);
        case HAMMER_OBJTYPE_FIFO:
                return(DT_FIFO);
        case HAMMER_OBJTYPE_SOCKET:
                return(DT_SOCK);
        case HAMMER_OBJTYPE_CDEV:
                return(DT_CHR);
        case HAMMER_OBJTYPE_BDEV:
                return(DT_BLK);
        case HAMMER_OBJTYPE_SOFTLINK:
                return(DT_LNK);
        default:
                return(DT_UNKNOWN);
        }
        /* not reached */
}

u_int8_t
hammer_get_obj_type(enum vtype vtype)
{
        switch(vtype) {
        case VDIR:
                return(HAMMER_OBJTYPE_DIRECTORY);
        case VREG:
                return(HAMMER_OBJTYPE_REGFILE);
        case VDATABASE:
                return(HAMMER_OBJTYPE_DBFILE);
        case VFIFO:
                return(HAMMER_OBJTYPE_FIFO);
        case VSOCK:
                return(HAMMER_OBJTYPE_SOCKET);
        case VCHR:
                return(HAMMER_OBJTYPE_CDEV);
        case VBLK:
                return(HAMMER_OBJTYPE_BDEV);
        case VLNK:
                return(HAMMER_OBJTYPE_SOFTLINK);
        default:
                return(HAMMER_OBJTYPE_UNKNOWN);
        }
        /* not reached */
}

/*
 * Return flags for hammer_delete_at_cursor()
 */
int
hammer_nohistory(hammer_inode_t ip)
{
        if (ip->hmp->hflags & HMNT_NOHISTORY)
                return(HAMMER_DELETE_DESTROY);
        if (ip->ino_data.uflags & (SF_NOHISTORY|UF_NOHISTORY))
                return(HAMMER_DELETE_DESTROY);
        return(0);
}

/*
 * ALGORITHM VERSION 1 (HAMMER_INODE_CAP_DIRHASH_ALG0):
 *      Return a namekey hash.  The 64 bit namekey hash consists of a 32 bit
 *      crc in the upper 32 bits and 0 in the lower 32 bits.  The caller will
 *      use the low 32 bits to generate a unique key and will scan all
 *      entries with the same upper 32 bits when issuing a lookup.
 *
 *      0hhhhhhhhhhhhhhh hhhhhhhhhhhhhhhh 0000000000000000 0000000000000000
 *
 * ALGORITHM VERSION 2 (HAMMER_INODE_CAP_DIRHASH_ALG1):
 *
 *      The 64 bit hash key is generated from the following components.  The
 *      first three characters are encoded as 5-bit quantities, the middle
 *      N characters are hashed into a 6 bit quantity, and the last two
 *      characters are encoded as 5-bit quantities.  A 32 bit hash of the
 *      entire filename is encoded in the low 32 bits.  Bit 0 is set to
 *      0 to guarantee us a 2^24 iteration space.
 *
 *      0aaaaabbbbbccccc mmmmmmyyyyyzzzzz hhhhhhhhhhhhhhhh hhhhhhhhhhhhhhh0
 *
 *      This gives us a domain sort for the first three characters, the last
 *      two characters, and breaks the middle space into 64 random domains.
 *      The domain sort folds the upper case, lower case, digit, and
 *      punctuation character spaces together, the idea being that filenames
 *      tend not to be a mix of those domains.
 *
 *      The 64 random domains act as a sub-sort for the middle characters
 *      but may cause a random seek.  If the filesystem is being accessed
 *      in sorted order we should tend to get very good linearity for most
 *      filenames and devolve into more random seeks otherwise.
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
int64_t
hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
                         u_int32_t *max_iterationsp)
{
        int64_t key;
        int32_t crcx;
        const char *aname = name;

        switch (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIRHASH_MASK) {
        case HAMMER_INODE_CAP_DIRHASH_ALG0:
                key = (int64_t)(crc32(aname, len) & 0x7FFFFFFF) << 32;
                if (key == 0)
                        key |= 0x100000000LL;
                *max_iterationsp = 0xFFFFFFFFU;
                break;
        case HAMMER_INODE_CAP_DIRHASH_ALG1:
                key = (u_int32_t)crc32(aname, len) & 0xFFFFFFFEU;

                switch(len) {
                default:
                        crcx = crc32(aname + 3, len - 5);
                        crcx = crcx ^ (crcx >> 6) ^ (crcx >> 12);
                        key |= (int64_t)(crcx & 0x3F) << 42;
                        /* fall through */
                case 5:
                case 4:
                        /* fall through */
                case 3:
                        key |= ((int64_t)(aname[2] & 0x1F) << 48);
                        /* fall through */
                case 2:
                        key |= ((int64_t)(aname[1] & 0x1F) << 53) |
                               ((int64_t)(aname[len-2] & 0x1F) << 37);
                        /* fall through */
                case 1:
                        key |= ((int64_t)(aname[0] & 0x1F) << 58) |
                               ((int64_t)(aname[len-1] & 0x1F) << 32);
                        /* fall through */
                case 0:
                        break;
                }
                if ((key & 0xFFFFFFFF00000000LL) == 0)
                        key |= 0x100000000LL;
                if (hammer_debug_general & 0x0400) {
                        kprintf("namekey2: 0x%016llx %*.*s\n",
                                (long long)key, len, len, aname);
                }
                *max_iterationsp = 0x00FFFFFF;
                break;
        case HAMMER_INODE_CAP_DIRHASH_ALG2:
        case HAMMER_INODE_CAP_DIRHASH_ALG3:
        default:
                key = 0;                        /* compiler warning */
                *max_iterationsp = 1;           /* sanity */
                panic("hammer_directory_namekey: bad algorithm %p\n", dip);
                break;
        }
        return(key);
}
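
/*
 * Worked example for ALGORITHM VERSION 2 (illustration only; the crc32
 * values are not shown).  For the 11-character name "example.txt":
 *
 *      aaaaa bbbbb ccccc  <- 'e', 'x', 'a'      (aname[0..2],  bits 62-48)
 *      mmmmmm             <- crc32("mple.t")    (aname[3..8],  bits 47-42)
 *      yyyyy zzzzz        <- 'x', 't'           (aname[9..10], bits 41-32)
 *      hhhh...h           <- crc32("example.txt") with bit 0 cleared
 */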

/*
 * Convert the string after @@ (the @@ is not included) to a TID.  Returns
 * 0 on success, EINVAL on failure.
 *
 * If this function fails, *ispfsp, *tidp, and *localizationp will not
 * be modified.
 */
int
hammer_str_to_tid(const char *str, int *ispfsp,
                  hammer_tid_t *tidp, u_int32_t *localizationp)
{
        hammer_tid_t tid;
        u_int32_t localization;
        char *ptr;
        int ispfs;
        int n;

        /*
         * Forms allowed for TID:  "0x%016llx"
         *                         "-1"
         */
        tid = strtouq(str, &ptr, 0);
        n = ptr - str;
        if (n == 2 && str[0] == '-' && str[1] == '1') {
                /* ok */
        } else if (n == 18 && str[0] == '0' && (str[1] | 0x20) == 'x') {
                /* ok */
        } else {
                return(EINVAL);
        }

        /*
         * Forms allowed for PFS:  ":%05d"  (i.e. "...:0" would be illegal).
         */
        str = ptr;
        if (*str == ':') {
                localization = strtoul(str + 1, &ptr, 10) << 16;
                if (ptr - str != 6)
                        return(EINVAL);
                str = ptr;
                ispfs = 1;
        } else {
                localization = *localizationp;
                ispfs = 0;
        }

        /*
         * Any trailing junk invalidates special extension handling.
         */
        if (*str)
                return(EINVAL);
        *tidp = tid;
        *localizationp = localization;
        *ispfsp = ispfs;
        return(0);
}
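
/*
 * Examples of strings accepted by hammer_str_to_tid() (the TID values
 * here are arbitrary illustrations):
 *
 *      "0x0000000106a2d5c0"            - 16-digit TID, no PFS given
 *      "-1"                            - the maximum TID
 *      "0x0000000106a2d5c0:00005"      - same TID, PFS #5 localization
 *
 * Short forms such as "0x1" or ":5", and any trailing junk, are
 * rejected with EINVAL.
 */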

void
hammer_crc_set_blockmap(hammer_blockmap_t blockmap)
{
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

void
hammer_crc_set_volume(hammer_volume_ondisk_t ondisk)
{
        ondisk->vol_crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
                          crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
}

int
hammer_crc_test_blockmap(hammer_blockmap_t blockmap)
{
        hammer_crc_t crc;

        crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        return (blockmap->entry_crc == crc);
}

int
hammer_crc_test_volume(hammer_volume_ondisk_t ondisk)
{
        hammer_crc_t crc;

        crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
              crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
        return (ondisk->vol_crc == crc);
}

int
hammer_crc_test_btree(hammer_node_ondisk_t ondisk)
{
        hammer_crc_t crc;

        crc = crc32(&ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
        return (ondisk->crc == crc);
}

/*
 * Test or set the leaf->data_crc field.  Deal with any special cases given
 * a generic B-Tree leaf element and its data.
 *
 * NOTE: Inode-data: the atime and mtime fields are not CRCd, allowing them
 *       to be updated in-place.
 */
int
hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
        hammer_crc_t crc;

        if (leaf->data_len == 0) {
                crc = 0;
        } else {
                switch(leaf->base.rec_type) {
                case HAMMER_RECTYPE_INODE:
                        if (leaf->data_len != sizeof(struct hammer_inode_data))
                                return(0);
                        crc = crc32(data, HAMMER_INODE_CRCSIZE);
                        break;
                default:
                        crc = crc32(data, leaf->data_len);
                        break;
                }
        }
        return (leaf->data_crc == crc);
}

void
hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
        if (leaf->data_len == 0) {
                leaf->data_crc = 0;
        } else {
                switch(leaf->base.rec_type) {
                case HAMMER_RECTYPE_INODE:
                        KKASSERT(leaf->data_len ==
                                  sizeof(struct hammer_inode_data));
                        leaf->data_crc = crc32(data, HAMMER_INODE_CRCSIZE);
                        break;
                default:
                        leaf->data_crc = crc32(data, leaf->data_len);
                        break;
                }
        }
}
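
/*
 * Consequence of the NOTE above (a sketch; 'ip' and 'trans' stand for
 * the usual inode/transaction pair): because HAMMER_INODE_CRCSIZE
 * excludes the atime and mtime fields, an in-place update such as
 *
 *      ip->ino_data.atime = trans->time;
 *
 * does not require the leaf's data_crc to be recomputed, and
 * hammer_crc_test_leaf() continues to pass.
 */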

void
hkprintf(const char *ctl, ...)
{
        __va_list va;

        if (hammer_debug_debug) {
                __va_start(va, ctl);
                kvprintf(ctl, va);
                __va_end(va);
        }
}

/*
 * Return the block size at the specified file offset.
 */
int
hammer_blocksize(int64_t file_offset)
{
        if (file_offset < HAMMER_XDEMARC)
                return(HAMMER_BUFSIZE);
        else
                return(HAMMER_XBUFSIZE);
}

/*
 * Return the demarcation point between the two offsets where
 * the block size changes.
 */
int64_t
hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2)
{
        if (file_offset1 < HAMMER_XDEMARC) {
                if (file_offset2 <= HAMMER_XDEMARC)
                        return(file_offset2);
                return(HAMMER_XDEMARC);
        }
        panic("hammer_blockdemarc: illegal range %lld %lld\n",
              (long long)file_offset1, (long long)file_offset2);
}
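
/*
 * Usage sketch for the two helpers above (a hypothetical caller; the
 * constants come from the HAMMER headers): offsets below HAMMER_XDEMARC
 * use the small HAMMER_BUFSIZE blocks and offsets at or above it use the
 * large HAMMER_XBUFSIZE blocks, so a caller clips each I/O to a single
 * block-size regime:
 *
 *      if (offset < HAMMER_XDEMARC)
 *              end = hammer_blockdemarc(offset, offset + resid);
 *      else
 *              end = offset + resid;
 *      (issue the I/O for [offset, end), then advance offset to end)
 *
 * Note that hammer_blockdemarc() panics if file_offset1 is already past
 * the demarc; the caller is expected to test first, as above.
 */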

udev_t
hammer_fsid_to_udev(uuid_t *uuid)
{
        u_int32_t crc;

        crc = crc32(uuid, sizeof(*uuid));
        return((udev_t)crc);
}
