/*
 * Copyright (c) 2007-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER structural locking
 */

#include <sys/dirent.h>

#include "hammer.h"
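
/*
 * Sketch of the lockval encoding, as inferred from the way the routines
 * below use it (the authoritative definitions live in hammer.h):
 * lock->lockval packs the hold count and two flag bits into one word so
 * a single atomic_cmpset_int() can transition count and state together.
 *
 *	HAMMER_LOCKF_EXCLUSIVE	- held exclusively; the count is the
 *				  recursion depth of the owning thread
 *	HAMMER_LOCKF_WANTED	- one or more blocked threads want a
 *				  wakeup when lockval changes
 *
 * Without HAMMER_LOCKF_EXCLUSIVE the count is the number of shared
 * holders.  A lockval of 0 means the lock is not held at all.
 */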

void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else {
			if (hammer_debug_locks) {
				kprintf("hammer_lock_ex: held by %p\n",
					lock->lowner);
			}
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
				if (hammer_debug_locks)
					kprintf("hammer_lock_ex: try again\n");
			}
		}
	}
}
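
/*
 * Typical caller pattern (an illustrative sketch; hammer_lock_ex() is
 * assumed to be the wrapper supplying a default ident string):
 *
 *	hammer_ref(&node->lock);	- hold a structural ref first
 *	hammer_lock_ex(&node->lock);	- blocking exclusive acquisition
 *	...modify the structure...
 *	hammer_unlock(&node->lock);
 *	hammer_rel(&node->lock);
 */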

/*
 * Try to obtain an exclusive lock
 */
int
hammer_lock_ex_try(struct hammer_lock *lock)
{
	thread_t td = curthread;
	int error;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				error = 0;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
	}
	return (error);
}
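
/*
 * Illustrative non-blocking pattern (sketch): callers that must not
 * sleep check for EAGAIN and defer the operation:
 *
 *	if (hammer_lock_ex_try(&node->lock) == 0) {
 *		...modify the structure...
 *		hammer_unlock(&node->lock);
 *	} else {
 *		...requeue or retry later...
 *	}
 */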

/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.  For example, if a pending exclusive
 * request were given priority, a thread already holding the lock shared
 * would block behind that waiter when re-acquiring the lock shared, while
 * the waiter in turn could not proceed until the shared holder released.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	const char *ident = "hmrlck";

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case, drop into the kernel debugger
			 * for now.  Continuing ('cont') proceeds with an
			 * exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
				break;
			}
		} else {
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
		}
	}
}

int
hammer_lock_sh_try(struct hammer_lock *lock)
{
	thread_t td = curthread;
	int error;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				error = 0;
				break;
			}
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case, drop into the kernel debugger
			 * for now.  Continuing ('cont') proceeds with an
			 * exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
	}
	return (error);
}

/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all or held exclusively
 * by someone else, this function will panic.
 */
int
hammer_lock_upgrade(struct hammer_lock *lock, int shcount)
{
	thread_t td = curthread;
	int error;
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->lockval;

		if ((lv & ~HAMMER_LOCKF_WANTED) == shcount) {
			nlv = lv | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				error = 0;
				break;
			}
		} else if (lv & HAMMER_LOCKF_EXCLUSIVE) {
			if (lock->lowner != curthread)
				panic("hammer_lock_upgrade: illegal state");
			error = 0;
			break;
		} else if ((lv & ~HAMMER_LOCKF_WANTED) == 0) {
			panic("hammer_lock_upgrade: lock is not held");
			/* NOT REACHED */
			error = EDEADLK;
			break;
		} else {
			error = EDEADLK;
			break;
		}
	}
	return (error);
}
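
/*
 * Illustrative caller pattern (sketch): attempt an in-place upgrade and
 * fall back to a full lock cycle on EDEADLK, here assuming the caller
 * holds exactly one shared count:
 *
 *	if (hammer_lock_upgrade(&node->lock, 1) == EDEADLK) {
 *		hammer_unlock(&node->lock);
 *		hammer_lock_ex(&node->lock);
 *		...re-validate, state may have changed while unlocked...
 *	}
 */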

/*
 * Downgrade an exclusively held lock to a shared lock.
 */
void
hammer_lock_downgrade(struct hammer_lock *lock, int shcount)
{
	thread_t td __debugvar = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT((lock->lockval & ~HAMMER_LOCKF_WANTED) ==
		 (HAMMER_LOCKF_EXCLUSIVE | shcount));
	KKASSERT(lock->lowner == td);

	/*
	 * NOTE: Must clear owner before releasing exclusivity
	 */
	lock->lowner = NULL;

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
			if (lv & HAMMER_LOCKF_WANTED)
				wakeup(&lock->lockval);
			break;
		}
	}
}

void
hammer_unlock(struct hammer_lock *lock)
{
	thread_t td __debugvar = curthread;
	u_int lv;
	u_int nlv;

	lv = lock->lockval;
	KKASSERT(lv != 0);
	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		KKASSERT(lock->lowner == td);

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (nlv > 1) {
			nlv = lv - 1;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (nlv == 1) {
			nlv = 0;
			if (lv & HAMMER_LOCKF_EXCLUSIVE)
				lock->lowner = NULL;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (lv & HAMMER_LOCKF_WANTED)
					wakeup(&lock->lockval);
				break;
			}
		} else {
			panic("hammer_unlock: lock %p is not held", lock);
		}
	}
}

/*
 * The calling thread must be holding a shared or exclusive lock.
 * Returns < 0 if lock is held shared, and > 0 if held exclusively.
 */
int
hammer_lock_status(struct hammer_lock *lock)
{
	u_int lv = lock->lockval;

	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		return(1);
	else if (lv)
		return(-1);
	panic("hammer_lock_status: lock must be held: %p", lock);
}

/*
 * Bump the ref count for a lock (not the excl/share count, but a separate
 * structural reference count).  The CHECK flag will be set on a 0->1
 * transition.
 *
 * This function does nothing to serialize races between multiple threads.
 * The caller can interlock it later on to deal with serialization.
 *
 * MPSAFE
 */
void
hammer_ref(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if ((lv & ~HAMMER_REFS_FLAGS) == 0) {
			nlv = (lv + 1) | HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		} else {
			nlv = (lv + 1);
			KKASSERT((int)nlv > 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		}
	}
}

/*
 * Drop the ref count for a lock (not the excl/share count, but a separate
 * structural reference count).  The CHECK flag will be cleared on a 1->0
 * transition.
 *
 * This function does nothing to serialize races between multiple threads.
 *
 * MPSAFE
 */
void
hammer_rel(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			nlv = (lv - 1) & ~HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		} else {
			KKASSERT((int)lv > 0);
			nlv = (lv - 1);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		}
	}
}

/*
 * The hammer_*_interlock() and hammer_*_interlock_done() functions are
 * more sophisticated versions which handle MP transition races and block
 * new references from being acquired during the transition.
 *
 * hammer_ref_interlock() bumps the ref-count and conditionally acquires
 * the interlock for 0->1 transitions or if the CHECK is found to be set.
 *
 * This case will return TRUE, the interlock will be held, and the CHECK
 * bit also set.  Other threads attempting to ref will see the CHECK bit
 * and block until we clean up.
 *
 * FALSE is returned for transitions other than 0->1 when the CHECK bit
 * is not found to be set, or if the function loses the race with another
 * thread.
 *
 * TRUE is only returned to one thread and the others will block.
 * Effectively a TRUE indicator means 'someone transitioned 0->1
 * and you are the first guy to successfully lock it after that, so you
 * need to check'.  Due to races the ref-count may be greater than 1 upon
 * return.
 *
 * MPSAFE
 */
int
hammer_ref_interlock(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	/*
	 * Integrated reference count bump, lock, and check, with hot-path.
	 *
	 * (a) Return 1	(+LOCKED, +CHECK)	0->1 transition
	 * (b) Return 0	(-LOCKED, -CHECK)	N->N+1 transition
	 * (c) Break out (+CHECK)		Check condition and Cannot lock
	 * (d) Return 1 (+LOCKED, +CHECK)	Successfully locked
	 */
	for (;;) {
		lv = lock->refs;
		if (lv == 0) {
			nlv = 1 | HAMMER_REFS_LOCKED | HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else {
			nlv = (lv + 1);
			if ((lv & ~HAMMER_REFS_FLAGS) == 0)
				nlv |= HAMMER_REFS_CHECK;
			if ((nlv & HAMMER_REFS_CHECK) == 0) {
				if (atomic_cmpset_int(&lock->refs, lv, nlv))
					return(0);
			} else if (lv & HAMMER_REFS_LOCKED) {
				/* CHECK also set here */
				if (atomic_cmpset_int(&lock->refs, lv, nlv))
					break;
			} else {
				/* CHECK also set here */
				nlv |= HAMMER_REFS_LOCKED;
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					lock->rowner = curthread;
					return(1);
				}
			}
		}
	}

	/*
	 * Deferred check condition because we were unable to acquire the
	 * lock.  We must block until the check condition is cleared due
	 * to a race with another thread, or we are able to acquire the
	 * lock.
	 *
	 * (a) Return 0	(-CHECK)		Another thread handled it
	 * (b) Return 1 (+LOCKED, +CHECK)	We handled it.
	 */
	for (;;) {
		lv = lock->refs;
		if ((lv & HAMMER_REFS_CHECK) == 0)
			return(0);
		if (lv & HAMMER_REFS_LOCKED) {
			tsleep_interlock(&lock->refs, 0);
			nlv = (lv | HAMMER_REFS_WANTED);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				tsleep(&lock->refs, PINTERLOCKED, "h1lk", 0);
		} else {
			/* CHECK also set here */
			nlv = lv | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		}
	}
}
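
/*
 * Illustrative load-path sketch (an assumed caller; load_object() is a
 * hypothetical stand-in for whatever populates the structure).  Exactly
 * one thread sees TRUE and performs the load while other ref'ers block
 * on the CHECK bit:
 *
 *	if (hammer_ref_interlock(&obj->lock)) {
 *		if (load_object(obj) == 0)
 *			hammer_ref_interlock_done(&obj->lock);
 *		else
 *			...failure path, see hammer_rel_interlock()...
 *	}
 */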

/*
 * This is the same as hammer_ref_interlock() but asserts that the
 * 0->1 transition is always true, thus the lock must have no references
 * on entry or have CHECK set, and will have one reference with the
 * interlock held on return.  It must also not be interlocked on entry
 * by anyone.
 *
 * NOTE that CHECK will never be found set when the ref-count is 0.
 *
 * TRUE is always returned to match the API for hammer_ref_interlock().
 * This function returns with one ref, the lock held, and the CHECK bit set.
 */
int
hammer_ref_interlock_true(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;

		if (lv) {
			panic("hammer_ref_interlock_true: bad lock %p %08x",
			      lock, lock->refs);
		}
		nlv = 1 | HAMMER_REFS_LOCKED | HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			lock->rowner = curthread;
			return (1);
		}
	}
}

/*
 * Unlock the interlock acquired by hammer_ref_interlock() and clear the
 * CHECK flag.  The ref-count remains unchanged.
 *
 * This routine is called in the load path when the load succeeds.
 */
void
hammer_ref_interlock_done(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		nlv = lv & ~HAMMER_REFS_FLAGS;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			break;
		}
	}
}

/*
 * hammer_rel_interlock() works a bit differently in that it must
 * acquire the lock in tandem with a 1->0 transition.  CHECK is
 * not used.
 *
 * TRUE is returned on 1->0 transitions with the lock held on return
 * and FALSE is returned otherwise with the lock not held.
 *
 * It is important to note that the refs are not stable and may
 * increase while we hold the lock, the TRUE indication only means
 * that we transitioned 1->0, not necessarily that we stayed at 0.
 *
 * Another thread bumping refs while we hold the lock will set CHECK,
 * causing one of the competing hammer_ref_interlock() calls to
 * return TRUE after we release our lock.
 *
 * MPSAFE
 */
int
hammer_rel_interlock(struct hammer_lock *lock, int locked)
{
	u_int lv;
	u_int nlv;

	/*
	 * In locked mode (failure/unload path) we release the
	 * ref-count but leave it locked.
	 */
	if (locked) {
		hammer_rel(lock);
		return(1);
	}

	/*
	 * Integrated reference count drop with LOCKED, plus the hot-path
	 * returns.
	 */
	for (;;) {
		lv = lock->refs;

		if (lv == 1) {
			nlv = 0 | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			if ((lv & HAMMER_REFS_LOCKED) == 0) {
				nlv = (lv - 1) | HAMMER_REFS_LOCKED;
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					lock->rowner = curthread;
					return(1);
				}
			} else {
				nlv = lv | HAMMER_REFS_WANTED;
				tsleep_interlock(&lock->refs, 0);
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					tsleep(&lock->refs, PINTERLOCKED,
					       "h0lk", 0);
				}
			}
		} else {
			nlv = (lv - 1);
			KKASSERT((int)nlv >= 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return(0);
		}
	}
}
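
/*
 * Illustrative unload-path sketch (an assumed caller; unload_object()
 * is a hypothetical stand-in for the teardown work):
 *
 *	if (hammer_rel_interlock(&obj->lock, 0)) {
 *		unload_object(obj);
 *		hammer_rel_interlock_done(&obj->lock, 0);
 *	}
 *
 * Only the thread that performs the final 1->0 drop tears the object
 * down; a concurrent hammer_ref_interlock() sets CHECK and re-validates
 * once the interlock is released.
 */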

/*
 * Unlock the interlock acquired by hammer_rel_interlock().
 *
 * If orig_locked is non-zero the interlock was originally held prior to
 * the hammer_rel_interlock() call and passed through to us.  In this
 * case we want to retain the CHECK error state if not transitioning
 * to 0.
 *
 * The code is the same either way so we do not have to conditionalize
 * on orig_locked.
 */
void
hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked __unused)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		nlv = lv & ~(HAMMER_REFS_LOCKED | HAMMER_REFS_WANTED);
		if ((lv & ~HAMMER_REFS_FLAGS) == 0)
			nlv &= ~HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			break;
		}
	}
}

/*
 * Acquire the interlock on lock->refs.
 *
 * Return TRUE if CHECK is currently set.  Note that CHECK will not
 * be set if the reference count is 0, but can get set if this function
 * is preceded by, say, hammer_ref(), or through races with other
 * threads.  The return value allows the caller to use the same logic
 * as hammer_ref_interlock().
 *
 * MPSAFE
 */
int
hammer_get_interlock(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if (lv & HAMMER_REFS_LOCKED) {
			nlv = lv | HAMMER_REFS_WANTED;
			tsleep_interlock(&lock->refs, 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				tsleep(&lock->refs, PINTERLOCKED, "hilk", 0);
		} else {
			nlv = (lv | HAMMER_REFS_LOCKED);
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return((lv & HAMMER_REFS_CHECK) ? 1 : 0);
			}
		}
	}
}

/*
 * Attempt to acquire the interlock and expect 0 refs.  Used by the buffer
 * cache callback code to disassociate or lock the bufs related to HAMMER
 * structures.
 *
 * During teardown the related bp will be acquired by hammer_io_release()
 * which interlocks our test.
 *
 * Returns non-zero on success, zero on failure.
 */
int
hammer_try_interlock_norefs(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if (lv == 0) {
			nlv = lv | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else {
			return(0);
		}
	}
}

/*
 * Release the interlock on lock->refs.  This function will set
 * CHECK if the ref count is non-zero and error is non-zero, and clear
 * CHECK otherwise.
 *
 * MPSAFE
 */
void
hammer_put_interlock(struct hammer_lock *lock, int error)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		KKASSERT(lv & HAMMER_REFS_LOCKED);
		nlv = lv & ~(HAMMER_REFS_LOCKED | HAMMER_REFS_WANTED);

		if ((nlv & ~HAMMER_REFS_FLAGS) == 0 || error == 0)
			nlv &= ~HAMMER_REFS_CHECK;
		else
			nlv |= HAMMER_REFS_CHECK;

		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			return;
		}
	}
}

/*
 * The sync_lock must be held when doing any modifying operations on
 * meta-data.  It does not have to be held when modifying non-meta-data
 * buffers (backend or frontend).
 *
 * The flusher holds the lock exclusively while all other consumers hold it
 * shared.  All modifying operations made while holding the lock are atomic
 * in that they will be made part of the same flush group.
 *
 * Due to the atomicity requirement deadlock recovery code CANNOT release
 * the sync lock, nor can we give pending exclusive sync locks priority
 * over a shared sync lock as this could lead to a 3-way deadlock.
 */
void
hammer_sync_lock_ex(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_ex(&trans->hmp->sync_lock);
}

void
hammer_sync_lock_sh(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_sh(&trans->hmp->sync_lock);
}

int
hammer_sync_lock_sh_try(hammer_transaction_t trans)
{
	int error;

	++trans->sync_lock_refs;
	if ((error = hammer_lock_sh_try(&trans->hmp->sync_lock)) != 0)
		--trans->sync_lock_refs;
	return (error);
}

void
hammer_sync_unlock(hammer_transaction_t trans)
{
	--trans->sync_lock_refs;
	hammer_unlock(&trans->hmp->sync_lock);
}
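
/*
 * Illustrative sketch: a frontend operation wraps its meta-data
 * modifications in a shared sync_lock so the whole operation lands in
 * a single flush group, while the flusher serializes against everyone
 * by taking the same lock exclusively:
 *
 *	hammer_sync_lock_sh(trans);
 *	...modify B-Tree / meta-data buffers...
 *	hammer_sync_unlock(trans);
 */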

u_int32_t
hammer_to_unix_xid(uuid_t *uuid)
{
	return(*(u_int32_t *)&uuid->node[2]);
}

void
hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid)
{
	bzero(uuid, sizeof(*uuid));
	*(u_int32_t *)&uuid->node[2] = guid;
}

void
hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts)
{
	ts->tv_sec = (unsigned long)(xtime / 1000000);
	ts->tv_nsec = (unsigned int)(xtime % 1000000) * 1000L;
}

u_int64_t
hammer_timespec_to_time(struct timespec *ts)
{
	u_int64_t xtime;

	xtime = (unsigned)(ts->tv_nsec / 1000) +
		(unsigned long)ts->tv_sec * 1000000ULL;
	return(xtime);
}
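
/*
 * Worked example of the on-media time format (64-bit microseconds since
 * the epoch): xtime = 1700000000123456 converts to ts.tv_sec = 1700000000
 * and ts.tv_nsec = 123456000, and converts back to the same xtime.
 * Sub-microsecond precision is not stored.
 */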

/*
 * Convert a HAMMER filesystem object type to a vnode type
 */
enum vtype
hammer_get_vnode_type(u_int8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(VDIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(VREG);
	case HAMMER_OBJTYPE_DBFILE:
		return(VDATABASE);
	case HAMMER_OBJTYPE_FIFO:
		return(VFIFO);
	case HAMMER_OBJTYPE_SOCKET:
		return(VSOCK);
	case HAMMER_OBJTYPE_CDEV:
		return(VCHR);
	case HAMMER_OBJTYPE_BDEV:
		return(VBLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(VLNK);
	default:
		return(VBAD);
	}
}

int
hammer_get_dtype(u_int8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(DT_DIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(DT_REG);
	case HAMMER_OBJTYPE_DBFILE:
		return(DT_DBF);
	case HAMMER_OBJTYPE_FIFO:
		return(DT_FIFO);
	case HAMMER_OBJTYPE_SOCKET:
		return(DT_SOCK);
	case HAMMER_OBJTYPE_CDEV:
		return(DT_CHR);
	case HAMMER_OBJTYPE_BDEV:
		return(DT_BLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(DT_LNK);
	default:
		return(DT_UNKNOWN);
	}
}

u_int8_t
hammer_get_obj_type(enum vtype vtype)
{
	switch(vtype) {
	case VDIR:
		return(HAMMER_OBJTYPE_DIRECTORY);
	case VREG:
		return(HAMMER_OBJTYPE_REGFILE);
	case VDATABASE:
		return(HAMMER_OBJTYPE_DBFILE);
	case VFIFO:
		return(HAMMER_OBJTYPE_FIFO);
	case VSOCK:
		return(HAMMER_OBJTYPE_SOCKET);
	case VCHR:
		return(HAMMER_OBJTYPE_CDEV);
	case VBLK:
		return(HAMMER_OBJTYPE_BDEV);
	case VLNK:
		return(HAMMER_OBJTYPE_SOFTLINK);
	default:
		return(HAMMER_OBJTYPE_UNKNOWN);
	}
}

/*
 * Return flags for hammer_delete_at_cursor()
 */
int
hammer_nohistory(hammer_inode_t ip)
{
	if (ip->hmp->hflags & HMNT_NOHISTORY)
		return(HAMMER_DELETE_DESTROY);
	if (ip->ino_data.uflags & (SF_NOHISTORY|UF_NOHISTORY))
		return(HAMMER_DELETE_DESTROY);
	return(0);
}

/*
 * ALGORITHM VERSION 0:
 * Return a namekey hash.  The 64 bit namekey hash consists of a 32 bit
 * crc in the MSB and 0 in the LSB.  The caller will use the low 32 bits
 * to generate a unique key and will scan all entries with the same upper
 * 32 bits when issuing a lookup.
 *
 *	0hhhhhhhhhhhhhhh hhhhhhhhhhhhhhhh 0000000000000000 0000000000000000
 *
 * ALGORITHM VERSION 1:
 *
 * This algorithm breaks the filename down into separate 32-bit crcs
 * for each filename segment separated by a special character (dot,
 * dash, underscore, or tilde).  The CRCs are then added together.
 * This allows temporary names (e.g. foo.tmp vs foo) to remain close
 * together in the key space.  A full-filename 16 bit crc is also
 * generated to deal with degenerate conditions.
 *
 * The algorithm is designed to handle create/rename situations such
 * that a create with an extension followed by a rename that drops the
 * extension only shifts the key space rather than randomizing it.
 *
 * NOTE: The inode allocator cache can only match 10 bits so we do
 *	 not really have any room for a partially sorted name, and
 *	 numbers don't sort well in that situation anyway.
 *
 *	0mmmmmmmmmmmmmmm mmmmmmmmmmmmmmmm llllllllllllllll 0000000000000000
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * We usually strip bit 0 (set it to 0) in order to provide a consistent
 * iteration space for collisions.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
int64_t
hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
			 u_int32_t *max_iterationsp)
{
	const char *aname = name;
	int64_t key;
	int32_t crcx;
	int i;
	int j;

	switch (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIRHASH_MASK) {
	case HAMMER_INODE_CAP_DIRHASH_ALG0:
		/*
		 * Original algorithm
		 */
		key = (int64_t)(crc32(aname, len) & 0x7FFFFFFF) << 32;
		if (key == 0)
			key |= 0x100000000LL;
		*max_iterationsp = 0xFFFFFFFFU;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG1:
		/*
		 * Filesystem version 6 or better will create directories
		 * using the ALG1 dirhash.  This hash breaks the filename
		 * up into domains separated by special characters and
		 * hashes each domain independently.
		 *
		 * We also do a simple sub-sort using the first character
		 * of the filename in the top 5-bits.
		 */
		key = 0;
		crcx = 0;
		for (i = j = 0; i < len; ++i) {
			if (aname[i] == '.' ||
			    aname[i] == '-' ||
			    aname[i] == '_' ||
			    aname[i] == '~') {
				if (i != j)
					crcx += crc32(aname + j, i - j);
				j = i + 1;
			}
		}
		if (i != j)
			crcx += crc32(aname + j, i - j);

		/*
		 * xor top 5 bits 0mmmm into low bits and steal the top 5
		 * bits as a semi sub sort using the first character of
		 * the filename.  bit 63 is always left as 0 so directory
		 * keys are positive numbers.
		 */
		crcx ^= (uint32_t)crcx >> (32 - 5);
		crcx = (crcx & 0x07FFFFFF) | ((aname[0] & 0x0F) << (32 - 5));
		crcx &= 0x7FFFFFFFU;

		key |= (uint64_t)crcx << 32;

		/*
		 * l16 - crc of entire filename
		 *
		 * This crc reduces degenerate hash collision conditions
		 */
		crcx = crc32(aname, len);
		crcx = crcx ^ (crcx << 16);
		key |= crcx & 0xFFFF0000U;

		/*
		 * Cleanup
		 */
		if ((key & 0xFFFFFFFF00000000LL) == 0)
			key |= 0x100000000LL;
		if (hammer_debug_general & 0x0400) {
			kprintf("namekey2: 0x%016llx %*.*s\n",
				(long long)key, len, len, aname);
		}
		*max_iterationsp = 0x00FFFFFF;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG2:
	case HAMMER_INODE_CAP_DIRHASH_ALG3:
	default:
		key = 0;			/* compiler warning */
		*max_iterationsp = 1;		/* sanity */
		panic("hammer_directory_namekey: bad algorithm %p", dip);
		break;
	}
	return(key);
}
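
/*
 * Illustrative ALG1 walk-through (symbolic, not computed values): for
 * the name "foo.tmp" the segment sum is crc32("foo") + crc32("tmp"),
 * while "foo" alone hashes to crc32("foo").  A rename from foo.tmp to
 * foo therefore shifts the upper-32-bit portion of the key by
 * crc32("tmp") (before the final bit-mixing steps) instead of moving
 * it to an unrelated random point, keeping the two names clustered in
 * the directory key space.
 */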

/*
 * Convert string after @@ (@@ not included) to TID.  Returns 0 on success,
 * EINVAL on failure.
 *
 * If this function fails *ispfsp, *tidp, and *localizationp will not
 * be modified.
 */
int
hammer_str_to_tid(const char *str, int *ispfsp,
		  hammer_tid_t *tidp, u_int32_t *localizationp)
{
	hammer_tid_t tid;
	u_int32_t localization;
	char *ptr;
	int ispfs;
	int n;

	/*
	 * Forms allowed for TID:  "0x%016llx"
	 *			   "-1"
	 */
	tid = strtouq(str, &ptr, 0);
	n = ptr - str;
	if (n == 2 && str[0] == '-' && str[1] == '1') {
		tid = HAMMER_MAX_TID;
	} else if (n == 18 && str[0] == '0' && (str[1] | 0x20) == 'x') {
		;
	} else {
		return(EINVAL);
	}

	/*
	 * Forms allowed for PFS:  ":%05d"  (i.e. "...:0" would be illegal).
	 */
	str = ptr;
	if (*str == ':') {
		localization = strtoul(str + 1, &ptr, 10) << 16;
		if (ptr - str != 6)
			return(EINVAL);
		str = ptr;
		ispfs = 1;
	} else {
		ispfs = 0;
		localization = *localizationp;
	}

	/*
	 * Any trailing junk invalidates special extension handling.
	 */
	if (*str)
		return(EINVAL);
	*tidp = tid;
	*localizationp = localization;
	*ispfsp = ispfs;
	return(0);
}
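
/*
 * Examples of accepted input (illustrative): a snapshot softlink body
 * such as "0x00000001061a8ba0" yields just a TID, while a PFS softlink
 * body such as "-1:00001" yields TID HAMMER_MAX_TID with *ispfsp set
 * and localization 1 shifted into the upper 16 bits.  A TID of the
 * wrong width ("0x1234") or a PFS field that is not exactly 5 digits
 * (":0") fails with EINVAL.
 */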

void
hammer_crc_set_blockmap(hammer_blockmap_t blockmap)
{
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

void
hammer_crc_set_volume(hammer_volume_ondisk_t ondisk)
{
	ondisk->vol_crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
			  crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
}

int
hammer_crc_test_blockmap(hammer_blockmap_t blockmap)
{
	hammer_crc_t crc;

	crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	return (blockmap->entry_crc == crc);
}

int
hammer_crc_test_volume(hammer_volume_ondisk_t ondisk)
{
	hammer_crc_t crc;

	crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
	      crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
	return (ondisk->vol_crc == crc);
}

int
hammer_crc_test_btree(hammer_node_ondisk_t ondisk)
{
	hammer_crc_t crc;

	crc = crc32(&ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
	return (ondisk->crc == crc);
}

/*
 * Test or set the leaf->data_crc field.  Deal with any special cases given
 * a generic B-Tree leaf element and its data.
 *
 * NOTE: Inode-data: the atime and mtime fields are not CRCd, allowing them
 *	 to be updated in-place.
 */
int
hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
	hammer_crc_t crc;

	if (leaf->data_len == 0) {
		crc = 0;
	} else {
		switch(leaf->base.rec_type) {
		case HAMMER_RECTYPE_INODE:
			if (leaf->data_len != sizeof(struct hammer_inode_data))
				return(0);
			crc = crc32(data, HAMMER_INODE_CRCSIZE);
			break;
		default:
			crc = crc32(data, leaf->data_len);
			break;
		}
	}
	return (leaf->data_crc == crc);
}

void
hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
	if (leaf->data_len == 0) {
		leaf->data_crc = 0;
	} else {
		switch(leaf->base.rec_type) {
		case HAMMER_RECTYPE_INODE:
			KKASSERT(leaf->data_len ==
				 sizeof(struct hammer_inode_data));
			leaf->data_crc = crc32(data, HAMMER_INODE_CRCSIZE);
			break;
		default:
			leaf->data_crc = crc32(data, leaf->data_len);
			break;
		}
	}
}

void
hkprintf(const char *ctl, ...)
{
	__va_list va;

	if (hammer_debug_debug) {
		__va_start(va, ctl);
		kvprintf(ctl, va);
		__va_end(va);
	}
}

/*
 * Return the block size at the specified file offset.
 */
int
hammer_blocksize(int64_t file_offset)
{
	if (file_offset < HAMMER_XDEMARC)
		return(HAMMER_BUFSIZE);
	else
		return(HAMMER_XBUFSIZE);
}

int
hammer_blockoff(int64_t file_offset)
{
	if (file_offset < HAMMER_XDEMARC)
		return((int)file_offset & HAMMER_BUFMASK);
	else
		return((int)file_offset & HAMMER_XBUFMASK);
}

/*
 * Return the demarcation point between the two offsets where
 * the block size changes.
 */
int64_t
hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2)
{
	if (file_offset1 < HAMMER_XDEMARC) {
		if (file_offset2 <= HAMMER_XDEMARC)
			return(file_offset2);
		return(HAMMER_XDEMARC);
	}
	panic("hammer_blockdemarc: illegal range %lld %lld",
	      (long long)file_offset1, (long long)file_offset2);
}
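
/*
 * Worked example (assuming HAMMER_XDEMARC is the 1MB small/large buffer
 * boundary): a range starting at 512KB and ending at 2MB is clipped to
 * hammer_blockdemarc(512K, 2M) == 1MB, so a caller first issues
 * small-block (HAMMER_BUFSIZE) I/O up to 1MB and large-block
 * (HAMMER_XBUFSIZE) I/O beyond it.
 */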

udev_t
hammer_fsid_to_udev(uuid_t *uuid)
{
	u_int32_t crc;

	crc = crc32(uuid, sizeof(*uuid));
	return((udev_t)crc);
}