/*
 * Copyright (c) 2007-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * HAMMER structural locking
 */

#include "hammer.h"
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
	thread_t td = curthread;
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
			if (hammer_debug_locks) {
				hdkprintf("held by %p\n", lock->lowner);
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
				if (hammer_debug_locks)
					hdkprintf("try again\n");
/*
 * Try to obtain an exclusive lock
 */
hammer_lock_ex_try(struct hammer_lock *lock)
	thread_t td = curthread;
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
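/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * source): a typical caller pattern combining the try and blocking
 * variants.  This assumes hammer_lock_ex() is the ident-defaulting
 * wrapper around hammer_lock_ex_ident() and that hammer_lock_ex_try()
 * returns 0 on success and EAGAIN on contention.
 */
static void
example_lock_ex_usage(struct hammer_lock *lock)
{
	if (hammer_lock_ex_try(lock) != 0) {
		/* contended; fall back to the blocking acquisition */
		hammer_lock_ex(lock);
	}
	/* ... exclusive access to the structure ... */
	hammer_unlock(lock);
}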
/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
hammer_lock_sh(struct hammer_lock *lock)
	thread_t td = curthread;
	const char *ident = "hmrlck";
	KKASSERT(lock->refs);
		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case, drop into kernel debugger for
			 * now.  A 'cont' from the debugger continues w/
			 * an exclusive lock.
			 */
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
hammer_lock_sh_try(struct hammer_lock *lock)
	thread_t td = curthread;
	KKASSERT(lock->refs);
		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case, drop into kernel debugger for
			 * now.  A 'cont' from the debugger continues w/
			 * an exclusive lock.
			 */
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all or held exclusively
 * by someone else, this function will panic.
 */
hammer_lock_upgrade(struct hammer_lock *lock, int shcount)
	thread_t td = curthread;
		if ((lv & ~HAMMER_LOCKF_WANTED) == shcount) {
			nlv = lv | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
		} else if (lv & HAMMER_LOCKF_EXCLUSIVE) {
			if (lock->lowner != curthread)
				hpanic("illegal state");
		} else if ((lv & ~HAMMER_LOCKF_WANTED) == 0) {
			hpanic("lock is not held");
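/*
 * Illustrative sketch (hypothetical helper): the upgrade pattern the
 * comment above describes.  hammer_lock_upgrade() is assumed to return
 * 0 on success and EDEADLK when other shared holders prevent the
 * upgrade, in which case the caller can drop the shared lock and
 * reacquire exclusively, revalidating any state examined under the
 * shared lock.
 */
static void
example_lock_upgrade(struct hammer_lock *lock)
{
	hammer_lock_sh(lock);
	/* ... shared access ... */
	if (hammer_lock_upgrade(lock, 1) == EDEADLK) {
		hammer_unlock(lock);
		hammer_lock_ex(lock);
		/* ... revalidate state dropped with the shared lock ... */
	}
	/* ... exclusive access ... */
	hammer_unlock(lock);
}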
/*
 * Downgrade an exclusively held lock to a shared lock.
 */
hammer_lock_downgrade(struct hammer_lock *lock, int shcount)
	thread_t td __debugvar = curthread;
	KKASSERT((lock->lockval & ~HAMMER_LOCKF_WANTED) ==
		 (HAMMER_LOCKF_EXCLUSIVE | shcount));
	KKASSERT(lock->lowner == td);
	/*
	 * NOTE: Must clear owner before releasing exclusivity
	 */
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
			if (lv & HAMMER_LOCKF_WANTED)
				wakeup(&lock->lockval);
hammer_unlock(struct hammer_lock *lock)
	thread_t td __debugvar = curthread;
	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		KKASSERT(lock->lowner == td);
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
		} else if (nlv == 1) {
			if (lv & HAMMER_LOCKF_EXCLUSIVE)
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (lv & HAMMER_LOCKF_WANTED)
					wakeup(&lock->lockval);
			hpanic("lock %p is not held", lock);
/*
 * The calling thread must be holding a shared or exclusive lock.
 * Returns < 0 if lock is held shared, and > 0 if held exclusively.
 */
hammer_lock_status(struct hammer_lock *lock)
	u_int lv = lock->lockval;
	if (lv & HAMMER_LOCKF_EXCLUSIVE)
	hpanic("lock must be held: %p", lock);
/*
 * Bump the ref count for a lock (not the excl/share count, but a separate
 * structural reference count).  The CHECK flag will be set on a 0->1
 * transition.
 *
 * This function does nothing to serialize races between multiple threads.
 * The caller can interlock it later on to deal with serialization.
 */
hammer_ref(struct hammer_lock *lock)
		if ((lv & ~HAMMER_REFS_FLAGS) == 0) {
			nlv = (lv + 1) | HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
			KKASSERT((int)nlv > 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
/*
 * Drop the ref count for a lock (not the excl/share count, but a separate
 * structural reference count).  The CHECK flag will be cleared on a 1->0
 * transition.
 *
 * This function does nothing to serialize races between multiple threads.
 */
hammer_rel(struct hammer_lock *lock)
		if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			nlv = (lv - 1) & ~HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
			KKASSERT((int)lv > 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
/*
 * The hammer_*_interlock() and hammer_*_interlock_done() functions are
 * more sophisticated versions which handle MP transition races and block
 * when necessary.
 *
 * hammer_ref_interlock() bumps the ref-count and conditionally acquires
 * the interlock for 0->1 transitions or if the CHECK flag is found to be set.
 *
 * This case will return 1, the interlock will be held, and the CHECK
 * bit also set.  Other threads attempting to ref will see the CHECK bit
 * and block until we clean up.
 *
 * 0 is returned for transitions other than 0->1 when the CHECK bit
 * is not found to be set, or if the function loses the race with another
 * thread.
 *
 * 1 is only returned to one thread and the others will block.
 * Effectively a 1 indicator means 'someone transitioned 0->1
 * and you are the first guy to successfully lock it after that, so you
 * need to check'.  Due to races the ref-count may be greater than 1 upon
 * return.
 */
hammer_ref_interlock(struct hammer_lock *lock)
	/*
	 * Integrated reference count bump, lock, and check, with hot-path.
	 *
	 * (a) Return 1	(+LOCKED, +CHECK)	0->1 transition
	 * (b) Return 0	(-LOCKED, -CHECK)	N->N+1 transition
	 * (c) Break out (+CHECK)		Check condition and Cannot lock
	 * (d) Return 1	(+LOCKED, +CHECK)	Successfully locked
	 */
			nlv = 1 | HAMMER_REFS_LOCKED | HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
			if ((lv & ~HAMMER_REFS_FLAGS) == 0)
				nlv |= HAMMER_REFS_CHECK;
			if ((nlv & HAMMER_REFS_CHECK) == 0) {
				if (atomic_cmpset_int(&lock->refs, lv, nlv))
			} else if (lv & HAMMER_REFS_LOCKED) {
				/* CHECK also set here */
				if (atomic_cmpset_int(&lock->refs, lv, nlv))
				/* CHECK also set here */
				nlv |= HAMMER_REFS_LOCKED;
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					lock->rowner = curthread;
	/*
	 * Deferred check condition because we were unable to acquire the
	 * lock.  We must block until the check condition is cleared due
	 * to a race with another thread, or we are able to acquire the
	 * lock.
	 *
	 * (a) Return 0	(-CHECK)		Another thread handled it
	 * (b) Return 1	(+LOCKED, +CHECK)	We handled it.
	 */
		if ((lv & HAMMER_REFS_CHECK) == 0)
		if (lv & HAMMER_REFS_LOCKED) {
			tsleep_interlock(&lock->refs, 0);
			nlv = (lv | HAMMER_REFS_WANTED);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				tsleep(&lock->refs, PINTERLOCKED, "h1lk", 0);
			/* CHECK also set here */
			nlv = lv | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
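/*
 * Illustrative sketch (hypothetical helper): the load-path pattern the
 * comments above describe.  A return of 1 means we won the interlock
 * with CHECK set and are responsible for validating (or loading) the
 * structure before calling hammer_ref_interlock_done().
 */
static void
example_ref_interlock_usage(struct hammer_lock *lock)
{
	if (hammer_ref_interlock(lock)) {
		/* ... load/validate the structure backing the lock ... */
		hammer_ref_interlock_done(lock);
	}
	/* the object is referenced and known-good from here on */
}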
/*
 * This is the same as hammer_ref_interlock() but asserts that the
 * 0->1 transition is always true, thus the lock must have no references
 * on entry or have CHECK set, and will have one reference with the
 * interlock held on return.  It must also not be interlocked on entry.
 *
 * NOTE that CHECK will never be found set when the ref-count is 0.
 *
 * 1 is always returned to match the API for hammer_ref_interlock().
 * This function returns with one ref, the lock held, and the CHECK bit set.
 */
hammer_ref_interlock_true(struct hammer_lock *lock)
			hpanic("bad lock %p %08x", lock, lock->refs);
		nlv = 1 | HAMMER_REFS_LOCKED | HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			lock->rowner = curthread;
/*
 * Unlock the interlock acquired by hammer_ref_interlock() and clear the
 * CHECK flag.  The ref-count remains unchanged.
 *
 * This routine is called in the load path when the load succeeds.
 */
hammer_ref_interlock_done(struct hammer_lock *lock)
		nlv = lv & ~HAMMER_REFS_FLAGS;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
/*
 * hammer_rel_interlock() works a bit differently in that it must
 * acquire the lock in tandem with a 1->0 transition.  CHECK is
 * not used.
 *
 * 1 is returned on 1->0 transitions with the lock held on return
 * and 0 is returned otherwise with the lock not held.
 *
 * It is important to note that the refs are not stable and may
 * increase while we hold the lock; the 1 indication only means
 * that we transitioned 1->0, not necessarily that we stayed at 0.
 *
 * Another thread bumping refs while we hold the lock will set CHECK,
 * causing one of the competing hammer_ref_interlock() calls to
 * return 1 after we release our lock.
 */
hammer_rel_interlock(struct hammer_lock *lock, int locked)
	/*
	 * In locked mode (failure/unload path) we release the
	 * ref-count but leave it locked.
	 */
	/*
	 * Integrated reference count drop with LOCKED, plus the hot-path
	 * returns.
	 */
		nlv = 0 | HAMMER_REFS_LOCKED;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			lock->rowner = curthread;
		} else if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			if ((lv & HAMMER_REFS_LOCKED) == 0) {
				nlv = (lv - 1) | HAMMER_REFS_LOCKED;
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					lock->rowner = curthread;
				nlv = lv | HAMMER_REFS_WANTED;
				tsleep_interlock(&lock->refs, 0);
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					tsleep(&lock->refs, PINTERLOCKED,
					       "h0lk", 0);
			KKASSERT((int)nlv >= 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
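/*
 * Illustrative sketch (hypothetical helper): the unload-path
 * counterpart.  A return of 1 means we transitioned 1->0 and hold the
 * interlock; since refs may have been bumped again while we held it,
 * any teardown must be conditional before the interlock is released.
 */
static void
example_rel_interlock_usage(struct hammer_lock *lock)
{
	if (hammer_rel_interlock(lock, 0)) {
		if ((lock->refs & ~HAMMER_REFS_FLAGS) == 0) {
			/* ... still at 0 refs, tear the structure down ... */
		}
		hammer_rel_interlock_done(lock, 0);
	}
}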
/*
 * Unlock the interlock acquired by hammer_rel_interlock().
 *
 * If orig_locked is non-zero the interlock was originally held prior to
 * the hammer_rel_interlock() call and passed through to us.  In this
 * case we want to retain the CHECK error state if not transitioning
 * to 0.
 *
 * The code is the same either way so we do not have to conditionalize
 * on orig_locked.
 */
hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked __unused)
		nlv = lv & ~(HAMMER_REFS_LOCKED | HAMMER_REFS_WANTED);
		if ((lv & ~HAMMER_REFS_FLAGS) == 0)
			nlv &= ~HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
/*
 * Acquire the interlock on lock->refs.
 *
 * Return 1 if CHECK is currently set.  Note that CHECK will not
 * be set if the reference count is 0, but can get set if this function
 * is preceded by, say, hammer_ref(), or through races with other
 * threads.  The return value allows the caller to use the same logic
 * as hammer_ref_interlock().
 */
hammer_get_interlock(struct hammer_lock *lock)
		if (lv & HAMMER_REFS_LOCKED) {
			nlv = lv | HAMMER_REFS_WANTED;
			tsleep_interlock(&lock->refs, 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				tsleep(&lock->refs, PINTERLOCKED, "hilk", 0);
			nlv = (lv | HAMMER_REFS_LOCKED);
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return((lv & HAMMER_REFS_CHECK) ? 1 : 0);
/*
 * Attempt to acquire the interlock and expect 0 refs.  Used by the buffer
 * cache callback code to disassociate or lock the bufs related to HAMMER
 * structures.
 *
 * During teardown the related bp will be acquired by hammer_io_release()
 * which interlocks our test.
 *
 * Returns non-zero on success, zero on failure.
 */
hammer_try_interlock_norefs(struct hammer_lock *lock)
		nlv = lv | HAMMER_REFS_LOCKED;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			lock->rowner = curthread;
/*
 * Release the interlock on lock->refs.  This function will set
 * CHECK if the refs is non-zero and error is non-zero, and clear
 * CHECK otherwise.
 */
hammer_put_interlock(struct hammer_lock *lock, int error)
		KKASSERT(lv & HAMMER_REFS_LOCKED);
		nlv = lv & ~(HAMMER_REFS_LOCKED | HAMMER_REFS_WANTED);
		if ((nlv & ~HAMMER_REFS_FLAGS) == 0 || error == 0)
			nlv &= ~HAMMER_REFS_CHECK;
		else
			nlv |= HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
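/*
 * Illustrative sketch (hypothetical helper): pairing
 * hammer_get_interlock() with hammer_put_interlock().  A non-zero
 * return mirrors hammer_ref_interlock()'s CHECK indication, and the
 * error passed to hammer_put_interlock() is assumed to determine
 * whether CHECK is propagated to the next interlocking thread.
 */
static int
example_get_put_interlock(struct hammer_lock *lock)
{
	int error = 0;

	if (hammer_get_interlock(lock)) {
		/* CHECK was set: validate here, set error on failure */
	}
	hammer_put_interlock(lock, error);
	return(error);
}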
/*
 * The sync_lock must be held when doing any modifying operations on
 * meta-data.  It does not have to be held when modifying non-meta-data
 * buffers (backend or frontend).
 *
 * The flusher holds the lock exclusively while all other consumers hold it
 * shared.  All modifying operations made while holding the lock are atomic
 * in that they will be made part of the same flush group.
 *
 * Due to the atomicity requirement, deadlock recovery code CANNOT release the
 * sync lock, nor can we give pending exclusive sync locks priority over
 * a shared sync lock as this could lead to a 3-way deadlock.
 */
hammer_sync_lock_ex(hammer_transaction_t trans)
	++trans->sync_lock_refs;
	hammer_lock_ex(&trans->hmp->sync_lock);

hammer_sync_lock_sh(hammer_transaction_t trans)
	++trans->sync_lock_refs;
	hammer_lock_sh(&trans->hmp->sync_lock);

hammer_sync_lock_sh_try(hammer_transaction_t trans)
	++trans->sync_lock_refs;
	if ((error = hammer_lock_sh_try(&trans->hmp->sync_lock)) != 0)
		--trans->sync_lock_refs;

hammer_sync_unlock(hammer_transaction_t trans)
	--trans->sync_lock_refs;
	hammer_unlock(&trans->hmp->sync_lock);
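/*
 * Illustrative sketch (hypothetical helper): frontend code brackets
 * meta-data modifications with a shared sync lock so they land in the
 * same flush group; the flusher takes the lock exclusively.
 */
static void
example_sync_locked_modify(hammer_transaction_t trans)
{
	hammer_sync_lock_sh(trans);
	/* ... meta-data modifications, atomic wrt the flush group ... */
	hammer_sync_unlock(trans);
}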
hammer_to_unix_xid(hammer_uuid_t *uuid)
	return(*(uint32_t *)&uuid->node[2]);

hammer_guid_to_uuid(hammer_uuid_t *uuid, uint32_t guid)
	bzero(uuid, sizeof(*uuid));
	*(uint32_t *)&uuid->node[2] = guid;

hammer_time_to_timespec(uint64_t xtime, struct timespec *ts)
	ts->tv_sec = (unsigned long)(xtime / 1000000);
	ts->tv_nsec = (unsigned int)(xtime % 1000000) * 1000L;

hammer_timespec_to_time(struct timespec *ts)
	xtime = (unsigned)(ts->tv_nsec / 1000) +
		(unsigned long)ts->tv_sec * 1000000ULL;
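/*
 * Illustrative sketch (hypothetical helper): HAMMER time stamps are
 * 64-bit microsecond counts, so the conversions above round-trip
 * exactly.  E.g. xtime = 1000000000123456 maps to tv_sec = 1000000000
 * and tv_nsec = 123456000.
 */
static void
example_time_roundtrip(void)
{
	struct timespec ts;
	uint64_t xtime = 1000000000123456ULL;	/* usec since the epoch */

	hammer_time_to_timespec(xtime, &ts);
	KKASSERT(hammer_timespec_to_time(&ts) == xtime);
}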
/*
 * Convert a HAMMER filesystem object type to a vnode type
 */
hammer_get_vnode_type(uint8_t obj_type)
	case HAMMER_OBJTYPE_DIRECTORY:
	case HAMMER_OBJTYPE_REGFILE:
	case HAMMER_OBJTYPE_DBFILE:
	case HAMMER_OBJTYPE_FIFO:
	case HAMMER_OBJTYPE_SOCKET:
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
	case HAMMER_OBJTYPE_SOFTLINK:
hammer_get_dtype(uint8_t obj_type)
	case HAMMER_OBJTYPE_DIRECTORY:
	case HAMMER_OBJTYPE_REGFILE:
	case HAMMER_OBJTYPE_DBFILE:
	case HAMMER_OBJTYPE_FIFO:
	case HAMMER_OBJTYPE_SOCKET:
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
	case HAMMER_OBJTYPE_SOFTLINK:
hammer_get_obj_type(enum vtype vtype)
		return(HAMMER_OBJTYPE_DIRECTORY);
		return(HAMMER_OBJTYPE_REGFILE);
		return(HAMMER_OBJTYPE_DBFILE);
		return(HAMMER_OBJTYPE_FIFO);
		return(HAMMER_OBJTYPE_SOCKET);
		return(HAMMER_OBJTYPE_CDEV);
		return(HAMMER_OBJTYPE_BDEV);
		return(HAMMER_OBJTYPE_SOFTLINK);
		return(HAMMER_OBJTYPE_UNKNOWN);
/*
 * Return flags for hammer_delete_at_cursor()
 */
hammer_nohistory(hammer_inode_t ip)
	if (ip->hmp->hflags & HMNT_NOHISTORY)
		return(HAMMER_DELETE_DESTROY);
	if (ip->ino_data.uflags & (SF_NOHISTORY|UF_NOHISTORY))
		return(HAMMER_DELETE_DESTROY);
/*
 * ALGORITHM VERSION 0:
 * Return a namekey hash.  The 64 bit namekey hash consists of a 32 bit
 * crc in the MSB and 0 in the LSB.  The caller will use the low 32 bits
 * to generate a unique key and will scan all entries with the same upper
 * 32 bits when issuing a lookup.
 *
 * 0hhhhhhhhhhhhhhh hhhhhhhhhhhhhhhh 0000000000000000 0000000000000000
 *
 * ALGORITHM VERSION 1:
 *
 * This algorithm breaks the filename down into a separate 32-bit crc
 * for each filename segment separated by a special character (dot,
 * dash, underscore, or tilde).  The CRCs are then added together.
 * This allows temporary names.  A full-filename 16-bit crc is also
 * generated to deal with degenerate conditions.
 *
 * The algorithm is designed to handle create/rename situations such
 * that a create with an extension followed by a rename without an
 * extension only shifts the key space rather than randomizing it.
 *
 * NOTE: The inode allocator cache can only match 10 bits so we do
 *	 not really have any room for a partially sorted name, and
 *	 numbers don't sort well in that situation anyway.
 *
 * 0mmmmmmmmmmmmmmm mmmmmmmmmmmmmmmm llllllllllllllll 0000000000000000
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * We usually strip bit 0 (set it to 0) in order to provide a consistent
 * iteration space for collisions.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
hammer_direntry_namekey(hammer_inode_t dip, const void *name, int len,
			uint32_t *max_iterationsp)
	const char *aname = name;

	switch (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIRHASH_MASK) {
	case HAMMER_INODE_CAP_DIRHASH_ALG0:
		key = (int64_t)(crc32(aname, len) & 0x7FFFFFFF) << 32;
			key |= 0x100000000LL;
		*max_iterationsp = 0xFFFFFFFFU;
	case HAMMER_INODE_CAP_DIRHASH_ALG1:
		/*
		 * Filesystem version 6 or better will create directories
		 * using the ALG1 dirhash.  This hash breaks the filename
		 * up into domains separated by special characters and
		 * hashes each domain independently.
		 *
		 * We also do a simple sub-sort using the first character
		 * of the filename in the top 5 bits.
		 */
		for (i = j = 0; i < len; ++i) {
			if (aname[i] == '.' ||
					crcx += crc32(aname + j, i - j);
			crcx += crc32(aname + j, i - j);

		/*
		 * xor top 5 bits 0mmmm into low bits and steal the top 5
		 * bits as a semi sub sort using the first character of
		 * the filename.  bit 63 is always left as 0 so directory
		 * keys are positive numbers.
		 */
		crcx ^= (uint32_t)crcx >> (32 - 5);
		crcx = (crcx & 0x07FFFFFF) | ((aname[0] & 0x0F) << (32 - 5));
		crcx &= 0x7FFFFFFFU;
		key |= (uint64_t)crcx << 32;

		/*
		 * l16 - crc of entire filename
		 *
		 * This crc reduces degenerate hash collision conditions.
		 */
		crcx = crc32(aname, len);
		crcx = crcx ^ (crcx << 16);
		key |= crcx & 0xFFFF0000U;

		if ((key & 0xFFFFFFFF00000000LL) == 0)
			key |= 0x100000000LL;
		if (hammer_debug_general & 0x0400) {
			hdkprintf("0x%016jx %*.*s\n",
				  (intmax_t)key, len, len, aname);
		}
		*max_iterationsp = 0x00FFFFFF;
	case HAMMER_INODE_CAP_DIRHASH_ALG2:
	case HAMMER_INODE_CAP_DIRHASH_ALG3:
		key = 0;		/* compiler warning */
		*max_iterationsp = 1;	/* sanity */
		hpanic("bad algorithm %p", dip);
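/*
 * Illustrative sketch (hypothetical helper): the ALG0 key layout shown
 * above, i.e. a 31-bit crc of the whole name in the MSB half and a zero
 * LSB half left for the caller's iteration space.  Under ALG1, renaming
 * "foo.tmp" to "foo" only drops the "tmp" segment crc from the summed
 * hash, shifting the key rather than randomizing it.
 */
static uint64_t
example_alg0_key(const char *aname, int len)
{
	return((uint64_t)(crc32(aname, len) & 0x7FFFFFFF) << 32);
}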
/*
 * Convert string after @@ (@@ not included) to TID.  Returns 0 on success,
 * EINVAL on failure.
 *
 * If this function fails *ispfsp, *tidp, and *localizationp will not
 * be modified.
 */
hammer_str_to_tid(const char *str, int *ispfsp,
		  hammer_tid_t *tidp, uint32_t *localizationp)
	uint32_t localization;

	/*
	 * Forms allowed for TID:  "0x%016llx"
	 */
	tid = strtouq(str, &ptr, 0);
	if (n == 2 && str[0] == '-' && str[1] == '1') {
	} else if (n == 18 && str[0] == '0' && (str[1] | 0x20) == 'x') {

	/*
	 * Forms allowed for PFS:  ":%05d"  (i.e. "...:0" would be illegal).
	 */
	localization = pfs_to_lo(strtoul(str + 1, &ptr, 10));
	localization = *localizationp;

	/*
	 * Any trailing junk invalidates special extension handling.
	 */
	*localizationp = localization;
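/*
 * Illustrative sketch (hypothetical helper): parsing the transaction-id
 * syntax that appears after "@@" in snapshot paths, e.g.
 * "0x00000001061a8ba0:00001", which is assumed to select a TID within
 * PFS #1.
 */
static void
example_str_to_tid(void)
{
	hammer_tid_t tid;
	uint32_t localization = 0;
	int ispfs;

	if (hammer_str_to_tid("0x00000001061a8ba0:00001", &ispfs,
			      &tid, &localization) == 0) {
		/* tid == 0x00000001061a8ba0, localization encodes PFS #1 */
	}
}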
/*
 * Return the block size at the specified file offset.
 */
hammer_blocksize(int64_t file_offset)
	if (file_offset < HAMMER_XDEMARC)
		return(HAMMER_BUFSIZE);
	else
		return(HAMMER_XBUFSIZE);

hammer_blockoff(int64_t file_offset)
	if (file_offset < HAMMER_XDEMARC)
		return((int)file_offset & HAMMER_BUFMASK);
	else
		return((int)file_offset & HAMMER_XBUFMASK);
/*
 * Return the demarcation point between the two offsets where
 * the block size changes.
 */
hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2)
	if (file_offset1 < HAMMER_XDEMARC) {
		if (file_offset2 <= HAMMER_XDEMARC)
			return(file_offset2);
		return(HAMMER_XDEMARC);
	}
	hpanic("illegal range %jd %jd",
	       (intmax_t)file_offset1, (intmax_t)file_offset2);
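/*
 * Illustrative sketch (hypothetical helper): walking a file range in
 * chunks that never straddle the small/large buffer boundary, using
 * hammer_blockdemarc() to clip while the offset is below HAMMER_XDEMARC
 * (the function panics if called with an offset at or past it).
 */
static void
example_range_walk(int64_t off, int64_t end)
{
	int64_t clip;

	while (off < end) {
		if (off < HAMMER_XDEMARC)
			clip = hammer_blockdemarc(off, end);
		else
			clip = end;
		/* ... process [off, clip) with hammer_blocksize(off) ... */
		off = clip;
	}
}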
hammer_fsid_to_udev(hammer_uuid_t *uuid)
	crc = crc32(uuid, sizeof(*uuid));
	return((udev_t)crc);