/*
xxHash - Fast Hash algorithm
Copyright (C) 2012-2014, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

You can contact the author at :
- xxHash source repository : http://code.google.com/p/xxhash/
*/

#include "archive_platform.h"

#include <stdlib.h>
#include <string.h>

#include "archive_xxhash.h"

#ifdef HAVE_LIBLZ4

/***************************************
** Tuning parameters
****************************************/
/* Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
** For other CPUs, the compiler is more cautious and inserts extra code to ensure aligned access is respected.
** If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
** You can also enable this parameter if you know your input data will always be aligned (on 4-byte boundaries, for U32).
*/
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#  define XXH_USE_UNALIGNED_ACCESS 1
#endif
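
/* Note (illustrative, not from upstream xxHash): on a target known to handle
** unaligned 32-bit loads well, the same effect can be obtained from the build
** system, e.g. by adding -DXXH_USE_UNALIGNED_ACCESS=1 to the compiler flags,
** instead of extending the CPU list above. */
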
/* XXH_ACCEPT_NULL_INPUT_POINTER :
** If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
** When this option is enabled, xxHash output for a null input pointer is the same as for a zero-length input.
** This option has a very small performance cost (only measurable on small inputs).
** By default, this option is disabled. To enable it, uncomment the define below :
** #define XXH_ACCEPT_NULL_INPUT_POINTER 1
**
** XXH_FORCE_NATIVE_FORMAT :
** By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
** Results are therefore identical on little-endian and big-endian CPUs.
** This comes at a performance cost on big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
** Should endian-independence be of no importance for your application, you may set the define below to 1.
** It will improve speed on big-endian CPUs.
** This option has no impact on little-endian CPUs.
*/
#define XXH_FORCE_NATIVE_FORMAT 0

/***************************************
** Compiler Specific Options
****************************************/
/* Disable some Visual warning messages */
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)    /* disable: C4127: conditional expression is constant */
#endif

#ifdef _MSC_VER    /* Visual Studio */
#  define FORCE_INLINE static __forceinline
#else
#  ifdef __GNUC__
#    define FORCE_INLINE static inline __attribute__((always_inline))
#  else
#    define FORCE_INLINE static inline
#  endif
#endif

/***************************************
** Includes & Memory related functions
****************************************/
#define XXH_malloc malloc
#define XXH_free free
#define XXH_memcpy memcpy

static unsigned int XXH32 (const void*, unsigned int, unsigned int);
static void* XXH32_init (unsigned int);
static XXH_errorcode XXH32_update (void*, const void*, unsigned int);
static unsigned int XXH32_digest (void*);
/*static int XXH32_sizeofState(void);*/
static XXH_errorcode XXH32_resetState(void*, unsigned int);
#define XXH32_SIZEOFSTATE 48
typedef struct { long long ll[(XXH32_SIZEOFSTATE+(sizeof(long long)-1))/sizeof(long long)]; } XXH32_stateSpace_t;
static unsigned int XXH32_intermediateDigest (void*);
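
/* Usage sketch (illustrative only): the streaming interface can run either on
** a heap state from XXH32_init(), which XXH32_digest() releases, or on a
** caller-owned XXH32_stateSpace_t reset with XXH32_resetState().  'part1',
** 'len1', etc. are placeholders for the caller's data.
**
**     XXH32_stateSpace_t space;
**     XXH32_resetState(&space, 0);                       // seed = 0
**     XXH32_update(&space, part1, (unsigned int)len1);
**     XXH32_update(&space, part2, (unsigned int)len2);
**     h = XXH32_intermediateDigest(&space);              // state stays valid
*/
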
/***************************************
** Basic Types
****************************************/
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#  include <stdint.h>
  typedef uint8_t  BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
#else
  typedef unsigned char      BYTE;
  typedef unsigned short     U16;
  typedef unsigned int       U32;
  typedef   signed int       S32;
  typedef unsigned long long U64;
#endif

#if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
#  define _PACKED __attribute__ ((packed))
#else
#  define _PACKED
#endif

#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#  pragma pack(push, 1)
#endif

typedef struct _U32_S { U32 v; } _PACKED U32_S;

#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#  pragma pack(pop)
#endif

/****************************************
** Compiler-specific Functions and Macros
*****************************************/
#define GCC_VERSION ((__GNUC__-0) * 100 + (__GNUC_MINOR__ - 0))

#if GCC_VERSION >= 409
__attribute__((__no_sanitize_undefined__))
#endif
#if defined(_MSC_VER)
static __inline U32 A32(const void * x)
#else
static inline U32 A32(const void* x)
#endif
{
    return (((const U32_S *)(x))->v);
}

/* Note : although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#else
#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static inline U32 XXH_swap32 (U32 x) {
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif

/***************************************
** Constants
****************************************/
#define PRIME32_1   2654435761U
#define PRIME32_2   2246822519U
#define PRIME32_3   3266489917U
#define PRIME32_4    668265263U
#define PRIME32_5    374761393U

/***************************************
** Architecture Macros
****************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
#ifndef XXH_CPU_LITTLE_ENDIAN   /* It is possible to define XXH_CPU_LITTLE_ENDIAN externally, for example using a compiler switch */
static const int one = 1;
/* Runtime detection : the first byte of 'one' is 1 on little-endian machines and 0 on big-endian ones */
#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&one))
#endif

/***************************************
** Macros
****************************************/
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(!!(c)) }; }    /* use only *after* variable declarations */
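
/* Illustration (not from upstream): when the condition is false, !!(c) is 0 and
** the enum initializer divides by zero, which forces a compile-time error.
** Used inside a function body, as XXH32_sizeofState() below does, e.g. :
**
**     XXH_STATIC_ASSERT(sizeof(U32) == 4);   // compiles only if U32 is 32-bit
*/
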
/*****************************
** Memory reads
******************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;

FORCE_INLINE U32 XXH_readLE32_align(const U32* ptr, XXH_endianess endian, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return endian==XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr));
    else
        return endian==XXH_littleEndian ? *ptr : XXH_swap32(*ptr);
}

FORCE_INLINE U32 XXH_readLE32(const U32* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); }

/*****************************
** Simple Hash Functions
******************************/

FORCE_INLINE U32 XXH32_endian_align(const void* input, unsigned int len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align((const U32*)p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)16; }
#endif

    if (len>=16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;
        do {
            v1 += XXH_get32bits(p) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
            v2 += XXH_get32bits(p) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
            v3 += XXH_get32bits(p) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
            v4 += XXH_get32bits(p) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
        } while (p<=limit);
        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32 = seed + PRIME32_5;
    }

    h32 += (U32) len;

    while (p+4<=bEnd) {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4; p+=4;
    }
    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32  = XXH_rotl32(h32, 11) * PRIME32_1; p++;
    }

    /* final avalanche */
    h32 ^= h32 >> 15;  h32 *= PRIME32_2;
    h32 ^= h32 >> 13;  h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}

U32 XXH32(const void* input, unsigned int len, U32 seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    void* state = XXH32_init(seed);
    XXH32_update(state, input, len);
    return XXH32_digest(state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#  if !defined(XXH_USE_UNALIGNED_ACCESS)
    if ((((size_t)input) & 3) == 0) {   /* Input is aligned, let's leverage the speed advantage */
        if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#  endif

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
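
/* Example (illustrative): one-shot hashing of a caller-supplied buffer,
** 'buf' and 'buf_len' being placeholders, with seed 0 :
**
**     unsigned int h = XXH32(buf, (unsigned int)buf_len, 0);
*/
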
/*****************************
** Advanced Hash Functions
******************************/
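
/* Streaming hash state used by the functions below.  The layout follows the
** field accesses made in this file: an 8-byte running length, the seed, the
** four lane accumulators, and a 16-byte spill buffer, for 48 bytes total,
** matching XXH32_SIZEOFSTATE on common ABIs. */
struct XXH_state32_t
{
    U64 total_len;
    U32 seed;
    U32 v1;
    U32 v2;
    U32 v3;
    U32 v4;
    int memsize;
    char memory[16];
};
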
int XXH32_sizeofState(void)
{
    XXH_STATIC_ASSERT(XXH32_SIZEOFSTATE >= sizeof(struct XXH_state32_t));    /* A compilation error here means XXH32_SIZEOFSTATE is not large enough */
    return sizeof(struct XXH_state32_t);
}

XXH_errorcode XXH32_resetState(void* state_in, U32 seed)
{
    struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
    state->seed = seed;
    state->v1 = seed + PRIME32_1 + PRIME32_2;
    state->v2 = seed + PRIME32_2;
    state->v3 = seed + 0;
    state->v4 = seed - PRIME32_1;
    state->total_len = 0;
    state->memsize = 0;
    return XXH_OK;
}

void* XXH32_init (U32 seed)
{
    void* state = XXH_malloc (sizeof(struct XXH_state32_t));
    if (state != NULL) XXH32_resetState(state, seed);
    return state;
}

FORCE_INLINE XXH_errorcode XXH32_update_endian (void* state_in, const void* input, int len, XXH_endianess endian)
{
    struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
    const BYTE* p = (const BYTE*)input;
    const BYTE* const bEnd = p + len;

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (input==NULL) return XXH_ERROR;
#endif

    state->total_len += len;

    if (state->memsize + len < 16) {   /* fill in tmp buffer */
        XXH_memcpy(state->memory + state->memsize, input, len);
        state->memsize += len;
        return XXH_OK;
    }

    if (state->memsize) {   /* some data left from previous update */
        XXH_memcpy(state->memory + state->memsize, input, 16-state->memsize);
        {
            const U32* p32 = (const U32*)state->memory;
            state->v1 += XXH_readLE32(p32, endian) * PRIME32_2; state->v1 = XXH_rotl32(state->v1, 13); state->v1 *= PRIME32_1; p32++;
            state->v2 += XXH_readLE32(p32, endian) * PRIME32_2; state->v2 = XXH_rotl32(state->v2, 13); state->v2 *= PRIME32_1; p32++;
            state->v3 += XXH_readLE32(p32, endian) * PRIME32_2; state->v3 = XXH_rotl32(state->v3, 13); state->v3 *= PRIME32_1; p32++;
            state->v4 += XXH_readLE32(p32, endian) * PRIME32_2; state->v4 = XXH_rotl32(state->v4, 13); state->v4 *= PRIME32_1; p32++;
        }
        p += 16-state->memsize;
        state->memsize = 0;
    }

    if (p <= bEnd-16) {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = state->v1;
        U32 v2 = state->v2;
        U32 v3 = state->v3;
        U32 v4 = state->v4;
        do {
            v1 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
            v2 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
            v3 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
            v4 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
        } while (p<=limit);
        state->v1 = v1; state->v2 = v2; state->v3 = v3; state->v4 = v4;
    }

    if (p < bEnd) {
        XXH_memcpy(state->memory, p, bEnd-p);
        state->memsize = (int)(bEnd-p);
    }

    return XXH_OK;
}

XXH_errorcode XXH32_update (void* state_in, const void* input, unsigned int len)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
    else
        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}

FORCE_INLINE U32 XXH32_intermediateDigest_endian (void* state_in, XXH_endianess endian)
{
    struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
    const BYTE * p = (const BYTE*)state->memory;
    BYTE* bEnd = (BYTE*)state->memory + state->memsize;
    U32 h32;

    if (state->total_len >= 16)
        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
    else
        h32 = state->seed + PRIME32_5;

    h32 += (U32) state->total_len;

    while (p+4<=bEnd) {
        h32 += XXH_readLE32((const U32*)p, endian) * PRIME32_3;
        h32 = XXH_rotl32(h32, 17) * PRIME32_4; p+=4;
    }
    while (p<bEnd) {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1; p++;
    }

    /* final avalanche */
    h32 ^= h32 >> 15;  h32 *= PRIME32_2;
    h32 ^= h32 >> 13;  h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}

U32 XXH32_intermediateDigest (void* state_in)
{
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_intermediateDigest_endian(state_in, XXH_littleEndian);
    else
        return XXH32_intermediateDigest_endian(state_in, XXH_bigEndian);
}

U32 XXH32_digest (void* state_in)
{
    U32 h32 = XXH32_intermediateDigest(state_in);
    XXH_free(state_in);
    return h32;
}

struct archive_xxhash __archive_xxhash = {
    XXH32,
    XXH32_init,
    XXH32_update,
    XXH32_digest
};
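
/* Callers elsewhere in libarchive reach these routines through this table
** rather than by calling the static functions directly.  Illustrative sketch,
** assuming the member names in archive_xxhash.h mirror the function names:
**
**     unsigned int h = __archive_xxhash.XXH32(buff, (unsigned int)length, 0);
*/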

#else

/*
 * Define an empty version of the struct if we aren't using the LZ4 library.
 */
struct archive_xxhash __archive_xxhash = {
    NULL,
    NULL,
    NULL,
    NULL
};

#endif /* HAVE_LIBLZ4 */