1 ///////////////////////////////////////////////////////////////////////////////
3 /// \file tuklib_integer.h
4 /// \brief Various integer and bit operations
/// This file provides macros or functions to do some basic integer and bit
/// operations.
9 /// Native endian inline functions (XX = 16, 32, or 64):
10 /// - Unaligned native endian reads: readXXne(ptr)
11 /// - Unaligned native endian writes: writeXXne(ptr, num)
12 /// - Aligned native endian reads: aligned_readXXne(ptr)
13 /// - Aligned native endian writes: aligned_writeXXne(ptr, num)
15 /// Endianness-converting integer operations (these can be macros!)
16 /// (XX = 16, 32, or 64; Y = b or l):
17 /// - Byte swapping: bswapXX(num)
18 /// - Byte order conversions to/from native (byteswaps if Y isn't
19 /// the native endianness): convXXYe(num)
20 /// - Unaligned reads (16/32-bit only): readXXYe(ptr)
21 /// - Unaligned writes (16/32-bit only): writeXXYe(ptr, num)
22 /// - Aligned reads: aligned_readXXYe(ptr)
23 /// - Aligned writes: aligned_writeXXYe(ptr, num)
/// Since the above can be macros, the arguments should have no side effects
/// because they may be evaluated more than once.
28 /// Bit scan operations for non-zero 32-bit integers (inline functions):
29 /// - Bit scan reverse (find highest non-zero bit): bsr32(num)
30 /// - Count leading zeros: clz32(num)
31 /// - Count trailing zeros: ctz32(num)
32 /// - Bit scan forward (simply an alias for ctz32()): bsf32(num)
34 /// The above bit scan operations return 0-31. If num is zero,
35 /// the result is undefined.
37 // Authors: Lasse Collin
40 // This file has been put into the public domain.
41 // You can do whatever you want with this file.
43 ///////////////////////////////////////////////////////////////////////////////
45 #ifndef TUKLIB_INTEGER_H
46 #define TUKLIB_INTEGER_H
#include "tuklib_common.h"
#include <string.h>
// Newer Intel C compilers require immintrin.h for _bit_scan_reverse()
// and such functions.
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1500)
#	include <immintrin.h>
#endif
// Detect platform-provided byte swapping facilities. Each macro is only
// defined when the platform actually provides it; generic fallbacks are
// defined further below for anything still missing.
#if defined(HAVE___BUILTIN_BSWAPXX)
	// GCC >= 4.8 and Clang
#	define bswap16(n) __builtin_bswap16(n)
#	define bswap32(n) __builtin_bswap32(n)
#	define bswap64(n) __builtin_bswap64(n)

#elif defined(HAVE_BYTESWAP_H)
	// glibc, uClibc, dietlibc
#	include <byteswap.h>
#	ifdef HAVE_BSWAP_16
#		define bswap16(num) bswap_16(num)
#	endif
#	ifdef HAVE_BSWAP_32
#		define bswap32(num) bswap_32(num)
#	endif
#	ifdef HAVE_BSWAP_64
#		define bswap64(num) bswap_64(num)
#	endif

#elif defined(HAVE_SYS_ENDIAN_H)
	// *BSDs and Darwin
#	include <sys/endian.h>

#elif defined(HAVE_SYS_BYTEORDER_H)
	// Solaris
#	include <sys/byteorder.h>
#	ifdef BSWAP_16
#		define bswap16(num) BSWAP_16(num)
#	endif
#	ifdef BSWAP_32
#		define bswap32(num) BSWAP_32(num)
#	endif
#	ifdef BSWAP_64
#		define bswap64(num) BSWAP_64(num)
#	endif
#	ifdef BE_16
#		define conv16be(num) BE_16(num)
#	endif
#	ifdef BE_32
#		define conv32be(num) BE_32(num)
#	endif
#	ifdef BE_64
#		define conv64be(num) BE_64(num)
#	endif
#	ifdef LE_16
#		define conv16le(num) LE_16(num)
#	endif
#	ifdef LE_32
#		define conv32le(num) LE_32(num)
#	endif
#	ifdef LE_64
#		define conv64le(num) LE_64(num)
#	endif
#endif
// Generic byte swapping fallbacks built from shifts and masks.
// NOTE: These are macros, so the argument must not have side effects
// (it is evaluated multiple times).
#ifndef bswap16
#	define bswap16(n) (uint16_t)( \
		  (((n) & 0x00FFU) << 8) \
		| (((n) & 0xFF00U) >> 8) \
	)
#endif

#ifndef bswap32
#	define bswap32(n) (uint32_t)( \
		  (((n) & UINT32_C(0x000000FF)) << 24) \
		| (((n) & UINT32_C(0x0000FF00)) << 8) \
		| (((n) & UINT32_C(0x00FF0000)) >> 8) \
		| (((n) & UINT32_C(0xFF000000)) >> 24) \
	)
#endif

#ifndef bswap64
#	define bswap64(n) (uint64_t)( \
		  (((n) & UINT64_C(0x00000000000000FF)) << 56) \
		| (((n) & UINT64_C(0x000000000000FF00)) << 40) \
		| (((n) & UINT64_C(0x0000000000FF0000)) << 24) \
		| (((n) & UINT64_C(0x00000000FF000000)) << 8) \
		| (((n) & UINT64_C(0x000000FF00000000)) >> 8) \
		| (((n) & UINT64_C(0x0000FF0000000000)) >> 24) \
		| (((n) & UINT64_C(0x00FF000000000000)) >> 40) \
		| (((n) & UINT64_C(0xFF00000000000000)) >> 56) \
	)
#endif
// Define conversion macros using the basic byte swapping macros.
// convXXYe() converts between native byte order and byte order Y,
// so it is a no-op when Y matches the native endianness and a byte
// swap otherwise. The #ifndef guards keep any platform-provided
// definitions from the detection block above.
#ifdef WORDS_BIGENDIAN
#	ifndef conv16be
#		define conv16be(num) ((uint16_t)(num))
#	endif
#	ifndef conv32be
#		define conv32be(num) ((uint32_t)(num))
#	endif
#	ifndef conv64be
#		define conv64be(num) ((uint64_t)(num))
#	endif
#	ifndef conv16le
#		define conv16le(num) bswap16(num)
#	endif
#	ifndef conv32le
#		define conv32le(num) bswap32(num)
#	endif
#	ifndef conv64le
#		define conv64le(num) bswap64(num)
#	endif
#else
#	ifndef conv16be
#		define conv16be(num) bswap16(num)
#	endif
#	ifndef conv32be
#		define conv32be(num) bswap32(num)
#	endif
#	ifndef conv64be
#		define conv64be(num) bswap64(num)
#	endif
#	ifndef conv16le
#		define conv16le(num) ((uint16_t)(num))
#	endif
#	ifndef conv32le
#		define conv32le(num) ((uint32_t)(num))
#	endif
#	ifndef conv64le
#		define conv64le(num) ((uint64_t)(num))
#	endif
#endif
188 ////////////////////////////////
189 // Unaligned reads and writes //
190 ////////////////////////////////
192 // The traditional way of casting e.g. *(const uint16_t *)uint8_pointer
193 // is bad even if the uint8_pointer is properly aligned because this kind
194 // of casts break strict aliasing rules and result in undefined behavior.
195 // With unaligned pointers it's even worse: compilers may emit vector
196 // instructions that require aligned pointers even if non-vector
197 // instructions work with unaligned pointers.
199 // Using memcpy() is the standard compliant way to do unaligned access.
200 // Many modern compilers inline it so there is no function call overhead.
201 // For those compilers that don't handle the memcpy() method well, the
202 // old casting method (that violates strict aliasing) can be requested at
203 // build time. A third method, casting to a packed struct, would also be
204 // an option but isn't provided to keep things simpler (it's already a mess).
205 // Hopefully this is flexible enough in practice.
// Read a 16-bit integer in native byte order from a possibly
// unaligned buffer.
static inline uint16_t
read16ne(const uint8_t *buf)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Violates strict aliasing; only used when requested at build time.
	return *(const uint16_t *)buf;
#else
	// memcpy() is the standard-compliant way to do unaligned access;
	// modern compilers inline it.
	uint16_t num;
	memcpy(&num, buf, sizeof(num));
	return num;
#endif
}
// Read a 32-bit integer in native byte order from a possibly
// unaligned buffer.
static inline uint32_t
read32ne(const uint8_t *buf)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Violates strict aliasing; only used when requested at build time.
	return *(const uint32_t *)buf;
#else
	uint32_t num;
	memcpy(&num, buf, sizeof(num));
	return num;
#endif
}
// Read a 64-bit integer in native byte order from a possibly
// unaligned buffer.
static inline uint64_t
read64ne(const uint8_t *buf)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Violates strict aliasing; only used when requested at build time.
	return *(const uint64_t *)buf;
#else
	uint64_t num;
	memcpy(&num, buf, sizeof(num));
	return num;
#endif
}
// Write a 16-bit integer in native byte order to a possibly
// unaligned buffer.
static inline void
write16ne(uint8_t *buf, uint16_t num)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Violates strict aliasing; only used when requested at build time.
	*(uint16_t *)buf = num;
#else
	memcpy(buf, &num, sizeof(num));
#endif
	return;
}
// Write a 32-bit integer in native byte order to a possibly
// unaligned buffer.
static inline void
write32ne(uint8_t *buf, uint32_t num)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Violates strict aliasing; only used when requested at build time.
	*(uint32_t *)buf = num;
#else
	memcpy(buf, &num, sizeof(num));
#endif
	return;
}
// Write a 64-bit integer in native byte order to a possibly
// unaligned buffer.
static inline void
write64ne(uint8_t *buf, uint64_t num)
{
#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING)
	// Violates strict aliasing; only used when requested at build time.
	*(uint64_t *)buf = num;
#else
	memcpy(buf, &num, sizeof(num));
#endif
	return;
}
// Read a 16-bit big endian integer from a possibly unaligned buffer.
static inline uint16_t
read16be(const uint8_t *buf)
{
#if defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// Native read plus conversion lets the compiler emit the best code.
	uint16_t num = read16ne(buf);
	return conv16be(num);
#else
	// Byte-by-byte access is safe on strict-align archs.
	uint16_t num = ((uint16_t)buf[0] << 8) | (uint16_t)buf[1];
	return num;
#endif
}
// Read a 16-bit little endian integer from a possibly unaligned buffer.
static inline uint16_t
read16le(const uint8_t *buf)
{
#if !defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// Native read plus conversion lets the compiler emit the best code.
	uint16_t num = read16ne(buf);
	return conv16le(num);
#else
	// Byte-by-byte access is safe on strict-align archs.
	uint16_t num = ((uint16_t)buf[0]) | ((uint16_t)buf[1] << 8);
	return num;
#endif
}
// Read a 32-bit big endian integer from a possibly unaligned buffer.
static inline uint32_t
read32be(const uint8_t *buf)
{
#if defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// Native read plus conversion lets the compiler emit the best code.
	uint32_t num = read32ne(buf);
	return conv32be(num);
#else
	// Byte-by-byte access is safe on strict-align archs.
	uint32_t num = (uint32_t)buf[0] << 24;
	num |= (uint32_t)buf[1] << 16;
	num |= (uint32_t)buf[2] << 8;
	num |= (uint32_t)buf[3];
	return num;
#endif
}
// Read a 32-bit little endian integer from a possibly unaligned buffer.
static inline uint32_t
read32le(const uint8_t *buf)
{
#if !defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
	// Native read plus conversion lets the compiler emit the best code.
	uint32_t num = read32ne(buf);
	return conv32le(num);
#else
	// Byte-by-byte access is safe on strict-align archs.
	uint32_t num = (uint32_t)buf[0];
	num |= (uint32_t)buf[1] << 8;
	num |= (uint32_t)buf[2] << 16;
	num |= (uint32_t)buf[3] << 24;
	return num;
#endif
}
346 // NOTE: Possible byte swapping must be done in a macro to allow the compiler
347 // to optimize byte swapping of constants when using glibc's or *BSD's
348 // byte swapping macros. The actual write is done in an inline function
349 // to make type checking of the buf pointer possible.
// On platforms where unaligned native-endian access is usable, define
// the endian-specific writes as macros so that byte swapping of
// constants happens at compile time; the store itself goes through the
// type-checked writeXXne() inline functions. The remaining cases are
// covered by the byte-by-byte fallback functions below.
#if defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
#	define write16be(buf, num) write16ne(buf, conv16be(num))
#	define write32be(buf, num) write32ne(buf, conv32be(num))
#endif

#if !defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
#	define write16le(buf, num) write16ne(buf, conv16le(num))
#	define write32le(buf, num) write32ne(buf, conv32le(num))
#endif
#ifndef write16be
// Byte-by-byte fallback: write a 16-bit big endian integer.
static inline void
write16be(uint8_t *buf, uint16_t num)
{
	buf[0] = (uint8_t)(num >> 8);
	buf[1] = (uint8_t)num;
	return;
}
#endif
#ifndef write16le
// Byte-by-byte fallback: write a 16-bit little endian integer.
static inline void
write16le(uint8_t *buf, uint16_t num)
{
	buf[0] = (uint8_t)num;
	buf[1] = (uint8_t)(num >> 8);
	return;
}
#endif
#ifndef write32be
// Byte-by-byte fallback: write a 32-bit big endian integer.
static inline void
write32be(uint8_t *buf, uint32_t num)
{
	buf[0] = (uint8_t)(num >> 24);
	buf[1] = (uint8_t)(num >> 16);
	buf[2] = (uint8_t)(num >> 8);
	buf[3] = (uint8_t)num;
	return;
}
#endif
#ifndef write32le
// Byte-by-byte fallback: write a 32-bit little endian integer.
static inline void
write32le(uint8_t *buf, uint32_t num)
{
	buf[0] = (uint8_t)num;
	buf[1] = (uint8_t)(num >> 8);
	buf[2] = (uint8_t)(num >> 16);
	buf[3] = (uint8_t)(num >> 24);
	return;
}
#endif
409 //////////////////////////////
410 // Aligned reads and writes //
411 //////////////////////////////
413 // Separate functions for aligned reads and writes are provided since on
414 // strict-align archs aligned access is much faster than unaligned access.
416 // Just like in the unaligned case, memcpy() is needed to avoid
417 // strict aliasing violations. However, on archs that don't support
418 // unaligned access the compiler cannot know that the pointers given
419 // to memcpy() are aligned which results in slow code. As of C11 there is
420 // no standard way to tell the compiler that we know that the address is
421 // aligned but some compilers have language extensions to do that. With
422 // such language extensions the memcpy() method gives excellent results.
// What to do on a strict-align system when no known language extensions
425 // are available? Falling back to byte-by-byte access would be safe but ruin
426 // optimizations that have been made specifically with aligned access in mind.
427 // As a compromise, aligned reads will fall back to non-compliant type punning
428 // but aligned writes will be byte-by-byte, that is, fast reads are preferred
429 // over fast writes. This obviously isn't great but hopefully it's a working
430 // compromise for now.
// __builtin_assume_aligned is supported by GCC >= 4.7 and Clang >= 3.6.
// memcpy() wrapper for known-aligned pointers. With
// __builtin_assume_aligned the compiler can emit aligned loads/stores
// even on strict-align archs; without it, aligned reads fall back to
// type punning (see TUKLIB_USE_UNSAFE_ALIGNED_READS above).
#ifdef HAVE___BUILTIN_ASSUME_ALIGNED
#	define tuklib_memcpy_aligned(dest, src, size) \
		memcpy(dest, __builtin_assume_aligned(src, size), size)
#else
#	define tuklib_memcpy_aligned(dest, src, size) \
		memcpy(dest, src, size)
#	ifndef TUKLIB_FAST_UNALIGNED_ACCESS
#		define TUKLIB_USE_UNSAFE_ALIGNED_READS 1
#	endif
#endif
// Read a 16-bit integer in native byte order from an aligned buffer.
static inline uint16_t
aligned_read16ne(const uint8_t *buf)
{
#if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
	// Non-compliant type punning; kept for speed on strict-align archs
	// lacking __builtin_assume_aligned.
	return *(const uint16_t *)buf;
#else
	uint16_t num;
	tuklib_memcpy_aligned(&num, buf, sizeof(num));
	return num;
#endif
}
// Read a 32-bit integer in native byte order from an aligned buffer.
static inline uint32_t
aligned_read32ne(const uint8_t *buf)
{
#if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
	// Non-compliant type punning; kept for speed on strict-align archs
	// lacking __builtin_assume_aligned.
	return *(const uint32_t *)buf;
#else
	uint32_t num;
	tuklib_memcpy_aligned(&num, buf, sizeof(num));
	return num;
#endif
}
// Read a 64-bit integer in native byte order from an aligned buffer.
static inline uint64_t
aligned_read64ne(const uint8_t *buf)
{
#if defined(TUKLIB_USE_UNSAFE_TYPE_PUNNING) \
		|| defined(TUKLIB_USE_UNSAFE_ALIGNED_READS)
	// Non-compliant type punning; kept for speed on strict-align archs
	// lacking __builtin_assume_aligned.
	return *(const uint64_t *)buf;
#else
	uint64_t num;
	tuklib_memcpy_aligned(&num, buf, sizeof(num));
	return num;
#endif
}
// Write a 16-bit integer in native byte order to an aligned buffer.
static inline void
aligned_write16ne(uint8_t *buf, uint16_t num)
{
#ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
	*(uint16_t *)buf = num;
#else
	tuklib_memcpy_aligned(buf, &num, sizeof(num));
#endif
	return;
}
// Write a 32-bit integer in native byte order to an aligned buffer.
static inline void
aligned_write32ne(uint8_t *buf, uint32_t num)
{
#ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
	*(uint32_t *)buf = num;
#else
	tuklib_memcpy_aligned(buf, &num, sizeof(num));
#endif
	return;
}
// Write a 64-bit integer in native byte order to an aligned buffer.
static inline void
aligned_write64ne(uint8_t *buf, uint64_t num)
{
#ifdef TUKLIB_USE_UNSAFE_TYPE_PUNNING
	*(uint64_t *)buf = num;
#else
	tuklib_memcpy_aligned(buf, &num, sizeof(num));
#endif
	return;
}
// Read a 16-bit big endian integer from an aligned buffer.
static inline uint16_t
aligned_read16be(const uint8_t *buf)
{
	uint16_t num = aligned_read16ne(buf);
	return conv16be(num);
}
// Read a 16-bit little endian integer from an aligned buffer.
static inline uint16_t
aligned_read16le(const uint8_t *buf)
{
	uint16_t num = aligned_read16ne(buf);
	return conv16le(num);
}
// Read a 32-bit big endian integer from an aligned buffer.
static inline uint32_t
aligned_read32be(const uint8_t *buf)
{
	uint32_t num = aligned_read32ne(buf);
	return conv32be(num);
}
// Read a 32-bit little endian integer from an aligned buffer.
static inline uint32_t
aligned_read32le(const uint8_t *buf)
{
	uint32_t num = aligned_read32ne(buf);
	return conv32le(num);
}
// Read a 64-bit big endian integer from an aligned buffer.
static inline uint64_t
aligned_read64be(const uint8_t *buf)
{
	uint64_t num = aligned_read64ne(buf);
	return conv64be(num);
}
// Read a 64-bit little endian integer from an aligned buffer.
static inline uint64_t
aligned_read64le(const uint8_t *buf)
{
	uint64_t num = aligned_read64ne(buf);
	return conv64le(num);
}
// These need to be macros like in the unaligned case: the byte order
// conversion happens in the macro so that constants can be byteswapped
// at compile time, while the store goes through the type-checked
// aligned_writeXXne() inline functions.
#define aligned_write16be(buf, num) aligned_write16ne((buf), conv16be(num))
#define aligned_write16le(buf, num) aligned_write16ne((buf), conv16le(num))
#define aligned_write32be(buf, num) aligned_write32ne((buf), conv32be(num))
#define aligned_write32le(buf, num) aligned_write32ne((buf), conv32le(num))
#define aligned_write64be(buf, num) aligned_write64ne((buf), conv64be(num))
#define aligned_write64le(buf, num) aligned_write64ne((buf), conv64le(num))
584 static inline uint32_t
587 // Check for ICC first, since it tends to define __GNUC__ too.
588 #if defined(__INTEL_COMPILER)
589 return _bit_scan_reverse(n);
591 #elif TUKLIB_GNUC_REQ(3, 4) && UINT_MAX == UINT32_MAX
592 // GCC >= 3.4 has __builtin_clz(), which gives good results on
593 // multiple architectures. On x86, __builtin_clz() ^ 31U becomes
594 // either plain BSR (so the XOR gets optimized away) or LZCNT and
595 // XOR (if -march indicates that SSE4a instructions are supported).
596 return (uint32_t)__builtin_clz(n) ^ 31U;
598 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
600 __asm__("bsrl %1, %0" : "=r" (i) : "rm" (n));
603 #elif defined(_MSC_VER)
605 _BitScanReverse(&i, n);
611 if ((n & 0xFFFF0000) == 0) {
616 if ((n & 0xFF000000) == 0) {
621 if ((n & 0xF0000000) == 0) {
626 if ((n & 0xC0000000) == 0) {
631 if ((n & 0x80000000) == 0)
639 static inline uint32_t
642 #if defined(__INTEL_COMPILER)
643 return _bit_scan_reverse(n) ^ 31U;
645 #elif TUKLIB_GNUC_REQ(3, 4) && UINT_MAX == UINT32_MAX
646 return (uint32_t)__builtin_clz(n);
648 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
650 __asm__("bsrl %1, %0\n\t"
652 : "=r" (i) : "rm" (n));
655 #elif defined(_MSC_VER)
657 _BitScanReverse(&i, n);
663 if ((n & 0xFFFF0000) == 0) {
668 if ((n & 0xFF000000) == 0) {
673 if ((n & 0xF0000000) == 0) {
678 if ((n & 0xC0000000) == 0) {
683 if ((n & 0x80000000) == 0)
691 static inline uint32_t
694 #if defined(__INTEL_COMPILER)
695 return _bit_scan_forward(n);
697 #elif TUKLIB_GNUC_REQ(3, 4) && UINT_MAX >= UINT32_MAX
698 return (uint32_t)__builtin_ctz(n);
700 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
702 __asm__("bsfl %1, %0" : "=r" (i) : "rm" (n));
705 #elif defined(_MSC_VER)
707 _BitScanForward(&i, n);
713 if ((n & 0x0000FFFF) == 0) {
718 if ((n & 0x000000FF) == 0) {
723 if ((n & 0x0000000F) == 0) {
728 if ((n & 0x00000003) == 0) {
733 if ((n & 0x00000001) == 0)