author     Chris Robinson <[email protected]>  2019-03-18 22:06:01 -0700
committer  Chris Robinson <[email protected]>  2019-03-18 22:06:01 -0700
commit     d31514f8beff74d4425a7dd7bcc2084023a3ffdd (patch)
tree       8e3484aeb023fcf8351c127c3cd238858cb857d3 /common/alnumeric.h
parent     3e816de4fb3f8be4746643f5b9c5c07186e16b6c (diff)
Move some inline functions from alMain.h to alnumeric.h
Diffstat (limited to 'common/alnumeric.h')
-rw-r--r--  common/alnumeric.h | 235
1 file changed, 235 insertions, 0 deletions
```diff
diff --git a/common/alnumeric.h b/common/alnumeric.h
index 08a4858d..a2bcd8ee 100644
--- a/common/alnumeric.h
+++ b/common/alnumeric.h
@@ -2,6 +2,15 @@
 #define AL_NUMERIC_H
 
 #include <stdint.h>
+#ifdef HAVE_INTRIN_H
+#include <intrin.h>
+#endif
+#ifdef HAVE_SSE_INTRINSICS
+#include <xmmintrin.h>
+#endif
+
+#include "opthelpers.h"
+
 
 inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept
 { return static_cast<int64_t>(n); }
 inline constexpr uint64_t operator "" _u64(unsigned long long int n) noexcept
 { return static_cast<uint64_t>(n); }
@@ -79,4 +88,230 @@ inline size_t RoundUp(size_t value, size_t r) noexcept
     return value - (value%r);
 }
+
+/* Define CTZ macros (count trailing zeros), and POPCNT macros (population
+ * count/count 1 bits), for 32- and 64-bit integers. The CTZ macros' results
+ * are *UNDEFINED* if the value is 0.
+ */
+#ifdef __GNUC__
+
+#define POPCNT32 __builtin_popcount
+#define CTZ32 __builtin_ctz
+#if SIZEOF_LONG == 8
+#define POPCNT64 __builtin_popcountl
+#define CTZ64 __builtin_ctzl
+#else
+#define POPCNT64 __builtin_popcountll
+#define CTZ64 __builtin_ctzll
+#endif
+
+#elif defined(HAVE_BITSCANFORWARD64_INTRINSIC)
+
+inline int msvc64_popcnt32(ALuint v)
+{ return (int)__popcnt(v); }
+#define POPCNT32 msvc64_popcnt32
+inline int msvc64_ctz32(ALuint v)
+{
+    unsigned long idx = 32;
+    _BitScanForward(&idx, v);
+    return (int)idx;
+}
+#define CTZ32 msvc64_ctz32
+
+inline int msvc64_popcnt64(uint64_t v)
+{ return (int)__popcnt64(v); }
+#define POPCNT64 msvc64_popcnt64
+inline int msvc64_ctz64(uint64_t v)
+{
+    unsigned long idx = 64;
+    _BitScanForward64(&idx, v);
+    return (int)idx;
+}
+#define CTZ64 msvc64_ctz64
+
+#elif defined(HAVE_BITSCANFORWARD_INTRINSIC)
+
+inline int msvc_popcnt32(ALuint v)
+{ return (int)__popcnt(v); }
+#define POPCNT32 msvc_popcnt32
+inline int msvc_ctz32(ALuint v)
+{
+    unsigned long idx = 32;
+    _BitScanForward(&idx, v);
+    return (int)idx;
+}
+#define CTZ32 msvc_ctz32
+
+inline int msvc_popcnt64(uint64_t v)
+{ return (int)(__popcnt((ALuint)v) + __popcnt((ALuint)(v>>32))); }
+#define POPCNT64 msvc_popcnt64
+inline int msvc_ctz64(uint64_t v)
+{
+    unsigned long idx = 64;
+    if(!_BitScanForward(&idx, v&0xffffffff))
+    {
+        if(_BitScanForward(&idx, v>>32))
+            idx += 32;
+    }
+    return (int)idx;
+}
+#define CTZ64 msvc_ctz64
+
+#else
+
+/* There be black magics here. The popcnt method is derived from
+ * https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ * while the ctz-utilizing-popcnt algorithm is shown here
+ * http://www.hackersdelight.org/hdcodetxt/ntz.c.txt
+ * as the ntz2 variant. These likely aren't the most efficient methods, but
+ * they're good enough if the GCC or MSVC intrinsics aren't available.
+ */
+inline int fallback_popcnt32(ALuint v)
+{
+    v = v - ((v >> 1) & 0x55555555u);
+    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
+    v = (v + (v >> 4)) & 0x0f0f0f0fu;
+    return (int)((v * 0x01010101u) >> 24);
+}
+#define POPCNT32 fallback_popcnt32
+inline int fallback_ctz32(ALuint value)
+{ return fallback_popcnt32(~value & (value - 1)); }
+#define CTZ32 fallback_ctz32
+
+inline int fallback_popcnt64(uint64_t v)
+{
+    v = v - ((v >> 1) & 0x5555555555555555_u64);
+    v = (v & 0x3333333333333333_u64) + ((v >> 2) & 0x3333333333333333_u64);
+    v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f_u64;
+    return (int)((v * 0x0101010101010101_u64) >> 56);
+}
+#define POPCNT64 fallback_popcnt64
+inline int fallback_ctz64(uint64_t value)
+{ return fallback_popcnt64(~value & (value - 1)); }
+#define CTZ64 fallback_ctz64
+#endif
+
+
+/**
+ * Fast float-to-int conversion. No particular rounding mode is assumed; the
+ * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
+ * change it on its own threads. On some systems, a truncating conversion may
+ * always be the fastest method.
+ */
+inline int fastf2i(float f) noexcept
+{
+#if defined(HAVE_SSE_INTRINSICS)
+    return _mm_cvt_ss2si(_mm_set_ss(f));
+
+#elif defined(_MSC_VER) && defined(_M_IX86_FP)
+
+    int i;
+    __asm fld f
+    __asm fistp i
+    return i;
+
+#elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))
+
+    int i;
+#ifdef __SSE_MATH__
+    __asm__("cvtss2si %1, %0" : "=r"(i) : "x"(f));
+#else
+    __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
+#endif
+    return i;
+
+#else
+
+    return static_cast<int>(f);
+#endif
+}
+
+/** Converts float-to-int using standard behavior (truncation). */
+inline int float2int(float f) noexcept
+{
+#if defined(HAVE_SSE_INTRINSICS)
+    return _mm_cvtt_ss2si(_mm_set_ss(f));
+
+#elif ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
+       !defined(__SSE_MATH__)) || (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0)
+    int sign, shift, mant;
+    union {
+        float f;
+        int i;
+    } conv;
+
+    conv.f = f;
+    sign = (conv.i>>31) | 1;
+    shift = ((conv.i>>23)&0xff) - (127+23);
+
+    /* Over/underflow */
+    if(UNLIKELY(shift >= 31 || shift < -23))
+        return 0;
+
+    mant = (conv.i&0x7fffff) | 0x800000;
+    if(LIKELY(shift < 0))
+        return (mant >> -shift) * sign;
+    return (mant << shift) * sign;
+
+#else
+
+    return static_cast<int>(f);
+#endif
+}
+
+/**
+ * Rounds a float to the nearest integral value, according to the current
+ * rounding mode. This is essentially an inlined version of rintf, although
+ * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
+ */
+inline float fast_roundf(float f) noexcept
+{
+#if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
+    !defined(__SSE_MATH__)
+
+    float out;
+    __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));
+    return out;
+
+#else
+
+    /* Integral limit, where sub-integral precision is not available for
+     * floats.
+     */
+    static constexpr float ilim[2] = {
+         8388608.0f /*  0x1.0p+23 */,
+        -8388608.0f /* -0x1.0p+23 */
+    };
+    unsigned int sign, expo;
+    union {
+        float f;
+        unsigned int i;
+    } conv;
+
+    conv.f = f;
+    sign = (conv.i>>31)&0x01;
+    expo = (conv.i>>23)&0xff;
+
+    if(UNLIKELY(expo >= 150/*+23*/))
+    {
+        /* An exponent (base-2) of 23 or higher is incapable of sub-integral
+         * precision, so it's already an integral value. We don't need to
+         * worry about infinity or NaN here.
+         */
+        return f;
+    }
+
+    /* Adding the integral limit to the value (with a matching sign) forces a
+     * result that has no sub-integral precision, and is consequently forced
+     * to round to an integral value. Removing the integral limit then
+     * restores the initial value rounded to the integral. The compiler
+     * should not optimize this out because of non-associative rules on
+     * floating-point math (as long as you don't use -fassociative-math,
+     * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case
+     * this may break).
+     */
+    f += ilim[sign];
+    return f - ilim[sign];
+#endif
+}
+
 
 #endif /* AL_NUMERIC_H */
```
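A note on the fallback path above: the SWAR popcount sums bits in progressively wider groups, and `~v & (v - 1)` turns a count-trailing-zeros problem into a popcount by converting the trailing zeros into trailing ones while clearing every other bit. Below is a minimal standalone sketch of that path (not part of the commit; it substitutes `uint32_t` for `ALuint` so it compiles outside the tree):

```cpp
// Standalone sketch of the fallback POPCNT32/CTZ32 path (hypothetical names,
// uint32_t in place of ALuint so it builds without the OpenAL Soft headers).
#include <cassert>
#include <cstdint>
#include <cstdio>

// SWAR popcount: accumulates bit counts in 2-, 4-, then 8-bit groups; the
// multiply by 0x01010101 sums the four byte counts into the top byte.
int popcnt32(uint32_t v)
{
    v = v - ((v >> 1) & 0x55555555u);
    v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
    v = (v + (v >> 4)) & 0x0f0f0f0fu;
    return static_cast<int>((v * 0x01010101u) >> 24);
}

// ntz2 trick: ~v & (v - 1) keeps exactly the trailing zeros of v as a block
// of trailing ones, so popcount of that mask is the CTZ. As with the header's
// macros, the result is undefined for v == 0.
int ctz32(uint32_t v)
{ return popcnt32(~v & (v - 1u)); }

int main()
{
    assert(popcnt32(0xF0u) == 4);
    assert(ctz32(0x28u) == 3); // 0b101000 has three trailing zeros
    std::printf("popcnt32(0xF0)=%d, ctz32(0x28)=%d\n",
                popcnt32(0xF0u), ctz32(0x28u));
}
```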
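The distinction fastf2i's comment draws between the current rounding mode and a truncating cast is easy to see in isolation. This hypothetical snippet exercises the `HAVE_SSE_INTRINSICS` branch directly, assuming an x86 target with SSE and the default round-to-nearest, ties-to-even environment:

```cpp
// Sketch contrasting fastf2i's rounding conversion (cvtss2si obeys the
// current rounding mode) with the truncation of a plain C++ cast.
#include <cstdio>
#include <xmmintrin.h>

inline int fastf2i(float f) noexcept
{ return _mm_cvt_ss2si(_mm_set_ss(f)); }

int main()
{
    // Under the default round-to-nearest, ties-to-even mode:
    std::printf("%d\n", fastf2i(2.5f));           // 2  (tie goes to even)
    std::printf("%d\n", fastf2i(3.5f));           // 4  (tie goes to even)
    std::printf("%d\n", fastf2i(-1.7f));          // -2 (nearest)
    std::printf("%d\n", static_cast<int>(-1.7f)); // -1 (cast truncates)
}
```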
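float2int's bit-manipulation branch truncates without touching the FPU control word: it extracts the sign, the exponent relative to the mantissa width, and the mantissa with its implicit leading 1, then shifts the mantissa into integer position. A portable re-creation follows (a sketch, using `std::memcpy` instead of the header's union and plain branches instead of the `LIKELY`/`UNLIKELY` hints), with a worked example:

```cpp
// Portable sketch of float2int's bit-manipulation truncation branch.
#include <cassert>
#include <cstdint>
#include <cstring>

int float2int_bits(float f) noexcept
{
    int32_t i;
    std::memcpy(&i, &f, sizeof(i)); // grab the IEEE-754 bits

    const int sign  = (i >> 31) | 1;                // -1 or +1
    const int shift = ((i >> 23) & 0xff) - (127+23); // exponent minus bias
                                                     // and mantissa width

    // |f| < 1 truncates to 0; too-large values are out of int range anyway.
    if(shift >= 31 || shift < -23)
        return 0;

    const int mant = (i & 0x7fffff) | 0x800000; // restore implicit 1 bit
    return (shift < 0) ? (mant >> -shift) * sign
                       : (mant <<  shift) * sign;
}

int main()
{
    // -6.5f: sign=-1, biased exponent 129 -> shift = 129-150 = -21,
    // mant = 0xD00000 (1.625 * 2^23); 0xD00000 >> 21 = 6, times -1 = -6.
    assert(float2int_bits(-6.5f) == -6);
    assert(float2int_bits(0.75f) == 0); // shift = -24 < -23 -> underflows to 0
}
```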
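Finally, the generic fast_roundf branch leans on the fact that at magnitudes of 2^23 and above a float has no fractional precision, so adding and then subtracting that limit forces the hardware to round at integral granularity. A small demonstration of the trick (assuming the default round-to-nearest mode and no -ffast-math style reassociation; the header additionally picks the limit's sign from a table and early-outs on large exponents):

```cpp
// Demonstration of the add/subtract-2^23 rounding trick used by
// fast_roundf's generic branch.
#include <cstdio>

float round_via_ilim(float f) noexcept
{
    // For |f| < 2^23, f + 2^23 has no sub-integral precision, so the sum
    // must be rounded; subtracting 2^23 leaves the rounded value behind.
    constexpr float ilim = 8388608.0f; // 0x1.0p+23
    if(f >= 0.0f)
    {
        f += ilim;
        return f - ilim;
    }
    f -= ilim;
    return f + ilim;
}

int main()
{
    std::printf("%g\n", round_via_ilim(2.5f));  // 2 (ties-to-even)
    std::printf("%g\n", round_via_ilim(-1.7f)); // -2
    std::printf("%g\n", round_via_ilim(0.4f));  // 0
}
```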