about summary refs log tree commit diff stats
path: root/common
diff options
context:
space:
mode:
Diffstat (limited to 'common')
-rw-r--r--common/align.h21
-rw-r--r--common/almalloc.h21
-rw-r--r--common/atomic.h425
-rw-r--r--common/bool.h18
-rw-r--r--common/math_defs.h19
-rw-r--r--common/rwlock.h31
-rw-r--r--common/static_assert.h21
-rw-r--r--common/threads.h237
-rw-r--r--common/uintmap.h46
9 files changed, 839 insertions, 0 deletions
diff --git a/common/align.h b/common/align.h
new file mode 100644
index 00000000..e2dc81df
--- /dev/null
+++ b/common/align.h
@@ -0,0 +1,21 @@
+#ifndef AL_ALIGN_H
+#define AL_ALIGN_H
+
+#if defined(HAVE_STDALIGN_H) && defined(HAVE_C11_ALIGNAS)
+#include <stdalign.h>
+#endif
+
+#ifndef alignas
+#if defined(IN_IDE_PARSER)
+/* KDevelop has problems with our align macro, so just use nothing for parsing. */
+#define alignas(x)
+#elif defined(HAVE_C11_ALIGNAS)
+#define alignas _Alignas
+#else
+/* NOTE: Our custom ALIGN macro can't take a type name like alignas can. For
+ * maximum compatibility, only provide constant integer values to alignas. */
+#define alignas(_x) ALIGN(_x)
+#endif
+#endif
+
+#endif /* AL_ALIGN_H */
diff --git a/common/almalloc.h b/common/almalloc.h
new file mode 100644
index 00000000..8eadb5b3
--- /dev/null
+++ b/common/almalloc.h
@@ -0,0 +1,21 @@
+#ifndef AL_MALLOC_H
+#define AL_MALLOC_H
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Minimum alignment required by posix_memalign. */
+#define DEF_ALIGN sizeof(void*)
+
+void *al_malloc(size_t alignment, size_t size);
+void *al_calloc(size_t alignment, size_t size);
+void al_free(void *ptr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AL_MALLOC_H */
diff --git a/common/atomic.h b/common/atomic.h
new file mode 100644
index 00000000..874d510d
--- /dev/null
+++ b/common/atomic.h
@@ -0,0 +1,425 @@
+#ifndef AL_ATOMIC_H
+#define AL_ATOMIC_H
+
+#include "static_assert.h"
+#include "bool.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Atomics using C11 */
+#ifdef HAVE_C11_ATOMIC
+
+#include <stdatomic.h>
+
+/* The almemory_order* names map one-to-one onto the C11 memory_order enum. */
+#define almemory_order memory_order
+#define almemory_order_relaxed memory_order_relaxed
+#define almemory_order_consume memory_order_consume
+#define almemory_order_acquire memory_order_acquire
+#define almemory_order_release memory_order_release
+#define almemory_order_acq_rel memory_order_acq_rel
+#define almemory_order_seq_cst memory_order_seq_cst
+
+/* ATOMIC(T) declares an atomic object of type T; ATOMIC_FLAG is C11's
+ * lock-free test-and-set flag type. */
+#define ATOMIC(T) T _Atomic
+#define ATOMIC_FLAG atomic_flag
+
+/* Runtime initialization vs. static initializer for atomic objects. */
+#define ATOMIC_INIT atomic_init
+#define ATOMIC_INIT_STATIC ATOMIC_VAR_INIT
+/*#define ATOMIC_FLAG_INIT ATOMIC_FLAG_INIT*/
+
+/* The following all take explicit memory-order argument(s), mapping onto the
+ * *_explicit forms of the corresponding C11 generic functions. */
+#define ATOMIC_LOAD atomic_load_explicit
+#define ATOMIC_STORE atomic_store_explicit
+
+#define ATOMIC_ADD atomic_fetch_add_explicit
+#define ATOMIC_SUB atomic_fetch_sub_explicit
+
+#define ATOMIC_EXCHANGE atomic_exchange_explicit
+#define ATOMIC_COMPARE_EXCHANGE_STRONG atomic_compare_exchange_strong_explicit
+#define ATOMIC_COMPARE_EXCHANGE_WEAK atomic_compare_exchange_weak_explicit
+
+#define ATOMIC_FLAG_TEST_AND_SET atomic_flag_test_and_set_explicit
+#define ATOMIC_FLAG_CLEAR atomic_flag_clear_explicit
+
+#define ATOMIC_THREAD_FENCE atomic_thread_fence
+
+/* Atomics using GCC intrinsics */
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) && !defined(__QNXNTO__)
+
+enum almemory_order {
+ almemory_order_relaxed,
+ almemory_order_consume,
+ almemory_order_acquire,
+ almemory_order_release,
+ almemory_order_acq_rel,
+ almemory_order_seq_cst
+};
+
+#define ATOMIC(T) struct { T volatile value; }
+#define ATOMIC_FLAG ATOMIC(int)
+
+#define ATOMIC_INIT(_val, _newval) do { (_val)->value = (_newval); } while(0)
+#define ATOMIC_INIT_STATIC(_newval) {(_newval)}
+#define ATOMIC_FLAG_INIT ATOMIC_INIT_STATIC(0)
+
+#define ATOMIC_LOAD(_val, _MO) __extension__({ \
+ __typeof((_val)->value) _r = (_val)->value; \
+ __asm__ __volatile__("" ::: "memory"); \
+ _r; \
+})
+#define ATOMIC_STORE(_val, _newval, _MO) do { \
+ __asm__ __volatile__("" ::: "memory"); \
+ (_val)->value = (_newval); \
+} while(0)
+
+#define ATOMIC_ADD(_val, _incr, _MO) __sync_fetch_and_add(&(_val)->value, (_incr))
+#define ATOMIC_SUB(_val, _decr, _MO) __sync_fetch_and_sub(&(_val)->value, (_decr))
+
+#define ATOMIC_EXCHANGE(_val, _newval, _MO) __extension__({ \
+ __asm__ __volatile__("" ::: "memory"); \
+ __sync_lock_test_and_set(&(_val)->value, (_newval)); \
+})
+#define ATOMIC_COMPARE_EXCHANGE_STRONG(_val, _oldval, _newval, _MO1, _MO2) __extension__({ \
+ __typeof(*(_oldval)) _o = *(_oldval); \
+ *(_oldval) = __sync_val_compare_and_swap(&(_val)->value, _o, (_newval)); \
+ *(_oldval) == _o; \
+})
+
+#define ATOMIC_FLAG_TEST_AND_SET(_val, _MO) __extension__({ \
+ __asm__ __volatile__("" ::: "memory"); \
+ __sync_lock_test_and_set(&(_val)->value, 1); \
+})
+#define ATOMIC_FLAG_CLEAR(_val, _MO) __extension__({ \
+ __sync_lock_release(&(_val)->value); \
+ __asm__ __volatile__("" ::: "memory"); \
+})
+
+
+#define ATOMIC_THREAD_FENCE(order) do { \
+ enum { must_be_constant = (order) }; \
+ const int _o = must_be_constant; \
+ if(_o > almemory_order_relaxed) \
+ __asm__ __volatile__("" ::: "memory"); \
+} while(0)
+
+/* Atomics using x86/x86-64 GCC inline assembly */
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+
+#define WRAP_ADD(S, ret, dest, incr) __asm__ __volatile__( \
+ "lock; xadd"S" %0,(%1)" \
+ : "=r" (ret) \
+ : "r" (dest), "0" (incr) \
+ : "memory" \
+)
+#define WRAP_SUB(S, ret, dest, decr) __asm__ __volatile__( \
+ "lock; xadd"S" %0,(%1)" \
+ : "=r" (ret) \
+ : "r" (dest), "0" (-(decr)) \
+ : "memory" \
+)
+
+#define WRAP_XCHG(S, ret, dest, newval) __asm__ __volatile__( \
+ "lock; xchg"S" %0,(%1)" \
+ : "=r" (ret) \
+ : "r" (dest), "0" (newval) \
+ : "memory" \
+)
+#define WRAP_CMPXCHG(S, ret, dest, oldval, newval) __asm__ __volatile__( \
+ "lock; cmpxchg"S" %2,(%1)" \
+ : "=a" (ret) \
+ : "r" (dest), "r" (newval), "0" (oldval) \
+ : "memory" \
+)
+
+
+enum almemory_order {
+ almemory_order_relaxed,
+ almemory_order_consume,
+ almemory_order_acquire,
+ almemory_order_release,
+ almemory_order_acq_rel,
+ almemory_order_seq_cst
+};
+
+#define ATOMIC(T) struct { T volatile value; }
+
+#define ATOMIC_INIT(_val, _newval) do { (_val)->value = (_newval); } while(0)
+#define ATOMIC_INIT_STATIC(_newval) {(_newval)}
+
+#define ATOMIC_LOAD(_val, _MO) __extension__({ \
+ __typeof((_val)->value) _r = (_val)->value; \
+ __asm__ __volatile__("" ::: "memory"); \
+ _r; \
+})
+#define ATOMIC_STORE(_val, _newval, _MO) do { \
+ __asm__ __volatile__("" ::: "memory"); \
+ (_val)->value = (_newval); \
+} while(0)
+
+#define ATOMIC_ADD(_val, _incr, _MO) __extension__({ \
+ static_assert(sizeof((_val)->value)==4 || sizeof((_val)->value)==8, "Unsupported size!"); \
+ __typeof((_val)->value) _r; \
+ if(sizeof((_val)->value) == 4) WRAP_ADD("l", _r, &(_val)->value, _incr); \
+ else if(sizeof((_val)->value) == 8) WRAP_ADD("q", _r, &(_val)->value, _incr); \
+ _r; \
+})
+#define ATOMIC_SUB(_val, _decr, _MO) __extension__({ \
+ static_assert(sizeof((_val)->value)==4 || sizeof((_val)->value)==8, "Unsupported size!"); \
+ __typeof((_val)->value) _r; \
+ if(sizeof((_val)->value) == 4) WRAP_SUB("l", _r, &(_val)->value, _decr); \
+ else if(sizeof((_val)->value) == 8) WRAP_SUB("q", _r, &(_val)->value, _decr); \
+ _r; \
+})
+
+#define ATOMIC_EXCHANGE(_val, _newval, _MO) __extension__({ \
+ __typeof((_val)->value) _r; \
+ if(sizeof((_val)->value) == 4) WRAP_XCHG("l", _r, &(_val)->value, (_newval)); \
+ else if(sizeof((_val)->value) == 8) WRAP_XCHG("q", _r, &(_val)->value, (_newval)); \
+ _r; \
+})
+#define ATOMIC_COMPARE_EXCHANGE_STRONG(_val, _oldval, _newval, _MO1, _MO2) __extension__({ \
+ __typeof(*(_oldval)) _old = *(_oldval); \
+ if(sizeof((_val)->value) == 4) WRAP_CMPXCHG("l", *(_oldval), &(_val)->value, _old, (_newval)); \
+ else if(sizeof((_val)->value) == 8) WRAP_CMPXCHG("q", *(_oldval), &(_val)->value, _old, (_newval)); \
+ *(_oldval) == _old; \
+})
+
+#define ATOMIC_EXCHANGE_PTR(_val, _newval, _MO) __extension__({ \
+ void *_r; \
+ if(sizeof(void*) == 4) WRAP_XCHG("l", _r, &(_val)->value, (_newval)); \
+ else if(sizeof(void*) == 8) WRAP_XCHG("q", _r, &(_val)->value, (_newval));\
+ _r; \
+})
+#define ATOMIC_COMPARE_EXCHANGE_PTR_STRONG(_val, _oldval, _newval, _MO1, _MO2) __extension__({ \
+ void *_old = *(_oldval); \
+ if(sizeof(void*) == 4) WRAP_CMPXCHG("l", *(_oldval), &(_val)->value, _old, (_newval)); \
+ else if(sizeof(void*) == 8) WRAP_CMPXCHG("q", *(_oldval), &(_val)->value, _old, (_newval)); \
+ *(_oldval) == _old; \
+})
+
+#define ATOMIC_THREAD_FENCE(order) do { \
+ enum { must_be_constant = (order) }; \
+ const int _o = must_be_constant; \
+ if(_o > almemory_order_relaxed) \
+ __asm__ __volatile__("" ::: "memory"); \
+} while(0)
+
+/* Atomics using Windows methods */
+#elif defined(_WIN32)
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+/* NOTE: This mess is *extremely* touchy. It lacks quite a bit of safety
+ * checking due to the lack of multi-statement expressions, typeof(), and C99
+ * compound literals. It is incapable of properly exchanging floats, which get
+ * casted to LONG/int, and could cast away potential warnings.
+ *
+ * Unfortunately, it's the only semi-safe way that doesn't rely on C99 (because
+ * MSVC).
+ */
+
+/* 32-bit atomic fetch-and-add; returns the value held before the add. */
+inline LONG AtomicAdd32(volatile LONG *dest, LONG incr)
+{ return InterlockedExchangeAdd(dest, incr); }
+/* 64-bit atomic fetch-and-add; returns the value held before the add. */
+inline LONGLONG AtomicAdd64(volatile LONGLONG *dest, LONGLONG incr)
+{ return InterlockedExchangeAdd64(dest, incr); }
+/* 32-bit atomic fetch-and-subtract, implemented as adding the negation. */
+inline LONG AtomicSub32(volatile LONG *dest, LONG decr)
+{ return InterlockedExchangeAdd(dest, -decr); }
+/* 64-bit atomic fetch-and-subtract, implemented as adding the negation. */
+inline LONGLONG AtomicSub64(volatile LONGLONG *dest, LONGLONG decr)
+{ return InterlockedExchangeAdd64(dest, -decr); }
+
+/* Atomically replaces *dest with newval; each returns the previous value. */
+inline LONG AtomicSwap32(volatile LONG *dest, LONG newval)
+{ return InterlockedExchange(dest, newval); }
+inline LONGLONG AtomicSwap64(volatile LONGLONG *dest, LONGLONG newval)
+{ return InterlockedExchange64(dest, newval); }
+inline void *AtomicSwapPtr(void *volatile *dest, void *newval)
+{ return InterlockedExchangePointer(dest, newval); }
+
+/* Strong compare-and-swap: attempts to replace *dest (expected to hold
+ * *oldval) with newval. On return, *oldval holds the value actually observed
+ * in *dest; the swap happened iff the function returns true. */
+inline bool CompareAndSwap32(volatile LONG *dest, LONG newval, LONG *oldval)
+{
+    LONG expect = *oldval;
+    *oldval = InterlockedCompareExchange(dest, newval, expect);
+    return *oldval == expect;
+}
+inline bool CompareAndSwap64(volatile LONGLONG *dest, LONGLONG newval, LONGLONG *oldval)
+{
+    LONGLONG expect = *oldval;
+    *oldval = InterlockedCompareExchange64(dest, newval, expect);
+    return *oldval == expect;
+}
+inline bool CompareAndSwapPtr(void *volatile *dest, void *newval, void **oldval)
+{
+    void *expect = *oldval;
+    *oldval = InterlockedCompareExchangePointer(dest, newval, expect);
+    return *oldval == expect;
+}
+
+#define WRAP_ADDSUB(T, _func, _ptr, _amnt) _func((T volatile*)(_ptr), (_amnt))
+#define WRAP_XCHG(T, _func, _ptr, _newval) _func((T volatile*)(_ptr), (_newval))
+#define WRAP_CMPXCHG(T, _func, _ptr, _newval, _oldval) _func((T volatile*)(_ptr), (_newval), (T*)(_oldval))
+
+
+enum almemory_order {
+ almemory_order_relaxed,
+ almemory_order_consume,
+ almemory_order_acquire,
+ almemory_order_release,
+ almemory_order_acq_rel,
+ almemory_order_seq_cst
+};
+
+#define ATOMIC(T) struct { T volatile value; }
+
+#define ATOMIC_INIT(_val, _newval) do { (_val)->value = (_newval); } while(0)
+#define ATOMIC_INIT_STATIC(_newval) {(_newval)}
+
+#define ATOMIC_LOAD(_val, _MO) ((_val)->value)
+#define ATOMIC_STORE(_val, _newval, _MO) do { \
+ (_val)->value = (_newval); \
+} while(0)
+
+/* Deliberately declared but never defined: the unsupported-size branches of
+ * the dispatch macros below reference it so an invalid atomic access becomes
+ * a link-time error instead of silently compiling. (void) makes this a proper
+ * prototype; empty parens would declare unspecified (K&R) parameters. */
+int _al_invalid_atomic_size(void);
+
+#define ATOMIC_ADD(_val, _incr, _MO) \
+ ((sizeof((_val)->value)==4) ? WRAP_ADDSUB(LONG, AtomicAdd32, &(_val)->value, (_incr)) : \
+ (sizeof((_val)->value)==8) ? WRAP_ADDSUB(LONGLONG, AtomicAdd64, &(_val)->value, (_incr)) : \
+ _al_invalid_atomic_size())
+#define ATOMIC_SUB(_val, _decr, _MO) \
+ ((sizeof((_val)->value)==4) ? WRAP_ADDSUB(LONG, AtomicSub32, &(_val)->value, (_decr)) : \
+ (sizeof((_val)->value)==8) ? WRAP_ADDSUB(LONGLONG, AtomicSub64, &(_val)->value, (_decr)) : \
+ _al_invalid_atomic_size())
+
+#define ATOMIC_EXCHANGE(_val, _newval, _MO) \
+ ((sizeof((_val)->value)==4) ? WRAP_XCHG(LONG, AtomicSwap32, &(_val)->value, (_newval)) : \
+ (sizeof((_val)->value)==8) ? WRAP_XCHG(LONGLONG, AtomicSwap64, &(_val)->value, (_newval)) : \
+ (LONG)_al_invalid_atomic_size())
+#define ATOMIC_COMPARE_EXCHANGE_STRONG(_val, _oldval, _newval, _MO1, _MO2) \
+ ((sizeof((_val)->value)==4) ? WRAP_CMPXCHG(LONG, CompareAndSwap32, &(_val)->value, (_newval), (_oldval)) : \
+ (sizeof((_val)->value)==8) ? WRAP_CMPXCHG(LONGLONG, CompareAndSwap64, &(_val)->value, (_newval), (_oldval)) : \
+ (bool)_al_invalid_atomic_size())
+
+#define ATOMIC_EXCHANGE_PTR(_val, _newval, _MO) \
+ ((sizeof((_val)->value)==sizeof(void*)) ? AtomicSwapPtr((void*volatile*)&(_val)->value, (_newval)) : \
+ (void*)_al_invalid_atomic_size())
+#define ATOMIC_COMPARE_EXCHANGE_PTR_STRONG(_val, _oldval, _newval, _MO1, _MO2)\
+ ((sizeof((_val)->value)==sizeof(void*)) ? CompareAndSwapPtr((void*volatile*)&(_val)->value, (_newval), (void**)(_oldval)) : \
+ (bool)_al_invalid_atomic_size())
+
+#define ATOMIC_THREAD_FENCE(order) do { \
+ enum { must_be_constant = (order) }; \
+ const int _o = must_be_constant; \
+ if(_o > almemory_order_relaxed) \
+ _ReadWriteBarrier(); \
+} while(0)
+
+#else
+
+#error "No atomic functions available on this platform!"
+
+#define ATOMIC(T) T
+
+#define ATOMIC_INIT(_val, _newval) ((void)0)
+#define ATOMIC_INIT_STATIC(_newval) (0)
+
+#define ATOMIC_LOAD(...) (0)
+#define ATOMIC_STORE(...) ((void)0)
+
+#define ATOMIC_ADD(...) (0)
+#define ATOMIC_SUB(...) (0)
+
+#define ATOMIC_EXCHANGE(...) (0)
+#define ATOMIC_COMPARE_EXCHANGE_STRONG(...) (0)
+
+#define ATOMIC_THREAD_FENCE(...) ((void)0)
+#endif
+
+/* If no PTR xchg variants are provided, the normal ones can handle it. */
+#ifndef ATOMIC_EXCHANGE_PTR
+#define ATOMIC_EXCHANGE_PTR ATOMIC_EXCHANGE
+#define ATOMIC_COMPARE_EXCHANGE_PTR_STRONG ATOMIC_COMPARE_EXCHANGE_STRONG
+#define ATOMIC_COMPARE_EXCHANGE_PTR_WEAK ATOMIC_COMPARE_EXCHANGE_WEAK
+#endif
+
+/* If no weak cmpxchg is provided (not all systems will have one), substitute a
+ * strong cmpxchg. */
+#ifndef ATOMIC_COMPARE_EXCHANGE_WEAK
+#define ATOMIC_COMPARE_EXCHANGE_WEAK ATOMIC_COMPARE_EXCHANGE_STRONG
+#endif
+#ifndef ATOMIC_COMPARE_EXCHANGE_PTR_WEAK
+#define ATOMIC_COMPARE_EXCHANGE_PTR_WEAK ATOMIC_COMPARE_EXCHANGE_PTR_STRONG
+#endif
+
+/* If no ATOMIC_FLAG is defined, simulate one with an atomic int using exchange
+ * and store ops.
+ */
+#ifndef ATOMIC_FLAG
+#define ATOMIC_FLAG ATOMIC(int)
+#define ATOMIC_FLAG_INIT ATOMIC_INIT_STATIC(0)
+#define ATOMIC_FLAG_TEST_AND_SET(_val, _MO) ATOMIC_EXCHANGE(_val, 1, _MO)
+#define ATOMIC_FLAG_CLEAR(_val, _MO) ATOMIC_STORE(_val, 0, _MO)
+#endif
+
+
+#define ATOMIC_LOAD_SEQ(_val) ATOMIC_LOAD(_val, almemory_order_seq_cst)
+#define ATOMIC_STORE_SEQ(_val, _newval) ATOMIC_STORE(_val, _newval, almemory_order_seq_cst)
+
+#define ATOMIC_ADD_SEQ(_val, _incr) ATOMIC_ADD(_val, _incr, almemory_order_seq_cst)
+#define ATOMIC_SUB_SEQ(_val, _decr) ATOMIC_SUB(_val, _decr, almemory_order_seq_cst)
+
+#define ATOMIC_EXCHANGE_SEQ(_val, _newval) ATOMIC_EXCHANGE(_val, _newval, almemory_order_seq_cst)
+#define ATOMIC_COMPARE_EXCHANGE_STRONG_SEQ(_val, _oldval, _newval) \
+ ATOMIC_COMPARE_EXCHANGE_STRONG(_val, _oldval, _newval, almemory_order_seq_cst, almemory_order_seq_cst)
+#define ATOMIC_COMPARE_EXCHANGE_WEAK_SEQ(_val, _oldval, _newval) \
+ ATOMIC_COMPARE_EXCHANGE_WEAK(_val, _oldval, _newval, almemory_order_seq_cst, almemory_order_seq_cst)
+
+#define ATOMIC_EXCHANGE_PTR_SEQ(_val, _newval) ATOMIC_EXCHANGE_PTR(_val, _newval, almemory_order_seq_cst)
+#define ATOMIC_COMPARE_EXCHANGE_PTR_STRONG_SEQ(_val, _oldval, _newval) \
+ ATOMIC_COMPARE_EXCHANGE_PTR_STRONG(_val, _oldval, _newval, almemory_order_seq_cst, almemory_order_seq_cst)
+#define ATOMIC_COMPARE_EXCHANGE_PTR_WEAK_SEQ(_val, _oldval, _newval) \
+ ATOMIC_COMPARE_EXCHANGE_PTR_WEAK(_val, _oldval, _newval, almemory_order_seq_cst, almemory_order_seq_cst)
+
+
+typedef unsigned int uint;
+typedef ATOMIC(uint) RefCount;
+
+/* Sets the initial count (plain, non-atomic initialization). */
+inline void InitRef(RefCount *ref, uint value)
+{ ATOMIC_INIT(ref, value); }
+/* Returns the current count (sequentially-consistent load). */
+inline uint ReadRef(RefCount *ref)
+{ return ATOMIC_LOAD_SEQ(ref); }
+/* Adds one to the count and returns the new (post-increment) value. */
+inline uint IncrementRef(RefCount *ref)
+{ return ATOMIC_ADD_SEQ(ref, 1)+1; }
+/* Subtracts one from the count and returns the new (post-decrement) value. */
+inline uint DecrementRef(RefCount *ref)
+{ return ATOMIC_SUB_SEQ(ref, 1)-1; }
+
+
+/* WARNING: A livelock is theoretically possible if another thread keeps
+ * changing the head without giving this a chance to actually swap in the new
+ * one (practically impossible with this little code, but...).
+ */
+#define ATOMIC_REPLACE_HEAD(T, _head, _entry) do { \
+ T _first = ATOMIC_LOAD(_head, almemory_order_acquire); \
+ do { \
+ ATOMIC_STORE(&(_entry)->next, _first, almemory_order_relaxed); \
+ } while(ATOMIC_COMPARE_EXCHANGE_PTR_WEAK(_head, &_first, _entry, \
+ almemory_order_acq_rel, almemory_order_acquire) == 0); \
+} while(0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AL_ATOMIC_H */
diff --git a/common/bool.h b/common/bool.h
new file mode 100644
index 00000000..6f714d09
--- /dev/null
+++ b/common/bool.h
@@ -0,0 +1,18 @@
+#ifndef AL_BOOL_H
+#define AL_BOOL_H
+
+#ifdef HAVE_STDBOOL_H
+#include <stdbool.h>
+#endif
+
+#ifndef bool
+#ifdef HAVE_C99_BOOL
+#define bool _Bool
+#else
+#define bool int
+#endif
+#define false 0
+#define true 1
+#endif
+
+#endif /* AL_BOOL_H */
diff --git a/common/math_defs.h b/common/math_defs.h
new file mode 100644
index 00000000..149cf80b
--- /dev/null
+++ b/common/math_defs.h
@@ -0,0 +1,19 @@
+#ifndef AL_MATH_DEFS_H
+#define AL_MATH_DEFS_H
+
+#ifdef HAVE_FLOAT_H
+#include <float.h>
+#endif
+
+#define F_PI (3.14159265358979323846f)
+#define F_PI_2 (1.57079632679489661923f)
+#define F_TAU (6.28318530717958647692f)
+
+#ifndef FLT_EPSILON
+#define FLT_EPSILON (1.19209290e-07f)
+#endif
+
+#define DEG2RAD(x) ((ALfloat)(x) * (F_PI/180.0f))
+#define RAD2DEG(x) ((ALfloat)(x) * (180.0f/F_PI))
+
+#endif /* AL_MATH_DEFS_H */
diff --git a/common/rwlock.h b/common/rwlock.h
new file mode 100644
index 00000000..8e36fa1a
--- /dev/null
+++ b/common/rwlock.h
@@ -0,0 +1,31 @@
+#ifndef AL_RWLOCK_H
+#define AL_RWLOCK_H
+
+#include "bool.h"
+#include "atomic.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* A reader/writer lock built from atomic flags and reference counts.
+ * NOTE(review): field roles are inferred from their names; the actual locking
+ * protocol lives in the corresponding rwlock implementation file -- confirm
+ * there before relying on these descriptions. */
+typedef struct {
+    RefCount read_count;
+    RefCount write_count;
+    ATOMIC_FLAG read_lock;
+    ATOMIC_FLAG read_entry_lock;
+    ATOMIC_FLAG write_lock;
+} RWLock;
+#define RWLOCK_STATIC_INITIALIZE { ATOMIC_INIT_STATIC(0), ATOMIC_INIT_STATIC(0), \
+ ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT }
+
+void RWLockInit(RWLock *lock);
+void ReadLock(RWLock *lock);
+void ReadUnlock(RWLock *lock);
+void WriteLock(RWLock *lock);
+void WriteUnlock(RWLock *lock);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AL_RWLOCK_H */
diff --git a/common/static_assert.h b/common/static_assert.h
new file mode 100644
index 00000000..bf0ce065
--- /dev/null
+++ b/common/static_assert.h
@@ -0,0 +1,21 @@
+#ifndef AL_STATIC_ASSERT_H
+#define AL_STATIC_ASSERT_H
+
+#include <assert.h>
+
+
+#ifndef static_assert
+#ifdef HAVE_C11_STATIC_ASSERT
+#define static_assert _Static_assert
+#else
+/* Pre-C11 fallback: token-pasting helpers used to build unique identifiers. */
+#define CTASTR2(_pre,_post) _pre##_post
+#define CTASTR(_pre,_post) CTASTR2(_pre,_post)
+#if defined(__COUNTER__)
+/* Declares a struct containing a bitfield whose width is !!(_cond) -- 0 or 1.
+ * When the condition is false the width is 0, which is invalid for a *named*
+ * bitfield, forcing a compile error whose message includes the line number.
+ * __COUNTER__ keeps each typedef name unique within a translation unit. */
+#define static_assert(_cond, _msg) typedef struct { int CTASTR(static_assert_failed_at_line_,__LINE__) : !!(_cond); } CTASTR(static_assertion_,__COUNTER__)
+#else
+#define static_assert(_cond, _msg) struct { int CTASTR(static_assert_failed_at_line_,__LINE__) : !!(_cond); }
+#endif
+#endif
+#endif
+
+#endif /* AL_STATIC_ASSERT_H */
diff --git a/common/threads.h b/common/threads.h
new file mode 100644
index 00000000..c2848ee7
--- /dev/null
+++ b/common/threads.h
@@ -0,0 +1,237 @@
+#ifndef AL_THREADS_H
+#define AL_THREADS_H
+
+#include <time.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Thread-API result codes, modeled after C11 <threads.h> thrd_* values. */
+enum {
+    althrd_success = 0,
+    althrd_error,
+    althrd_nomem,
+    althrd_timedout,
+    althrd_busy
+};
+
+/* Mutex type flags, modeled after C11 mtx_plain/mtx_recursive/mtx_timed. */
+enum {
+    almtx_plain = 0,
+    almtx_recursive = 1,
+    almtx_timed = 2
+};
+
+/* Thread entry-point and thread-specific-storage destructor signatures. */
+typedef int (*althrd_start_t)(void*);
+typedef void (*altss_dtor_t)(void*);
+
+
+/* Time-base constant, analogous to C11's TIME_UTC; presumably the base
+ * argument for altimespec_get() declared below. */
+#define AL_TIME_UTC 1
+
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+
+#ifndef HAVE_STRUCT_TIMESPEC
+struct timespec {
+ time_t tv_sec;
+ long tv_nsec;
+};
+#endif
+
+typedef DWORD althrd_t;
+typedef CRITICAL_SECTION almtx_t;
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0600
+typedef CONDITION_VARIABLE alcnd_t;
+#else
+typedef struct { void *Ptr; } alcnd_t;
+#endif
+typedef DWORD altss_t;
+typedef LONG alonce_flag;
+
+#define AL_ONCE_FLAG_INIT 0
+
+int althrd_sleep(const struct timespec *ts, struct timespec *rem);
+void alcall_once(alonce_flag *once, void (*callback)(void));
+
+
+/* Returns the ID of the calling thread. */
+inline althrd_t althrd_current(void)
+{ return GetCurrentThreadId(); }
+
+/* Nonzero if the two thread IDs name the same thread. */
+inline int althrd_equal(althrd_t thr0, althrd_t thr1)
+{ return thr0 == thr1; }
+
+/* Ends the calling thread with the given result code. */
+inline void althrd_exit(int res)
+{ ExitThread(res); }
+
+/* Gives up the remainder of the timeslice to another ready thread. */
+inline void althrd_yield(void)
+{ SwitchToThread(); }
+
+
+/* Locks mtx, blocking until the critical section is acquired. */
+inline int almtx_lock(almtx_t *mtx)
+{
+    if(mtx == NULL)
+        return althrd_error;
+    EnterCriticalSection(mtx);
+    return althrd_success;
+}
+
+/* Releases a critical section previously entered by this thread. */
+inline int almtx_unlock(almtx_t *mtx)
+{
+    if(mtx == NULL)
+        return althrd_error;
+    LeaveCriticalSection(mtx);
+    return althrd_success;
+}
+
+/* Attempts to enter the critical section without blocking. */
+inline int almtx_trylock(almtx_t *mtx)
+{
+    if(mtx == NULL)
+        return althrd_error;
+    return TryEnterCriticalSection(mtx) ? althrd_success : althrd_busy;
+}
+
+
+/* Reads the calling thread's value for the given TLS slot. */
+inline void *altss_get(altss_t tss_id)
+{ return TlsGetValue(tss_id); }
+
+/* Stores val in the calling thread's TLS slot. */
+inline int altss_set(altss_t tss_id, void *val)
+{ return (TlsSetValue(tss_id, val) != 0) ? althrd_success : althrd_error; }
+
+#else
+
+#include <stdint.h>
+#include <errno.h>
+#include <pthread.h>
+
+
+typedef pthread_t althrd_t;
+typedef pthread_mutex_t almtx_t;
+typedef pthread_cond_t alcnd_t;
+typedef pthread_key_t altss_t;
+typedef pthread_once_t alonce_flag;
+
+#define AL_ONCE_FLAG_INIT PTHREAD_ONCE_INIT
+
+
+/* Returns the calling thread's handle. */
+inline althrd_t althrd_current(void)
+{ return pthread_self(); }
+
+/* Nonzero if the two handles refer to the same thread. */
+inline int althrd_equal(althrd_t thr0, althrd_t thr1)
+{ return pthread_equal(thr0, thr1); }
+
+/* Ends the calling thread, passing res as its exit value. */
+inline void althrd_exit(int res)
+{ pthread_exit((void*)(intptr_t)res); }
+
+/* Yields the processor to another runnable thread. */
+inline void althrd_yield(void)
+{ sched_yield(); }
+
+/* Sleeps for the duration in ts. Returns 0 on completion, -1 if interrupted
+ * by a signal (storing the remaining time in rem, if given), or -2 on any
+ * other error. errno is cleared on failure. */
+inline int althrd_sleep(const struct timespec *ts, struct timespec *rem)
+{
+    int err;
+    if(nanosleep(ts, rem) == 0)
+        return 0;
+    err = (errno == EINTR) ? -1 : -2;
+    errno = 0;
+    return err;
+}
+
+
+/* Locks mtx, blocking until it is acquired. */
+inline int almtx_lock(almtx_t *mtx)
+{ return (pthread_mutex_lock(mtx) == 0) ? althrd_success : althrd_error; }
+
+/* Unlocks a mutex previously locked by this thread. */
+inline int almtx_unlock(almtx_t *mtx)
+{ return (pthread_mutex_unlock(mtx) == 0) ? althrd_success : althrd_error; }
+
+/* Attempts to lock mtx without blocking. */
+inline int almtx_trylock(almtx_t *mtx)
+{
+    int res = pthread_mutex_trylock(mtx);
+    if(res == 0) return althrd_success;
+    if(res == EBUSY) return althrd_busy;
+    return althrd_error;
+}
+
+
+/* Reads the calling thread's value for the given TSS key (NULL if unset). */
+inline void *altss_get(altss_t tss_id)
+{ return pthread_getspecific(tss_id); }
+
+/* Stores val as the calling thread's value for the given TSS key. */
+inline int altss_set(altss_t tss_id, void *val)
+{ return (pthread_setspecific(tss_id, val) == 0) ? althrd_success : althrd_error; }
+
+
+/* Runs callback exactly once across all threads using the given flag. */
+inline void alcall_once(alonce_flag *once, void (*callback)(void))
+{ pthread_once(once, callback); }
+
+#endif
+
+
+/* Out-of-line thread operations (defined elsewhere in the project). */
+int althrd_create(althrd_t *thr, althrd_start_t func, void *arg);
+int althrd_detach(althrd_t thr);
+/* Joins thr; if res is non-NULL it presumably receives the thread's result --
+ * confirm against the implementation. */
+int althrd_join(althrd_t thr, int *res);
+/* Sets a debugger-visible thread name, where the platform supports it. */
+void althrd_setname(althrd_t thr, const char *name);
+
+/* Mutex setup/teardown; type is a combination of the almtx_* flags above. */
+int almtx_init(almtx_t *mtx, int type);
+void almtx_destroy(almtx_t *mtx);
+/* Locks with a timeout; ts is presumably an absolute AL_TIME_UTC deadline,
+ * as with C11 mtx_timedlock -- verify in the implementation. */
+int almtx_timedlock(almtx_t *mtx, const struct timespec *ts);
+
+/* Condition-variable operations, mirroring C11 cnd_*. */
+int alcnd_init(alcnd_t *cond);
+int alcnd_signal(alcnd_t *cond);
+int alcnd_broadcast(alcnd_t *cond);
+int alcnd_wait(alcnd_t *cond, almtx_t *mtx);
+int alcnd_timedwait(alcnd_t *cond, almtx_t *mtx, const struct timespec *time_point);
+void alcnd_destroy(alcnd_t *cond);
+
+/* Thread-specific storage; callback presumably runs on non-NULL values at
+ * thread exit, where supported -- confirm per platform. */
+int altss_create(altss_t *tss_id, altss_dtor_t callback);
+void altss_delete(altss_t tss_id);
+
+/* Fills *ts with the current time for the given base (see AL_TIME_UTC). */
+int altimespec_get(struct timespec *ts, int base);
+
+/* Sleeps the calling thread for at least nsec nanoseconds. */
+void al_nssleep(unsigned long nsec);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AL_THREADS_H */
diff --git a/common/uintmap.h b/common/uintmap.h
new file mode 100644
index 00000000..f70d99fd
--- /dev/null
+++ b/common/uintmap.h
@@ -0,0 +1,46 @@
+#ifndef AL_UINTMAP_H
+#define AL_UINTMAP_H
+
+#include "AL/al.h"
+#include "rwlock.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* A map from ALuint keys to object pointers, guarded by a reader/writer
+ * lock. */
+typedef struct UIntMap {
+    ALuint *keys;
+    /* Shares memory with keys. */
+    ALvoid **values;
+
+    ALsizei size;     /* entries currently stored -- inferred from name */
+    ALsizei capacity; /* entries allocated -- inferred from name */
+    ALsizei limit;    /* maximum allowed size -- inferred from name */
+    RWLock lock;
+} UIntMap;
+/* Static initializers: an empty map whose size is capped at _n (the default
+ * uses ~0, presumably meaning effectively unlimited). */
+#define UINTMAP_STATIC_INITIALIZE_N(_n) { NULL, NULL, 0, 0, (_n), RWLOCK_STATIC_INITIALIZE }
+#define UINTMAP_STATIC_INITIALIZE UINTMAP_STATIC_INITIALIZE_N(~0)
+
+void InitUIntMap(UIntMap *map, ALsizei limit);
+void ResetUIntMap(UIntMap *map);
+ALenum InsertUIntMapEntry(UIntMap *map, ALuint key, ALvoid *value);
+ALenum InsertUIntMapEntryNoLock(UIntMap *map, ALuint key, ALvoid *value);
+ALvoid *RemoveUIntMapKey(UIntMap *map, ALuint key);
+ALvoid *RemoveUIntMapKeyNoLock(UIntMap *map, ALuint key);
+ALvoid *LookupUIntMapKey(UIntMap *map, ALuint key);
+ALvoid *LookupUIntMapKeyNoLock(UIntMap *map, ALuint key);
+
+/* Convenience wrappers for taking/releasing the map's reader/writer lock. */
+inline void LockUIntMapRead(UIntMap *map)
+{
+    ReadLock(&map->lock);
+}
+inline void UnlockUIntMapRead(UIntMap *map)
+{
+    ReadUnlock(&map->lock);
+}
+inline void LockUIntMapWrite(UIntMap *map)
+{
+    WriteLock(&map->lock);
+}
+inline void UnlockUIntMapWrite(UIntMap *map)
+{
+    WriteUnlock(&map->lock);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AL_UINTMAP_H */