#ifndef AL_ATOMIC_H
#define AL_ATOMIC_H
#include "static_assert.h"
#include "bool.h"
#ifdef __GNUC__
/* This helps cast away the const-ness of a pointer without accidentally
* changing the pointer type. This is necessary due to Clang's inability to use
* atomic_load on a const _Atomic variable.
*/
#define CONST_CAST(T, V) __extension__({ \
const T _tmp = (V); \
(T)_tmp; \
})
#else
#define CONST_CAST(T, V) ((T)(V))
#endif
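/* A minimal usage sketch (illustrative only; LoadValue and ptr are
 * hypothetical names, and C11 atomics are assumed to be available):
 *
 *     int LoadValue(const _Atomic(int) *ptr)
 *     { return atomic_load(CONST_CAST(_Atomic(int)*, ptr)); }
 *
 * The GNU version first assigns through a pointer-to-const of the requested
 * type, so passing a pointer of an unrelated type fails to compile instead of
 * being silently reinterpreted by the cast.
 */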
#ifdef HAVE_C11_ATOMIC
#ifdef __cplusplus
/* C++11 doesn't guarantee compatibility with C11 atomics. So instead, pull
 * the C++11 atomic declarations into the global namespace to emulate C11.
 * There's no standard guarantee of ABI compatibility, but it's the desired
 * behavior and mostly works in practice. See:
 *
 * http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0943r1.html
 *
 * Any alignment issues arising from this can be fixed with explicit alignas()
 * specifiers on the affected variables.
 *
 * Only do this when C11 atomics are supported in C, since MSVC and pre-C11
 * compilers may use something significantly different in C despite supporting
 * C++11 atomics.
 */
#include <atomic>
#define _Atomic(T) std::atomic<T>
using std::memory_order;
using std::memory_order_relaxed;
using std::memory_order_consume;
using std::memory_order_acquire;
using std::memory_order_release;
using std::memory_order_acq_rel;
using std::memory_order_seq_cst;
using std::atomic_init;
using std::atomic_load_explicit;
using std::atomic_store_explicit;
using std::atomic_fetch_add_explicit;
using std::atomic_fetch_sub_explicit;
using std::atomic_exchange_explicit;
using std::atomic_compare_exchange_strong_explicit;
using std::atomic_compare_exchange_weak_explicit;
using std::atomic_thread_fence;
#else
#include <stdatomic.h>
#endif /* __cplusplus */
#endif /* HAVE_C11_ATOMIC */
#ifdef __cplusplus
extern "C" {
#endif
/* Atomics using C11 */
#ifdef HAVE_C11_ATOMIC
#define almemory_order memory_order
#define almemory_order_relaxed memory_order_relaxed
#define almemory_order_consume memory_order_consume
#define almemory_order_acquire memory_order_acquire
#define almemory_order_release memory_order_release
#define almemory_order_acq_rel memory_order_acq_rel
#define almemory_order_seq_cst memory_order_seq_cst
#define ATOMIC(T) _Atomic(T)
#define ATOMIC_INIT atomic_init
#define ATOMIC_INIT_STATIC ATOMIC_VAR_INIT
#define ATOMIC_LOAD atomic_load_explicit
#define ATOMIC_STORE atomic_store_explicit
#define ATOMIC_ADD atomic_fetch_add_explicit
#define ATOMIC_SUB atomic_fetch_sub_explicit
#define ATOMIC_EXCHANGE atomic_exchange_explicit
#define ATOMIC_COMPARE_EXCHANGE_STRONG atomic_compare_exchange_strong_explicit
#define ATOMIC_COMPARE_EXCHANGE_WEAK atomic_compare_exchange_weak_explicit
#define ATOMIC_THREAD_FENCE atomic_thread_fence
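/* A brief usage sketch of the wrappers above (illustrative; the variable and
 * ordering choices here are examples, not requirements):
 *
 *     ATOMIC(unsigned int) counter = ATOMIC_INIT_STATIC(0u);
 *
 *     ATOMIC_STORE(&counter, 5u, almemory_order_release);
 *     unsigned int prev = ATOMIC_ADD(&counter, 1u, almemory_order_acq_rel);
 *     unsigned int cur = ATOMIC_LOAD(&counter, almemory_order_acquire);
 *
 * Note that ATOMIC_ADD and ATOMIC_SUB are fetch-and-modify operations,
 * returning the value held *before* the modification.
 */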
/* Atomics using GCC intrinsics */
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) && !defined(__QNXNTO__)
enum almemory_order {
almemory_order_relaxed,
almemory_order_consume,
almemory_order_acquire,
almemory_order_release,
almemory_order_acq_rel,
almemory_order_seq_cst
};
#define ATOMIC(T) struct { T volatile value; }
#define ATOMIC_INIT(_val, _newval) do { (_val)->value = (_newval); } while(0)
#define ATOMIC_INIT_STATIC(_newval) {(_newval)}
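/* Note: the load/store wrappers below pair a volatile access with an empty
 * asm "memory" clobber. That is a compiler-only barrier: it keeps the
 * compiler from reordering or caching the access, but emits no hardware fence
 * instruction.
 */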
#define ATOMIC_LOAD(_val, _MO) __extension__({ \
__typeof((_val)->value) _r = (_val)->value; \
__asm__ __volatile__("" ::: "memory"); \
_r; \
})
#define ATOMIC_STORE(_val, _newval, _MO) do { \
__asm__ __volatile__("" ::: "memory"); \
(_val)->value = (_newval); \
} while(0)
#define ATOMIC_ADD(_val, _incr, _MO) __sync_fetch_and_add(&(_val)->value, (_incr))
#define ATOMIC_SUB(_val, _decr, _MO) __sync_fetch_and_sub(&(_val)->value, (_decr))
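/* GCC documents __sync_lock_test_and_set() as an acquire barrier only, so a
 * compiler barrier is issued beforehand to approximate a full exchange.
 */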
#define ATOMIC_EXCHANGE(_val, _newval, _MO) __extension__({ \
__asm__ __volatile__("" ::: "memory"); \
__sync_lock_test_and_set(&(_val)->value, (_newval)); \
})
#define ATOMIC_COMPARE_EXCHANGE_STRONG(_val, _oldval, _newval, _MO1, _MO2) __extension__({ \
__typeof(*(_oldval)) _o = *(_oldval); \
*(_oldval) = __sync_val_compare_and_swap(&(_val)->value, _o, (_newval)); \
*(_oldval) == _o; \
})
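/* The enum member forces 'order' to be an integer constant expression; a
 * non-constant argument fails to compile instead of selecting a fence at
 * runtime.
 */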
#define ATOMIC_THREAD_FENCE(order) do { \
enum { must_be_constant = (order) }; \
const int _o = must_be_constant; \
if(_o > almemory_order_relaxed) \
__asm__ __volatile__("" ::: "memory"); \
} while(0)
/* Atomics using x86/x86-64 GCC inline assembly */
#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define WRAP_ADD(S, ret, dest, incr) __asm__ __volatile__( \
"lock; xadd" S " %0,(%1)" \
: "=r" (ret) \
: "r" (dest), "0" (incr) \
: "memory" \
)
#define WRAP_SUB(S, ret, dest, decr) __asm__ __volatile__( \
"lock; xadd" S " %0,(%1)" \
: "=r" (ret) \
: "r" (dest), "0" (-(decr)) \
: "memory" \
)
#define WRAP_XCHG(S, ret, dest, newval) __asm__ __volatile__( \
"lock; xchg" S " %0,(%1)" \
: "=r" (ret) \
: "r" (dest), "0" (newval) \
: "memory" \
)
#define WRAP_CMPXCHG(S, ret, dest, oldval, newval) __asm__ __volatile__( \
"lock; cmpxchg" S " %2,(%1)" \
: "=a" (ret) \
: "r" (dest), "r" (newval), "0" (oldval) \
: "memory" \
)
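/* Each wrapper above issues a single locked x86 instruction: xadd leaves the
 * destination's prior value in the source register, xchg swaps a register
 * with memory, and cmpxchg stores the new value only if the accumulator
 * matches the destination, with the destination's old value ending up in the
 * accumulator either way. The "S" argument is the operand-size suffix, "l"
 * for 32-bit or "q" for 64-bit.
 */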
enum almemory_order {
almemory_order_relaxed,
almemory_order_consume,
almemory_order_acquire,
almemory_order_release,
almemory_order_acq_rel,
almemory_order_seq_cst
};
#define ATOMIC(T) struct { T volatile value; }
#define ATOMIC_INIT(_val, _newval) do { (_val)->value = (_newval); } while(0)
#define ATOMIC_INIT_STATIC(_newval) {(_newval)}
#define ATOMIC_LOAD(_val, _MO) __extension__({ \
__typeof((_val)->value) _r = (_val)->value; \
__asm__ __volatile__("" ::: "memory"); \
_r; \
})
#define ATOMIC_STORE(_val, _newval, _MO) do { \
__asm__ __volatile__("" ::: "memory"); \
(_val)->value = (_newval); \
} while(0)
#define ATOMIC_ADD(_val, _incr, _MO) __extension__({ \
static_assert(sizeof((_val)->value)==4 || sizeof((_val)->value)==8, "Unsupported size!"); \
__typeof((_val)->value) _r; \
if(sizeof((_val)->value) == 4) WRAP_ADD("l", _r, &(_val)->value, _incr); \
else if(sizeof((_val)->value) == 8) WRAP_ADD("q", _r, &(_val)->value, _incr); \
_r; \
})
#define ATOMIC_SUB(_val, _decr, _MO) __extension__({ \
static_assert(sizeof((_val)->value)==4 || sizeof((_val)->value)==8, "Unsupported size!"); \
__typeof((_val)->value) _r; \
if(sizeof((_val)->value) == 4) WRAP_SUB("l", _r, &(_val)->value, _decr); \
else if(sizeof((_val)->value) == 8) WRAP_SUB("q", _r, &(_val)->value, _decr); \
_r; \
})
#define ATOMIC_EXCHANGE(_val, _newval, _MO) __extension__({ \
    static_assert(sizeof((_val)->value)==4 || sizeof((_val)->value)==8, "Unsupported size!"); \
    __typeof((_val)->value) _r; \
    if(sizeof((_val)->value) == 4) WRAP_XCHG("l", _r, &(_val)->value, (_newval)); \
    else if(sizeof((_val)->value) == 8) WRAP_XCHG("q", _r, &(_val)->value, (_newval)); \
    _r; \
})
#define ATOMIC_COMPARE_EXCHANGE_STRONG(_val, _oldval, _newval, _MO1, _MO2) __extension__({ \
    static_assert(sizeof((_val)->value)==4 || sizeof((_val)->value)==8, "Unsupported size!"); \
    __typeof(*(_oldval)) _old = *(_oldval); \
    if(sizeof((_val)->value) == 4) WRAP_CMPXCHG("l", *(_oldval), &(_val)->value, _old, (_newval)); \
    else if(sizeof((_val)->value) == 8) WRAP_CMPXCHG("q", *(_oldval), &(_val)->value, _old, (_newval)); \
    *(_oldval) == _old; \
})
#define ATOMIC_EXCHANGE_PTR(_val, _newval, _MO) __extension__({ \
void *_r; \
if(sizeof(void*) == 4) WRAP_XCHG("l", _r, &(_val)->value, (_newval)); \
else if(sizeof(void*) == 8) WRAP_XCHG("q", _r, &(_val)->value, (_newval));\
_r; \
})
#define ATOMIC_COMPARE_EXCHANGE_PTR_STRONG(_val, _oldval, _newval, _MO1, _MO2) __extension__({ \
void *_old = *(_oldval); \
if(sizeof(void*) == 4) WRAP_CMPXCHG("l", *(_oldval), &(_val)->value, _old, (_newval)); \
else if(sizeof(void*) == 8) WRAP_CMPXCHG("q", *(_oldval), &(_val)->value, _old, (_newval)); \
*(_oldval) == _old; \
})
#define ATOMIC_THREAD_FENCE(order) do { \
enum { must_be_constant = (order) }; \
const int _o = must_be_constant; \
if(_o > almemory_order_relaxed) \
__asm__ __volatile__("" ::: "memory"); \
} while(0)
/* Atomics using Windows methods */
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
/* NOTE: This mess is *extremely* touchy. It lacks quite a bit of safety
* checking due to the lack of multi-statement expressions, typeof(), and C99
* compound literals. It is incapable of properly exchanging floats, which get
 * cast to LONG/int, and could cast away potential warnings.
*
* Unfortunately, it's the only semi-safe way that doesn't rely on C99 (because
* MSVC).
*/
inline LONG AtomicAdd32(volatile LONG *dest, LONG incr)
{
return InterlockedExchangeAdd(dest, incr);
}
inline LONGLONG AtomicAdd64(volatile LONGLONG *dest, LONGLONG incr)
{
return InterlockedExchangeAdd64(dest, incr);
}
inline LONG AtomicSub32(volatile LONG *dest, LONG decr)
{
return InterlockedExchangeAdd(dest, -decr);
}
inline LONGLONG AtomicSub64(volatile LONGLONG *dest, LONGLONG decr)
{
return InterlockedExchangeAdd64(dest, -decr);
}
inline LONG AtomicSwap32(volatile LONG *dest, LONG newval)
{
return InterlockedExchange(dest, newval);
}
inline LONGLONG AtomicSwap64(volatile LONGLONG *dest, LONGLONG newval)
{
return InterlockedExchange64(dest, newval);
}
inline void *AtomicSwapPtr(void *volatile *dest, void *newval)
{
return InterlockedExchangePointer(dest, newval);
}
inline bool CompareAndSwap32(volatile LONG *dest, LONG newval, LONG *oldval)
{
LONG old = *oldval;
*oldval = InterlockedCompareExchange(dest, newval, *oldval);
return old == *oldval;
}
inline bool CompareAndSwap64(volatile LONGLONG *dest, LONGLONG newval, LONGLONG *oldval)
{
LONGLONG old = *oldval;
*oldval = InterlockedCompareExchange64(dest, newval, *oldval);
return old == *oldval;
}
inline bool CompareAndSwapPtr(void *volatile *dest, void *newval, void **oldval)
{
void *old = *oldval;
*oldval = InterlockedCompareExchangePointer(dest, newval, *oldval);
return old == *oldval;
}
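/* The CompareAndSwap* helpers mirror C11 semantics: *oldval is updated with
 * the value actually found in *dest, and the return value reports whether the
 * swap took place.
 */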
#define WRAP_ADDSUB(T, _func, _ptr, _amnt) _func((T volatile*)(_ptr), (_amnt))
#define WRAP_XCHG(T, _func, _ptr, _newval) _func((T volatile*)(_ptr), (_newval))
#define WRAP_CMPXCHG(T, _func, _ptr, _newval, _oldval) _func((T volatile*)(_ptr), (_newval), (T*)(_oldval))
enum almemory_order {
almemory_order_relaxed,
almemory_order_consume,
almemory_order_acquire,
almemory_order_release,
almemory_order_acq_rel,
almemory_order_seq_cst
};
#define ATOMIC(T) struct { T volatile value; }
#define ATOMIC_INIT(_val, _newval) do { (_val)->value = (_newval); } while(0)
#define ATOMIC_INIT_STATIC(_newval) {(_newval)}
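/* Plain volatile accesses suffice for load/store here because MSVC gives
 * volatile reads acquire semantics and volatile writes release semantics by
 * default on x86/x64 (/volatile:ms). Targets defaulting to /volatile:iso,
 * such as ARM, would need explicit barriers.
 */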
#define ATOMIC_LOAD(_val, _MO) ((_val)->value)
#define ATOMIC_STORE(_val, _newval, _MO) do { \
(_val)->value = (_newval); \
} while(0)
int _al_invalid_atomic_size(); /* not defined; misuse becomes a link error */
void *_al_invalid_atomic_ptr_size(); /* not defined; misuse becomes a link error */
#define ATOMIC_ADD(_val, _incr, _MO) \
((sizeof((_val)->value)==4) ? WRAP_ADDSUB(LONG, AtomicAdd32, &(_val)->value, (_incr)) : \
(sizeof((_val)->value)==8) ? WRAP_ADDSUB(LONGLONG, AtomicAdd64, &(_val)->value, (_incr)) : \
_al_invalid_atomic_size())
#define ATOMIC_SUB(_val, _decr, _MO) \
((sizeof((_val)->value)==4) ? WRAP_ADDSUB(LONG, AtomicSub32, &(_val)->value, (_decr)) : \
(sizeof((_val)->value)==8) ? WRAP_ADDSUB(LONGLONG, AtomicSub64, &(_val)->value, (_decr)) : \
_al_invalid_atomic_size())
#define ATOMIC_EXCHANGE(_val, _newval, _MO) \
((sizeof((_val)->value)==4) ? WRAP_XCHG(LONG, AtomicSwap32, &(_val)->value, (_newval)) : \
(sizeof((_val)->value)==8) ? WRAP_XCHG(LONGLONG, AtomicSwap64, &(_val)->value, (_newval)) : \
(LONG)_al_invalid_atomic_size())
#define ATOMIC_COMPARE_EXCHANGE_STRONG(_val, _oldval, _newval, _MO1, _MO2) \
((sizeof((_val)->value)==4) ? WRAP_CMPXCHG(LONG, CompareAndSwap32, &(_val)->value, (_newval), (_oldval)) : \
(sizeof((_val)->value)==8) ? WRAP_CMPXCHG(LONGLONG, CompareAndSwap64, &(_val)->value, (_newval), (_oldval)) : \
(bool)_al_invalid_atomic_size())
#define ATOMIC_EXCHANGE_PTR(_val, _newval, _MO) \
((sizeof((_val)->value)==sizeof(void*)) ? AtomicSwapPtr((void*volatile*)&(_val)->value, (_newval)) : \
_al_invalid_atomic_ptr_size())
#define ATOMIC_COMPARE_EXCHANGE_PTR_STRONG(_val, _oldval, _newval, _MO1, _MO2)\
((sizeof((_val)->value)==sizeof(void*)) ? CompareAndSwapPtr((void*volatile*)&(_val)->value, (_newval), (void**)(_oldval)) : \
(bool)_al_invalid_atomic_size())
#define ATOMIC_THREAD_FENCE(order) do { \
enum { must_be_constant = (order) }; \
const int _o = must_be_constant; \
if(_o > almemory_order_relaxed) \
_ReadWriteBarrier(); \
} while(0)
#else
#error "No atomic functions available on this platform!"
#define ATOMIC(T) T
#define ATOMIC_INIT(_val, _newval) ((void)0)
#define ATOMIC_INIT_STATIC(_newval) (0)
#define ATOMIC_LOAD(...) (0)
#define ATOMIC_STORE(...) ((void)0)
#define ATOMIC_ADD(...) (0)
#define ATOMIC_SUB(...) (0)
#define ATOMIC_EXCHANGE(...) (0)
#define ATOMIC_COMPARE_EXCHANGE_STRONG(...) (0)
#define ATOMIC_THREAD_FENCE(...) ((void)0)
#endif
/* If no PTR xchg variants are provided, the normal ones can handle it. */
#ifndef ATOMIC_EXCHANGE_PTR
#define ATOMIC_EXCHANGE_PTR ATOMIC_EXCHANGE
#define ATOMIC_COMPARE_EXCHANGE_PTR_STRONG ATOMIC_COMPARE_EXCHANGE_STRONG
#define ATOMIC_COMPARE_EXCHANGE_PTR_WEAK ATOMIC_COMPARE_EXCHANGE_WEAK
#endif
/* If no weak cmpxchg is provided (not all systems will have one), substitute a
* strong cmpxchg. */
#ifndef ATOMIC_COMPARE_EXCHANGE_WEAK
#define ATOMIC_COMPARE_EXCHANGE_WEAK ATOMIC_COMPARE_EXCHANGE_STRONG
#endif
#ifndef ATOMIC_COMPARE_EXCHANGE_PTR_WEAK
#define ATOMIC_COMPARE_EXCHANGE_PTR_WEAK ATOMIC_COMPARE_EXCHANGE_PTR_STRONG
#endif
#define ATOMIC_LOAD_SEQ(_val) ATOMIC_LOAD(_val, almemory_order_seq_cst)
#define ATOMIC_STORE_SEQ(_val, _newval) ATOMIC_STORE(_val, _newval, almemory_order_seq_cst)
#define ATOMIC_ADD_SEQ(_val, _incr) ATOMIC_ADD(_val, _incr, almemory_order_seq_cst)
#define ATOMIC_SUB_SEQ(_val, _decr) ATOMIC_SUB(_val, _decr, almemory_order_seq_cst)
#define ATOMIC_EXCHANGE_SEQ(_val, _newval) ATOMIC_EXCHANGE(_val, _newval, almemory_order_seq_cst)
#define ATOMIC_COMPARE_EXCHANGE_STRONG_SEQ(_val, _oldval, _newval) \
ATOMIC_COMPARE_EXCHANGE_STRONG(_val, _oldval, _newval, almemory_order_seq_cst, almemory_order_seq_cst)
#define ATOMIC_COMPARE_EXCHANGE_WEAK_SEQ(_val, _oldval, _newval) \
ATOMIC_COMPARE_EXCHANGE_WEAK(_val, _oldval, _newval, almemory_order_seq_cst, almemory_order_seq_cst)
#define ATOMIC_EXCHANGE_PTR_SEQ(_val, _newval) ATOMIC_EXCHANGE_PTR(_val, _newval, almemory_order_seq_cst)
#define ATOMIC_COMPARE_EXCHANGE_PTR_STRONG_SEQ(_val, _oldval, _newval) \
ATOMIC_COMPARE_EXCHANGE_PTR_STRONG(_val, _oldval, _newval, almemory_order_seq_cst, almemory_order_seq_cst)
#define ATOMIC_COMPARE_EXCHANGE_PTR_WEAK_SEQ(_val, _oldval, _newval) \
ATOMIC_COMPARE_EXCHANGE_PTR_WEAK(_val, _oldval, _newval, almemory_order_seq_cst, almemory_order_seq_cst)
typedef unsigned int uint;
typedef ATOMIC(uint) RefCount;
inline void InitRef(RefCount *ptr, uint value)
{ ATOMIC_INIT(ptr, value); }
inline uint ReadRef(RefCount *ptr)
{ return ATOMIC_LOAD(ptr, almemory_order_acquire); }
inline uint IncrementRef(RefCount *ptr)
{ return ATOMIC_ADD(ptr, 1u, almemory_order_acq_rel)+1; }
inline uint DecrementRef(RefCount *ptr)
{ return ATOMIC_SUB(ptr, 1u, almemory_order_acq_rel)-1; }
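/* A usage sketch (illustrative; Object and its functions are hypothetical,
 * and the object is assumed to be malloc-allocated):
 *
 *     typedef struct Object {
 *         RefCount ref;
 *         ...
 *     } Object;
 *
 *     static void Object_ref(Object *obj)
 *     { IncrementRef(&obj->ref); }
 *
 *     static void Object_unref(Object *obj)
 *     {
 *         if(DecrementRef(&obj->ref) == 0)
 *             free(obj);
 *     }
 *
 * IncrementRef and DecrementRef return the new count, so the last unref sees
 * 0 and can safely release the object.
 */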
/* WARNING: A livelock is theoretically possible if another thread keeps
 * changing the head without giving this thread a chance to actually swap in
 * the new one (practically impossible with this little code, but...).
 */
#define ATOMIC_REPLACE_HEAD(T, _head, _entry) do { \
T _first = ATOMIC_LOAD(_head, almemory_order_acquire); \
do { \
ATOMIC_STORE(&(_entry)->next, _first, almemory_order_relaxed); \
} while(ATOMIC_COMPARE_EXCHANGE_PTR_WEAK(_head, &_first, _entry, \
almemory_order_acq_rel, almemory_order_acquire) == 0); \
} while(0)
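/* A usage sketch (illustrative; Node, ListHead, and PushNode are hypothetical
 * names): lock-free push onto a singly-linked list. The node's next field
 * must itself be an atomic pointer for the macro's accesses to apply.
 *
 *     typedef struct Node {
 *         ATOMIC(struct Node*) next;
 *         int data;
 *     } Node;
 *
 *     static ATOMIC(Node*) ListHead = ATOMIC_INIT_STATIC(NULL);
 *
 *     static void PushNode(Node *node)
 *     { ATOMIC_REPLACE_HEAD(Node*, &ListHead, node); }
 */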
#ifdef __cplusplus
}
#endif
#endif /* AL_ATOMIC_H */