aboutsummaryrefslogtreecommitdiffstats
path: root/Alc/alcRing.c
diff options
context:
space:
mode:
authorChris Robinson <[email protected]>2017-04-21 16:58:55 -0700
committerChris Robinson <[email protected]>2017-04-21 16:58:55 -0700
commita0a41921fc28a1ff76a5850936cb32e912887735 (patch)
tree423c01d929f955e4f12c8188036507d6b88c294d /Alc/alcRing.c
parentd85177cd3e687f19e080fde68642d1f7e080f129 (diff)
Remove const from _Atomic vars to make Clang happy
Clang does not allow using C11's atomic_load on const _Atomic variables. Previously it just disabled use of C11 atomics if atomic_load didn't work on a const _Atomic variable, but I think I'd prefer to have Clang use C11 atomics for the added features (more explicit memory ordering) even if it means a few instances of breaking const.
Diffstat (limited to 'Alc/alcRing.c')
-rw-r--r--Alc/alcRing.c20
1 file changed, 12 insertions, 8 deletions
diff --git a/Alc/alcRing.c b/Alc/alcRing.c
index 2cb001bf..d72b34f1 100644
--- a/Alc/alcRing.c
+++ b/Alc/alcRing.c
@@ -103,16 +103,16 @@ void ll_ringbuffer_reset(ll_ringbuffer_t *rb)
* elements in front of the read pointer and behind the write pointer. */
size_t ll_ringbuffer_read_space(const ll_ringbuffer_t *rb)
{
- size_t w = ATOMIC_LOAD(&rb->write_ptr, almemory_order_acquire) & rb->size_mask;
- size_t r = ATOMIC_LOAD(&rb->read_ptr, almemory_order_acquire) & rb->size_mask;
+ size_t w = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->write_ptr, almemory_order_acquire);
+ size_t r = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->read_ptr, almemory_order_acquire);
return (w-r) & rb->size_mask;
}
/* Return the number of elements available for writing. This is the number of
* elements in front of the write pointer and behind the read pointer. */
size_t ll_ringbuffer_write_space(const ll_ringbuffer_t *rb)
{
- size_t w = ATOMIC_LOAD(&rb->write_ptr, almemory_order_acquire) & rb->size_mask;
- size_t r = ATOMIC_LOAD(&rb->read_ptr, almemory_order_acquire) & rb->size_mask;
+ size_t w = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->write_ptr, almemory_order_acquire);
+ size_t r = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->read_ptr, almemory_order_acquire);
return (r-w-1) & rb->size_mask;
}
@@ -256,8 +256,10 @@ void ll_ringbuffer_get_read_vector(const ll_ringbuffer_t *rb, ll_ringbuffer_data
size_t cnt2;
size_t w, r;
- w = ATOMIC_LOAD(&rb->write_ptr, almemory_order_acquire) & rb->size_mask;
- r = ATOMIC_LOAD(&rb->read_ptr, almemory_order_acquire) & rb->size_mask;
+ w = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->write_ptr, almemory_order_acquire);
+ r = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->read_ptr, almemory_order_acquire);
+ w &= rb->size_mask;
+ r &= rb->size_mask;
free_cnt = (w-r) & rb->size_mask;
cnt2 = r + free_cnt;
@@ -289,8 +291,10 @@ void ll_ringbuffer_get_write_vector(const ll_ringbuffer_t *rb, ll_ringbuffer_dat
size_t cnt2;
size_t w, r;
- w = ATOMIC_LOAD(&rb->write_ptr, almemory_order_acquire) & rb->size_mask;
- r = ATOMIC_LOAD(&rb->read_ptr, almemory_order_acquire) & rb->size_mask;
+ w = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->write_ptr, almemory_order_acquire);
+ r = ATOMIC_LOAD(&CONST_CAST(ll_ringbuffer_t*,rb)->read_ptr, almemory_order_acquire);
+ w &= rb->size_mask;
+ r &= rb->size_mask;
free_cnt = (r-w-1) & rb->size_mask;
cnt2 = w + free_cnt;