Diffstat (limited to 'alc')
-rw-r--r--  alc/alc.cpp | 4342
-rw-r--r--  alc/alcmain.h | 534
-rw-r--r--  alc/alconfig.cpp | 545
-rw-r--r--  alc/alconfig.h | 20
-rw-r--r--  alc/alcontext.h | 217
-rw-r--r--  alc/alu.cpp | 1798
-rw-r--r--  alc/alu.h | 466
-rw-r--r--  alc/ambdec.cpp | 436
-rw-r--r--  alc/ambdec.h | 48
-rw-r--r--  alc/ambidefs.h | 119
-rw-r--r--  alc/backends/alsa.cpp | 1288
-rw-r--r--  alc/backends/alsa.h | 19
-rw-r--r--  alc/backends/base.cpp | 58
-rw-r--r--  alc/backends/base.h | 78
-rw-r--r--  alc/backends/coreaudio.cpp | 709
-rw-r--r--  alc/backends/coreaudio.h | 19
-rw-r--r--  alc/backends/dsound.cpp | 938
-rw-r--r--  alc/backends/dsound.h | 19
-rw-r--r--  alc/backends/jack.cpp | 562
-rw-r--r--  alc/backends/jack.h | 19
-rw-r--r--  alc/backends/loopback.cpp | 80
-rw-r--r--  alc/backends/loopback.h | 19
-rw-r--r--  alc/backends/null.cpp | 184
-rw-r--r--  alc/backends/null.h | 19
-rw-r--r--  alc/backends/opensl.cpp | 936
-rw-r--r--  alc/backends/opensl.h | 19
-rw-r--r--  alc/backends/oss.cpp | 751
-rw-r--r--  alc/backends/oss.h | 19
-rw-r--r--  alc/backends/portaudio.cpp | 463
-rw-r--r--  alc/backends/portaudio.h | 19
-rw-r--r--  alc/backends/pulseaudio.cpp | 1532
-rw-r--r--  alc/backends/pulseaudio.h | 19
-rw-r--r--  alc/backends/qsa.cpp | 953
-rw-r--r--  alc/backends/qsa.h | 19
-rw-r--r--  alc/backends/sdl2.cpp | 227
-rw-r--r--  alc/backends/sdl2.h | 19
-rw-r--r--  alc/backends/sndio.cpp | 495
-rw-r--r--  alc/backends/sndio.h | 19
-rw-r--r--  alc/backends/solaris.cpp | 302
-rw-r--r--  alc/backends/solaris.h | 19
-rw-r--r--  alc/backends/wasapi.cpp | 1763
-rw-r--r--  alc/backends/wasapi.h | 19
-rw-r--r--  alc/backends/wave.cpp | 402
-rw-r--r--  alc/backends/wave.h | 19
-rw-r--r--  alc/backends/winmm.cpp | 640
-rw-r--r--  alc/backends/winmm.h | 19
-rw-r--r--  alc/bformatdec.cpp | 200
-rw-r--r--  alc/bformatdec.h | 62
-rw-r--r--  alc/bs2b.cpp | 188
-rw-r--r--  alc/bs2b.h | 90
-rw-r--r--  alc/compat.h | 121
-rw-r--r--  alc/converter.cpp | 367
-rw-r--r--  alc/converter.h | 70
-rw-r--r--  alc/cpu_caps.h | 16
-rw-r--r--  alc/effects/autowah.cpp | 298
-rw-r--r--  alc/effects/base.h | 196
-rw-r--r--  alc/effects/chorus.cpp | 538
-rw-r--r--  alc/effects/compressor.cpp | 222
-rw-r--r--  alc/effects/dedicated.cpp | 159
-rw-r--r--  alc/effects/distortion.cpp | 269
-rw-r--r--  alc/effects/echo.cpp | 271
-rw-r--r--  alc/effects/equalizer.cpp | 337
-rw-r--r--  alc/effects/fshifter.cpp | 301
-rw-r--r--  alc/effects/modulator.cpp | 279
-rw-r--r--  alc/effects/null.cpp | 164
-rw-r--r--  alc/effects/pshifter.cpp | 405
-rw-r--r--  alc/effects/reverb.cpp | 2102
-rw-r--r--  alc/effects/vmorpher.cpp | 430
-rw-r--r--  alc/filters/biquad.cpp | 127
-rw-r--r--  alc/filters/biquad.h | 113
-rw-r--r--  alc/filters/nfc.cpp | 391
-rw-r--r--  alc/filters/nfc.h | 58
-rw-r--r--  alc/filters/splitter.cpp | 115
-rw-r--r--  alc/filters/splitter.h | 50
-rw-r--r--  alc/fpu_modes.h | 25
-rw-r--r--  alc/helpers.cpp | 851
-rw-r--r--  alc/hrtf.cpp | 1400
-rw-r--r--  alc/hrtf.h | 124
-rw-r--r--  alc/inprogext.h | 92
-rw-r--r--  alc/logging.h | 64
-rw-r--r--  alc/mastering.cpp | 479
-rw-r--r--  alc/mastering.h | 104
-rw-r--r--  alc/mixer/defs.h | 59
-rw-r--r--  alc/mixer/hrtfbase.h | 138
-rw-r--r--  alc/mixer/mixer_c.cpp | 208
-rw-r--r--  alc/mixer/mixer_neon.cpp | 307
-rw-r--r--  alc/mixer/mixer_sse.cpp | 262
-rw-r--r--  alc/mixer/mixer_sse2.cpp | 84
-rw-r--r--  alc/mixer/mixer_sse3.cpp | 0
-rw-r--r--  alc/mixer/mixer_sse41.cpp | 85
-rw-r--r--  alc/mixvoice.cpp | 954
-rw-r--r--  alc/panning.cpp | 964
-rw-r--r--  alc/ringbuffer.cpp | 253
-rw-r--r--  alc/ringbuffer.h | 99
-rw-r--r--  alc/uhjfilter.cpp | 131
-rw-r--r--  alc/uhjfilter.h | 54
-rw-r--r--  alc/vector.h | 15
97 files changed, 35900 insertions, 0 deletions
diff --git a/alc/alc.cpp b/alc/alc.cpp
new file mode 100644
index 00000000..00f90d91
--- /dev/null
+++ b/alc/alc.cpp
@@ -0,0 +1,4342 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "version.h"
+
+#include <exception>
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <cctype>
+#include <chrono>
+#include <climits>
+#include <cmath>
+#include <csignal>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <mutex>
+#include <new>
+#include <numeric>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "AL/alext.h"
+#include "AL/efx.h"
+
+#include "alAuxEffectSlot.h"
+#include "alcmain.h"
+#include "alEffect.h"
+#include "alError.h"
+#include "alFilter.h"
+#include "alListener.h"
+#include "alSource.h"
+#include "albyte.h"
+#include "alconfig.h"
+#include "alcontext.h"
+#include "alexcpt.h"
+#include "almalloc.h"
+#include "alnumeric.h"
+#include "aloptional.h"
+#include "alspan.h"
+#include "alu.h"
+#include "ambidefs.h"
+#include "atomic.h"
+#include "bformatdec.h"
+#include "bs2b.h"
+#include "compat.h"
+#include "cpu_caps.h"
+#include "effects/base.h"
+#include "filters/nfc.h"
+#include "filters/splitter.h"
+#include "fpu_modes.h"
+#include "hrtf.h"
+#include "inprogext.h"
+#include "logging.h"
+#include "mastering.h"
+#include "opthelpers.h"
+#include "ringbuffer.h"
+#include "threads.h"
+#include "uhjfilter.h"
+#include "vecmat.h"
+#include "vector.h"
+
+#include "backends/base.h"
+#include "backends/null.h"
+#include "backends/loopback.h"
+#ifdef HAVE_JACK
+#include "backends/jack.h"
+#endif
+#ifdef HAVE_PULSEAUDIO
+#include "backends/pulseaudio.h"
+#endif
+#ifdef HAVE_ALSA
+#include "backends/alsa.h"
+#endif
+#ifdef HAVE_WASAPI
+#include "backends/wasapi.h"
+#endif
+#ifdef HAVE_COREAUDIO
+#include "backends/coreaudio.h"
+#endif
+#ifdef HAVE_OPENSL
+#include "backends/opensl.h"
+#endif
+#ifdef HAVE_SOLARIS
+#include "backends/solaris.h"
+#endif
+#ifdef HAVE_SNDIO
+#include "backends/sndio.h"
+#endif
+#ifdef HAVE_OSS
+#include "backends/oss.h"
+#endif
+#ifdef HAVE_QSA
+#include "backends/qsa.h"
+#endif
+#ifdef HAVE_DSOUND
+#include "backends/dsound.h"
+#endif
+#ifdef HAVE_WINMM
+#include "backends/winmm.h"
+#endif
+#ifdef HAVE_PORTAUDIO
+#include "backends/portaudio.h"
+#endif
+#ifdef HAVE_SDL2
+#include "backends/sdl2.h"
+#endif
+#ifdef HAVE_WAVE
+#include "backends/wave.h"
+#endif
+
+
+namespace {
+
+using namespace std::placeholders;
+using std::chrono::seconds;
+using std::chrono::nanoseconds;
+
+
+/************************************************
+ * Backends
+ ************************************************/
+struct BackendInfo {
+ const char *name;
+ BackendFactory& (*getFactory)(void);
+};
+
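+/* The order of this list sets the default backend priority: entries are
+ * tried in order until playback and capture factories are found. The
+ * "drivers" config option or the ALSOFT_DRIVERS environment variable can
+ * reorder or exclude entries (see alc_initconfig below).
+ */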
+BackendInfo BackendList[] = {
+#ifdef HAVE_JACK
+ { "jack", JackBackendFactory::getFactory },
+#endif
+#ifdef HAVE_PULSEAUDIO
+ { "pulse", PulseBackendFactory::getFactory },
+#endif
+#ifdef HAVE_ALSA
+ { "alsa", AlsaBackendFactory::getFactory },
+#endif
+#ifdef HAVE_WASAPI
+ { "wasapi", WasapiBackendFactory::getFactory },
+#endif
+#ifdef HAVE_COREAUDIO
+ { "core", CoreAudioBackendFactory::getFactory },
+#endif
+#ifdef HAVE_OPENSL
+ { "opensl", OSLBackendFactory::getFactory },
+#endif
+#ifdef HAVE_SOLARIS
+ { "solaris", SolarisBackendFactory::getFactory },
+#endif
+#ifdef HAVE_SNDIO
+ { "sndio", SndIOBackendFactory::getFactory },
+#endif
+#ifdef HAVE_OSS
+ { "oss", OSSBackendFactory::getFactory },
+#endif
+#ifdef HAVE_QSA
+ { "qsa", QSABackendFactory::getFactory },
+#endif
+#ifdef HAVE_DSOUND
+ { "dsound", DSoundBackendFactory::getFactory },
+#endif
+#ifdef HAVE_WINMM
+ { "winmm", WinMMBackendFactory::getFactory },
+#endif
+#ifdef HAVE_PORTAUDIO
+ { "port", PortBackendFactory::getFactory },
+#endif
+#ifdef HAVE_SDL2
+ { "sdl2", SDL2BackendFactory::getFactory },
+#endif
+
+ { "null", NullBackendFactory::getFactory },
+#ifdef HAVE_WAVE
+ { "wave", WaveBackendFactory::getFactory },
+#endif
+};
+auto BackendListEnd = std::end(BackendList);
+
+BackendFactory *PlaybackFactory{};
+BackendFactory *CaptureFactory{};
+
+
+/************************************************
+ * Functions, enums, and errors
+ ************************************************/
+#define DECL(x) { #x, (ALCvoid*)(x) }
+const struct {
+ const ALCchar *funcName;
+ ALCvoid *address;
+} alcFunctions[] = {
+ DECL(alcCreateContext),
+ DECL(alcMakeContextCurrent),
+ DECL(alcProcessContext),
+ DECL(alcSuspendContext),
+ DECL(alcDestroyContext),
+ DECL(alcGetCurrentContext),
+ DECL(alcGetContextsDevice),
+ DECL(alcOpenDevice),
+ DECL(alcCloseDevice),
+ DECL(alcGetError),
+ DECL(alcIsExtensionPresent),
+ DECL(alcGetProcAddress),
+ DECL(alcGetEnumValue),
+ DECL(alcGetString),
+ DECL(alcGetIntegerv),
+ DECL(alcCaptureOpenDevice),
+ DECL(alcCaptureCloseDevice),
+ DECL(alcCaptureStart),
+ DECL(alcCaptureStop),
+ DECL(alcCaptureSamples),
+
+ DECL(alcSetThreadContext),
+ DECL(alcGetThreadContext),
+
+ DECL(alcLoopbackOpenDeviceSOFT),
+ DECL(alcIsRenderFormatSupportedSOFT),
+ DECL(alcRenderSamplesSOFT),
+
+ DECL(alcDevicePauseSOFT),
+ DECL(alcDeviceResumeSOFT),
+
+ DECL(alcGetStringiSOFT),
+ DECL(alcResetDeviceSOFT),
+
+ DECL(alcGetInteger64vSOFT),
+
+ DECL(alEnable),
+ DECL(alDisable),
+ DECL(alIsEnabled),
+ DECL(alGetString),
+ DECL(alGetBooleanv),
+ DECL(alGetIntegerv),
+ DECL(alGetFloatv),
+ DECL(alGetDoublev),
+ DECL(alGetBoolean),
+ DECL(alGetInteger),
+ DECL(alGetFloat),
+ DECL(alGetDouble),
+ DECL(alGetError),
+ DECL(alIsExtensionPresent),
+ DECL(alGetProcAddress),
+ DECL(alGetEnumValue),
+ DECL(alListenerf),
+ DECL(alListener3f),
+ DECL(alListenerfv),
+ DECL(alListeneri),
+ DECL(alListener3i),
+ DECL(alListeneriv),
+ DECL(alGetListenerf),
+ DECL(alGetListener3f),
+ DECL(alGetListenerfv),
+ DECL(alGetListeneri),
+ DECL(alGetListener3i),
+ DECL(alGetListeneriv),
+ DECL(alGenSources),
+ DECL(alDeleteSources),
+ DECL(alIsSource),
+ DECL(alSourcef),
+ DECL(alSource3f),
+ DECL(alSourcefv),
+ DECL(alSourcei),
+ DECL(alSource3i),
+ DECL(alSourceiv),
+ DECL(alGetSourcef),
+ DECL(alGetSource3f),
+ DECL(alGetSourcefv),
+ DECL(alGetSourcei),
+ DECL(alGetSource3i),
+ DECL(alGetSourceiv),
+ DECL(alSourcePlayv),
+ DECL(alSourceStopv),
+ DECL(alSourceRewindv),
+ DECL(alSourcePausev),
+ DECL(alSourcePlay),
+ DECL(alSourceStop),
+ DECL(alSourceRewind),
+ DECL(alSourcePause),
+ DECL(alSourceQueueBuffers),
+ DECL(alSourceUnqueueBuffers),
+ DECL(alGenBuffers),
+ DECL(alDeleteBuffers),
+ DECL(alIsBuffer),
+ DECL(alBufferData),
+ DECL(alBufferf),
+ DECL(alBuffer3f),
+ DECL(alBufferfv),
+ DECL(alBufferi),
+ DECL(alBuffer3i),
+ DECL(alBufferiv),
+ DECL(alGetBufferf),
+ DECL(alGetBuffer3f),
+ DECL(alGetBufferfv),
+ DECL(alGetBufferi),
+ DECL(alGetBuffer3i),
+ DECL(alGetBufferiv),
+ DECL(alDopplerFactor),
+ DECL(alDopplerVelocity),
+ DECL(alSpeedOfSound),
+ DECL(alDistanceModel),
+
+ DECL(alGenFilters),
+ DECL(alDeleteFilters),
+ DECL(alIsFilter),
+ DECL(alFilteri),
+ DECL(alFilteriv),
+ DECL(alFilterf),
+ DECL(alFilterfv),
+ DECL(alGetFilteri),
+ DECL(alGetFilteriv),
+ DECL(alGetFilterf),
+ DECL(alGetFilterfv),
+ DECL(alGenEffects),
+ DECL(alDeleteEffects),
+ DECL(alIsEffect),
+ DECL(alEffecti),
+ DECL(alEffectiv),
+ DECL(alEffectf),
+ DECL(alEffectfv),
+ DECL(alGetEffecti),
+ DECL(alGetEffectiv),
+ DECL(alGetEffectf),
+ DECL(alGetEffectfv),
+ DECL(alGenAuxiliaryEffectSlots),
+ DECL(alDeleteAuxiliaryEffectSlots),
+ DECL(alIsAuxiliaryEffectSlot),
+ DECL(alAuxiliaryEffectSloti),
+ DECL(alAuxiliaryEffectSlotiv),
+ DECL(alAuxiliaryEffectSlotf),
+ DECL(alAuxiliaryEffectSlotfv),
+ DECL(alGetAuxiliaryEffectSloti),
+ DECL(alGetAuxiliaryEffectSlotiv),
+ DECL(alGetAuxiliaryEffectSlotf),
+ DECL(alGetAuxiliaryEffectSlotfv),
+
+ DECL(alDeferUpdatesSOFT),
+ DECL(alProcessUpdatesSOFT),
+
+ DECL(alSourcedSOFT),
+ DECL(alSource3dSOFT),
+ DECL(alSourcedvSOFT),
+ DECL(alGetSourcedSOFT),
+ DECL(alGetSource3dSOFT),
+ DECL(alGetSourcedvSOFT),
+ DECL(alSourcei64SOFT),
+ DECL(alSource3i64SOFT),
+ DECL(alSourcei64vSOFT),
+ DECL(alGetSourcei64SOFT),
+ DECL(alGetSource3i64SOFT),
+ DECL(alGetSourcei64vSOFT),
+
+ DECL(alGetStringiSOFT),
+
+ DECL(alBufferStorageSOFT),
+ DECL(alMapBufferSOFT),
+ DECL(alUnmapBufferSOFT),
+ DECL(alFlushMappedBufferSOFT),
+
+ DECL(alEventControlSOFT),
+ DECL(alEventCallbackSOFT),
+ DECL(alGetPointerSOFT),
+ DECL(alGetPointervSOFT),
+};
+#undef DECL
+
+#define DECL(x) { #x, (x) }
+constexpr struct {
+ const ALCchar *enumName;
+ ALCenum value;
+} alcEnumerations[] = {
+ DECL(ALC_INVALID),
+ DECL(ALC_FALSE),
+ DECL(ALC_TRUE),
+
+ DECL(ALC_MAJOR_VERSION),
+ DECL(ALC_MINOR_VERSION),
+ DECL(ALC_ATTRIBUTES_SIZE),
+ DECL(ALC_ALL_ATTRIBUTES),
+ DECL(ALC_DEFAULT_DEVICE_SPECIFIER),
+ DECL(ALC_DEVICE_SPECIFIER),
+ DECL(ALC_ALL_DEVICES_SPECIFIER),
+ DECL(ALC_DEFAULT_ALL_DEVICES_SPECIFIER),
+ DECL(ALC_EXTENSIONS),
+ DECL(ALC_FREQUENCY),
+ DECL(ALC_REFRESH),
+ DECL(ALC_SYNC),
+ DECL(ALC_MONO_SOURCES),
+ DECL(ALC_STEREO_SOURCES),
+ DECL(ALC_CAPTURE_DEVICE_SPECIFIER),
+ DECL(ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER),
+ DECL(ALC_CAPTURE_SAMPLES),
+ DECL(ALC_CONNECTED),
+
+ DECL(ALC_EFX_MAJOR_VERSION),
+ DECL(ALC_EFX_MINOR_VERSION),
+ DECL(ALC_MAX_AUXILIARY_SENDS),
+
+ DECL(ALC_FORMAT_CHANNELS_SOFT),
+ DECL(ALC_FORMAT_TYPE_SOFT),
+
+ DECL(ALC_MONO_SOFT),
+ DECL(ALC_STEREO_SOFT),
+ DECL(ALC_QUAD_SOFT),
+ DECL(ALC_5POINT1_SOFT),
+ DECL(ALC_6POINT1_SOFT),
+ DECL(ALC_7POINT1_SOFT),
+ DECL(ALC_BFORMAT3D_SOFT),
+
+ DECL(ALC_BYTE_SOFT),
+ DECL(ALC_UNSIGNED_BYTE_SOFT),
+ DECL(ALC_SHORT_SOFT),
+ DECL(ALC_UNSIGNED_SHORT_SOFT),
+ DECL(ALC_INT_SOFT),
+ DECL(ALC_UNSIGNED_INT_SOFT),
+ DECL(ALC_FLOAT_SOFT),
+
+ DECL(ALC_HRTF_SOFT),
+ DECL(ALC_DONT_CARE_SOFT),
+ DECL(ALC_HRTF_STATUS_SOFT),
+ DECL(ALC_HRTF_DISABLED_SOFT),
+ DECL(ALC_HRTF_ENABLED_SOFT),
+ DECL(ALC_HRTF_DENIED_SOFT),
+ DECL(ALC_HRTF_REQUIRED_SOFT),
+ DECL(ALC_HRTF_HEADPHONES_DETECTED_SOFT),
+ DECL(ALC_HRTF_UNSUPPORTED_FORMAT_SOFT),
+ DECL(ALC_NUM_HRTF_SPECIFIERS_SOFT),
+ DECL(ALC_HRTF_SPECIFIER_SOFT),
+ DECL(ALC_HRTF_ID_SOFT),
+
+ DECL(ALC_AMBISONIC_LAYOUT_SOFT),
+ DECL(ALC_AMBISONIC_SCALING_SOFT),
+ DECL(ALC_AMBISONIC_ORDER_SOFT),
+ DECL(ALC_ACN_SOFT),
+ DECL(ALC_FUMA_SOFT),
+ DECL(ALC_N3D_SOFT),
+ DECL(ALC_SN3D_SOFT),
+
+ DECL(ALC_OUTPUT_LIMITER_SOFT),
+
+ DECL(ALC_NO_ERROR),
+ DECL(ALC_INVALID_DEVICE),
+ DECL(ALC_INVALID_CONTEXT),
+ DECL(ALC_INVALID_ENUM),
+ DECL(ALC_INVALID_VALUE),
+ DECL(ALC_OUT_OF_MEMORY),
+
+
+ DECL(AL_INVALID),
+ DECL(AL_NONE),
+ DECL(AL_FALSE),
+ DECL(AL_TRUE),
+
+ DECL(AL_SOURCE_RELATIVE),
+ DECL(AL_CONE_INNER_ANGLE),
+ DECL(AL_CONE_OUTER_ANGLE),
+ DECL(AL_PITCH),
+ DECL(AL_POSITION),
+ DECL(AL_DIRECTION),
+ DECL(AL_VELOCITY),
+ DECL(AL_LOOPING),
+ DECL(AL_BUFFER),
+ DECL(AL_GAIN),
+ DECL(AL_MIN_GAIN),
+ DECL(AL_MAX_GAIN),
+ DECL(AL_ORIENTATION),
+ DECL(AL_REFERENCE_DISTANCE),
+ DECL(AL_ROLLOFF_FACTOR),
+ DECL(AL_CONE_OUTER_GAIN),
+ DECL(AL_MAX_DISTANCE),
+ DECL(AL_SEC_OFFSET),
+ DECL(AL_SAMPLE_OFFSET),
+ DECL(AL_BYTE_OFFSET),
+ DECL(AL_SOURCE_TYPE),
+ DECL(AL_STATIC),
+ DECL(AL_STREAMING),
+ DECL(AL_UNDETERMINED),
+ DECL(AL_METERS_PER_UNIT),
+ DECL(AL_LOOP_POINTS_SOFT),
+ DECL(AL_DIRECT_CHANNELS_SOFT),
+
+ DECL(AL_DIRECT_FILTER),
+ DECL(AL_AUXILIARY_SEND_FILTER),
+ DECL(AL_AIR_ABSORPTION_FACTOR),
+ DECL(AL_ROOM_ROLLOFF_FACTOR),
+ DECL(AL_CONE_OUTER_GAINHF),
+ DECL(AL_DIRECT_FILTER_GAINHF_AUTO),
+ DECL(AL_AUXILIARY_SEND_FILTER_GAIN_AUTO),
+ DECL(AL_AUXILIARY_SEND_FILTER_GAINHF_AUTO),
+
+ DECL(AL_SOURCE_STATE),
+ DECL(AL_INITIAL),
+ DECL(AL_PLAYING),
+ DECL(AL_PAUSED),
+ DECL(AL_STOPPED),
+
+ DECL(AL_BUFFERS_QUEUED),
+ DECL(AL_BUFFERS_PROCESSED),
+
+ DECL(AL_FORMAT_MONO8),
+ DECL(AL_FORMAT_MONO16),
+ DECL(AL_FORMAT_MONO_FLOAT32),
+ DECL(AL_FORMAT_MONO_DOUBLE_EXT),
+ DECL(AL_FORMAT_STEREO8),
+ DECL(AL_FORMAT_STEREO16),
+ DECL(AL_FORMAT_STEREO_FLOAT32),
+ DECL(AL_FORMAT_STEREO_DOUBLE_EXT),
+ DECL(AL_FORMAT_MONO_IMA4),
+ DECL(AL_FORMAT_STEREO_IMA4),
+ DECL(AL_FORMAT_MONO_MSADPCM_SOFT),
+ DECL(AL_FORMAT_STEREO_MSADPCM_SOFT),
+ DECL(AL_FORMAT_QUAD8_LOKI),
+ DECL(AL_FORMAT_QUAD16_LOKI),
+ DECL(AL_FORMAT_QUAD8),
+ DECL(AL_FORMAT_QUAD16),
+ DECL(AL_FORMAT_QUAD32),
+ DECL(AL_FORMAT_51CHN8),
+ DECL(AL_FORMAT_51CHN16),
+ DECL(AL_FORMAT_51CHN32),
+ DECL(AL_FORMAT_61CHN8),
+ DECL(AL_FORMAT_61CHN16),
+ DECL(AL_FORMAT_61CHN32),
+ DECL(AL_FORMAT_71CHN8),
+ DECL(AL_FORMAT_71CHN16),
+ DECL(AL_FORMAT_71CHN32),
+ DECL(AL_FORMAT_REAR8),
+ DECL(AL_FORMAT_REAR16),
+ DECL(AL_FORMAT_REAR32),
+ DECL(AL_FORMAT_MONO_MULAW),
+ DECL(AL_FORMAT_MONO_MULAW_EXT),
+ DECL(AL_FORMAT_STEREO_MULAW),
+ DECL(AL_FORMAT_STEREO_MULAW_EXT),
+ DECL(AL_FORMAT_QUAD_MULAW),
+ DECL(AL_FORMAT_51CHN_MULAW),
+ DECL(AL_FORMAT_61CHN_MULAW),
+ DECL(AL_FORMAT_71CHN_MULAW),
+ DECL(AL_FORMAT_REAR_MULAW),
+ DECL(AL_FORMAT_MONO_ALAW_EXT),
+ DECL(AL_FORMAT_STEREO_ALAW_EXT),
+
+ DECL(AL_FORMAT_BFORMAT2D_8),
+ DECL(AL_FORMAT_BFORMAT2D_16),
+ DECL(AL_FORMAT_BFORMAT2D_FLOAT32),
+ DECL(AL_FORMAT_BFORMAT2D_MULAW),
+ DECL(AL_FORMAT_BFORMAT3D_8),
+ DECL(AL_FORMAT_BFORMAT3D_16),
+ DECL(AL_FORMAT_BFORMAT3D_FLOAT32),
+ DECL(AL_FORMAT_BFORMAT3D_MULAW),
+
+ DECL(AL_FREQUENCY),
+ DECL(AL_BITS),
+ DECL(AL_CHANNELS),
+ DECL(AL_SIZE),
+ DECL(AL_UNPACK_BLOCK_ALIGNMENT_SOFT),
+ DECL(AL_PACK_BLOCK_ALIGNMENT_SOFT),
+
+ DECL(AL_SOURCE_RADIUS),
+
+ DECL(AL_STEREO_ANGLES),
+
+ DECL(AL_UNUSED),
+ DECL(AL_PENDING),
+ DECL(AL_PROCESSED),
+
+ DECL(AL_NO_ERROR),
+ DECL(AL_INVALID_NAME),
+ DECL(AL_INVALID_ENUM),
+ DECL(AL_INVALID_VALUE),
+ DECL(AL_INVALID_OPERATION),
+ DECL(AL_OUT_OF_MEMORY),
+
+ DECL(AL_VENDOR),
+ DECL(AL_VERSION),
+ DECL(AL_RENDERER),
+ DECL(AL_EXTENSIONS),
+
+ DECL(AL_DOPPLER_FACTOR),
+ DECL(AL_DOPPLER_VELOCITY),
+ DECL(AL_DISTANCE_MODEL),
+ DECL(AL_SPEED_OF_SOUND),
+ DECL(AL_SOURCE_DISTANCE_MODEL),
+ DECL(AL_DEFERRED_UPDATES_SOFT),
+ DECL(AL_GAIN_LIMIT_SOFT),
+
+ DECL(AL_INVERSE_DISTANCE),
+ DECL(AL_INVERSE_DISTANCE_CLAMPED),
+ DECL(AL_LINEAR_DISTANCE),
+ DECL(AL_LINEAR_DISTANCE_CLAMPED),
+ DECL(AL_EXPONENT_DISTANCE),
+ DECL(AL_EXPONENT_DISTANCE_CLAMPED),
+
+ DECL(AL_FILTER_TYPE),
+ DECL(AL_FILTER_NULL),
+ DECL(AL_FILTER_LOWPASS),
+ DECL(AL_FILTER_HIGHPASS),
+ DECL(AL_FILTER_BANDPASS),
+
+ DECL(AL_LOWPASS_GAIN),
+ DECL(AL_LOWPASS_GAINHF),
+
+ DECL(AL_HIGHPASS_GAIN),
+ DECL(AL_HIGHPASS_GAINLF),
+
+ DECL(AL_BANDPASS_GAIN),
+ DECL(AL_BANDPASS_GAINHF),
+ DECL(AL_BANDPASS_GAINLF),
+
+ DECL(AL_EFFECT_TYPE),
+ DECL(AL_EFFECT_NULL),
+ DECL(AL_EFFECT_REVERB),
+ DECL(AL_EFFECT_EAXREVERB),
+ DECL(AL_EFFECT_CHORUS),
+ DECL(AL_EFFECT_DISTORTION),
+ DECL(AL_EFFECT_ECHO),
+ DECL(AL_EFFECT_FLANGER),
+ DECL(AL_EFFECT_PITCH_SHIFTER),
+ DECL(AL_EFFECT_FREQUENCY_SHIFTER),
+ DECL(AL_EFFECT_VOCAL_MORPHER),
+ DECL(AL_EFFECT_RING_MODULATOR),
+ DECL(AL_EFFECT_AUTOWAH),
+ DECL(AL_EFFECT_COMPRESSOR),
+ DECL(AL_EFFECT_EQUALIZER),
+ DECL(AL_EFFECT_DEDICATED_LOW_FREQUENCY_EFFECT),
+ DECL(AL_EFFECT_DEDICATED_DIALOGUE),
+
+ DECL(AL_EFFECTSLOT_EFFECT),
+ DECL(AL_EFFECTSLOT_GAIN),
+ DECL(AL_EFFECTSLOT_AUXILIARY_SEND_AUTO),
+ DECL(AL_EFFECTSLOT_NULL),
+
+ DECL(AL_EAXREVERB_DENSITY),
+ DECL(AL_EAXREVERB_DIFFUSION),
+ DECL(AL_EAXREVERB_GAIN),
+ DECL(AL_EAXREVERB_GAINHF),
+ DECL(AL_EAXREVERB_GAINLF),
+ DECL(AL_EAXREVERB_DECAY_TIME),
+ DECL(AL_EAXREVERB_DECAY_HFRATIO),
+ DECL(AL_EAXREVERB_DECAY_LFRATIO),
+ DECL(AL_EAXREVERB_REFLECTIONS_GAIN),
+ DECL(AL_EAXREVERB_REFLECTIONS_DELAY),
+ DECL(AL_EAXREVERB_REFLECTIONS_PAN),
+ DECL(AL_EAXREVERB_LATE_REVERB_GAIN),
+ DECL(AL_EAXREVERB_LATE_REVERB_DELAY),
+ DECL(AL_EAXREVERB_LATE_REVERB_PAN),
+ DECL(AL_EAXREVERB_ECHO_TIME),
+ DECL(AL_EAXREVERB_ECHO_DEPTH),
+ DECL(AL_EAXREVERB_MODULATION_TIME),
+ DECL(AL_EAXREVERB_MODULATION_DEPTH),
+ DECL(AL_EAXREVERB_AIR_ABSORPTION_GAINHF),
+ DECL(AL_EAXREVERB_HFREFERENCE),
+ DECL(AL_EAXREVERB_LFREFERENCE),
+ DECL(AL_EAXREVERB_ROOM_ROLLOFF_FACTOR),
+ DECL(AL_EAXREVERB_DECAY_HFLIMIT),
+
+ DECL(AL_REVERB_DENSITY),
+ DECL(AL_REVERB_DIFFUSION),
+ DECL(AL_REVERB_GAIN),
+ DECL(AL_REVERB_GAINHF),
+ DECL(AL_REVERB_DECAY_TIME),
+ DECL(AL_REVERB_DECAY_HFRATIO),
+ DECL(AL_REVERB_REFLECTIONS_GAIN),
+ DECL(AL_REVERB_REFLECTIONS_DELAY),
+ DECL(AL_REVERB_LATE_REVERB_GAIN),
+ DECL(AL_REVERB_LATE_REVERB_DELAY),
+ DECL(AL_REVERB_AIR_ABSORPTION_GAINHF),
+ DECL(AL_REVERB_ROOM_ROLLOFF_FACTOR),
+ DECL(AL_REVERB_DECAY_HFLIMIT),
+
+ DECL(AL_CHORUS_WAVEFORM),
+ DECL(AL_CHORUS_PHASE),
+ DECL(AL_CHORUS_RATE),
+ DECL(AL_CHORUS_DEPTH),
+ DECL(AL_CHORUS_FEEDBACK),
+ DECL(AL_CHORUS_DELAY),
+
+ DECL(AL_DISTORTION_EDGE),
+ DECL(AL_DISTORTION_GAIN),
+ DECL(AL_DISTORTION_LOWPASS_CUTOFF),
+ DECL(AL_DISTORTION_EQCENTER),
+ DECL(AL_DISTORTION_EQBANDWIDTH),
+
+ DECL(AL_ECHO_DELAY),
+ DECL(AL_ECHO_LRDELAY),
+ DECL(AL_ECHO_DAMPING),
+ DECL(AL_ECHO_FEEDBACK),
+ DECL(AL_ECHO_SPREAD),
+
+ DECL(AL_FLANGER_WAVEFORM),
+ DECL(AL_FLANGER_PHASE),
+ DECL(AL_FLANGER_RATE),
+ DECL(AL_FLANGER_DEPTH),
+ DECL(AL_FLANGER_FEEDBACK),
+ DECL(AL_FLANGER_DELAY),
+
+ DECL(AL_FREQUENCY_SHIFTER_FREQUENCY),
+ DECL(AL_FREQUENCY_SHIFTER_LEFT_DIRECTION),
+ DECL(AL_FREQUENCY_SHIFTER_RIGHT_DIRECTION),
+
+ DECL(AL_RING_MODULATOR_FREQUENCY),
+ DECL(AL_RING_MODULATOR_HIGHPASS_CUTOFF),
+ DECL(AL_RING_MODULATOR_WAVEFORM),
+
+ DECL(AL_PITCH_SHIFTER_COARSE_TUNE),
+ DECL(AL_PITCH_SHIFTER_FINE_TUNE),
+
+ DECL(AL_COMPRESSOR_ONOFF),
+
+ DECL(AL_EQUALIZER_LOW_GAIN),
+ DECL(AL_EQUALIZER_LOW_CUTOFF),
+ DECL(AL_EQUALIZER_MID1_GAIN),
+ DECL(AL_EQUALIZER_MID1_CENTER),
+ DECL(AL_EQUALIZER_MID1_WIDTH),
+ DECL(AL_EQUALIZER_MID2_GAIN),
+ DECL(AL_EQUALIZER_MID2_CENTER),
+ DECL(AL_EQUALIZER_MID2_WIDTH),
+ DECL(AL_EQUALIZER_HIGH_GAIN),
+ DECL(AL_EQUALIZER_HIGH_CUTOFF),
+
+ DECL(AL_DEDICATED_GAIN),
+
+ DECL(AL_AUTOWAH_ATTACK_TIME),
+ DECL(AL_AUTOWAH_RELEASE_TIME),
+ DECL(AL_AUTOWAH_RESONANCE),
+ DECL(AL_AUTOWAH_PEAK_GAIN),
+
+ DECL(AL_NUM_RESAMPLERS_SOFT),
+ DECL(AL_DEFAULT_RESAMPLER_SOFT),
+ DECL(AL_SOURCE_RESAMPLER_SOFT),
+ DECL(AL_RESAMPLER_NAME_SOFT),
+
+ DECL(AL_SOURCE_SPATIALIZE_SOFT),
+ DECL(AL_AUTO_SOFT),
+
+ DECL(AL_MAP_READ_BIT_SOFT),
+ DECL(AL_MAP_WRITE_BIT_SOFT),
+ DECL(AL_MAP_PERSISTENT_BIT_SOFT),
+ DECL(AL_PRESERVE_DATA_BIT_SOFT),
+
+ DECL(AL_EVENT_CALLBACK_FUNCTION_SOFT),
+ DECL(AL_EVENT_CALLBACK_USER_PARAM_SOFT),
+ DECL(AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT),
+ DECL(AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT),
+ DECL(AL_EVENT_TYPE_ERROR_SOFT),
+ DECL(AL_EVENT_TYPE_PERFORMANCE_SOFT),
+ DECL(AL_EVENT_TYPE_DEPRECATED_SOFT),
+};
+#undef DECL
+
+constexpr ALCchar alcNoError[] = "No Error";
+constexpr ALCchar alcErrInvalidDevice[] = "Invalid Device";
+constexpr ALCchar alcErrInvalidContext[] = "Invalid Context";
+constexpr ALCchar alcErrInvalidEnum[] = "Invalid Enum";
+constexpr ALCchar alcErrInvalidValue[] = "Invalid Value";
+constexpr ALCchar alcErrOutOfMemory[] = "Out of Memory";
+
+
+/************************************************
+ * Global variables
+ ************************************************/
+
+/* Enumerated device names */
+constexpr ALCchar alcDefaultName[] = "OpenAL Soft\0";
+
+std::string alcAllDevicesList;
+std::string alcCaptureDeviceList;
+
+/* Default is always the first in the list */
+std::string alcDefaultAllDevicesSpecifier;
+std::string alcCaptureDefaultDeviceSpecifier;
+
+/* Default context extensions */
+constexpr ALchar alExtList[] =
+ "AL_EXT_ALAW "
+ "AL_EXT_BFORMAT "
+ "AL_EXT_DOUBLE "
+ "AL_EXT_EXPONENT_DISTANCE "
+ "AL_EXT_FLOAT32 "
+ "AL_EXT_IMA4 "
+ "AL_EXT_LINEAR_DISTANCE "
+ "AL_EXT_MCFORMATS "
+ "AL_EXT_MULAW "
+ "AL_EXT_MULAW_BFORMAT "
+ "AL_EXT_MULAW_MCFORMATS "
+ "AL_EXT_OFFSET "
+ "AL_EXT_source_distance_model "
+ "AL_EXT_SOURCE_RADIUS "
+ "AL_EXT_STEREO_ANGLES "
+ "AL_LOKI_quadriphonic "
+ "AL_SOFT_block_alignment "
+ "AL_SOFT_deferred_updates "
+ "AL_SOFT_direct_channels "
+ "AL_SOFTX_effect_chain "
+ "AL_SOFTX_events "
+ "AL_SOFTX_filter_gain_ex "
+ "AL_SOFT_gain_clamp_ex "
+ "AL_SOFT_loop_points "
+ "AL_SOFTX_map_buffer "
+ "AL_SOFT_MSADPCM "
+ "AL_SOFT_source_latency "
+ "AL_SOFT_source_length "
+ "AL_SOFT_source_resampler "
+ "AL_SOFT_source_spatialize";
+
+std::atomic<ALCenum> LastNullDeviceError{ALC_NO_ERROR};
+
+/* Thread-local current context */
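+/* Called at thread exit if a context is still current on that thread;
+ * releases the thread's reference and logs it as a possible leak.
+ */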
+void ReleaseThreadCtx(ALCcontext *context)
+{
+ auto ref = DecrementRef(&context->ref);
+ TRACEREF("ALCcontext %p decreasing refcount to %u\n", context, ref);
+ ERR("Context %p current for thread being destroyed, possible leak!\n", context);
+}
+
+std::atomic<void(*)(ALCcontext*)> ThreadCtxProc{ReleaseThreadCtx};
+class ThreadCtx {
+ ALCcontext *ctx{nullptr};
+
+public:
+ ~ThreadCtx()
+ {
+ auto destruct = ThreadCtxProc.load();
+ if(destruct && ctx)
+ destruct(ctx);
+ ctx = nullptr;
+ }
+
+ ALCcontext *get() const noexcept { return ctx; }
+ void set(ALCcontext *ctx_) noexcept { ctx = ctx_; }
+};
+thread_local ThreadCtx LocalContext;
+/* Process-wide current context */
+std::atomic<ALCcontext*> GlobalContext{nullptr};
+
+/* Flag to trap ALC device errors */
+bool TrapALCError{false};
+
+/* One-time configuration init control */
+std::once_flag alc_config_once{};
+
+/* Default effect that applies to sources that don't have an effect on send 0 */
+ALeffect DefaultEffect;
+
+/* Flag to specify if alcSuspendContext/alcProcessContext should defer/process
+ * updates.
+ */
+bool SuspendDefers{true};
+
+
+/************************************************
+ * ALC information
+ ************************************************/
+constexpr ALCchar alcNoDeviceExtList[] =
+ "ALC_ENUMERATE_ALL_EXT "
+ "ALC_ENUMERATION_EXT "
+ "ALC_EXT_CAPTURE "
+ "ALC_EXT_thread_local_context "
+ "ALC_SOFT_loopback";
+constexpr ALCchar alcExtensionList[] =
+ "ALC_ENUMERATE_ALL_EXT "
+ "ALC_ENUMERATION_EXT "
+ "ALC_EXT_CAPTURE "
+ "ALC_EXT_DEDICATED "
+ "ALC_EXT_disconnect "
+ "ALC_EXT_EFX "
+ "ALC_EXT_thread_local_context "
+ "ALC_SOFT_device_clock "
+ "ALC_SOFT_HRTF "
+ "ALC_SOFT_loopback "
+ "ALC_SOFT_output_limiter "
+ "ALC_SOFT_pause_device";
+constexpr ALCint alcMajorVersion = 1;
+constexpr ALCint alcMinorVersion = 1;
+
+constexpr ALCint alcEFXMajorVersion = 1;
+constexpr ALCint alcEFXMinorVersion = 0;
+
+
+/* To avoid extraneous allocations, a 0-sized FlexArray<ALCcontext*> is defined
+ * globally as a sharable object.
+ */
+al::FlexArray<ALCcontext*> EmptyContextArray{0u};
+
+
+void ALCdevice_IncRef(ALCdevice *device)
+{
+ auto ref = IncrementRef(&device->ref);
+ TRACEREF("ALCdevice %p increasing refcount to %u\n", device, ref);
+}
+
+void ALCdevice_DecRef(ALCdevice *device)
+{
+ auto ref = DecrementRef(&device->ref);
+ TRACEREF("ALCdevice %p decreasing refcount to %u\n", device, ref);
+ if(UNLIKELY(ref == 0)) delete device;
+}
+
+/* Simple RAII device reference. Takes the reference of the provided ALCdevice,
+ * and decrements it when leaving scope. Movable (transfer reference) but not
+ * copyable (no new references).
+ */
+class DeviceRef {
+ ALCdevice *mDev{nullptr};
+
+ void reset() noexcept
+ {
+ if(mDev)
+ ALCdevice_DecRef(mDev);
+ mDev = nullptr;
+ }
+
+public:
+ DeviceRef() noexcept = default;
+ DeviceRef(DeviceRef&& rhs) noexcept : mDev{rhs.mDev}
+ { rhs.mDev = nullptr; }
+ explicit DeviceRef(ALCdevice *dev) noexcept : mDev(dev) { }
+ ~DeviceRef() { reset(); }
+
+ DeviceRef& operator=(const DeviceRef&) = delete;
+ DeviceRef& operator=(DeviceRef&& rhs) noexcept
+ {
+ std::swap(mDev, rhs.mDev);
+ return *this;
+ }
+
+ operator bool() const noexcept { return mDev != nullptr; }
+
+ ALCdevice* operator->() const noexcept { return mDev; }
+ ALCdevice* get() const noexcept { return mDev; }
+
+ ALCdevice* release() noexcept
+ {
+ ALCdevice *ret{mDev};
+ mDev = nullptr;
+ return ret;
+ }
+};
+
+inline bool operator==(const DeviceRef &lhs, const ALCdevice *rhs) noexcept
+{ return lhs.get() == rhs; }
+inline bool operator!=(const DeviceRef &lhs, const ALCdevice *rhs) noexcept
+{ return !(lhs == rhs); }
+inline bool operator<(const DeviceRef &lhs, const ALCdevice *rhs) noexcept
+{ return lhs.get() < rhs; }
+
+
+/************************************************
+ * Device lists
+ ************************************************/
+al::vector<DeviceRef> DeviceList;
+al::vector<ContextRef> ContextList;
+
+std::recursive_mutex ListLock;
+
+
+void alc_initconfig(void)
+{
+ const char *str{getenv("ALSOFT_LOGLEVEL")};
+ if(str)
+ {
+ long lvl = strtol(str, nullptr, 0);
+ if(lvl >= NoLog && lvl <= LogRef)
+ gLogLevel = static_cast<LogLevel>(lvl);
+ }
+
+ str = getenv("ALSOFT_LOGFILE");
+ if(str && str[0])
+ {
+#ifdef _WIN32
+ std::wstring wname{utf8_to_wstr(str)};
+ FILE *logfile = _wfopen(wname.c_str(), L"wt");
+#else
+ FILE *logfile = fopen(str, "wt");
+#endif
+ if(logfile) gLogFile = logfile;
+ else ERR("Failed to open log file '%s'\n", str);
+ }
+
+ TRACE("Initializing library v%s-%s %s\n", ALSOFT_VERSION, ALSOFT_GIT_COMMIT_HASH,
+ ALSOFT_GIT_BRANCH);
+ {
+ std::string names;
+ if(std::begin(BackendList) == BackendListEnd)
+ names += "(none)";
+ else
+ {
+ const al::span<const BackendInfo> infos{std::begin(BackendList), BackendListEnd};
+ names += infos[0].name;
+ for(const auto &backend : infos.subspan(1))
+ {
+ names += ", ";
+ names += backend.name;
+ }
+ }
+ TRACE("Supported backends: %s\n", names.c_str());
+ }
+ ReadALConfig();
+
+ str = getenv("__ALSOFT_SUSPEND_CONTEXT");
+ if(str && *str)
+ {
+ if(strcasecmp(str, "ignore") == 0)
+ {
+ SuspendDefers = false;
+ TRACE("Selected context suspend behavior, \"ignore\"\n");
+ }
+ else
+ ERR("Unhandled context suspend behavior setting: \"%s\"\n", str);
+ }
+
+ int capfilter{0};
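+    /* Build the set of CPU extensions this build can use; the
+     * disable-cpu-exts option below may mask some (or all) of them off.
+     */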
+#if defined(HAVE_SSE4_1)
+ capfilter |= CPU_CAP_SSE | CPU_CAP_SSE2 | CPU_CAP_SSE3 | CPU_CAP_SSE4_1;
+#elif defined(HAVE_SSE3)
+ capfilter |= CPU_CAP_SSE | CPU_CAP_SSE2 | CPU_CAP_SSE3;
+#elif defined(HAVE_SSE2)
+ capfilter |= CPU_CAP_SSE | CPU_CAP_SSE2;
+#elif defined(HAVE_SSE)
+ capfilter |= CPU_CAP_SSE;
+#endif
+#ifdef HAVE_NEON
+ capfilter |= CPU_CAP_NEON;
+#endif
+ if(auto cpuopt = ConfigValueStr(nullptr, nullptr, "disable-cpu-exts"))
+ {
+ str = cpuopt->c_str();
+ if(strcasecmp(str, "all") == 0)
+ capfilter = 0;
+ else
+ {
+ const char *next = str;
+ do {
+ str = next;
+ while(isspace(str[0]))
+ str++;
+ next = strchr(str, ',');
+
+ if(!str[0] || str[0] == ',')
+ continue;
+
+ size_t len{next ? static_cast<size_t>(next-str) : strlen(str)};
+ while(len > 0 && isspace(str[len-1]))
+ len--;
+ if(len == 3 && strncasecmp(str, "sse", len) == 0)
+ capfilter &= ~CPU_CAP_SSE;
+ else if(len == 4 && strncasecmp(str, "sse2", len) == 0)
+ capfilter &= ~CPU_CAP_SSE2;
+ else if(len == 4 && strncasecmp(str, "sse3", len) == 0)
+ capfilter &= ~CPU_CAP_SSE3;
+ else if(len == 6 && strncasecmp(str, "sse4.1", len) == 0)
+ capfilter &= ~CPU_CAP_SSE4_1;
+ else if(len == 4 && strncasecmp(str, "neon", len) == 0)
+ capfilter &= ~CPU_CAP_NEON;
+ else
+ WARN("Invalid CPU extension \"%s\"\n", str);
+ } while(next++);
+ }
+ }
+ FillCPUCaps(capfilter);
+
+#ifdef _WIN32
+#define DEF_MIXER_PRIO 1
+#else
+#define DEF_MIXER_PRIO 0
+#endif
+ RTPrioLevel = ConfigValueInt(nullptr, nullptr, "rt-prio").value_or(DEF_MIXER_PRIO);
+#undef DEF_MIXER_PRIO
+
+ aluInit();
+ aluInitMixer();
+
+ str = getenv("ALSOFT_TRAP_ERROR");
+ if(str && (strcasecmp(str, "true") == 0 || strtol(str, nullptr, 0) == 1))
+ {
+ TrapALError = true;
+ TrapALCError = true;
+ }
+ else
+ {
+ str = getenv("ALSOFT_TRAP_AL_ERROR");
+ if(str && (strcasecmp(str, "true") == 0 || strtol(str, nullptr, 0) == 1))
+ TrapALError = true;
+ TrapALError = !!GetConfigValueBool(nullptr, nullptr, "trap-al-error", TrapALError);
+
+ str = getenv("ALSOFT_TRAP_ALC_ERROR");
+ if(str && (strcasecmp(str, "true") == 0 || strtol(str, nullptr, 0) == 1))
+ TrapALCError = true;
+ TrapALCError = !!GetConfigValueBool(nullptr, nullptr, "trap-alc-error", TrapALCError);
+ }
+
+ if(auto boostopt = ConfigValueFloat(nullptr, "reverb", "boost"))
+ {
+ const float valf{std::isfinite(*boostopt) ? clampf(*boostopt, -24.0f, 24.0f) : 0.0f};
+ ReverbBoost *= std::pow(10.0f, valf / 20.0f);
+ }
+
+ auto devopt = ConfigValueStr(nullptr, nullptr, "drivers");
+ if(const char *devs{getenv("ALSOFT_DRIVERS")})
+ {
+ if(devs[0])
+ devopt = devs;
+ }
+ if(devopt)
+ {
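+        /* The option is a comma-separated list of backend names. A '-'
+         * prefix removes that backend, and a trailing comma keeps the
+         * remaining unlisted backends; otherwise only the named backends
+         * stay in the list.
+         */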
+ auto backendlist_cur = std::begin(BackendList);
+
+ bool endlist{true};
+ const char *next{devopt->c_str()};
+ do {
+ const char *devs{next};
+ while(isspace(devs[0]))
+ devs++;
+ next = strchr(devs, ',');
+
+ const bool delitem{devs[0] == '-'};
+ if(devs[0] == '-') devs++;
+
+ if(!devs[0] || devs[0] == ',')
+ {
+ endlist = false;
+ continue;
+ }
+ endlist = true;
+
+ size_t len{next ? (static_cast<size_t>(next-devs)) : strlen(devs)};
+ while(len > 0 && isspace(devs[len-1])) --len;
+#ifdef HAVE_WASAPI
+ /* HACK: For backwards compatibility, convert backend references of
+ * mmdevapi to wasapi. This should eventually be removed.
+ */
+ if(len == 8 && strncmp(devs, "mmdevapi", len) == 0)
+ {
+ devs = "wasapi";
+ len = 6;
+ }
+#endif
+
+ auto find_backend = [devs,len](const BackendInfo &backend) -> bool
+ { return len == strlen(backend.name) && strncmp(backend.name, devs, len) == 0; };
+ auto this_backend = std::find_if(std::begin(BackendList), BackendListEnd,
+ find_backend);
+
+ if(this_backend == BackendListEnd)
+ continue;
+
+ if(delitem)
+ BackendListEnd = std::move(this_backend+1, BackendListEnd, this_backend);
+ else
+ backendlist_cur = std::rotate(backendlist_cur, this_backend, this_backend+1);
+ } while(next++);
+
+ if(endlist)
+ BackendListEnd = backendlist_cur;
+ }
+
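+    /* Initialize the remaining backends in priority order. Returning true
+     * drops the entry from the list, either because its init failed or
+     * because playback and capture factories have already been found.
+     */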
+ auto init_backend = [](BackendInfo &backend) -> bool
+ {
+ if(PlaybackFactory && CaptureFactory)
+ return true;
+
+ BackendFactory &factory = backend.getFactory();
+ if(!factory.init())
+ {
+ WARN("Failed to initialize backend \"%s\"\n", backend.name);
+ return true;
+ }
+
+ TRACE("Initialized backend \"%s\"\n", backend.name);
+ if(!PlaybackFactory && factory.querySupport(BackendType::Playback))
+ {
+ PlaybackFactory = &factory;
+ TRACE("Added \"%s\" for playback\n", backend.name);
+ }
+ if(!CaptureFactory && factory.querySupport(BackendType::Capture))
+ {
+ CaptureFactory = &factory;
+ TRACE("Added \"%s\" for capture\n", backend.name);
+ }
+ return false;
+ };
+ BackendListEnd = std::remove_if(std::begin(BackendList), BackendListEnd, init_backend);
+
+ LoopbackBackendFactory::getFactory().init();
+
+ if(!PlaybackFactory)
+ WARN("No playback backend available!\n");
+ if(!CaptureFactory)
+ WARN("No capture backend available!\n");
+
+ if(auto exclopt = ConfigValueStr(nullptr, nullptr, "excludefx"))
+ {
+ const char *next{exclopt->c_str()};
+ do {
+ str = next;
+ next = strchr(str, ',');
+
+ if(!str[0] || next == str)
+ continue;
+
+ size_t len{next ? static_cast<size_t>(next-str) : strlen(str)};
+ for(const EffectList &effectitem : gEffectList)
+ {
+ if(len == strlen(effectitem.name) &&
+ strncmp(effectitem.name, str, len) == 0)
+ DisabledEffects[effectitem.type] = AL_TRUE;
+ }
+ } while(next++);
+ }
+
+ InitEffect(&DefaultEffect);
+ auto defrevopt = ConfigValueStr(nullptr, nullptr, "default-reverb");
+ if((str=getenv("ALSOFT_DEFAULT_REVERB")) && str[0])
+ defrevopt = str;
+ if(defrevopt) LoadReverbPreset(defrevopt->c_str(), &DefaultEffect);
+}
+#define DO_INITCONFIG() std::call_once(alc_config_once, [](){alc_initconfig();})
+
+
+/************************************************
+ * Device enumeration
+ ************************************************/
+void ProbeAllDevicesList()
+{
+ DO_INITCONFIG();
+
+ std::lock_guard<std::recursive_mutex> _{ListLock};
+ alcAllDevicesList.clear();
+ if(PlaybackFactory)
+ PlaybackFactory->probe(DevProbe::Playback, &alcAllDevicesList);
+}
+void ProbeCaptureDeviceList()
+{
+ DO_INITCONFIG();
+
+ std::lock_guard<std::recursive_mutex> _{ListLock};
+ alcCaptureDeviceList.clear();
+ if(CaptureFactory)
+ CaptureFactory->probe(DevProbe::Capture, &alcCaptureDeviceList);
+}
+
+} // namespace
+
+/* Mixing thread priority level */
+ALint RTPrioLevel;
+
+FILE *gLogFile{stderr};
+#ifdef _DEBUG
+LogLevel gLogLevel{LogWarning};
+#else
+LogLevel gLogLevel{LogError};
+#endif
+
+/************************************************
+ * Library initialization
+ ************************************************/
+#if defined(_WIN32) && !defined(AL_LIBTYPE_STATIC)
+BOOL APIENTRY DllMain(HINSTANCE module, DWORD reason, LPVOID /*reserved*/)
+{
+ switch(reason)
+ {
+ case DLL_PROCESS_ATTACH:
+ /* Pin the DLL so we won't get unloaded until the process terminates */
+ GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_PIN | GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
+ (WCHAR*)module, &module);
+ break;
+
+ case DLL_PROCESS_DETACH:
+ break;
+ }
+ return TRUE;
+}
+#endif
+
+/************************************************
+ * Device format information
+ ************************************************/
+const ALCchar *DevFmtTypeString(DevFmtType type) noexcept
+{
+ switch(type)
+ {
+ case DevFmtByte: return "Signed Byte";
+ case DevFmtUByte: return "Unsigned Byte";
+ case DevFmtShort: return "Signed Short";
+ case DevFmtUShort: return "Unsigned Short";
+ case DevFmtInt: return "Signed Int";
+ case DevFmtUInt: return "Unsigned Int";
+ case DevFmtFloat: return "Float";
+ }
+ return "(unknown type)";
+}
+const ALCchar *DevFmtChannelsString(DevFmtChannels chans) noexcept
+{
+ switch(chans)
+ {
+ case DevFmtMono: return "Mono";
+ case DevFmtStereo: return "Stereo";
+ case DevFmtQuad: return "Quadraphonic";
+ case DevFmtX51: return "5.1 Surround";
+ case DevFmtX51Rear: return "5.1 Surround (Rear)";
+ case DevFmtX61: return "6.1 Surround";
+ case DevFmtX71: return "7.1 Surround";
+ case DevFmtAmbi3D: return "Ambisonic 3D";
+ }
+ return "(unknown channels)";
+}
+
+ALsizei BytesFromDevFmt(DevFmtType type) noexcept
+{
+ switch(type)
+ {
+ case DevFmtByte: return sizeof(ALbyte);
+ case DevFmtUByte: return sizeof(ALubyte);
+ case DevFmtShort: return sizeof(ALshort);
+ case DevFmtUShort: return sizeof(ALushort);
+ case DevFmtInt: return sizeof(ALint);
+ case DevFmtUInt: return sizeof(ALuint);
+ case DevFmtFloat: return sizeof(ALfloat);
+ }
+ return 0;
+}
+ALsizei ChannelsFromDevFmt(DevFmtChannels chans, ALsizei ambiorder) noexcept
+{
+ switch(chans)
+ {
+ case DevFmtMono: return 1;
+ case DevFmtStereo: return 2;
+ case DevFmtQuad: return 4;
+ case DevFmtX51: return 6;
+ case DevFmtX51Rear: return 6;
+ case DevFmtX61: return 7;
+ case DevFmtX71: return 8;
+ case DevFmtAmbi3D: return (ambiorder+1) * (ambiorder+1);
+ }
+ return 0;
+}
+
+struct DevFmtPair { DevFmtChannels chans; DevFmtType type; };
+static al::optional<DevFmtPair> DecomposeDevFormat(ALenum format)
+{
+ static const struct {
+ ALenum format;
+ DevFmtChannels channels;
+ DevFmtType type;
+ } list[] = {
+ { AL_FORMAT_MONO8, DevFmtMono, DevFmtUByte },
+ { AL_FORMAT_MONO16, DevFmtMono, DevFmtShort },
+ { AL_FORMAT_MONO_FLOAT32, DevFmtMono, DevFmtFloat },
+
+ { AL_FORMAT_STEREO8, DevFmtStereo, DevFmtUByte },
+ { AL_FORMAT_STEREO16, DevFmtStereo, DevFmtShort },
+ { AL_FORMAT_STEREO_FLOAT32, DevFmtStereo, DevFmtFloat },
+
+ { AL_FORMAT_QUAD8, DevFmtQuad, DevFmtUByte },
+ { AL_FORMAT_QUAD16, DevFmtQuad, DevFmtShort },
+ { AL_FORMAT_QUAD32, DevFmtQuad, DevFmtFloat },
+
+ { AL_FORMAT_51CHN8, DevFmtX51, DevFmtUByte },
+ { AL_FORMAT_51CHN16, DevFmtX51, DevFmtShort },
+ { AL_FORMAT_51CHN32, DevFmtX51, DevFmtFloat },
+
+ { AL_FORMAT_61CHN8, DevFmtX61, DevFmtUByte },
+ { AL_FORMAT_61CHN16, DevFmtX61, DevFmtShort },
+ { AL_FORMAT_61CHN32, DevFmtX61, DevFmtFloat },
+
+ { AL_FORMAT_71CHN8, DevFmtX71, DevFmtUByte },
+ { AL_FORMAT_71CHN16, DevFmtX71, DevFmtShort },
+ { AL_FORMAT_71CHN32, DevFmtX71, DevFmtFloat },
+ };
+
+ for(const auto &item : list)
+ {
+ if(item.format == format)
+ return al::make_optional(DevFmtPair{item.channels, item.type});
+ }
+
+ return al::nullopt;
+}
+
+static ALCboolean IsValidALCType(ALCenum type)
+{
+ switch(type)
+ {
+ case ALC_BYTE_SOFT:
+ case ALC_UNSIGNED_BYTE_SOFT:
+ case ALC_SHORT_SOFT:
+ case ALC_UNSIGNED_SHORT_SOFT:
+ case ALC_INT_SOFT:
+ case ALC_UNSIGNED_INT_SOFT:
+ case ALC_FLOAT_SOFT:
+ return ALC_TRUE;
+ }
+ return ALC_FALSE;
+}
+
+static ALCboolean IsValidALCChannels(ALCenum channels)
+{
+ switch(channels)
+ {
+ case ALC_MONO_SOFT:
+ case ALC_STEREO_SOFT:
+ case ALC_QUAD_SOFT:
+ case ALC_5POINT1_SOFT:
+ case ALC_6POINT1_SOFT:
+ case ALC_7POINT1_SOFT:
+ case ALC_BFORMAT3D_SOFT:
+ return ALC_TRUE;
+ }
+ return ALC_FALSE;
+}
+
+static ALCboolean IsValidAmbiLayout(ALCenum layout)
+{
+ switch(layout)
+ {
+ case ALC_ACN_SOFT:
+ case ALC_FUMA_SOFT:
+ return ALC_TRUE;
+ }
+ return ALC_FALSE;
+}
+
+static ALCboolean IsValidAmbiScaling(ALCenum scaling)
+{
+ switch(scaling)
+ {
+ case ALC_N3D_SOFT:
+ case ALC_SN3D_SOFT:
+ case ALC_FUMA_SOFT:
+ return ALC_TRUE;
+ }
+ return ALC_FALSE;
+}
+
+/************************************************
+ * Miscellaneous ALC helpers
+ ************************************************/
+
+/* SetDefaultWFXChannelOrder
+ *
+ * Sets the default channel order used by WaveFormatEx.
+ */
+void SetDefaultWFXChannelOrder(ALCdevice *device)
+{
+ device->RealOut.ChannelIndex.fill(-1);
+
+ switch(device->FmtChans)
+ {
+ case DevFmtMono:
+ device->RealOut.ChannelIndex[FrontCenter] = 0;
+ break;
+ case DevFmtStereo:
+ device->RealOut.ChannelIndex[FrontLeft] = 0;
+ device->RealOut.ChannelIndex[FrontRight] = 1;
+ break;
+ case DevFmtQuad:
+ device->RealOut.ChannelIndex[FrontLeft] = 0;
+ device->RealOut.ChannelIndex[FrontRight] = 1;
+ device->RealOut.ChannelIndex[BackLeft] = 2;
+ device->RealOut.ChannelIndex[BackRight] = 3;
+ break;
+ case DevFmtX51:
+ device->RealOut.ChannelIndex[FrontLeft] = 0;
+ device->RealOut.ChannelIndex[FrontRight] = 1;
+ device->RealOut.ChannelIndex[FrontCenter] = 2;
+ device->RealOut.ChannelIndex[LFE] = 3;
+ device->RealOut.ChannelIndex[SideLeft] = 4;
+ device->RealOut.ChannelIndex[SideRight] = 5;
+ break;
+ case DevFmtX51Rear:
+ device->RealOut.ChannelIndex[FrontLeft] = 0;
+ device->RealOut.ChannelIndex[FrontRight] = 1;
+ device->RealOut.ChannelIndex[FrontCenter] = 2;
+ device->RealOut.ChannelIndex[LFE] = 3;
+ device->RealOut.ChannelIndex[BackLeft] = 4;
+ device->RealOut.ChannelIndex[BackRight] = 5;
+ break;
+ case DevFmtX61:
+ device->RealOut.ChannelIndex[FrontLeft] = 0;
+ device->RealOut.ChannelIndex[FrontRight] = 1;
+ device->RealOut.ChannelIndex[FrontCenter] = 2;
+ device->RealOut.ChannelIndex[LFE] = 3;
+ device->RealOut.ChannelIndex[BackCenter] = 4;
+ device->RealOut.ChannelIndex[SideLeft] = 5;
+ device->RealOut.ChannelIndex[SideRight] = 6;
+ break;
+ case DevFmtX71:
+ device->RealOut.ChannelIndex[FrontLeft] = 0;
+ device->RealOut.ChannelIndex[FrontRight] = 1;
+ device->RealOut.ChannelIndex[FrontCenter] = 2;
+ device->RealOut.ChannelIndex[LFE] = 3;
+ device->RealOut.ChannelIndex[BackLeft] = 4;
+ device->RealOut.ChannelIndex[BackRight] = 5;
+ device->RealOut.ChannelIndex[SideLeft] = 6;
+ device->RealOut.ChannelIndex[SideRight] = 7;
+ break;
+ case DevFmtAmbi3D:
+ device->RealOut.ChannelIndex[Aux0] = 0;
+ if(device->mAmbiOrder > 0)
+ {
+ device->RealOut.ChannelIndex[Aux1] = 1;
+ device->RealOut.ChannelIndex[Aux2] = 2;
+ device->RealOut.ChannelIndex[Aux3] = 3;
+ }
+ if(device->mAmbiOrder > 1)
+ {
+ device->RealOut.ChannelIndex[Aux4] = 4;
+ device->RealOut.ChannelIndex[Aux5] = 5;
+ device->RealOut.ChannelIndex[Aux6] = 6;
+ device->RealOut.ChannelIndex[Aux7] = 7;
+ device->RealOut.ChannelIndex[Aux8] = 8;
+ }
+ if(device->mAmbiOrder > 2)
+ {
+ device->RealOut.ChannelIndex[Aux9] = 9;
+ device->RealOut.ChannelIndex[Aux10] = 10;
+ device->RealOut.ChannelIndex[Aux11] = 11;
+ device->RealOut.ChannelIndex[Aux12] = 12;
+ device->RealOut.ChannelIndex[Aux13] = 13;
+ device->RealOut.ChannelIndex[Aux14] = 14;
+ device->RealOut.ChannelIndex[Aux15] = 15;
+ }
+ break;
+ }
+}
+
+/* SetDefaultChannelOrder
+ *
+ * Sets the default channel order used by most non-WaveFormatEx-based APIs.
+ */
+void SetDefaultChannelOrder(ALCdevice *device)
+{
+ device->RealOut.ChannelIndex.fill(-1);
+
+ switch(device->FmtChans)
+ {
+ case DevFmtX51Rear:
+ device->RealOut.ChannelIndex[FrontLeft] = 0;
+ device->RealOut.ChannelIndex[FrontRight] = 1;
+ device->RealOut.ChannelIndex[BackLeft] = 2;
+ device->RealOut.ChannelIndex[BackRight] = 3;
+ device->RealOut.ChannelIndex[FrontCenter] = 4;
+ device->RealOut.ChannelIndex[LFE] = 5;
+ return;
+ case DevFmtX71:
+ device->RealOut.ChannelIndex[FrontLeft] = 0;
+ device->RealOut.ChannelIndex[FrontRight] = 1;
+ device->RealOut.ChannelIndex[BackLeft] = 2;
+ device->RealOut.ChannelIndex[BackRight] = 3;
+ device->RealOut.ChannelIndex[FrontCenter] = 4;
+ device->RealOut.ChannelIndex[LFE] = 5;
+ device->RealOut.ChannelIndex[SideLeft] = 6;
+ device->RealOut.ChannelIndex[SideRight] = 7;
+ return;
+
+ /* Same as WFX order */
+ case DevFmtMono:
+ case DevFmtStereo:
+ case DevFmtQuad:
+ case DevFmtX51:
+ case DevFmtX61:
+ case DevFmtAmbi3D:
+ SetDefaultWFXChannelOrder(device);
+ break;
+ }
+}
+
+
+/* ALCcontext_DeferUpdates
+ *
+ * Defers/suspends updates for the given context's listener and sources. This
+ * does *NOT* stop mixing, but rather prevents certain property changes from
+ * taking effect.
+ */
+void ALCcontext_DeferUpdates(ALCcontext *context)
+{
+ context->DeferUpdates.store(true);
+}
+
+/* ALCcontext_ProcessUpdates
+ *
+ * Resumes update processing after being deferred.
+ */
+void ALCcontext_ProcessUpdates(ALCcontext *context)
+{
+ std::lock_guard<std::mutex> _{context->PropLock};
+ if(context->DeferUpdates.exchange(false))
+ {
+ /* Tell the mixer to stop applying updates, then wait for any active
+ * updating to finish, before providing updates.
+ */
+ context->HoldUpdates.store(true, std::memory_order_release);
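+        /* An odd UpdateCount means the mixer is mid-update; spin until it
+         * finishes.
+         */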
+ while((context->UpdateCount.load(std::memory_order_acquire)&1) != 0)
+ std::this_thread::yield();
+
+ if(!context->PropsClean.test_and_set(std::memory_order_acq_rel))
+ UpdateContextProps(context);
+ if(!context->Listener.PropsClean.test_and_set(std::memory_order_acq_rel))
+ UpdateListenerProps(context);
+ UpdateAllEffectSlotProps(context);
+ UpdateAllSourceProps(context);
+
+ /* Now with all updates declared, let the mixer continue applying them
+ * so they all happen at once.
+ */
+ context->HoldUpdates.store(false, std::memory_order_release);
+ }
+}
+
+
+/* alcSetError
+ *
+ * Stores the latest ALC device error
+ */
+static void alcSetError(ALCdevice *device, ALCenum errorCode)
+{
+ WARN("Error generated on device %p, code 0x%04x\n", device, errorCode);
+ if(TrapALCError)
+ {
+#ifdef _WIN32
+ /* DebugBreak() will cause an exception if there is no debugger */
+ if(IsDebuggerPresent())
+ DebugBreak();
+#elif defined(SIGTRAP)
+ raise(SIGTRAP);
+#endif
+ }
+
+ if(device)
+ device->LastError.store(errorCode);
+ else
+ LastNullDeviceError.store(errorCode);
+}
+
+
+static std::unique_ptr<Compressor> CreateDeviceLimiter(const ALCdevice *device, const ALfloat threshold)
+{
+ return CompressorInit(static_cast<ALuint>(device->RealOut.Buffer.size()), device->Frequency,
+ AL_TRUE, AL_TRUE, AL_TRUE, AL_TRUE, AL_TRUE, 0.001f, 0.002f, 0.0f, 0.0f, threshold,
+ INFINITY, 0.0f, 0.020f, 0.200f);
+}
+
+/* UpdateClockBase
+ *
+ * Updates the device's base clock time with however many samples have been
+ * done. This is used so frequency changes on the device don't cause the time
+ * to jump forward or back. Must not be called while the device is running/
+ * mixing.
+ */
+static inline void UpdateClockBase(ALCdevice *device)
+{
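+    /* MixCount is incremented before and after the change, so readers that
+     * sample it (and retry while it's odd) see the clock update as atomic.
+     */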
+ IncrementRef(&device->MixCount);
+ device->ClockBase += nanoseconds{seconds{device->SamplesDone}} / device->Frequency;
+ device->SamplesDone = 0;
+ IncrementRef(&device->MixCount);
+}
+
+/* UpdateDeviceParams
+ *
+ * Updates device parameters according to the attribute list (caller is
+ * responsible for holding the list lock).
+ */
+static ALCenum UpdateDeviceParams(ALCdevice *device, const ALCint *attrList)
+{
+ HrtfRequestMode hrtf_userreq = Hrtf_Default;
+ HrtfRequestMode hrtf_appreq = Hrtf_Default;
+ ALCenum gainLimiter = device->LimiterState;
+ const ALsizei old_sends = device->NumAuxSends;
+ ALsizei new_sends = device->NumAuxSends;
+ DevFmtChannels oldChans;
+ DevFmtType oldType;
+ ALboolean update_failed;
+ ALCsizei hrtf_id = -1;
+ ALCuint oldFreq;
+
+ if((!attrList || !attrList[0]) && device->Type == Loopback)
+ {
+ WARN("Missing attributes for loopback device\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ // Check for attributes
+ if(attrList && attrList[0])
+ {
+ ALCenum alayout{AL_NONE};
+ ALCenum ascale{AL_NONE};
+ ALCenum schans{AL_NONE};
+ ALCenum stype{AL_NONE};
+ ALCsizei attrIdx{0};
+ ALCsizei aorder{0};
+ ALCuint freq{0u};
+
+ ALuint numMono{device->NumMonoSources};
+ ALuint numStereo{device->NumStereoSources};
+ ALsizei numSends{old_sends};
+
+#define TRACE_ATTR(a, v) TRACE("%s = %d\n", #a, v)
+ while(attrList[attrIdx])
+ {
+ switch(attrList[attrIdx])
+ {
+ case ALC_FORMAT_CHANNELS_SOFT:
+ schans = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_FORMAT_CHANNELS_SOFT, schans);
+ break;
+
+ case ALC_FORMAT_TYPE_SOFT:
+ stype = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_FORMAT_TYPE_SOFT, stype);
+ break;
+
+ case ALC_FREQUENCY:
+ freq = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_FREQUENCY, freq);
+ break;
+
+ case ALC_AMBISONIC_LAYOUT_SOFT:
+ alayout = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_AMBISONIC_LAYOUT_SOFT, alayout);
+ break;
+
+ case ALC_AMBISONIC_SCALING_SOFT:
+ ascale = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_AMBISONIC_SCALING_SOFT, ascale);
+ break;
+
+ case ALC_AMBISONIC_ORDER_SOFT:
+ aorder = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_AMBISONIC_ORDER_SOFT, aorder);
+ break;
+
+ case ALC_MONO_SOURCES:
+ numMono = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_MONO_SOURCES, numMono);
+ if(numMono > INT_MAX) numMono = 0;
+ break;
+
+ case ALC_STEREO_SOURCES:
+ numStereo = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_STEREO_SOURCES, numStereo);
+ if(numStereo > INT_MAX) numStereo = 0;
+ break;
+
+ case ALC_MAX_AUXILIARY_SENDS:
+ numSends = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_MAX_AUXILIARY_SENDS, numSends);
+ numSends = clampi(numSends, 0, MAX_SENDS);
+ break;
+
+ case ALC_HRTF_SOFT:
+ TRACE_ATTR(ALC_HRTF_SOFT, attrList[attrIdx + 1]);
+ if(attrList[attrIdx + 1] == ALC_FALSE)
+ hrtf_appreq = Hrtf_Disable;
+ else if(attrList[attrIdx + 1] == ALC_TRUE)
+ hrtf_appreq = Hrtf_Enable;
+ else
+ hrtf_appreq = Hrtf_Default;
+ break;
+
+ case ALC_HRTF_ID_SOFT:
+ hrtf_id = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_HRTF_ID_SOFT, hrtf_id);
+ break;
+
+ case ALC_OUTPUT_LIMITER_SOFT:
+ gainLimiter = attrList[attrIdx + 1];
+ TRACE_ATTR(ALC_OUTPUT_LIMITER_SOFT, gainLimiter);
+ break;
+
+ default:
+ TRACE("0x%04X = %d (0x%x)\n", attrList[attrIdx],
+ attrList[attrIdx + 1], attrList[attrIdx + 1]);
+ break;
+ }
+
+ attrIdx += 2;
+ }
+#undef TRACE_ATTR
+
+ const bool loopback{device->Type == Loopback};
+ if(loopback)
+ {
+ if(!schans || !stype || !freq)
+ {
+ WARN("Missing format for loopback device\n");
+ return ALC_INVALID_VALUE;
+ }
+ if(!IsValidALCChannels(schans) || !IsValidALCType(stype) || freq < MIN_OUTPUT_RATE)
+ return ALC_INVALID_VALUE;
+ if(schans == ALC_BFORMAT3D_SOFT)
+ {
+ if(!alayout || !ascale || !aorder)
+ {
+ WARN("Missing ambisonic info for loopback device\n");
+ return ALC_INVALID_VALUE;
+ }
+ if(!IsValidAmbiLayout(alayout) || !IsValidAmbiScaling(ascale))
+ return ALC_INVALID_VALUE;
+ if(aorder < 1 || aorder > MAX_AMBI_ORDER)
+ return ALC_INVALID_VALUE;
+ if((alayout == ALC_FUMA_SOFT || ascale == ALC_FUMA_SOFT) && aorder > 3)
+ return ALC_INVALID_VALUE;
+ }
+ }
+
+ /* If a context is already running on the device, stop playback so the
+ * device attributes can be updated.
+ */
+ if(device->Flags.get<DeviceRunning>())
+ device->Backend->stop();
+ device->Flags.unset<DeviceRunning>();
+
+ UpdateClockBase(device);
+
+ const char *devname{nullptr};
+ if(!loopback)
+ {
+ devname = device->DeviceName.c_str();
+
+ device->BufferSize = DEFAULT_UPDATE_SIZE * DEFAULT_NUM_UPDATES;
+ device->UpdateSize = DEFAULT_UPDATE_SIZE;
+ device->Frequency = DEFAULT_OUTPUT_RATE;
+
+ freq = ConfigValueUInt(devname, nullptr, "frequency").value_or(freq);
+ if(freq < 1)
+ device->Flags.unset<FrequencyRequest>();
+ else
+ {
+ freq = maxi(freq, MIN_OUTPUT_RATE);
+
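+            /* Rescale the update and buffer sizes to the requested sample
+             * rate, rounding to the nearest sample count.
+             */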
+ device->UpdateSize = (device->UpdateSize*freq + device->Frequency/2) /
+ device->Frequency;
+ device->BufferSize = (device->BufferSize*freq + device->Frequency/2) /
+ device->Frequency;
+
+ device->Frequency = freq;
+ device->Flags.set<FrequencyRequest>();
+ }
+
+ if(auto persizeopt = ConfigValueUInt(devname, nullptr, "period_size"))
+ device->UpdateSize = clampu(*persizeopt, 64, 8192);
+
+ if(auto peropt = ConfigValueUInt(devname, nullptr, "periods"))
+ device->BufferSize = device->UpdateSize * clampu(*peropt, 2, 16);
+ else
+ device->BufferSize = maxu(device->BufferSize, device->UpdateSize*2);
+ }
+ else
+ {
+ device->Frequency = freq;
+ device->FmtChans = static_cast<DevFmtChannels>(schans);
+ device->FmtType = static_cast<DevFmtType>(stype);
+ if(schans == ALC_BFORMAT3D_SOFT)
+ {
+ device->mAmbiOrder = aorder;
+ device->mAmbiLayout = static_cast<AmbiLayout>(alayout);
+ device->mAmbiScale = static_cast<AmbiNorm>(ascale);
+ }
+ }
+
+ if(numMono > INT_MAX-numStereo)
+ numMono = INT_MAX-numStereo;
+ numMono += numStereo;
+ if(auto srcsopt = ConfigValueUInt(devname, nullptr, "sources"))
+ {
+ if(*srcsopt <= 0) numMono = 256;
+ else numMono = *srcsopt;
+ }
+ else
+ numMono = maxu(numMono, 256);
+ numStereo = minu(numStereo, numMono);
+ numMono -= numStereo;
+ device->SourcesMax = numMono + numStereo;
+
+ device->NumMonoSources = numMono;
+ device->NumStereoSources = numStereo;
+
+ if(auto sendsopt = ConfigValueInt(devname, nullptr, "sends"))
+ new_sends = mini(numSends, clampi(*sendsopt, 0, MAX_SENDS));
+ else
+ new_sends = numSends;
+ }
+
+ if(device->Flags.get<DeviceRunning>())
+ return ALC_NO_ERROR;
+
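+    /* The device is stopped; clear the old post-processing state before
+     * reconfiguring the output format.
+     */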
+ device->AvgSpeakerDist = 0.0f;
+ device->Uhj_Encoder = nullptr;
+ device->AmbiDecoder = nullptr;
+ device->Bs2b = nullptr;
+ device->PostProcess = nullptr;
+
+ device->Stablizer = nullptr;
+ device->Limiter = nullptr;
+ device->ChannelDelay.clear();
+
+ device->Dry.AmbiMap.fill(BFChannelConfig{});
+ device->Dry.Buffer = {};
+ std::fill(std::begin(device->NumChannelsPerOrder), std::end(device->NumChannelsPerOrder), 0u);
+ device->RealOut.ChannelIndex.fill(-1);
+ device->RealOut.Buffer = {};
+ device->MixBuffer.clear();
+ device->MixBuffer.shrink_to_fit();
+
+ UpdateClockBase(device);
+ device->FixedLatency = nanoseconds::zero();
+
+ device->DitherDepth = 0.0f;
+ device->DitherSeed = DITHER_RNG_SEED;
+
+ /*************************************************************************
+ * Update device format request if HRTF is requested
+ */
+ device->HrtfStatus = ALC_HRTF_DISABLED_SOFT;
+ if(device->Type != Loopback)
+ {
+ if(auto hrtfopt = ConfigValueStr(device->DeviceName.c_str(), nullptr, "hrtf"))
+ {
+ const char *hrtf{hrtfopt->c_str()};
+ if(strcasecmp(hrtf, "true") == 0)
+ hrtf_userreq = Hrtf_Enable;
+ else if(strcasecmp(hrtf, "false") == 0)
+ hrtf_userreq = Hrtf_Disable;
+ else if(strcasecmp(hrtf, "auto") != 0)
+ ERR("Unexpected hrtf value: %s\n", hrtf);
+ }
+
+ if(hrtf_userreq == Hrtf_Enable || (hrtf_userreq != Hrtf_Disable && hrtf_appreq == Hrtf_Enable))
+ {
+ HrtfEntry *hrtf{nullptr};
+ if(device->HrtfList.empty())
+ device->HrtfList = EnumerateHrtf(device->DeviceName.c_str());
+ if(!device->HrtfList.empty())
+ {
+ if(hrtf_id >= 0 && static_cast<size_t>(hrtf_id) < device->HrtfList.size())
+ hrtf = GetLoadedHrtf(device->HrtfList[hrtf_id].hrtf);
+ else
+ hrtf = GetLoadedHrtf(device->HrtfList.front().hrtf);
+ }
+
+ if(hrtf)
+ {
+ device->FmtChans = DevFmtStereo;
+ device->Frequency = hrtf->sampleRate;
+ device->Flags.set<ChannelsRequest, FrequencyRequest>();
+ if(HrtfEntry *oldhrtf{device->mHrtf})
+ oldhrtf->DecRef();
+ device->mHrtf = hrtf;
+ }
+ else
+ {
+ hrtf_userreq = Hrtf_Default;
+ hrtf_appreq = Hrtf_Disable;
+ device->HrtfStatus = ALC_HRTF_UNSUPPORTED_FORMAT_SOFT;
+ }
+ }
+ }
+
+ oldFreq = device->Frequency;
+ oldChans = device->FmtChans;
+ oldType = device->FmtType;
+
+ TRACE("Pre-reset: %s%s, %s%s, %s%uhz, %u / %u buffer\n",
+ device->Flags.get<ChannelsRequest>()?"*":"", DevFmtChannelsString(device->FmtChans),
+ device->Flags.get<SampleTypeRequest>()?"*":"", DevFmtTypeString(device->FmtType),
+ device->Flags.get<FrequencyRequest>()?"*":"", device->Frequency,
+ device->UpdateSize, device->BufferSize);
+
+ try {
+ if(device->Backend->reset() == ALC_FALSE)
+ return ALC_INVALID_DEVICE;
+ }
+ catch(std::exception &e) {
+ ERR("Device reset failed: %s\n", e.what());
+ return ALC_INVALID_DEVICE;
+ }
+
+ if(device->FmtChans != oldChans && device->Flags.get<ChannelsRequest>())
+ {
+ ERR("Failed to set %s, got %s instead\n", DevFmtChannelsString(oldChans),
+ DevFmtChannelsString(device->FmtChans));
+ device->Flags.unset<ChannelsRequest>();
+ }
+ if(device->FmtType != oldType && device->Flags.get<SampleTypeRequest>())
+ {
+ ERR("Failed to set %s, got %s instead\n", DevFmtTypeString(oldType),
+ DevFmtTypeString(device->FmtType));
+ device->Flags.unset<SampleTypeRequest>();
+ }
+ if(device->Frequency != oldFreq && device->Flags.get<FrequencyRequest>())
+ {
+ WARN("Failed to set %uhz, got %uhz instead\n", oldFreq, device->Frequency);
+ device->Flags.unset<FrequencyRequest>();
+ }
+
+ TRACE("Post-reset: %s, %s, %uhz, %u / %u buffer\n",
+ DevFmtChannelsString(device->FmtChans), DevFmtTypeString(device->FmtType),
+ device->Frequency, device->UpdateSize, device->BufferSize);
+
+ aluInitRenderer(device, hrtf_id, hrtf_appreq, hrtf_userreq);
+
+ device->NumAuxSends = new_sends;
+ TRACE("Max sources: %d (%d + %d), effect slots: %d, sends: %d\n",
+ device->SourcesMax, device->NumMonoSources, device->NumStereoSources,
+ device->AuxiliaryEffectSlotMax, device->NumAuxSends);
+
+ /* Enable the stablizer only for formats that have front-left, front-right,
+ * and front-center outputs.
+ */
+ switch(device->FmtChans)
+ {
+ case DevFmtX51:
+ case DevFmtX51Rear:
+ case DevFmtX61:
+ case DevFmtX71:
+ if(GetConfigValueBool(device->DeviceName.c_str(), nullptr, "front-stablizer", 0))
+ {
+ auto stablizer = al::make_unique<FrontStablizer>();
+ /* Initialize band-splitting filters for the front-left and front-
+             * right channels, with a crossover at 5kHz (could be higher).
+ */
+ const ALfloat scale{5000.0f / static_cast<ALfloat>(device->Frequency)};
+
+ stablizer->LFilter.init(scale);
+ stablizer->RFilter = stablizer->LFilter;
+
+ device->Stablizer = std::move(stablizer);
+            /* NOTE: The value is copied into a local static constexpr variable
+             * so the chrono conversion doesn't bind a reference to
+             * FrontStablizer::DelayLength; ODR-using the in-class constant
+             * would require an out-of-line definition.
+             */
+ static constexpr size_t StablizerDelay{FrontStablizer::DelayLength};
+ device->FixedLatency += nanoseconds{seconds{StablizerDelay}} / device->Frequency;
+ }
+ break;
+ case DevFmtMono:
+ case DevFmtStereo:
+ case DevFmtQuad:
+ case DevFmtAmbi3D:
+ break;
+ }
+ TRACE("Front stablizer %s\n", device->Stablizer ? "enabled" : "disabled");
+
+ if(GetConfigValueBool(device->DeviceName.c_str(), nullptr, "dither", 1))
+ {
+ ALint depth{
+ ConfigValueInt(device->DeviceName.c_str(), nullptr, "dither-depth").value_or(0)};
+ if(depth <= 0)
+ {
+ switch(device->FmtType)
+ {
+ case DevFmtByte:
+ case DevFmtUByte:
+ depth = 8;
+ break;
+ case DevFmtShort:
+ case DevFmtUShort:
+ depth = 16;
+ break;
+ case DevFmtInt:
+ case DevFmtUInt:
+ case DevFmtFloat:
+ break;
+ }
+ }
+
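+        /* A positive depth enables dithering; it's clamped to 2-24 bits and
+         * stored as 2^(depth-1), the quantization scale for that bit depth.
+         */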
+ if(depth > 0)
+ {
+ depth = clampi(depth, 2, 24);
+ device->DitherDepth = std::pow(2.0f, static_cast<ALfloat>(depth-1));
+ }
+ }
+ if(!(device->DitherDepth > 0.0f))
+ TRACE("Dithering disabled\n");
+ else
+ TRACE("Dithering enabled (%d-bit, %g)\n", float2int(std::log2(device->DitherDepth)+0.5f)+1,
+ device->DitherDepth);
+
+ device->LimiterState = gainLimiter;
+ if(auto limopt = ConfigValueBool(device->DeviceName.c_str(), nullptr, "output-limiter"))
+ gainLimiter = *limopt ? ALC_TRUE : ALC_FALSE;
+
+ /* Valid values for gainLimiter are ALC_DONT_CARE_SOFT, ALC_TRUE, and
+ * ALC_FALSE. For ALC_DONT_CARE_SOFT, use the limiter for integer-based
+ * output (where samples must be clamped), and don't for floating-point
+ * (which can take unclamped samples).
+ */
+ if(gainLimiter == ALC_DONT_CARE_SOFT)
+ {
+ switch(device->FmtType)
+ {
+ case DevFmtByte:
+ case DevFmtUByte:
+ case DevFmtShort:
+ case DevFmtUShort:
+ case DevFmtInt:
+ case DevFmtUInt:
+ gainLimiter = ALC_TRUE;
+ break;
+ case DevFmtFloat:
+ gainLimiter = ALC_FALSE;
+ break;
+ }
+ }
+ if(gainLimiter == ALC_FALSE)
+ TRACE("Output limiter disabled\n");
+ else
+ {
+ ALfloat thrshld = 1.0f;
+ switch(device->FmtType)
+ {
+ case DevFmtByte:
+ case DevFmtUByte:
+ thrshld = 127.0f / 128.0f;
+ break;
+ case DevFmtShort:
+ case DevFmtUShort:
+ thrshld = 32767.0f / 32768.0f;
+ break;
+ case DevFmtInt:
+ case DevFmtUInt:
+ case DevFmtFloat:
+ break;
+ }
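+        /* When dithering, lower the threshold by one quantization step
+         * (1/DitherDepth) to leave headroom for the added dither noise.
+         */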
+ if(device->DitherDepth > 0.0f)
+ thrshld -= 1.0f / device->DitherDepth;
+
+ const float thrshld_dB{std::log10(thrshld) * 20.0f};
+ auto limiter = CreateDeviceLimiter(device, thrshld_dB);
+        /* Convert the lookahead from samples to nanoseconds: scale to
+         * nanosecond ticks, then divide by the sample rate.
+         */
+ device->FixedLatency += nanoseconds{seconds{limiter->getLookAhead()}} / device->Frequency;
+ device->Limiter = std::move(limiter);
+ TRACE("Output limiter enabled, %.4fdB limit\n", thrshld_dB);
+ }
+
+ TRACE("Fixed device latency: %ldns\n", (long)device->FixedLatency.count());
+
+ /* Need to delay returning failure until replacement Send arrays have been
+ * allocated with the appropriate size.
+ */
+ update_failed = AL_FALSE;
+ FPUCtl mixer_mode{};
+ for(ALCcontext *context : *device->mContexts.load())
+ {
+ if(context->DefaultSlot)
+ {
+ ALeffectslot *slot = context->DefaultSlot.get();
+ aluInitEffectPanning(slot, device);
+
+ EffectState *state{slot->Effect.State};
+ state->mOutTarget = device->Dry.Buffer;
+ if(state->deviceUpdate(device) == AL_FALSE)
+ update_failed = AL_TRUE;
+ else
+ UpdateEffectSlotProps(slot, context);
+ }
+
+ std::unique_lock<std::mutex> proplock{context->PropLock};
+ std::unique_lock<std::mutex> slotlock{context->EffectSlotLock};
+ for(auto &sublist : context->EffectSlotList)
+ {
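+            /* FreeMask has a bit set for each unused entry; invert it and scan
+             * the set bits to visit every allocated effect slot in the sublist.
+             */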
+ uint64_t usemask = ~sublist.FreeMask;
+ while(usemask)
+ {
+ ALsizei idx = CTZ64(usemask);
+ ALeffectslot *slot = sublist.EffectSlots + idx;
+
+ usemask &= ~(1_u64 << idx);
+
+ aluInitEffectPanning(slot, device);
+
+ EffectState *state{slot->Effect.State};
+ state->mOutTarget = device->Dry.Buffer;
+ if(state->deviceUpdate(device) == AL_FALSE)
+ update_failed = AL_TRUE;
+ else
+ UpdateEffectSlotProps(slot, context);
+ }
+ }
+ slotlock.unlock();
+
+ std::unique_lock<std::mutex> srclock{context->SourceLock};
+ for(auto &sublist : context->SourceList)
+ {
+ uint64_t usemask = ~sublist.FreeMask;
+ while(usemask)
+ {
+ ALsizei idx = CTZ64(usemask);
+ ALsource *source = sublist.Sources + idx;
+
+ usemask &= ~(1_u64 << idx);
+
+ if(old_sends != device->NumAuxSends)
+ {
+ ALsizei s;
+ for(s = device->NumAuxSends;s < old_sends;s++)
+ {
+ if(source->Send[s].Slot)
+ DecrementRef(&source->Send[s].Slot->ref);
+ source->Send[s].Slot = nullptr;
+ }
+ source->Send.resize(device->NumAuxSends);
+ source->Send.shrink_to_fit();
+ for(s = old_sends;s < device->NumAuxSends;s++)
+ {
+ source->Send[s].Slot = nullptr;
+ source->Send[s].Gain = 1.0f;
+ source->Send[s].GainHF = 1.0f;
+ source->Send[s].HFReference = LOWPASSFREQREF;
+ source->Send[s].GainLF = 1.0f;
+ source->Send[s].LFReference = HIGHPASSFREQREF;
+ }
+ }
+
+ source->PropsClean.clear(std::memory_order_release);
+ }
+ }
+
+ /* Clear any pre-existing voice property structs, in case the number of
+ * auxiliary sends is changing. Active sources will have updates
+ * respecified in UpdateAllSourceProps.
+ */
+ ALvoiceProps *vprops{context->FreeVoiceProps.exchange(nullptr, std::memory_order_acq_rel)};
+ while(vprops)
+ {
+ ALvoiceProps *next = vprops->next.load(std::memory_order_relaxed);
+ delete vprops;
+ vprops = next;
+ }
+
+ auto voices = context->Voices.get();
+ auto voices_end = voices->begin() + context->VoiceCount.load(std::memory_order_relaxed);
+ if(device->NumAuxSends < old_sends)
+ {
+ const ALsizei num_sends{device->NumAuxSends};
+ /* Clear extraneous property set sends. */
+ auto clear_sends = [num_sends](ALvoice &voice) -> void
+ {
+ std::fill(std::begin(voice.mProps.Send)+num_sends, std::end(voice.mProps.Send),
+ ALvoiceProps::SendData{});
+
+ std::fill(voice.mSend.begin()+num_sends, voice.mSend.end(), ALvoice::SendData{});
+ auto clear_chan_sends = [num_sends](ALvoice::ChannelData &chandata) -> void
+ {
+ std::fill(chandata.mWetParams.begin()+num_sends, chandata.mWetParams.end(),
+ SendParams{});
+ };
+ std::for_each(voice.mChans.begin(), voice.mChans.end(), clear_chan_sends);
+ };
+ std::for_each(voices->begin(), voices_end, clear_sends);
+ }
+ std::for_each(voices->begin(), voices_end,
+ [device](ALvoice &voice) -> void
+ {
+ delete voice.mUpdate.exchange(nullptr, std::memory_order_acq_rel);
+
+            /* Force the voice to the Stopped state if it was stopping. */
+ ALvoice::State vstate{ALvoice::Stopping};
+ voice.mPlayState.compare_exchange_strong(vstate, ALvoice::Stopped,
+ std::memory_order_acquire, std::memory_order_acquire);
+ if(voice.mSourceID.load(std::memory_order_relaxed) == 0u)
+ return;
+
+ if(device->AvgSpeakerDist > 0.0f)
+ {
+ /* Reinitialize the NFC filters for new parameters. */
+ const ALfloat w1{SPEEDOFSOUNDMETRESPERSEC /
+ (device->AvgSpeakerDist * device->Frequency)};
+ auto init_nfc = [w1](ALvoice::ChannelData &chandata) -> void
+ { chandata.mDryParams.NFCtrlFilter.init(w1); };
+ std::for_each(voice.mChans.begin(), voice.mChans.begin()+voice.mNumChannels,
+ init_nfc);
+ }
+ }
+ );
+ srclock.unlock();
+
+ context->PropsClean.test_and_set(std::memory_order_release);
+ UpdateContextProps(context);
+ context->Listener.PropsClean.test_and_set(std::memory_order_release);
+ UpdateListenerProps(context);
+ UpdateAllSourceProps(context);
+ }
+ mixer_mode.leave();
+ if(update_failed)
+ return ALC_INVALID_DEVICE;
+
+ if(!device->Flags.get<DevicePaused>())
+ {
+ if(device->Backend->start() == ALC_FALSE)
+ return ALC_INVALID_DEVICE;
+ device->Flags.set<DeviceRunning>();
+ }
+
+ return ALC_NO_ERROR;
+}
+
+
+ALCdevice::ALCdevice(DeviceType type) : Type{type}, mContexts{&EmptyContextArray}
+{
+}
+
+/* ALCdevice::~ALCdevice
+ *
+ * Frees the device structure, and destroys any objects the app failed to
+ * delete. Called once there are no more references to the device.
+ */
+ALCdevice::~ALCdevice()
+{
+ TRACE("Freeing device %p\n", this);
+
+ Backend = nullptr;
+
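+    /* Count the objects still allocated by popcounting the in-use (inverted
+     * FreeMask) bits of each sublist.
+     */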
+ size_t count{std::accumulate(BufferList.cbegin(), BufferList.cend(), size_t{0u},
+ [](size_t cur, const BufferSubList &sublist) noexcept -> size_t
+ { return cur + POPCNT64(~sublist.FreeMask); }
+ )};
+ if(count > 0)
+ WARN("%zu Buffer%s not deleted\n", count, (count==1)?"":"s");
+
+ count = std::accumulate(EffectList.cbegin(), EffectList.cend(), size_t{0u},
+ [](size_t cur, const EffectSubList &sublist) noexcept -> size_t
+ { return cur + POPCNT64(~sublist.FreeMask); }
+ );
+ if(count > 0)
+ WARN("%zu Effect%s not deleted\n", count, (count==1)?"":"s");
+
+ count = std::accumulate(FilterList.cbegin(), FilterList.cend(), size_t{0u},
+ [](size_t cur, const FilterSubList &sublist) noexcept -> size_t
+ { return cur + POPCNT64(~sublist.FreeMask); }
+ );
+ if(count > 0)
+ WARN("%zu Filter%s not deleted\n", count, (count==1)?"":"s");
+
+ if(mHrtf)
+ mHrtf->DecRef();
+ mHrtf = nullptr;
+
+ auto *oldarray = mContexts.exchange(nullptr, std::memory_order_relaxed);
+ if(oldarray != &EmptyContextArray) delete oldarray;
+}
+
+
+/* VerifyDevice
+ *
+ * Checks if the device handle is valid, and returns a new reference if so.
+ */
+static DeviceRef VerifyDevice(ALCdevice *device)
+{
+ std::lock_guard<std::recursive_mutex> _{ListLock};
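+    /* DeviceList is kept sorted by pointer value, so the handle can be found
+     * with a binary search.
+     */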
+ auto iter = std::lower_bound(DeviceList.cbegin(), DeviceList.cend(), device);
+ if(iter != DeviceList.cend() && *iter == device)
+ {
+ ALCdevice_IncRef(iter->get());
+ return DeviceRef{iter->get()};
+ }
+ return DeviceRef{};
+}
+
+
+ALCcontext::ALCcontext(ALCdevice *device) : Device{device}
+{
+ PropsClean.test_and_set(std::memory_order_relaxed);
+}
+
+/* InitContext
+ *
+ * Initializes context fields
+ */
+static ALvoid InitContext(ALCcontext *Context)
+{
+ ALlistener &listener = Context->Listener;
+ ALeffectslotArray *auxslots;
+
+    //Set up the active auxiliary effect slot array (with the default slot, if any)
+ if(!Context->DefaultSlot)
+ auxslots = ALeffectslot::CreatePtrArray(0);
+ else
+ {
+ auxslots = ALeffectslot::CreatePtrArray(1);
+ (*auxslots)[0] = Context->DefaultSlot.get();
+ }
+ Context->ActiveAuxSlots.store(auxslots, std::memory_order_relaxed);
+
+ //Set globals
+ Context->mDistanceModel = DistanceModel::Default;
+ Context->SourceDistanceModel = AL_FALSE;
+ Context->DopplerFactor = 1.0f;
+ Context->DopplerVelocity = 1.0f;
+ Context->SpeedOfSound = SPEEDOFSOUNDMETRESPERSEC;
+ Context->MetersPerUnit = AL_DEFAULT_METERS_PER_UNIT;
+
+ Context->ExtensionList = alExtList;
+
+
+ listener.Params.Matrix = alu::Matrix::Identity();
+ listener.Params.Velocity = alu::Vector{};
+ listener.Params.Gain = listener.Gain;
+ listener.Params.MetersPerUnit = Context->MetersPerUnit;
+ listener.Params.DopplerFactor = Context->DopplerFactor;
+ listener.Params.SpeedOfSound = Context->SpeedOfSound * Context->DopplerVelocity;
+ listener.Params.ReverbSpeedOfSound = listener.Params.SpeedOfSound *
+ listener.Params.MetersPerUnit;
+ listener.Params.SourceDistanceModel = Context->SourceDistanceModel;
+ listener.Params.mDistanceModel = Context->mDistanceModel;
+
+
+ Context->AsyncEvents = CreateRingBuffer(511, sizeof(AsyncEvent), false);
+ StartEventThrd(Context);
+}
+
+
+/* ALCcontext::~ALCcontext()
+ *
+ * Cleans up the context, and destroys any remaining objects the app failed to
+ * delete. Called once there are no more references to the context.
+ */
+ALCcontext::~ALCcontext()
+{
+ TRACE("Freeing context %p\n", this);
+
+ ALcontextProps *cprops{Update.exchange(nullptr, std::memory_order_relaxed)};
+ if(cprops)
+ {
+ TRACE("Freed unapplied context update %p\n", cprops);
+ al_free(cprops);
+ }
+ size_t count{0};
+ cprops = FreeContextProps.exchange(nullptr, std::memory_order_acquire);
+ while(cprops)
+ {
+ ALcontextProps *next{cprops->next.load(std::memory_order_relaxed)};
+ al_free(cprops);
+ cprops = next;
+ ++count;
+ }
+ TRACE("Freed %zu context property object%s\n", count, (count==1)?"":"s");
+
+ count = std::accumulate(SourceList.cbegin(), SourceList.cend(), size_t{0u},
+ [](size_t cur, const SourceSubList &sublist) noexcept -> size_t
+ { return cur + POPCNT64(~sublist.FreeMask); }
+ );
+ if(count > 0)
+ WARN("%zu Source%s not deleted\n", count, (count==1)?"":"s");
+ SourceList.clear();
+ NumSources = 0;
+
+ count = 0;
+ ALeffectslotProps *eprops{FreeEffectslotProps.exchange(nullptr, std::memory_order_acquire)};
+ while(eprops)
+ {
+ ALeffectslotProps *next{eprops->next.load(std::memory_order_relaxed)};
+ if(eprops->State) eprops->State->DecRef();
+ al_free(eprops);
+ eprops = next;
+ ++count;
+ }
+ TRACE("Freed %zu AuxiliaryEffectSlot property object%s\n", count, (count==1)?"":"s");
+
+ delete ActiveAuxSlots.exchange(nullptr, std::memory_order_relaxed);
+ DefaultSlot = nullptr;
+
+ count = std::accumulate(EffectSlotList.cbegin(), EffectSlotList.cend(), size_t{0u},
+ [](size_t cur, const EffectSlotSubList &sublist) noexcept -> size_t
+ { return cur + POPCNT64(~sublist.FreeMask); }
+ );
+ if(count > 0)
+ WARN("%zu AuxiliaryEffectSlot%s not deleted\n", count, (count==1)?"":"s");
+ EffectSlotList.clear();
+ NumEffectSlots = 0;
+
+ count = 0;
+ ALvoiceProps *vprops{FreeVoiceProps.exchange(nullptr, std::memory_order_acquire)};
+ while(vprops)
+ {
+ ALvoiceProps *next{vprops->next.load(std::memory_order_relaxed)};
+ delete vprops;
+ vprops = next;
+ ++count;
+ }
+ TRACE("Freed %zu voice property object%s\n", count, (count==1)?"":"s");
+
+ Voices = nullptr;
+ VoiceCount.store(0, std::memory_order_relaxed);
+
+ ALlistenerProps *lprops{Listener.Update.exchange(nullptr, std::memory_order_relaxed)};
+ if(lprops)
+ {
+ TRACE("Freed unapplied listener update %p\n", lprops);
+ al_free(lprops);
+ }
+ count = 0;
+ lprops = FreeListenerProps.exchange(nullptr, std::memory_order_acquire);
+ while(lprops)
+ {
+ ALlistenerProps *next{lprops->next.load(std::memory_order_relaxed)};
+ al_free(lprops);
+ lprops = next;
+ ++count;
+ }
+ TRACE("Freed %zu listener property object%s\n", count, (count==1)?"":"s");
+
+ if(AsyncEvents)
+ {
+ count = 0;
+ auto evt_vec = AsyncEvents->getReadVector();
+ if(evt_vec.first.len > 0)
+ {
+ al::destroy_n(reinterpret_cast<AsyncEvent*>(evt_vec.first.buf), evt_vec.first.len);
+ count += evt_vec.first.len;
+ }
+ if(evt_vec.second.len > 0)
+ {
+ al::destroy_n(reinterpret_cast<AsyncEvent*>(evt_vec.second.buf), evt_vec.second.len);
+ count += evt_vec.second.len;
+ }
+ if(count > 0)
+ TRACE("Destructed %zu orphaned event%s\n", count, (count==1)?"":"s");
+ AsyncEvents->readAdvance(count);
+ }
+
+ ALCdevice_DecRef(Device);
+}
+
+/* ReleaseContext
+ *
+ * Removes the context from the given device's context array, and clears it
+ * as the current context on the calling thread or globally. Returns true if
+ * other contexts still exist on the device.
+ */
+static bool ReleaseContext(ALCcontext *context, ALCdevice *device)
+{
+ if(LocalContext.get() == context)
+ {
+ WARN("%p released while current on thread\n", context);
+ LocalContext.set(nullptr);
+ ALCcontext_DecRef(context);
+ }
+
+ ALCcontext *origctx{context};
+ if(GlobalContext.compare_exchange_strong(origctx, nullptr))
+ ALCcontext_DecRef(context);
+
+ bool ret{};
+ {
+ using ContextArray = al::FlexArray<ALCcontext*>;
+
+ /* First make sure this context exists in the device's list. */
+ auto *oldarray = device->mContexts.load(std::memory_order_acquire);
+ if(auto toremove = std::count(oldarray->begin(), oldarray->end(), context))
+ {
+ auto alloc_ctx_array = [](const size_t count) -> ContextArray*
+ {
+ if(count == 0) return &EmptyContextArray;
+ void *ptr{al_calloc(alignof(ContextArray), ContextArray::Sizeof(count))};
+ return new (ptr) ContextArray{count};
+ };
+ auto *newarray = alloc_ctx_array(oldarray->size() - toremove);
+
+ /* Copy the current/old context handles to the new array, excluding
+ * the given context.
+ */
+ std::copy_if(oldarray->begin(), oldarray->end(), newarray->begin(),
+ std::bind(std::not_equal_to<ALCcontext*>{}, _1, context));
+
+ /* Store the new context array in the device. Wait for any current
+ * mix to finish before deleting the old array.
+ */
+ device->mContexts.store(newarray);
+ if(oldarray != &EmptyContextArray)
+ {
+ while((device->MixCount.load(std::memory_order_acquire)&1))
+ std::this_thread::yield();
+ delete oldarray;
+ }
+
+ ret = !newarray->empty();
+ }
+ else
+ ret = !oldarray->empty();
+ }
+
+ StopEventThrd(context);
+
+ return ret;
+}
+
+static void ALCcontext_IncRef(ALCcontext *context)
+{
+ auto ref = IncrementRef(&context->ref);
+ TRACEREF("ALCcontext %p increasing refcount to %u\n", context, ref);
+}
+
+void ALCcontext_DecRef(ALCcontext *context)
+{
+ auto ref = DecrementRef(&context->ref);
+ TRACEREF("ALCcontext %p decreasing refcount to %u\n", context, ref);
+ if(UNLIKELY(ref == 0)) delete context;
+}
+
+/* VerifyContext
+ *
+ * Checks if the given context is valid, returning a new reference to it if so.
+ */
+static ContextRef VerifyContext(ALCcontext *context)
+{
+ std::lock_guard<std::recursive_mutex> _{ListLock};
+ auto iter = std::lower_bound(ContextList.cbegin(), ContextList.cend(), context);
+ if(iter != ContextList.cend() && *iter == context)
+ {
+ ALCcontext_IncRef(iter->get());
+ return ContextRef{iter->get()};
+ }
+ return ContextRef{};
+}
+
+
+/* GetContextRef
+ *
+ * Returns a new reference to the currently active context for this thread.
+ */
+ContextRef GetContextRef(void)
+{
+ ALCcontext *context{LocalContext.get()};
+ if(context)
+ ALCcontext_IncRef(context);
+ else
+ {
+ std::lock_guard<std::recursive_mutex> _{ListLock};
+ context = GlobalContext.load(std::memory_order_acquire);
+ if(context) ALCcontext_IncRef(context);
+ }
+ return ContextRef{context};
+}
+
+
+void AllocateVoices(ALCcontext *context, size_t num_voices)
+{
+ ALCdevice *device{context->Device};
+ const ALsizei num_sends{device->NumAuxSends};
+
+ if(context->Voices && num_voices == context->Voices->size())
+ return;
+
+ std::unique_ptr<al::FlexArray<ALvoice>> voices;
+ {
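+        /* The voices are stored in a FlexArray, so allocate the full storage
+         * (16-byte aligned) and construct it with placement new.
+         */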
+ void *ptr{al_calloc(16, al::FlexArray<ALvoice>::Sizeof(num_voices))};
+ voices.reset(new (ptr) al::FlexArray<ALvoice>{num_voices});
+ }
+
+ const size_t v_count{minz(context->VoiceCount.load(std::memory_order_relaxed), num_voices)};
+ if(context->Voices)
+ {
+ /* Copy the old voice data to the new storage. */
+ auto viter = std::move(context->Voices->begin(), context->Voices->begin()+v_count,
+ voices->begin());
+
+ /* Clear extraneous property set sends. */
+ auto clear_sends = [num_sends](ALvoice &voice) -> void
+ {
+ std::fill(std::begin(voice.mProps.Send)+num_sends, std::end(voice.mProps.Send),
+ ALvoiceProps::SendData{});
+
+ std::fill(voice.mSend.begin()+num_sends, voice.mSend.end(), ALvoice::SendData{});
+ auto clear_chan_sends = [num_sends](ALvoice::ChannelData &chandata) -> void
+ {
+ std::fill(chandata.mWetParams.begin()+num_sends, chandata.mWetParams.end(),
+ SendParams{});
+ };
+ std::for_each(voice.mChans.begin(), voice.mChans.end(), clear_chan_sends);
+ };
+ std::for_each(voices->begin(), viter, clear_sends);
+ }
+
+ context->Voices = std::move(voices);
+ context->VoiceCount.store(static_cast<ALuint>(v_count), std::memory_order_relaxed);
+}
+
+
+/************************************************
+ * Standard ALC functions
+ ************************************************/
+
+/* alcGetError
+ *
+ * Returns the last ALC-generated error code for the given device
+ */
+ALC_API ALCenum ALC_APIENTRY alcGetError(ALCdevice *device)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(dev) return dev->LastError.exchange(ALC_NO_ERROR);
+ return LastNullDeviceError.exchange(ALC_NO_ERROR);
+}
+END_API_FUNC
+
+
+/* alcSuspendContext
+ *
+ * Suspends updates for the given context
+ */
+ALC_API ALCvoid ALC_APIENTRY alcSuspendContext(ALCcontext *context)
+START_API_FUNC
+{
+ if(!SuspendDefers)
+ return;
+
+ ContextRef ctx{VerifyContext(context)};
+ if(!ctx)
+ alcSetError(nullptr, ALC_INVALID_CONTEXT);
+ else
+ ALCcontext_DeferUpdates(ctx.get());
+}
+END_API_FUNC
+
+/* alcProcessContext
+ *
+ * Resumes processing updates for the given context
+ */
+ALC_API ALCvoid ALC_APIENTRY alcProcessContext(ALCcontext *context)
+START_API_FUNC
+{
+ if(!SuspendDefers)
+ return;
+
+ ContextRef ctx{VerifyContext(context)};
+ if(!ctx)
+ alcSetError(nullptr, ALC_INVALID_CONTEXT);
+ else
+ ALCcontext_ProcessUpdates(ctx.get());
+}
+END_API_FUNC
+
+
+/* alcGetString
+ *
+ * Returns information about the device, and error strings
+ */
+ALC_API const ALCchar* ALC_APIENTRY alcGetString(ALCdevice *Device, ALCenum param)
+START_API_FUNC
+{
+ const ALCchar *value = nullptr;
+
+ switch(param)
+ {
+ case ALC_NO_ERROR:
+ value = alcNoError;
+ break;
+
+ case ALC_INVALID_ENUM:
+ value = alcErrInvalidEnum;
+ break;
+
+ case ALC_INVALID_VALUE:
+ value = alcErrInvalidValue;
+ break;
+
+ case ALC_INVALID_DEVICE:
+ value = alcErrInvalidDevice;
+ break;
+
+ case ALC_INVALID_CONTEXT:
+ value = alcErrInvalidContext;
+ break;
+
+ case ALC_OUT_OF_MEMORY:
+ value = alcErrOutOfMemory;
+ break;
+
+ case ALC_DEVICE_SPECIFIER:
+ value = alcDefaultName;
+ break;
+
+ case ALC_ALL_DEVICES_SPECIFIER:
+ if(DeviceRef dev{VerifyDevice(Device)})
+ value = dev->DeviceName.c_str();
+ else
+ {
+ ProbeAllDevicesList();
+ value = alcAllDevicesList.c_str();
+ }
+ break;
+
+ case ALC_CAPTURE_DEVICE_SPECIFIER:
+ if(DeviceRef dev{VerifyDevice(Device)})
+ value = dev->DeviceName.c_str();
+ else
+ {
+ ProbeCaptureDeviceList();
+ value = alcCaptureDeviceList.c_str();
+ }
+ break;
+
+ /* Default devices are always first in the list */
+ case ALC_DEFAULT_DEVICE_SPECIFIER:
+ value = alcDefaultName;
+ break;
+
+ case ALC_DEFAULT_ALL_DEVICES_SPECIFIER:
+ if(alcAllDevicesList.empty())
+ ProbeAllDevicesList();
+
+ /* Copy first entry as default. */
+ alcDefaultAllDevicesSpecifier = alcAllDevicesList.c_str();
+ value = alcDefaultAllDevicesSpecifier.c_str();
+ break;
+
+ case ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER:
+ if(alcCaptureDeviceList.empty())
+ ProbeCaptureDeviceList();
+
+ /* Copy first entry as default. */
+ alcCaptureDefaultDeviceSpecifier = alcCaptureDeviceList.c_str();
+ value = alcCaptureDefaultDeviceSpecifier.c_str();
+ break;
+
+ case ALC_EXTENSIONS:
+ if(VerifyDevice(Device))
+ value = alcExtensionList;
+ else
+ value = alcNoDeviceExtList;
+ break;
+
+ case ALC_HRTF_SPECIFIER_SOFT:
+ if(DeviceRef dev{VerifyDevice(Device)})
+ {
+ std::lock_guard<std::mutex> _{dev->StateLock};
+ value = (dev->mHrtf ? dev->HrtfName.c_str() : "");
+ }
+ else
+ alcSetError(nullptr, ALC_INVALID_DEVICE);
+ break;
+
+ default:
+ alcSetError(VerifyDevice(Device).get(), ALC_INVALID_ENUM);
+ break;
+ }
+
+ return value;
+}
+END_API_FUNC
+
+
+static inline ALCsizei NumAttrsForDevice(ALCdevice *device)
+{
+ if(device->Type == Capture) return 9;
+ if(device->Type != Loopback) return 29;
+ if(device->FmtChans == DevFmtAmbi3D)
+ return 35;
+ return 29;
+}
+
+static ALCsizei GetIntegerv(ALCdevice *device, ALCenum param, const al::span<ALCint> values)
+{
+ ALCsizei i;
+
+ if(values.empty())
+ {
+ alcSetError(device, ALC_INVALID_VALUE);
+ return 0;
+ }
+
+ if(!device)
+ {
+ switch(param)
+ {
+ case ALC_MAJOR_VERSION:
+ values[0] = alcMajorVersion;
+ return 1;
+ case ALC_MINOR_VERSION:
+ values[0] = alcMinorVersion;
+ return 1;
+
+ case ALC_ATTRIBUTES_SIZE:
+ case ALC_ALL_ATTRIBUTES:
+ case ALC_FREQUENCY:
+ case ALC_REFRESH:
+ case ALC_SYNC:
+ case ALC_MONO_SOURCES:
+ case ALC_STEREO_SOURCES:
+ case ALC_CAPTURE_SAMPLES:
+ case ALC_FORMAT_CHANNELS_SOFT:
+ case ALC_FORMAT_TYPE_SOFT:
+ case ALC_AMBISONIC_LAYOUT_SOFT:
+ case ALC_AMBISONIC_SCALING_SOFT:
+ case ALC_AMBISONIC_ORDER_SOFT:
+ case ALC_MAX_AMBISONIC_ORDER_SOFT:
+ alcSetError(nullptr, ALC_INVALID_DEVICE);
+ return 0;
+
+ default:
+ alcSetError(nullptr, ALC_INVALID_ENUM);
+ return 0;
+ }
+ return 0;
+ }
+
+ if(device->Type == Capture)
+ {
+ switch(param)
+ {
+ case ALC_ATTRIBUTES_SIZE:
+ values[0] = NumAttrsForDevice(device);
+ return 1;
+
+ case ALC_ALL_ATTRIBUTES:
+ i = 0;
+ if(values.size() < static_cast<size_t>(NumAttrsForDevice(device)))
+ alcSetError(device, ALC_INVALID_VALUE);
+ else
+ {
+ std::lock_guard<std::mutex> _{device->StateLock};
+ values[i++] = ALC_MAJOR_VERSION;
+ values[i++] = alcMajorVersion;
+ values[i++] = ALC_MINOR_VERSION;
+ values[i++] = alcMinorVersion;
+ values[i++] = ALC_CAPTURE_SAMPLES;
+ values[i++] = device->Backend->availableSamples();
+ values[i++] = ALC_CONNECTED;
+ values[i++] = device->Connected.load(std::memory_order_relaxed);
+ values[i++] = 0;
+ }
+ return i;
+
+ case ALC_MAJOR_VERSION:
+ values[0] = alcMajorVersion;
+ return 1;
+ case ALC_MINOR_VERSION:
+ values[0] = alcMinorVersion;
+ return 1;
+
+ case ALC_CAPTURE_SAMPLES:
+ { std::lock_guard<std::mutex> _{device->StateLock};
+ values[0] = device->Backend->availableSamples();
+ }
+ return 1;
+
+ case ALC_CONNECTED:
+ { std::lock_guard<std::mutex> _{device->StateLock};
+ values[0] = device->Connected.load(std::memory_order_acquire);
+ }
+ return 1;
+
+ default:
+ alcSetError(device, ALC_INVALID_ENUM);
+ return 0;
+ }
+ return 0;
+ }
+
+ /* render device */
+ switch(param)
+ {
+ case ALC_ATTRIBUTES_SIZE:
+ values[0] = NumAttrsForDevice(device);
+ return 1;
+
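+    /* The attribute/value pairs written below, plus the terminating 0, add up
+     * to NumAttrsForDevice() values.
+     */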
+ case ALC_ALL_ATTRIBUTES:
+ i = 0;
+ if(values.size() < static_cast<size_t>(NumAttrsForDevice(device)))
+ alcSetError(device, ALC_INVALID_VALUE);
+ else
+ {
+ std::lock_guard<std::mutex> _{device->StateLock};
+ values[i++] = ALC_MAJOR_VERSION;
+ values[i++] = alcMajorVersion;
+ values[i++] = ALC_MINOR_VERSION;
+ values[i++] = alcMinorVersion;
+ values[i++] = ALC_EFX_MAJOR_VERSION;
+ values[i++] = alcEFXMajorVersion;
+ values[i++] = ALC_EFX_MINOR_VERSION;
+ values[i++] = alcEFXMinorVersion;
+
+ values[i++] = ALC_FREQUENCY;
+ values[i++] = device->Frequency;
+ if(device->Type != Loopback)
+ {
+ values[i++] = ALC_REFRESH;
+ values[i++] = device->Frequency / device->UpdateSize;
+
+ values[i++] = ALC_SYNC;
+ values[i++] = ALC_FALSE;
+ }
+ else
+ {
+ if(device->FmtChans == DevFmtAmbi3D)
+ {
+ values[i++] = ALC_AMBISONIC_LAYOUT_SOFT;
+ values[i++] = static_cast<ALCint>(device->mAmbiLayout);
+
+ values[i++] = ALC_AMBISONIC_SCALING_SOFT;
+ values[i++] = static_cast<ALCint>(device->mAmbiScale);
+
+ values[i++] = ALC_AMBISONIC_ORDER_SOFT;
+ values[i++] = device->mAmbiOrder;
+ }
+
+ values[i++] = ALC_FORMAT_CHANNELS_SOFT;
+ values[i++] = device->FmtChans;
+
+ values[i++] = ALC_FORMAT_TYPE_SOFT;
+ values[i++] = device->FmtType;
+ }
+
+ values[i++] = ALC_MONO_SOURCES;
+ values[i++] = device->NumMonoSources;
+
+ values[i++] = ALC_STEREO_SOURCES;
+ values[i++] = device->NumStereoSources;
+
+ values[i++] = ALC_MAX_AUXILIARY_SENDS;
+ values[i++] = device->NumAuxSends;
+
+ values[i++] = ALC_HRTF_SOFT;
+ values[i++] = (device->mHrtf ? ALC_TRUE : ALC_FALSE);
+
+ values[i++] = ALC_HRTF_STATUS_SOFT;
+ values[i++] = device->HrtfStatus;
+
+ values[i++] = ALC_OUTPUT_LIMITER_SOFT;
+ values[i++] = device->Limiter ? ALC_TRUE : ALC_FALSE;
+
+ values[i++] = ALC_MAX_AMBISONIC_ORDER_SOFT;
+ values[i++] = MAX_AMBI_ORDER;
+
+ values[i++] = 0;
+ }
+ return i;
+
+ case ALC_MAJOR_VERSION:
+ values[0] = alcMajorVersion;
+ return 1;
+
+ case ALC_MINOR_VERSION:
+ values[0] = alcMinorVersion;
+ return 1;
+
+ case ALC_EFX_MAJOR_VERSION:
+ values[0] = alcEFXMajorVersion;
+ return 1;
+
+ case ALC_EFX_MINOR_VERSION:
+ values[0] = alcEFXMinorVersion;
+ return 1;
+
+ case ALC_FREQUENCY:
+ values[0] = device->Frequency;
+ return 1;
+
+ case ALC_REFRESH:
+ if(device->Type == Loopback)
+ {
+ alcSetError(device, ALC_INVALID_DEVICE);
+ return 0;
+ }
+ { std::lock_guard<std::mutex> _{device->StateLock};
+ values[0] = device->Frequency / device->UpdateSize;
+ }
+ return 1;
+
+ case ALC_SYNC:
+ if(device->Type == Loopback)
+ {
+ alcSetError(device, ALC_INVALID_DEVICE);
+ return 0;
+ }
+ values[0] = ALC_FALSE;
+ return 1;
+
+ case ALC_FORMAT_CHANNELS_SOFT:
+ if(device->Type != Loopback)
+ {
+ alcSetError(device, ALC_INVALID_DEVICE);
+ return 0;
+ }
+ values[0] = device->FmtChans;
+ return 1;
+
+ case ALC_FORMAT_TYPE_SOFT:
+ if(device->Type != Loopback)
+ {
+ alcSetError(device, ALC_INVALID_DEVICE);
+ return 0;
+ }
+ values[0] = device->FmtType;
+ return 1;
+
+ case ALC_AMBISONIC_LAYOUT_SOFT:
+ if(device->Type != Loopback || device->FmtChans != DevFmtAmbi3D)
+ {
+ alcSetError(device, ALC_INVALID_DEVICE);
+ return 0;
+ }
+ values[0] = static_cast<ALCint>(device->mAmbiLayout);
+ return 1;
+
+ case ALC_AMBISONIC_SCALING_SOFT:
+ if(device->Type != Loopback || device->FmtChans != DevFmtAmbi3D)
+ {
+ alcSetError(device, ALC_INVALID_DEVICE);
+ return 0;
+ }
+ values[0] = static_cast<ALCint>(device->mAmbiScale);
+ return 1;
+
+ case ALC_AMBISONIC_ORDER_SOFT:
+ if(device->Type != Loopback || device->FmtChans != DevFmtAmbi3D)
+ {
+ alcSetError(device, ALC_INVALID_DEVICE);
+ return 0;
+ }
+ values[0] = device->mAmbiOrder;
+ return 1;
+
+ case ALC_MONO_SOURCES:
+ values[0] = device->NumMonoSources;
+ return 1;
+
+ case ALC_STEREO_SOURCES:
+ values[0] = device->NumStereoSources;
+ return 1;
+
+ case ALC_MAX_AUXILIARY_SENDS:
+ values[0] = device->NumAuxSends;
+ return 1;
+
+ case ALC_CONNECTED:
+ { std::lock_guard<std::mutex> _{device->StateLock};
+ values[0] = device->Connected.load(std::memory_order_acquire);
+ }
+ return 1;
+
+ case ALC_HRTF_SOFT:
+ values[0] = (device->mHrtf ? ALC_TRUE : ALC_FALSE);
+ return 1;
+
+ case ALC_HRTF_STATUS_SOFT:
+ values[0] = device->HrtfStatus;
+ return 1;
+
+ case ALC_NUM_HRTF_SPECIFIERS_SOFT:
+ { std::lock_guard<std::mutex> _{device->StateLock};
+ device->HrtfList.clear();
+ device->HrtfList = EnumerateHrtf(device->DeviceName.c_str());
+ values[0] = static_cast<ALCint>(minz(device->HrtfList.size(),
+ std::numeric_limits<ALCint>::max()));
+ }
+ return 1;
+
+ case ALC_OUTPUT_LIMITER_SOFT:
+ values[0] = device->Limiter ? ALC_TRUE : ALC_FALSE;
+ return 1;
+
+ case ALC_MAX_AMBISONIC_ORDER_SOFT:
+ values[0] = MAX_AMBI_ORDER;
+ return 1;
+
+ default:
+ alcSetError(device, ALC_INVALID_ENUM);
+ return 0;
+ }
+ return 0;
+}
+
+/* alcGetIntegerv
+ *
+ * Returns information about the device and the version of OpenAL
+ */
+ALC_API void ALC_APIENTRY alcGetIntegerv(ALCdevice *device, ALCenum param, ALCsizei size, ALCint *values)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(size <= 0 || values == nullptr)
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ else
+ GetIntegerv(dev.get(), param, {values, values+size});
+}
+END_API_FUNC
+
+ALC_API void ALC_APIENTRY alcGetInteger64vSOFT(ALCdevice *device, ALCenum pname, ALCsizei size, ALCint64SOFT *values)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(size <= 0 || values == nullptr)
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ else if(!dev || dev->Type == Capture)
+ {
+ auto ivals = al::vector<ALCint>(size);
+ size = GetIntegerv(dev.get(), pname, {ivals.data(), ivals.size()});
+ std::copy(ivals.begin(), ivals.begin()+size, values);
+ }
+ else /* render device */
+ {
+ switch(pname)
+ {
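+        /* The 64-bit query also reports the ALC_DEVICE_CLOCK_SOFT and
+         * ALC_DEVICE_LATENCY_SOFT pairs, hence the extra 4 values.
+         */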
+ case ALC_ATTRIBUTES_SIZE:
+ *values = NumAttrsForDevice(dev.get())+4;
+ break;
+
+ case ALC_ALL_ATTRIBUTES:
+ if(size < NumAttrsForDevice(dev.get())+4)
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ else
+ {
+ ALsizei i{0};
+ std::lock_guard<std::mutex> _{dev->StateLock};
+ values[i++] = ALC_FREQUENCY;
+ values[i++] = dev->Frequency;
+
+ if(dev->Type != Loopback)
+ {
+ values[i++] = ALC_REFRESH;
+ values[i++] = dev->Frequency / dev->UpdateSize;
+
+ values[i++] = ALC_SYNC;
+ values[i++] = ALC_FALSE;
+ }
+ else
+ {
+ if(dev->FmtChans == DevFmtAmbi3D)
+ {
+ values[i++] = ALC_AMBISONIC_LAYOUT_SOFT;
+ values[i++] = static_cast<ALCint64SOFT>(dev->mAmbiLayout);
+
+ values[i++] = ALC_AMBISONIC_SCALING_SOFT;
+ values[i++] = static_cast<ALCint64SOFT>(dev->mAmbiScale);
+
+ values[i++] = ALC_AMBISONIC_ORDER_SOFT;
+ values[i++] = dev->mAmbiOrder;
+ }
+
+ values[i++] = ALC_FORMAT_CHANNELS_SOFT;
+ values[i++] = dev->FmtChans;
+
+ values[i++] = ALC_FORMAT_TYPE_SOFT;
+ values[i++] = dev->FmtType;
+ }
+
+ values[i++] = ALC_MONO_SOURCES;
+ values[i++] = dev->NumMonoSources;
+
+ values[i++] = ALC_STEREO_SOURCES;
+ values[i++] = dev->NumStereoSources;
+
+ values[i++] = ALC_MAX_AUXILIARY_SENDS;
+ values[i++] = dev->NumAuxSends;
+
+ values[i++] = ALC_HRTF_SOFT;
+ values[i++] = (dev->mHrtf ? ALC_TRUE : ALC_FALSE);
+
+ values[i++] = ALC_HRTF_STATUS_SOFT;
+ values[i++] = dev->HrtfStatus;
+
+ values[i++] = ALC_OUTPUT_LIMITER_SOFT;
+ values[i++] = dev->Limiter ? ALC_TRUE : ALC_FALSE;
+
+ ClockLatency clock{GetClockLatency(dev.get())};
+ values[i++] = ALC_DEVICE_CLOCK_SOFT;
+ values[i++] = clock.ClockTime.count();
+
+ values[i++] = ALC_DEVICE_LATENCY_SOFT;
+ values[i++] = clock.Latency.count();
+
+ values[i++] = 0;
+ }
+ break;
+
+ case ALC_DEVICE_CLOCK_SOFT:
+ { std::lock_guard<std::mutex> _{dev->StateLock};
+ nanoseconds basecount;
+ ALuint samplecount;
+ ALuint refcount;
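+                /* MixCount is odd while a mix is in progress. Retry the reads
+                 * until an even, unchanged count brackets them, so the clock
+                 * base and sample count are consistent with each other.
+                 */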
+ do {
+ while(((refcount=ReadRef(&dev->MixCount))&1) != 0)
+ std::this_thread::yield();
+ basecount = dev->ClockBase;
+ samplecount = dev->SamplesDone;
+ } while(refcount != ReadRef(&dev->MixCount));
+ basecount += nanoseconds{seconds{samplecount}} / dev->Frequency;
+ *values = basecount.count();
+ }
+ break;
+
+ case ALC_DEVICE_LATENCY_SOFT:
+ { std::lock_guard<std::mutex> _{dev->StateLock};
+ ClockLatency clock{GetClockLatency(dev.get())};
+ *values = clock.Latency.count();
+ }
+ break;
+
+ case ALC_DEVICE_CLOCK_LATENCY_SOFT:
+ if(size < 2)
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ else
+ {
+ std::lock_guard<std::mutex> _{dev->StateLock};
+ ClockLatency clock{GetClockLatency(dev.get())};
+ values[0] = clock.ClockTime.count();
+ values[1] = clock.Latency.count();
+ }
+ break;
+
+ default:
+ auto ivals = al::vector<ALCint>(size);
+ size = GetIntegerv(dev.get(), pname, {ivals.data(), ivals.size()});
+ std::copy(ivals.begin(), ivals.begin()+size, values);
+ break;
+ }
+ }
+}
+END_API_FUNC
+
+
+/* alcIsExtensionPresent
+ *
+ * Determines if there is support for a particular extension
+ */
+ALC_API ALCboolean ALC_APIENTRY alcIsExtensionPresent(ALCdevice *device, const ALCchar *extName)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(!extName)
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ else
+ {
+ size_t len = strlen(extName);
+ const char *ptr = (dev ? alcExtensionList : alcNoDeviceExtList);
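+        /* The extension list is a space-separated string; match the name
+         * case-insensitively and require it to end at whitespace or the end
+         * of the string.
+         */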
+ while(ptr && *ptr)
+ {
+ if(strncasecmp(ptr, extName, len) == 0 &&
+ (ptr[len] == '\0' || isspace(ptr[len])))
+ return ALC_TRUE;
+
+ if((ptr=strchr(ptr, ' ')) != nullptr)
+ {
+ do {
+ ++ptr;
+ } while(isspace(*ptr));
+ }
+ }
+ }
+ return ALC_FALSE;
+}
+END_API_FUNC
+
+
+/* alcGetProcAddress
+ *
+ * Retrieves the function address for a particular extension function
+ */
+ALC_API ALCvoid* ALC_APIENTRY alcGetProcAddress(ALCdevice *device, const ALCchar *funcName)
+START_API_FUNC
+{
+ if(!funcName)
+ {
+ DeviceRef dev{VerifyDevice(device)};
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ }
+ else
+ {
+ for(const auto &func : alcFunctions)
+ {
+ if(strcmp(func.funcName, funcName) == 0)
+ return func.address;
+ }
+ }
+ return nullptr;
+}
+END_API_FUNC
+
+
+/* alcGetEnumValue
+ *
+ * Get the value for a particular ALC enumeration name
+ */
+ALC_API ALCenum ALC_APIENTRY alcGetEnumValue(ALCdevice *device, const ALCchar *enumName)
+START_API_FUNC
+{
+ if(!enumName)
+ {
+ DeviceRef dev{VerifyDevice(device)};
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ }
+ else
+ {
+ for(const auto &enm : alcEnumerations)
+ {
+ if(strcmp(enm.enumName, enumName) == 0)
+ return enm.value;
+ }
+ }
+ return 0;
+}
+END_API_FUNC
+
+
+/* alcCreateContext
+ *
+ * Create and attach a context to the given device.
+ */
+ALC_API ALCcontext* ALC_APIENTRY alcCreateContext(ALCdevice *device, const ALCint *attrList)
+START_API_FUNC
+{
+ /* Explicitly hold the list lock while taking the StateLock in case the
+ * device is asynchronously destroyed, to ensure this new context is
+ * properly cleaned up after being made.
+ */
+ std::unique_lock<std::recursive_mutex> listlock{ListLock};
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type == Capture || !dev->Connected.load(std::memory_order_relaxed))
+ {
+ listlock.unlock();
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ return nullptr;
+ }
+ std::unique_lock<std::mutex> statelock{dev->StateLock};
+ listlock.unlock();
+
+ dev->LastError.store(ALC_NO_ERROR);
+
+ ContextRef context{new ALCcontext{dev.get()}};
+ ALCdevice_IncRef(context->Device);
+
+ ALCenum err{UpdateDeviceParams(dev.get(), attrList)};
+ if(err != ALC_NO_ERROR)
+ {
+ alcSetError(dev.get(), err);
+ if(err == ALC_INVALID_DEVICE)
+ aluHandleDisconnect(dev.get(), "Device update failure");
+ statelock.unlock();
+
+ return nullptr;
+ }
+ AllocateVoices(context.get(), 256);
+
+ if(DefaultEffect.type != AL_EFFECT_NULL && dev->Type == Playback)
+ {
+ void *ptr{al_calloc(16, sizeof(ALeffectslot))};
+ context->DefaultSlot = std::unique_ptr<ALeffectslot>{new (ptr) ALeffectslot{}};
+ if(InitEffectSlot(context->DefaultSlot.get()) == AL_NO_ERROR)
+ aluInitEffectPanning(context->DefaultSlot.get(), dev.get());
+ else
+ {
+ context->DefaultSlot = nullptr;
+ ERR("Failed to initialize the default effect slot\n");
+ }
+ }
+
+ InitContext(context.get());
+
+ if(auto volopt = ConfigValueFloat(dev->DeviceName.c_str(), nullptr, "volume-adjust"))
+ {
+ const ALfloat valf{*volopt};
+ if(!std::isfinite(valf))
+ ERR("volume-adjust must be finite: %f\n", valf);
+ else
+ {
+ const ALfloat db{clampf(valf, -24.0f, 24.0f)};
+ if(db != valf)
+ WARN("volume-adjust clamped: %f, range: +/-%f\n", valf, 24.0f);
+ context->GainBoost = std::pow(10.0f, db/20.0f);
+ TRACE("volume-adjust gain: %f\n", context->GainBoost);
+ }
+ }
+ UpdateListenerProps(context.get());
+
+ {
+ using ContextArray = al::FlexArray<ALCcontext*>;
+
+ /* Allocate a new context array, which holds 1 more than the current/
+ * old array.
+ */
+ auto *oldarray = device->mContexts.load();
+ const size_t newcount{oldarray->size()+1};
+ void *ptr{al_calloc(alignof(ContextArray), ContextArray::Sizeof(newcount))};
+ auto *newarray = new (ptr) ContextArray{newcount};
+
+ /* Copy the current/old context handles to the new array, appending the
+ * new context.
+ */
+ auto iter = std::copy(oldarray->begin(), oldarray->end(), newarray->begin());
+ *iter = context.get();
+
+ /* Store the new context array in the device. Wait for any current mix
+ * to finish before deleting the old array.
+ */
+ dev->mContexts.store(newarray);
+ if(oldarray != &EmptyContextArray)
+ {
+ while((dev->MixCount.load(std::memory_order_acquire)&1))
+ std::this_thread::yield();
+ delete oldarray;
+ }
+ }
+ statelock.unlock();
+
+ {
+ std::lock_guard<std::recursive_mutex> _{ListLock};
+ auto iter = std::lower_bound(ContextList.cbegin(), ContextList.cend(), context.get());
+ ALCcontext_IncRef(context.get());
+ ContextList.insert(iter, ContextRef{context.get()});
+ }
+
+ if(context->DefaultSlot)
+ {
+ if(InitializeEffect(context.get(), context->DefaultSlot.get(), &DefaultEffect) == AL_NO_ERROR)
+ UpdateEffectSlotProps(context->DefaultSlot.get(), context.get());
+ else
+ ERR("Failed to initialize the default effect\n");
+ }
+
+ TRACE("Created context %p\n", context.get());
+ return context.get();
+}
+END_API_FUNC
+
+/* alcDestroyContext
+ *
+ * Remove a context from its device
+ */
+ALC_API ALCvoid ALC_APIENTRY alcDestroyContext(ALCcontext *context)
+START_API_FUNC
+{
+ std::unique_lock<std::recursive_mutex> listlock{ListLock};
+ auto iter = std::lower_bound(ContextList.begin(), ContextList.end(), context);
+ if(iter == ContextList.end() || *iter != context)
+ {
+ listlock.unlock();
+ alcSetError(nullptr, ALC_INVALID_CONTEXT);
+ return;
+ }
+ /* Hold an extra reference to this context so it remains valid until the
+ * ListLock is released.
+ */
+ ContextRef ctx{std::move(*iter)};
+ ContextList.erase(iter);
+
+ ALCdevice *Device{ctx->Device};
+
+ std::lock_guard<std::mutex> _{Device->StateLock};
+ if(!ReleaseContext(ctx.get(), Device) && Device->Flags.get<DeviceRunning>())
+ {
+ Device->Backend->stop();
+ Device->Flags.unset<DeviceRunning>();
+ }
+}
+END_API_FUNC
+
+
+/* alcGetCurrentContext
+ *
+ * Returns the currently active context on the calling thread
+ */
+ALC_API ALCcontext* ALC_APIENTRY alcGetCurrentContext(void)
+START_API_FUNC
+{
+ ALCcontext *Context{LocalContext.get()};
+ if(!Context) Context = GlobalContext.load();
+ return Context;
+}
+END_API_FUNC
+
+/* alcGetThreadContext
+ *
+ * Returns the currently active thread-local context
+ */
+ALC_API ALCcontext* ALC_APIENTRY alcGetThreadContext(void)
+START_API_FUNC
+{ return LocalContext.get(); }
+END_API_FUNC
+
+/* alcMakeContextCurrent
+ *
+ * Makes the given context the active process-wide context, and removes the
+ * thread-local context for the calling thread.
+ */
+ALC_API ALCboolean ALC_APIENTRY alcMakeContextCurrent(ALCcontext *context)
+START_API_FUNC
+{
+ /* context must be valid or nullptr */
+ ContextRef ctx;
+ if(context)
+ {
+ ctx = VerifyContext(context);
+ if(!ctx)
+ {
+ alcSetError(nullptr, ALC_INVALID_CONTEXT);
+ return ALC_FALSE;
+ }
+ }
+ /* Release this reference (if any) to store it in the GlobalContext
+ * pointer. Take ownership of the reference (if any) that was previously
+ * stored there.
+ */
+ ctx = ContextRef{GlobalContext.exchange(ctx.release())};
+
+ /* Reset (decrement) the previous global reference by replacing it with the
+ * thread-local context. Take ownership of the thread-local context
+ * reference (if any), clearing the storage to null.
+ */
+ ctx = ContextRef{LocalContext.get()};
+ if(ctx) LocalContext.set(nullptr);
+ /* Reset (decrement) the previous thread-local reference. */
+
+ return ALC_TRUE;
+}
+END_API_FUNC
+
+/* alcSetThreadContext
+ *
+ * Makes the given context the active context for the current thread
+ */
+ALC_API ALCboolean ALC_APIENTRY alcSetThreadContext(ALCcontext *context)
+START_API_FUNC
+{
+ /* context must be valid or nullptr */
+ ContextRef ctx;
+ if(context)
+ {
+ ctx = VerifyContext(context);
+ if(!ctx)
+ {
+ alcSetError(nullptr, ALC_INVALID_CONTEXT);
+ return ALC_FALSE;
+ }
+ }
+ /* context's reference count is already incremented */
+ ContextRef old{LocalContext.get()};
+ LocalContext.set(ctx.release());
+
+ return ALC_TRUE;
+}
+END_API_FUNC
+
+
+/* alcGetContextsDevice
+ *
+ * Returns the device that a particular context is attached to
+ */
+ALC_API ALCdevice* ALC_APIENTRY alcGetContextsDevice(ALCcontext *Context)
+START_API_FUNC
+{
+ ContextRef ctx{VerifyContext(Context)};
+ if(!ctx)
+ {
+ alcSetError(nullptr, ALC_INVALID_CONTEXT);
+ return nullptr;
+ }
+ return ctx->Device;
+}
+END_API_FUNC
+
+
+/* alcOpenDevice
+ *
+ * Opens the named device.
+ */
+ALC_API ALCdevice* ALC_APIENTRY alcOpenDevice(const ALCchar *deviceName)
+START_API_FUNC
+{
+ DO_INITCONFIG();
+
+ if(!PlaybackFactory)
+ {
+ alcSetError(nullptr, ALC_INVALID_VALUE);
+ return nullptr;
+ }
+
+ if(deviceName && (!deviceName[0] || strcasecmp(deviceName, alcDefaultName) == 0 || strcasecmp(deviceName, "openal-soft") == 0
+#ifdef _WIN32
+ /* Some old Windows apps hardcode these expecting OpenAL to use a
+ * specific audio API, even when they're not enumerated. Creative's
+ * router effectively ignores them too.
+ */
+ || strcasecmp(deviceName, "DirectSound3D") == 0 || strcasecmp(deviceName, "DirectSound") == 0
+ || strcasecmp(deviceName, "MMSYSTEM") == 0
+#endif
+ ))
+ deviceName = nullptr;
+
+ DeviceRef device{new ALCdevice{Playback}};
+
+ /* Set output format */
+ device->FmtChans = DevFmtChannelsDefault;
+ device->FmtType = DevFmtTypeDefault;
+ device->Frequency = DEFAULT_OUTPUT_RATE;
+ device->UpdateSize = DEFAULT_UPDATE_SIZE;
+ device->BufferSize = DEFAULT_UPDATE_SIZE * DEFAULT_NUM_UPDATES;
+
+ device->SourcesMax = 256;
+ device->AuxiliaryEffectSlotMax = 64;
+ device->NumAuxSends = DEFAULT_SENDS;
+
+ try {
+ /* Create the device backend. */
+ device->Backend = PlaybackFactory->createBackend(device.get(), BackendType::Playback);
+
+ /* Find a playback device to open */
+ ALCenum err{device->Backend->open(deviceName)};
+ if(err != ALC_NO_ERROR)
+ {
+ alcSetError(nullptr, err);
+ return nullptr;
+ }
+ }
+ catch(al::backend_exception &e) {
+ WARN("Failed to open playback device: %s\n", e.what());
+ alcSetError(nullptr, e.errorCode());
+ return nullptr;
+ }
+
+ deviceName = device->DeviceName.c_str();
+ if(auto chanopt = ConfigValueStr(deviceName, nullptr, "channels"))
+ {
+ static constexpr struct ChannelMap {
+ const char name[16];
+ DevFmtChannels chans;
+ ALsizei order;
+ } chanlist[] = {
+ { "mono", DevFmtMono, 0 },
+ { "stereo", DevFmtStereo, 0 },
+ { "quad", DevFmtQuad, 0 },
+ { "surround51", DevFmtX51, 0 },
+ { "surround61", DevFmtX61, 0 },
+ { "surround71", DevFmtX71, 0 },
+ { "surround51rear", DevFmtX51Rear, 0 },
+ { "ambi1", DevFmtAmbi3D, 1 },
+ { "ambi2", DevFmtAmbi3D, 2 },
+ { "ambi3", DevFmtAmbi3D, 3 },
+ };
+
+ const ALCchar *fmt{chanopt->c_str()};
+ auto iter = std::find_if(std::begin(chanlist), std::end(chanlist),
+ [fmt](const ChannelMap &entry) -> bool
+ { return strcasecmp(entry.name, fmt) == 0; }
+ );
+ if(iter == std::end(chanlist))
+ ERR("Unsupported channels: %s\n", fmt);
+ else
+ {
+ device->FmtChans = iter->chans;
+ device->mAmbiOrder = iter->order;
+ device->Flags.set<ChannelsRequest>();
+ }
+ }
+ if(auto typeopt = ConfigValueStr(deviceName, nullptr, "sample-type"))
+ {
+ static constexpr struct TypeMap {
+ const char name[16];
+ DevFmtType type;
+ } typelist[] = {
+ { "int8", DevFmtByte },
+ { "uint8", DevFmtUByte },
+ { "int16", DevFmtShort },
+ { "uint16", DevFmtUShort },
+ { "int32", DevFmtInt },
+ { "uint32", DevFmtUInt },
+ { "float32", DevFmtFloat },
+ };
+
+ const ALCchar *fmt{typeopt->c_str()};
+ auto iter = std::find_if(std::begin(typelist), std::end(typelist),
+ [fmt](const TypeMap &entry) -> bool
+ { return strcasecmp(entry.name, fmt) == 0; }
+ );
+ if(iter == std::end(typelist))
+ ERR("Unsupported sample-type: %s\n", fmt);
+ else
+ {
+ device->FmtType = iter->type;
+ device->Flags.set<SampleTypeRequest>();
+ }
+ }
+
+ if(ALuint freq{ConfigValueUInt(deviceName, nullptr, "frequency").value_or(0)})
+ {
+ if(freq < MIN_OUTPUT_RATE)
+ {
+ ERR("%uhz request clamped to %uhz minimum\n", freq, MIN_OUTPUT_RATE);
+ freq = MIN_OUTPUT_RATE;
+ }
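+        /* Rescale the period and buffer sizes to the new sample rate, rounding
+         * to the nearest sample.
+         */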
+ device->UpdateSize = (device->UpdateSize*freq + device->Frequency/2) / device->Frequency;
+ device->BufferSize = (device->BufferSize*freq + device->Frequency/2) / device->Frequency;
+ device->Frequency = freq;
+ device->Flags.set<FrequencyRequest>();
+ }
+
+ if(auto persizeopt = ConfigValueUInt(deviceName, nullptr, "period_size"))
+ device->UpdateSize = clampu(*persizeopt, 64, 8192);
+
+ if(auto peropt = ConfigValueUInt(deviceName, nullptr, "periods"))
+ device->BufferSize = device->UpdateSize * clampu(*peropt, 2, 16);
+ else
+ device->BufferSize = maxu(device->BufferSize, device->UpdateSize*2);
+
+ if(auto srcsopt = ConfigValueUInt(deviceName, nullptr, "sources"))
+ {
+ if(*srcsopt > 0) device->SourcesMax = *srcsopt;
+ }
+
+ if(auto slotsopt = ConfigValueUInt(deviceName, nullptr, "slots"))
+ {
+ if(*slotsopt > 0)
+ device->AuxiliaryEffectSlotMax = minu(*slotsopt, INT_MAX);
+ }
+
+ if(auto sendsopt = ConfigValueInt(deviceName, nullptr, "sends"))
+ device->NumAuxSends = clampi(DEFAULT_SENDS, 0, clampi(*sendsopt, 0, MAX_SENDS));
+
+ device->NumStereoSources = 1;
+ device->NumMonoSources = device->SourcesMax - device->NumStereoSources;
+
+ if(auto ambiopt = ConfigValueStr(deviceName, nullptr, "ambi-format"))
+ {
+ const ALCchar *fmt{ambiopt->c_str()};
+ if(strcasecmp(fmt, "fuma") == 0)
+ {
+ if(device->mAmbiOrder > 3)
+ ERR("FuMa is incompatible with %d%s order ambisonics (up to third-order only)\n",
+ device->mAmbiOrder,
+ (((device->mAmbiOrder%100)/10) == 1) ? "th" :
+ ((device->mAmbiOrder%10) == 1) ? "st" :
+ ((device->mAmbiOrder%10) == 2) ? "nd" :
+ ((device->mAmbiOrder%10) == 3) ? "rd" : "th");
+ else
+ {
+ device->mAmbiLayout = AmbiLayout::FuMa;
+ device->mAmbiScale = AmbiNorm::FuMa;
+ }
+ }
+ else if(strcasecmp(fmt, "ambix") == 0 || strcasecmp(fmt, "acn+sn3d") == 0)
+ {
+ device->mAmbiLayout = AmbiLayout::ACN;
+ device->mAmbiScale = AmbiNorm::SN3D;
+ }
+ else if(strcasecmp(fmt, "acn+n3d") == 0)
+ {
+ device->mAmbiLayout = AmbiLayout::ACN;
+ device->mAmbiScale = AmbiNorm::N3D;
+ }
+ else
+ ERR("Unsupported ambi-format: %s\n", fmt);
+ }
+
+ {
+ std::lock_guard<std::recursive_mutex> _{ListLock};
+ auto iter = std::lower_bound(DeviceList.cbegin(), DeviceList.cend(), device.get());
+ ALCdevice_IncRef(device.get());
+ DeviceList.insert(iter, DeviceRef{device.get()});
+ }
+
+ TRACE("Created device %p, \"%s\"\n", device.get(), device->DeviceName.c_str());
+ return device.get();
+}
+END_API_FUNC
+
+/* alcCloseDevice
+ *
+ * Closes the given device.
+ */
+ALC_API ALCboolean ALC_APIENTRY alcCloseDevice(ALCdevice *device)
+START_API_FUNC
+{
+ std::unique_lock<std::recursive_mutex> listlock{ListLock};
+ auto iter = std::lower_bound(DeviceList.begin(), DeviceList.end(), device);
+ if(iter == DeviceList.end() || *iter != device)
+ {
+ alcSetError(nullptr, ALC_INVALID_DEVICE);
+ return ALC_FALSE;
+ }
+ if((*iter)->Type == Capture)
+ {
+ alcSetError(iter->get(), ALC_INVALID_DEVICE);
+ return ALC_FALSE;
+ }
+
+ /* Erase the device, and any remaining contexts left on it, from their
+ * respective lists.
+ */
+ DeviceRef dev{std::move(*iter)};
+ DeviceList.erase(iter);
+
+ std::unique_lock<std::mutex> statelock{dev->StateLock};
+ al::vector<ContextRef> orphanctxs;
+ for(ALCcontext *ctx : *dev->mContexts.load())
+ {
+ auto iter = std::lower_bound(ContextList.begin(), ContextList.end(), ctx);
+ if(iter != ContextList.end() && *iter == ctx)
+ {
+ orphanctxs.emplace_back(std::move(*iter));
+ ContextList.erase(iter);
+ }
+ }
+ listlock.unlock();
+
+ for(ContextRef &context : orphanctxs)
+ {
+ WARN("Releasing context %p\n", context.get());
+ ReleaseContext(context.get(), dev.get());
+ }
+ orphanctxs.clear();
+
+ if(dev->Flags.get<DeviceRunning>())
+ dev->Backend->stop();
+ dev->Flags.unset<DeviceRunning>();
+
+ return ALC_TRUE;
+}
+END_API_FUNC
+
+
+/************************************************
+ * ALC capture functions
+ ************************************************/
+ALC_API ALCdevice* ALC_APIENTRY alcCaptureOpenDevice(const ALCchar *deviceName, ALCuint frequency, ALCenum format, ALCsizei samples)
+START_API_FUNC
+{
+ DO_INITCONFIG();
+
+ if(!CaptureFactory)
+ {
+ alcSetError(nullptr, ALC_INVALID_VALUE);
+ return nullptr;
+ }
+
+ if(samples <= 0)
+ {
+ alcSetError(nullptr, ALC_INVALID_VALUE);
+ return nullptr;
+ }
+
+ if(deviceName && (!deviceName[0] || strcasecmp(deviceName, alcDefaultName) == 0 || strcasecmp(deviceName, "openal-soft") == 0))
+ deviceName = nullptr;
+
+ DeviceRef device{new ALCdevice{Capture}};
+
+ auto decompfmt = DecomposeDevFormat(format);
+ if(!decompfmt)
+ {
+ alcSetError(nullptr, ALC_INVALID_ENUM);
+ return nullptr;
+ }
+
+ device->Frequency = frequency;
+ device->FmtChans = decompfmt->chans;
+ device->FmtType = decompfmt->type;
+ device->Flags.set<FrequencyRequest, ChannelsRequest, SampleTypeRequest>();
+
+ device->UpdateSize = samples;
+ device->BufferSize = samples;
+
+ try {
+ device->Backend = CaptureFactory->createBackend(device.get(), BackendType::Capture);
+
+ TRACE("Capture format: %s, %s, %uhz, %u / %u buffer\n",
+ DevFmtChannelsString(device->FmtChans), DevFmtTypeString(device->FmtType),
+ device->Frequency, device->UpdateSize, device->BufferSize);
+ ALCenum err{device->Backend->open(deviceName)};
+ if(err != ALC_NO_ERROR)
+ {
+ alcSetError(nullptr, err);
+ return nullptr;
+ }
+ }
+ catch(al::backend_exception &e) {
+ WARN("Failed to open capture device: %s\n", e.what());
+ alcSetError(nullptr, e.errorCode());
+ return nullptr;
+ }
+
+ {
+ std::lock_guard<std::recursive_mutex> _{ListLock};
+ auto iter = std::lower_bound(DeviceList.cbegin(), DeviceList.cend(), device.get());
+ ALCdevice_IncRef(device.get());
+ DeviceList.insert(iter, DeviceRef{device.get()});
+ }
+
+ TRACE("Created device %p, \"%s\"\n", device.get(), device->DeviceName.c_str());
+ return device.get();
+}
+END_API_FUNC
+
+ALC_API ALCboolean ALC_APIENTRY alcCaptureCloseDevice(ALCdevice *device)
+START_API_FUNC
+{
+ std::unique_lock<std::recursive_mutex> listlock{ListLock};
+ auto iter = std::lower_bound(DeviceList.begin(), DeviceList.end(), device);
+ if(iter == DeviceList.end() || *iter != device)
+ {
+ alcSetError(nullptr, ALC_INVALID_DEVICE);
+ return ALC_FALSE;
+ }
+ if((*iter)->Type != Capture)
+ {
+ alcSetError(iter->get(), ALC_INVALID_DEVICE);
+ return ALC_FALSE;
+ }
+
+ DeviceRef dev{std::move(*iter)};
+ DeviceList.erase(iter);
+ listlock.unlock();
+
+ std::lock_guard<std::mutex> _{dev->StateLock};
+ if(dev->Flags.get<DeviceRunning>())
+ dev->Backend->stop();
+ dev->Flags.unset<DeviceRunning>();
+
+ return ALC_TRUE;
+}
+END_API_FUNC
+
+ALC_API void ALC_APIENTRY alcCaptureStart(ALCdevice *device)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type != Capture)
+ {
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ return;
+ }
+
+ std::lock_guard<std::mutex> _{dev->StateLock};
+ if(!dev->Connected.load(std::memory_order_acquire))
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ else if(!dev->Flags.get<DeviceRunning>())
+ {
+ if(dev->Backend->start())
+ dev->Flags.set<DeviceRunning>();
+ else
+ {
+ aluHandleDisconnect(dev.get(), "Device start failure");
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ }
+ }
+}
+END_API_FUNC
+
+ALC_API void ALC_APIENTRY alcCaptureStop(ALCdevice *device)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type != Capture)
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ else
+ {
+ std::lock_guard<std::mutex> _{dev->StateLock};
+ if(dev->Flags.get<DeviceRunning>())
+ dev->Backend->stop();
+ dev->Flags.unset<DeviceRunning>();
+ }
+}
+END_API_FUNC
+
+ALC_API void ALC_APIENTRY alcCaptureSamples(ALCdevice *device, ALCvoid *buffer, ALCsizei samples)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type != Capture)
+ {
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ return;
+ }
+
+ ALCenum err{ALC_INVALID_VALUE};
+ { std::lock_guard<std::mutex> _{dev->StateLock};
+ BackendBase *backend{dev->Backend.get()};
+ if(samples >= 0 && backend->availableSamples() >= static_cast<ALCuint>(samples))
+ err = backend->captureSamples(buffer, samples);
+ }
+ if(err != ALC_NO_ERROR)
+ alcSetError(dev.get(), err);
+}
+END_API_FUNC
+
+
+/************************************************
+ * ALC loopback functions
+ ************************************************/
+
+/* alcLoopbackOpenDeviceSOFT
+ *
+ * Open a loopback device, for manual rendering.
+ */
+ALC_API ALCdevice* ALC_APIENTRY alcLoopbackOpenDeviceSOFT(const ALCchar *deviceName)
+START_API_FUNC
+{
+ DO_INITCONFIG();
+
+    /* Make sure the device name, if specified, matches the default name. */
+ if(deviceName && strcmp(deviceName, alcDefaultName) != 0)
+ {
+ alcSetError(nullptr, ALC_INVALID_VALUE);
+ return nullptr;
+ }
+
+ DeviceRef device{new ALCdevice{Loopback}};
+
+ device->SourcesMax = 256;
+ device->AuxiliaryEffectSlotMax = 64;
+ device->NumAuxSends = DEFAULT_SENDS;
+
+ //Set output format
+ device->BufferSize = 0;
+ device->UpdateSize = 0;
+
+ device->Frequency = DEFAULT_OUTPUT_RATE;
+ device->FmtChans = DevFmtChannelsDefault;
+ device->FmtType = DevFmtTypeDefault;
+
+ if(auto srcsopt = ConfigValueUInt(nullptr, nullptr, "sources"))
+ {
+ if(*srcsopt > 0) device->SourcesMax = *srcsopt;
+ }
+
+ if(auto slotsopt = ConfigValueUInt(nullptr, nullptr, "slots"))
+ {
+ if(*slotsopt > 0)
+ device->AuxiliaryEffectSlotMax = minu(*slotsopt, INT_MAX);
+ }
+
+ if(auto sendsopt = ConfigValueInt(nullptr, nullptr, "sends"))
+ device->NumAuxSends = clampi(DEFAULT_SENDS, 0, clampi(*sendsopt, 0, MAX_SENDS));
+
+ device->NumStereoSources = 1;
+ device->NumMonoSources = device->SourcesMax - device->NumStereoSources;
+
+ try {
+ device->Backend = LoopbackBackendFactory::getFactory().createBackend(device.get(),
+ BackendType::Playback);
+
+ // Open the "backend"
+ device->Backend->open("Loopback");
+ }
+ catch(al::backend_exception &e) {
+ WARN("Failed to open loopback device: %s\n", e.what());
+ alcSetError(nullptr, e.errorCode());
+ return nullptr;
+ }
+
+ {
+ std::lock_guard<std::recursive_mutex> _{ListLock};
+ auto iter = std::lower_bound(DeviceList.cbegin(), DeviceList.cend(), device.get());
+ ALCdevice_IncRef(device.get());
+ DeviceList.insert(iter, DeviceRef{device.get()});
+ }
+
+ TRACE("Created device %p\n", device.get());
+ return device.get();
+}
+END_API_FUNC
+
+/* alcIsRenderFormatSupportedSOFT
+ *
+ * Determines if the loopback device supports the given format for rendering.
+ */
+ALC_API ALCboolean ALC_APIENTRY alcIsRenderFormatSupportedSOFT(ALCdevice *device, ALCsizei freq, ALCenum channels, ALCenum type)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type != Loopback)
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ else if(freq <= 0)
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ else
+ {
+ if(IsValidALCType(type) && IsValidALCChannels(channels) && freq >= MIN_OUTPUT_RATE)
+ return ALC_TRUE;
+ }
+
+ return ALC_FALSE;
+}
+END_API_FUNC
+
+/* alcRenderSamplesSOFT
+ *
+ * Renders some samples into a buffer, using the format last set by the
+ * attributes given to alcCreateContext.
+ */
+FORCE_ALIGN ALC_API void ALC_APIENTRY alcRenderSamplesSOFT(ALCdevice *device, ALCvoid *buffer, ALCsizei samples)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type != Loopback)
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ else if(samples < 0 || (samples > 0 && buffer == nullptr))
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ else
+ {
+ BackendLockGuard _{*device->Backend};
+ aluMixData(dev.get(), buffer, samples);
+ }
+}
+END_API_FUNC
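+
+/* Editorial sketch (not part of the original source): a minimal loopback
+ * rendering sequence as a client might drive it, assuming ALC_SOFT_loopback
+ * is available. The 44100Hz stereo-float format and the 1024-frame block are
+ * arbitrary example values; the attributes given to alcCreateContext define
+ * the format that alcRenderSamplesSOFT later produces.
+ */
+#if 0
+static void ExampleLoopbackRender()
+{
+    ALCdevice *dev{alcLoopbackOpenDeviceSOFT(nullptr)};
+    if(!dev) return;
+
+    if(alcIsRenderFormatSupportedSOFT(dev, 44100, ALC_STEREO_SOFT, ALC_FLOAT_SOFT))
+    {
+        const ALCint attrs[]{
+            ALC_FORMAT_CHANNELS_SOFT, ALC_STEREO_SOFT,
+            ALC_FORMAT_TYPE_SOFT, ALC_FLOAT_SOFT,
+            ALC_FREQUENCY, 44100,
+            0
+        };
+        ALCcontext *ctx{alcCreateContext(dev, attrs)};
+        alcMakeContextCurrent(ctx);
+
+        ALfloat block[1024*2]; /* 1024 frames of interleaved stereo float */
+        alcRenderSamplesSOFT(dev, block, 1024);
+        /* ... hand 'block' to the application's own output path ... */
+
+        alcMakeContextCurrent(nullptr);
+        alcDestroyContext(ctx);
+    }
+    alcCloseDevice(dev);
+}
+#endif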
+
+
+/************************************************
+ * ALC DSP pause/resume functions
+ ************************************************/
+
+/* alcDevicePauseSOFT
+ *
+ * Pause the DSP to stop audio processing.
+ */
+ALC_API void ALC_APIENTRY alcDevicePauseSOFT(ALCdevice *device)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type != Playback)
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ else
+ {
+ std::lock_guard<std::mutex> _{dev->StateLock};
+ if(dev->Flags.get<DeviceRunning>())
+ dev->Backend->stop();
+ dev->Flags.unset<DeviceRunning>();
+ dev->Flags.set<DevicePaused>();
+ }
+}
+END_API_FUNC
+
+/* alcDeviceResumeSOFT
+ *
+ * Resume the DSP to restart audio processing.
+ */
+ALC_API void ALC_APIENTRY alcDeviceResumeSOFT(ALCdevice *device)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type != Playback)
+ {
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ return;
+ }
+
+ std::lock_guard<std::mutex> _{dev->StateLock};
+ if(!dev->Flags.get<DevicePaused>())
+ return;
+ dev->Flags.unset<DevicePaused>();
+ if(dev->mContexts.load()->empty())
+ return;
+
+ if(dev->Backend->start() == ALC_FALSE)
+ {
+ aluHandleDisconnect(dev.get(), "Device start failure");
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ return;
+ }
+ dev->Flags.set<DeviceRunning>();
+}
+END_API_FUNC
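+
+/* Editorial sketch (not part of the original source): pause/resume are meant
+ * to bracket periods where no mixing is needed, e.g.
+ *
+ *   alcDevicePauseSOFT(device);   // application moves to the background
+ *   // ... later ...
+ *   alcDeviceResumeSOFT(device);  // mixing restarts only if contexts exist
+ */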
+
+
+/************************************************
+ * ALC HRTF functions
+ ************************************************/
+
+/* alcGetStringiSOFT
+ *
+ * Gets a string parameter at the given index.
+ */
+ALC_API const ALCchar* ALC_APIENTRY alcGetStringiSOFT(ALCdevice *device, ALCenum paramName, ALCsizei index)
+START_API_FUNC
+{
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type == Capture)
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ else switch(paramName)
+ {
+ case ALC_HRTF_SPECIFIER_SOFT:
+ if(index >= 0 && static_cast<size_t>(index) < dev->HrtfList.size())
+ return dev->HrtfList[index].name.c_str();
+ alcSetError(dev.get(), ALC_INVALID_VALUE);
+ break;
+
+ default:
+ alcSetError(dev.get(), ALC_INVALID_ENUM);
+ break;
+ }
+
+ return nullptr;
+}
+END_API_FUNC
+
+/* alcResetDeviceSOFT
+ *
+ * Resets the given device output, using the specified attribute list.
+ */
+ALC_API ALCboolean ALC_APIENTRY alcResetDeviceSOFT(ALCdevice *device, const ALCint *attribs)
+START_API_FUNC
+{
+ std::unique_lock<std::recursive_mutex> listlock{ListLock};
+ DeviceRef dev{VerifyDevice(device)};
+ if(!dev || dev->Type == Capture)
+ {
+ listlock.unlock();
+ alcSetError(dev.get(), ALC_INVALID_DEVICE);
+ return ALC_FALSE;
+ }
+ std::lock_guard<std::mutex> _{dev->StateLock};
+ listlock.unlock();
+
+ /* Force the backend to stop mixing first since we're resetting. Also reset
+ * the connected state so lost devices can attempt to recover.
+ */
+ if(dev->Flags.get<DeviceRunning>())
+ dev->Backend->stop();
+ dev->Flags.unset<DeviceRunning>();
+ device->Connected.store(true);
+
+ ALCenum err{UpdateDeviceParams(dev.get(), attribs)};
+ if(LIKELY(err == ALC_NO_ERROR)) return ALC_TRUE;
+
+ alcSetError(dev.get(), err);
+ if(err == ALC_INVALID_DEVICE)
+ aluHandleDisconnect(dev.get(), "Device start failure");
+ return ALC_FALSE;
+}
+END_API_FUNC
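+
+/* Editorial sketch (not part of the original source): alcGetStringiSOFT and
+ * alcResetDeviceSOFT combine to enumerate and select an HRTF when the
+ * ALC_SOFT_HRTF extension is present. Selecting index 0 here is an arbitrary
+ * example choice.
+ */
+#if 0
+static void ExampleSelectFirstHrtf(ALCdevice *dev)
+{
+    ALCint num_hrtf{};
+    alcGetIntegerv(dev, ALC_NUM_HRTF_SPECIFIERS_SOFT, 1, &num_hrtf);
+    if(num_hrtf < 1) return;
+
+    /* List the available HRTF names. */
+    for(ALCint i{0};i < num_hrtf;++i)
+        printf("HRTF %d: %s\n", i, alcGetStringiSOFT(dev, ALC_HRTF_SPECIFIER_SOFT, i));
+
+    /* Reset the device, requesting HRTF with the first enumerated entry. */
+    const ALCint attrs[]{
+        ALC_HRTF_SOFT, ALC_TRUE,
+        ALC_HRTF_ID_SOFT, 0,
+        0
+    };
+    alcResetDeviceSOFT(dev, attrs);
+}
+#endif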
diff --git a/alc/alcmain.h b/alc/alcmain.h
new file mode 100644
index 00000000..a22e0e81
--- /dev/null
+++ b/alc/alcmain.h
@@ -0,0 +1,534 @@
+#ifndef ALC_MAIN_H
+#define ALC_MAIN_H
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <chrono>
+#include <cstdint>
+#include <cstddef>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <utility>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "AL/alext.h"
+
+#include "albyte.h"
+#include "almalloc.h"
+#include "alnumeric.h"
+#include "alspan.h"
+#include "ambidefs.h"
+#include "atomic.h"
+#include "hrtf.h"
+#include "inprogext.h"
+#include "vector.h"
+
+class BFormatDec;
+struct ALbuffer;
+struct ALeffect;
+struct ALfilter;
+struct BackendBase;
+struct Compressor;
+struct EffectState;
+struct FrontStablizer;
+struct Uhj2Encoder;
+struct bs2b;
+
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__)
+#define IS_LITTLE_ENDIAN (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#else
+static const union {
+ ALuint u;
+ ALubyte b[sizeof(ALuint)];
+} EndianTest = { 1 };
+#define IS_LITTLE_ENDIAN (EndianTest.b[0] == 1)
+#endif
+
+
+#define MIN_OUTPUT_RATE 8000
+#define DEFAULT_OUTPUT_RATE 44100
+#define DEFAULT_UPDATE_SIZE 882 /* 20ms */
+#define DEFAULT_NUM_UPDATES 3
+
+
+enum Channel {
+ FrontLeft = 0,
+ FrontRight,
+ FrontCenter,
+ LFE,
+ BackLeft,
+ BackRight,
+ BackCenter,
+ SideLeft,
+ SideRight,
+
+ UpperFrontLeft,
+ UpperFrontRight,
+ UpperBackLeft,
+ UpperBackRight,
+ LowerFrontLeft,
+ LowerFrontRight,
+ LowerBackLeft,
+ LowerBackRight,
+
+ Aux0,
+ Aux1,
+ Aux2,
+ Aux3,
+ Aux4,
+ Aux5,
+ Aux6,
+ Aux7,
+ Aux8,
+ Aux9,
+ Aux10,
+ Aux11,
+ Aux12,
+ Aux13,
+ Aux14,
+ Aux15,
+
+ MaxChannels
+};
+
+
+/* Device formats */
+enum DevFmtType : ALenum {
+ DevFmtByte = ALC_BYTE_SOFT,
+ DevFmtUByte = ALC_UNSIGNED_BYTE_SOFT,
+ DevFmtShort = ALC_SHORT_SOFT,
+ DevFmtUShort = ALC_UNSIGNED_SHORT_SOFT,
+ DevFmtInt = ALC_INT_SOFT,
+ DevFmtUInt = ALC_UNSIGNED_INT_SOFT,
+ DevFmtFloat = ALC_FLOAT_SOFT,
+
+ DevFmtTypeDefault = DevFmtFloat
+};
+enum DevFmtChannels : ALenum {
+ DevFmtMono = ALC_MONO_SOFT,
+ DevFmtStereo = ALC_STEREO_SOFT,
+ DevFmtQuad = ALC_QUAD_SOFT,
+ DevFmtX51 = ALC_5POINT1_SOFT,
+ DevFmtX61 = ALC_6POINT1_SOFT,
+ DevFmtX71 = ALC_7POINT1_SOFT,
+ DevFmtAmbi3D = ALC_BFORMAT3D_SOFT,
+
+ /* Similar to 5.1, except using rear channels instead of sides */
+ DevFmtX51Rear = 0x70000000,
+
+ DevFmtChannelsDefault = DevFmtStereo
+};
+#define MAX_OUTPUT_CHANNELS (16)
+
+/* DevFmtType traits, providing the type, etc given a DevFmtType. */
+template<DevFmtType T>
+struct DevFmtTypeTraits { };
+
+template<>
+struct DevFmtTypeTraits<DevFmtByte> { using Type = ALbyte; };
+template<>
+struct DevFmtTypeTraits<DevFmtUByte> { using Type = ALubyte; };
+template<>
+struct DevFmtTypeTraits<DevFmtShort> { using Type = ALshort; };
+template<>
+struct DevFmtTypeTraits<DevFmtUShort> { using Type = ALushort; };
+template<>
+struct DevFmtTypeTraits<DevFmtInt> { using Type = ALint; };
+template<>
+struct DevFmtTypeTraits<DevFmtUInt> { using Type = ALuint; };
+template<>
+struct DevFmtTypeTraits<DevFmtFloat> { using Type = ALfloat; };
+
+
+ALsizei BytesFromDevFmt(DevFmtType type) noexcept;
+ALsizei ChannelsFromDevFmt(DevFmtChannels chans, ALsizei ambiorder) noexcept;
+inline ALsizei FrameSizeFromDevFmt(DevFmtChannels chans, DevFmtType type, ALsizei ambiorder) noexcept
+{ return ChannelsFromDevFmt(chans, ambiorder) * BytesFromDevFmt(type); }
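+
+/* Editorial note: the traits above map a DevFmtType to its sample type at
+ * compile time, which lets format-generic helpers be written once, e.g.
+ * (illustrative only):
+ *
+ *   template<DevFmtType T>
+ *   void ZeroFillBuffer(void *dst, size_t count)
+ *   {
+ *       using SampleT = typename DevFmtTypeTraits<T>::Type;
+ *       std::fill_n(static_cast<SampleT*>(dst), count, SampleT{});
+ *   }
+ */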
+
+enum class AmbiLayout {
+ FuMa = ALC_FUMA_SOFT, /* FuMa channel order */
+ ACN = ALC_ACN_SOFT, /* ACN channel order */
+
+ Default = ACN
+};
+
+enum class AmbiNorm {
+ FuMa = ALC_FUMA_SOFT, /* FuMa normalization */
+ SN3D = ALC_SN3D_SOFT, /* SN3D normalization */
+ N3D = ALC_N3D_SOFT, /* N3D normalization */
+
+ Default = SN3D
+};
+
+
+enum DeviceType {
+ Playback,
+ Capture,
+ Loopback
+};
+
+
+enum RenderMode {
+ NormalRender,
+ StereoPair,
+ HrtfRender
+};
+
+
+struct BufferSubList {
+ uint64_t FreeMask{~0_u64};
+ ALbuffer *Buffers{nullptr}; /* 64 */
+
+ BufferSubList() noexcept = default;
+ BufferSubList(const BufferSubList&) = delete;
+ BufferSubList(BufferSubList&& rhs) noexcept : FreeMask{rhs.FreeMask}, Buffers{rhs.Buffers}
+ { rhs.FreeMask = ~0_u64; rhs.Buffers = nullptr; }
+ ~BufferSubList();
+
+ BufferSubList& operator=(const BufferSubList&) = delete;
+ BufferSubList& operator=(BufferSubList&& rhs) noexcept
+ { std::swap(FreeMask, rhs.FreeMask); std::swap(Buffers, rhs.Buffers); return *this; }
+};
+
+struct EffectSubList {
+ uint64_t FreeMask{~0_u64};
+ ALeffect *Effects{nullptr}; /* 64 */
+
+ EffectSubList() noexcept = default;
+ EffectSubList(const EffectSubList&) = delete;
+ EffectSubList(EffectSubList&& rhs) noexcept : FreeMask{rhs.FreeMask}, Effects{rhs.Effects}
+ { rhs.FreeMask = ~0_u64; rhs.Effects = nullptr; }
+ ~EffectSubList();
+
+ EffectSubList& operator=(const EffectSubList&) = delete;
+ EffectSubList& operator=(EffectSubList&& rhs) noexcept
+ { std::swap(FreeMask, rhs.FreeMask); std::swap(Effects, rhs.Effects); return *this; }
+};
+
+struct FilterSubList {
+ uint64_t FreeMask{~0_u64};
+ ALfilter *Filters{nullptr}; /* 64 */
+
+ FilterSubList() noexcept = default;
+ FilterSubList(const FilterSubList&) = delete;
+ FilterSubList(FilterSubList&& rhs) noexcept : FreeMask{rhs.FreeMask}, Filters{rhs.Filters}
+ { rhs.FreeMask = ~0_u64; rhs.Filters = nullptr; }
+ ~FilterSubList();
+
+ FilterSubList& operator=(const FilterSubList&) = delete;
+ FilterSubList& operator=(FilterSubList&& rhs) noexcept
+ { std::swap(FreeMask, rhs.FreeMask); std::swap(Filters, rhs.Filters); return *this; }
+};
+
+
+/* Maximum delay in samples for speaker distance compensation. */
+#define MAX_DELAY_LENGTH 1024
+
+class DistanceComp {
+public:
+ struct DistData {
+ ALfloat Gain{1.0f};
+ ALsizei Length{0}; /* Valid range is [0...MAX_DELAY_LENGTH). */
+ ALfloat *Buffer{nullptr};
+ };
+
+private:
+ std::array<DistData,MAX_OUTPUT_CHANNELS> mChannels;
+ al::vector<ALfloat,16> mSamples;
+
+public:
+ void setSampleCount(size_t new_size) { mSamples.resize(new_size); }
+ void clear() noexcept
+ {
+ for(auto &chan : mChannels)
+ {
+ chan.Gain = 1.0f;
+ chan.Length = 0;
+ chan.Buffer = nullptr;
+ }
+ using SampleVecT = decltype(mSamples);
+ SampleVecT{}.swap(mSamples);
+ }
+
+ ALfloat *getSamples() noexcept { return mSamples.data(); }
+
+ al::span<DistData,MAX_OUTPUT_CHANNELS> as_span() { return mChannels; }
+};
+
+struct BFChannelConfig {
+ ALfloat Scale;
+ ALsizei Index;
+};
+
+/* Size for temporary storage of buffer data, in ALfloats. Larger values need
+ * more memory, while smaller values may need more iterations. The value needs
+ * to be a sensible size, however, as it constrains the max stepping value used
+ * for mixing, as well as the maximum number of samples per mixing iteration.
+ */
+#define BUFFERSIZE 1024
+
+using FloatBufferLine = std::array<float,BUFFERSIZE>;
+
+/* Maximum number of samples to pad on either end of a buffer for resampling.
+ * Note that both the beginning and end need padding!
+ */
+#define MAX_RESAMPLE_PADDING 24
+
+
+struct MixParams {
+ /* Coefficient channel mapping for mixing to the buffer. */
+ std::array<BFChannelConfig,MAX_OUTPUT_CHANNELS> AmbiMap{};
+
+ al::span<FloatBufferLine> Buffer;
+};
+
+struct RealMixParams {
+ std::array<ALint,MaxChannels> ChannelIndex{};
+
+ al::span<FloatBufferLine> Buffer;
+};
+
+using POSTPROCESS = void(*)(ALCdevice *device, const ALsizei SamplesToDo);
+
+enum {
+ // Frequency was requested by the app or config file
+ FrequencyRequest,
+ // Channel configuration was requested by the config file
+ ChannelsRequest,
+ // Sample type was requested by the config file
+ SampleTypeRequest,
+
+ // Specifies if the DSP is paused at user request
+ DevicePaused,
+ // Specifies if the device is currently running
+ DeviceRunning,
+
+ DeviceFlagsCount
+};
+
+struct ALCdevice {
+ RefCount ref{1u};
+
+ std::atomic<bool> Connected{true};
+ const DeviceType Type{};
+
+ ALuint Frequency{};
+ ALuint UpdateSize{};
+ ALuint BufferSize{};
+
+ DevFmtChannels FmtChans{};
+ DevFmtType FmtType{};
+ ALboolean IsHeadphones{AL_FALSE};
+ ALsizei mAmbiOrder{0};
+ /* For DevFmtAmbi* output only, specifies the channel order and
+ * normalization.
+ */
+ AmbiLayout mAmbiLayout{AmbiLayout::Default};
+ AmbiNorm mAmbiScale{AmbiNorm::Default};
+
+ ALCenum LimiterState{ALC_DONT_CARE_SOFT};
+
+ std::string DeviceName;
+
+ // Device flags
+ al::bitfield<DeviceFlagsCount> Flags{};
+
+ std::string HrtfName;
+ al::vector<EnumeratedHrtf> HrtfList;
+ ALCenum HrtfStatus{ALC_FALSE};
+
+ std::atomic<ALCenum> LastError{ALC_NO_ERROR};
+
+ // Maximum number of sources that can be created
+ ALuint SourcesMax{};
+ // Maximum number of slots that can be created
+ ALuint AuxiliaryEffectSlotMax{};
+
+ ALCuint NumMonoSources{};
+ ALCuint NumStereoSources{};
+ ALsizei NumAuxSends{};
+
+ // Map of Buffers for this device
+ std::mutex BufferLock;
+ al::vector<BufferSubList> BufferList;
+
+ // Map of Effects for this device
+ std::mutex EffectLock;
+ al::vector<EffectSubList> EffectList;
+
+ // Map of Filters for this device
+ std::mutex FilterLock;
+ al::vector<FilterSubList> FilterList;
+
+ /* Rendering mode. */
+ RenderMode mRenderMode{NormalRender};
+
+ /* The average speaker distance as determined by the ambdec configuration,
+ * HRTF data set, or the NFC-HOA reference delay. Only used for NFC.
+ */
+ ALfloat AvgSpeakerDist{0.0f};
+
+ ALuint SamplesDone{0u};
+ std::chrono::nanoseconds ClockBase{0};
+ std::chrono::nanoseconds FixedLatency{0};
+
+ /* Temp storage used for mixer processing. */
+ alignas(16) ALfloat SourceData[BUFFERSIZE + MAX_RESAMPLE_PADDING*2];
+ alignas(16) ALfloat ResampledData[BUFFERSIZE];
+ alignas(16) ALfloat FilteredData[BUFFERSIZE];
+ union {
+ alignas(16) ALfloat HrtfSourceData[BUFFERSIZE + HRTF_HISTORY_LENGTH];
+ alignas(16) ALfloat NfcSampleData[BUFFERSIZE];
+ };
+ alignas(16) float2 HrtfAccumData[BUFFERSIZE + HRIR_LENGTH];
+
+ /* Mixing buffer used by the Dry mix and Real output. */
+ al::vector<FloatBufferLine, 16> MixBuffer;
+
+ /* The "dry" path corresponds to the main output. */
+ MixParams Dry;
+ ALuint NumChannelsPerOrder[MAX_AMBI_ORDER+1]{};
+
+ /* "Real" output, which will be written to the device buffer. May alias the
+ * dry buffer.
+ */
+ RealMixParams RealOut;
+
+ /* HRTF state and info */
+ std::unique_ptr<DirectHrtfState> mHrtfState;
+ HrtfEntry *mHrtf{nullptr};
+
+ /* Ambisonic-to-UHJ encoder */
+ std::unique_ptr<Uhj2Encoder> Uhj_Encoder;
+
+ /* Ambisonic decoder for speakers */
+ std::unique_ptr<BFormatDec> AmbiDecoder;
+
+ /* Stereo-to-binaural filter */
+ std::unique_ptr<bs2b> Bs2b;
+
+ POSTPROCESS PostProcess{};
+
+ std::unique_ptr<FrontStablizer> Stablizer;
+
+ std::unique_ptr<Compressor> Limiter;
+
+ /* Delay buffers used to compensate for speaker distances. */
+ DistanceComp ChannelDelay;
+
+ /* Dithering control. */
+ ALfloat DitherDepth{0.0f};
+ ALuint DitherSeed{0u};
+
+ /* Running count of the mixer invocations, in 31.1 fixed point. This
+ * actually increments *twice* when mixing: once at the start and once at
+ * the end. The bottom bit therefore indicates whether the device is
+ * currently mixing, and the upper bits indicate how many mixes have been
+ * done (see the illustrative reader sketch after this struct).
+ */
+ RefCount MixCount{0u};
+
+ // Contexts created on this device
+ std::atomic<al::FlexArray<ALCcontext*>*> mContexts{nullptr};
+
+ /* This lock protects the device state (format, update size, etc) from
+ * being changed in multiple threads, or being accessed while being
+ * changed. It's also used to serialize calls to the backend.
+ */
+ std::mutex StateLock;
+ std::unique_ptr<BackendBase> Backend;
+
+
+ ALCdevice(DeviceType type);
+ ALCdevice(const ALCdevice&) = delete;
+ ALCdevice& operator=(const ALCdevice&) = delete;
+ ~ALCdevice();
+
+ ALsizei bytesFromFmt() const noexcept { return BytesFromDevFmt(FmtType); }
+ ALsizei channelsFromFmt() const noexcept { return ChannelsFromDevFmt(FmtChans, mAmbiOrder); }
+ ALsizei frameSizeFromFmt() const noexcept { return bytesFromFmt() * channelsFromFmt(); }
+
+ DEF_NEWDEL(ALCdevice)
+};
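+
+/* Editorial sketch (not part of the original source): readers of MixCount can
+ * treat it like a sequence lock to take a consistent snapshot of mixing state
+ * without blocking the mixer, assuming RefCount behaves like
+ * std::atomic<unsigned int>. TakeSnapshot() is a hypothetical stand-in for
+ * whatever values need reading.
+ *
+ *   ALuint refcount;
+ *   do {
+ *       while(((refcount=device->MixCount.load(std::memory_order_acquire))&1) != 0)
+ *       { } // odd count: the mixer is mid-update, so spin or yield
+ *       TakeSnapshot(device);
+ *   } while(refcount != device->MixCount.load(std::memory_order_acquire));
+ */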
+
+/* Must be no more than 15 characters (16 including the terminating null) for
+ * compatibility with pthread_setname_np limitations. */
+#define MIXER_THREAD_NAME "alsoft-mixer"
+
+#define RECORD_THREAD_NAME "alsoft-record"
+
+
+enum {
+ /* End event thread processing. */
+ EventType_KillThread = 0,
+
+ /* User event types. */
+ EventType_SourceStateChange = 1<<0,
+ EventType_BufferCompleted = 1<<1,
+ EventType_Error = 1<<2,
+ EventType_Performance = 1<<3,
+ EventType_Deprecated = 1<<4,
+ EventType_Disconnected = 1<<5,
+
+ /* Internal events. */
+ EventType_ReleaseEffectState = 65536,
+};
+
+struct AsyncEvent {
+ unsigned int EnumType{0u};
+ union {
+ char dummy;
+ struct {
+ ALuint id;
+ ALenum state;
+ } srcstate;
+ struct {
+ ALuint id;
+ ALsizei count;
+ } bufcomp;
+ struct {
+ ALenum type;
+ ALuint id;
+ ALuint param;
+ ALchar msg[1008];
+ } user;
+ EffectState *mEffectState;
+ } u{};
+
+ AsyncEvent() noexcept = default;
+ constexpr AsyncEvent(unsigned int type) noexcept : EnumType{type} { }
+};
+
+
+void AllocateVoices(ALCcontext *context, size_t num_voices);
+
+
+extern ALint RTPrioLevel;
+void SetRTPriority(void);
+
+void SetDefaultChannelOrder(ALCdevice *device);
+void SetDefaultWFXChannelOrder(ALCdevice *device);
+
+const ALCchar *DevFmtTypeString(DevFmtType type) noexcept;
+const ALCchar *DevFmtChannelsString(DevFmtChannels chans) noexcept;
+
+/**
+ * GetChannelIdxByName
+ *
+ * Returns the index for the given channel name (e.g. FrontCenter), or -1 if it
+ * doesn't exist.
+ */
+inline ALint GetChannelIdxByName(const RealMixParams &real, Channel chan) noexcept
+{ return real.ChannelIndex[chan]; }
+
+
+void StartEventThrd(ALCcontext *ctx);
+void StopEventThrd(ALCcontext *ctx);
+
+
+al::vector<std::string> SearchDataFiles(const char *match, const char *subdir);
+
+#endif
diff --git a/alc/alconfig.cpp b/alc/alconfig.cpp
new file mode 100644
index 00000000..b246a91d
--- /dev/null
+++ b/alc/alconfig.cpp
@@ -0,0 +1,545 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#ifdef _WIN32
+#ifdef __MINGW32__
+#define _WIN32_IE 0x501
+#else
+#define _WIN32_IE 0x400
+#endif
+#endif
+
+#include "config.h"
+
+#include "alconfig.h"
+
+#include <cstdlib>
+#include <cctype>
+#include <cstring>
+#ifdef _WIN32_IE
+#include <windows.h>
+#include <shlobj.h>
+#endif
+#ifdef __APPLE__
+#include <CoreFoundation/CoreFoundation.h>
+#endif
+
+#include <vector>
+#include <string>
+#include <algorithm>
+
+#include "alcmain.h"
+#include "logging.h"
+#include "compat.h"
+
+
+namespace {
+
+struct ConfigEntry {
+ std::string key;
+ std::string value;
+};
+al::vector<ConfigEntry> ConfOpts;
+
+
+std::string &lstrip(std::string &line)
+{
+ size_t pos{0};
+ while(pos < line.length() && std::isspace(line[pos]))
+ ++pos;
+ line.erase(0, pos);
+ return line;
+}
+
+bool readline(std::istream &f, std::string &output)
+{
+ while(f.good() && f.peek() == '\n')
+ f.ignore();
+
+ return std::getline(f, output) && !output.empty();
+}
+
+std::string expdup(const char *str)
+{
+ std::string output;
+
+ while(*str != '\0')
+ {
+ const char *addstr;
+ size_t addstrlen;
+
+ if(str[0] != '$')
+ {
+ const char *next = std::strchr(str, '$');
+ addstr = str;
+ addstrlen = next ? static_cast<size_t>(next-str) : std::strlen(str);
+
+ str += addstrlen;
+ }
+ else
+ {
+ str++;
+ if(*str == '$')
+ {
+ const char *next = std::strchr(str+1, '$');
+ addstr = str;
+ addstrlen = next ? static_cast<size_t>(next-str) : std::strlen(str);
+
+ str += addstrlen;
+ }
+ else
+ {
+ bool hasbraces{(*str == '{')};
+ if(hasbraces) str++;
+
+ std::string envname;
+ while((std::isalnum(*str) || *str == '_'))
+ envname += *(str++);
+
+ if(hasbraces && *str != '}')
+ continue;
+
+ if(hasbraces) str++;
+ if((addstr=std::getenv(envname.c_str())) == nullptr)
+ continue;
+ addstrlen = std::strlen(addstr);
+ }
+ }
+ if(addstrlen == 0)
+ continue;
+
+ output.append(addstr, addstrlen);
+ }
+
+ return output;
+}
+
+void LoadConfigFromFile(std::istream &f)
+{
+ std::string curSection;
+ std::string buffer;
+
+ while(readline(f, buffer))
+ {
+ while(!buffer.empty() && std::isspace(buffer.back()))
+ buffer.pop_back();
+ if(lstrip(buffer).empty())
+ continue;
+
+ buffer.push_back(0);
+ char *line{&buffer[0]};
+
+ if(line[0] == '[')
+ {
+ char *section = line+1;
+ char *endsection;
+
+ endsection = std::strchr(section, ']');
+ if(!endsection || section == endsection)
+ {
+ ERR("config parse error: bad line \"%s\"\n", line);
+ continue;
+ }
+ if(endsection[1] != 0)
+ {
+ char *end = endsection+1;
+ while(std::isspace(*end))
+ ++end;
+ if(*end != 0 && *end != '#')
+ {
+ ERR("config parse error: bad line \"%s\"\n", line);
+ continue;
+ }
+ }
+ *endsection = 0;
+
+ curSection.clear();
+ if(strcasecmp(section, "general") != 0)
+ {
+ do {
+ char *nextp = std::strchr(section, '%');
+ if(!nextp)
+ {
+ curSection += section;
+ break;
+ }
+
+ curSection.append(section, nextp);
+ section = nextp;
+
+ if(((section[1] >= '0' && section[1] <= '9') ||
+ (section[1] >= 'a' && section[1] <= 'f') ||
+ (section[1] >= 'A' && section[1] <= 'F')) &&
+ ((section[2] >= '0' && section[2] <= '9') ||
+ (section[2] >= 'a' && section[2] <= 'f') ||
+ (section[2] >= 'A' && section[2] <= 'F')))
+ {
+ unsigned char b = 0;
+ if(section[1] >= '0' && section[1] <= '9')
+ b = (section[1]-'0') << 4;
+ else if(section[1] >= 'a' && section[1] <= 'f')
+ b = (section[1]-'a'+0xa) << 4;
+ else if(section[1] >= 'A' && section[1] <= 'F')
+ b = (section[1]-'A'+0x0a) << 4;
+ if(section[2] >= '0' && section[2] <= '9')
+ b |= (section[2]-'0');
+ else if(section[2] >= 'a' && section[2] <= 'f')
+ b |= (section[2]-'a'+0xa);
+ else if(section[2] >= 'A' && section[2] <= 'F')
+ b |= (section[2]-'A'+0x0a);
+ curSection += static_cast<char>(b);
+ section += 3;
+ }
+ else if(section[1] == '%')
+ {
+ curSection += '%';
+ section += 2;
+ }
+ else
+ {
+ curSection += '%';
+ section += 1;
+ }
+ } while(*section != 0);
+ }
+
+ continue;
+ }
+
+ char *comment{std::strchr(line, '#')};
+ if(comment) *(comment++) = 0;
+ if(!line[0]) continue;
+
+ char key[256]{};
+ char value[256]{};
+ if(std::sscanf(line, "%255[^=] = \"%255[^\"]\"", key, value) == 2 ||
+ std::sscanf(line, "%255[^=] = '%255[^\']'", key, value) == 2 ||
+ std::sscanf(line, "%255[^=] = %255[^\n]", key, value) == 2)
+ {
+ /* sscanf doesn't handle '' or "" as empty values, so clip it
+ * manually. */
+ if(std::strcmp(value, "\"\"") == 0 || std::strcmp(value, "''") == 0)
+ value[0] = 0;
+ }
+ else if(sscanf(line, "%255[^=] %255[=]", key, value) == 2)
+ {
+ /* Special case for 'key =' */
+ value[0] = 0;
+ }
+ else
+ {
+ ERR("config parse error: malformed option line: \"%s\"\n\n", line);
+ continue;
+ }
+
+ std::string fullKey;
+ if(!curSection.empty())
+ {
+ fullKey += curSection;
+ fullKey += '/';
+ }
+ fullKey += key;
+ while(!fullKey.empty() && std::isspace(fullKey.back()))
+ fullKey.pop_back();
+
+ /* Check if we already have this option set */
+ auto ent = std::find_if(ConfOpts.begin(), ConfOpts.end(),
+ [&fullKey](const ConfigEntry &entry) -> bool
+ { return entry.key == fullKey; }
+ );
+ if(ent != ConfOpts.end())
+ ent->value = expdup(value);
+ else
+ {
+ ConfOpts.emplace_back(ConfigEntry{std::move(fullKey), expdup(value)});
+ ent = ConfOpts.end()-1;
+ }
+
+ TRACE("found '%s' = '%s'\n", ent->key.c_str(), ent->value.c_str());
+ }
+ ConfOpts.shrink_to_fit();
+}
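+
+/* Editorial note: for reference, the parser above accepts files shaped like
+ * the following (hypothetical example content, not shipped with the library):
+ *
+ *   # comments start with '#'
+ *   [general]                 # optional; "general" keys get no prefix
+ *   drivers = pulse,alsa
+ *   hrtf-paths = $HOME/hrtf   # values pass through expdup(), so $VAR and
+ *                             # ${VAR} expand from the environment, $$ is '$'
+ *
+ *   [decoder]                 # keys below are stored as "decoder/<key>"
+ *   hq-mode = true
+ *   distance-comp = ""        # '' or "" denote an explicitly empty value
+ *
+ * Section names may also embed bytes with %XX hex escapes, and %% is a
+ * literal '%'.
+ */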
+
+} // namespace
+
+
+#ifdef _WIN32
+void ReadALConfig()
+{
+ WCHAR buffer[MAX_PATH];
+ if(SHGetSpecialFolderPathW(nullptr, buffer, CSIDL_APPDATA, FALSE) != FALSE)
+ {
+ std::string filepath{wstr_to_utf8(buffer)};
+ filepath += "\\alsoft.ini";
+
+ TRACE("Loading config %s...\n", filepath.c_str());
+ al::ifstream f{filepath};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ }
+
+ std::string ppath{GetProcBinary().path};
+ if(!ppath.empty())
+ {
+ ppath += "\\alsoft.ini";
+ TRACE("Loading config %s...\n", ppath.c_str());
+ al::ifstream f{ppath};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ }
+
+ const WCHAR *str{_wgetenv(L"ALSOFT_CONF")};
+ if(str != nullptr && *str)
+ {
+ std::string filepath{wstr_to_utf8(str)};
+
+ TRACE("Loading config %s...\n", filepath.c_str());
+ al::ifstream f{filepath};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ }
+}
+#else
+void ReadALConfig()
+{
+ const char *str{"/etc/openal/alsoft.conf"};
+
+ TRACE("Loading config %s...\n", str);
+ al::ifstream f{str};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ f.close();
+
+ if(!(str=getenv("XDG_CONFIG_DIRS")) || str[0] == 0)
+ str = "/etc/xdg";
+ std::string confpaths = str;
+ /* Go through the list in reverse, since "the order of base directories
+ * denotes their importance; the first directory listed is the most
+ * important". Ergo, we need to load the settings from the later dirs
+ * first so that the settings in the earlier dirs override them.
+ */
+ std::string fname;
+ while(!confpaths.empty())
+ {
+ auto next = confpaths.find_last_of(':');
+ if(next < confpaths.length())
+ {
+ fname = confpaths.substr(next+1);
+ confpaths.erase(next);
+ }
+ else
+ {
+ fname = confpaths;
+ confpaths.clear();
+ }
+
+ if(fname.empty() || fname.front() != '/')
+ WARN("Ignoring XDG config dir: %s\n", fname.c_str());
+ else
+ {
+ if(fname.back() != '/') fname += "/alsoft.conf";
+ else fname += "alsoft.conf";
+
+ TRACE("Loading config %s...\n", fname.c_str());
+ al::ifstream f{fname};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ }
+ fname.clear();
+ }
+
+#ifdef __APPLE__
+ CFBundleRef mainBundle = CFBundleGetMainBundle();
+ if(mainBundle)
+ {
+ unsigned char fileName[PATH_MAX];
+ CFURLRef configURL;
+
+ if((configURL=CFBundleCopyResourceURL(mainBundle, CFSTR(".alsoftrc"), CFSTR(""), nullptr)) &&
+ CFURLGetFileSystemRepresentation(configURL, true, fileName, sizeof(fileName)))
+ {
+ al::ifstream f{reinterpret_cast<char*>(fileName)};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ }
+ }
+#endif
+
+ if((str=getenv("HOME")) != nullptr && *str)
+ {
+ fname = str;
+ if(fname.back() != '/') fname += "/.alsoftrc";
+ else fname += ".alsoftrc";
+
+ TRACE("Loading config %s...\n", fname.c_str());
+ al::ifstream f{fname};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ }
+
+ if((str=getenv("XDG_CONFIG_HOME")) != nullptr && str[0] != 0)
+ {
+ fname = str;
+ if(fname.back() != '/') fname += "/alsoft.conf";
+ else fname += "alsoft.conf";
+ }
+ else
+ {
+ fname.clear();
+ if((str=getenv("HOME")) != nullptr && str[0] != 0)
+ {
+ fname = str;
+ if(fname.back() != '/') fname += "/.config/alsoft.conf";
+ else fname += ".config/alsoft.conf";
+ }
+ }
+ if(!fname.empty())
+ {
+ TRACE("Loading config %s...\n", fname.c_str());
+ al::ifstream f{fname};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ }
+
+ std::string ppath{GetProcBinary().path};
+ if(!ppath.empty())
+ {
+ if(ppath.back() != '/') ppath += "/alsoft.conf";
+ else ppath += "alsoft.conf";
+
+ TRACE("Loading config %s...\n", ppath.c_str());
+ al::ifstream f{ppath};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ }
+
+ if((str=getenv("ALSOFT_CONF")) != nullptr && *str)
+ {
+ TRACE("Loading config %s...\n", str);
+ al::ifstream f{str};
+ if(f.is_open())
+ LoadConfigFromFile(f);
+ }
+}
+#endif
+
+const char *GetConfigValue(const char *devName, const char *blockName, const char *keyName, const char *def)
+{
+ if(!keyName)
+ return def;
+
+ std::string key;
+ if(blockName && strcasecmp(blockName, "general") != 0)
+ {
+ key = blockName;
+ if(devName)
+ {
+ key += '/';
+ key += devName;
+ }
+ key += '/';
+ key += keyName;
+ }
+ else
+ {
+ if(devName)
+ {
+ key = devName;
+ key += '/';
+ }
+ key += keyName;
+ }
+
+ auto iter = std::find_if(ConfOpts.cbegin(), ConfOpts.cend(),
+ [&key](const ConfigEntry &entry) -> bool
+ { return entry.key == key; }
+ );
+ if(iter != ConfOpts.cend())
+ {
+ TRACE("Found %s = \"%s\"\n", key.c_str(), iter->value.c_str());
+ if(!iter->value.empty())
+ return iter->value.c_str();
+ return def;
+ }
+
+ if(!devName)
+ {
+ TRACE("Key %s not found\n", key.c_str());
+ return def;
+ }
+ return GetConfigValue(nullptr, blockName, keyName, def);
+}
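+
+/* Editorial note: lookups are keyed as "[block/][device/]key", falling back to
+ * the device-less key. For example (hypothetical names), with devName
+ * "My Device", blockName "decoder" and keyName "quality", the lookup tries
+ * "decoder/My Device/quality" first and then "decoder/quality". With a null
+ * or "general" block it tries "My Device/quality" and then plain "quality".
+ */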
+
+int ConfigValueExists(const char *devName, const char *blockName, const char *keyName)
+{
+ const char *val = GetConfigValue(devName, blockName, keyName, "");
+ return val[0] != 0;
+}
+
+al::optional<std::string> ConfigValueStr(const char *devName, const char *blockName, const char *keyName)
+{
+ const char *val = GetConfigValue(devName, blockName, keyName, "");
+ if(!val[0]) return al::nullopt;
+
+ return al::make_optional<std::string>(val);
+}
+
+al::optional<int> ConfigValueInt(const char *devName, const char *blockName, const char *keyName)
+{
+ const char *val = GetConfigValue(devName, blockName, keyName, "");
+ if(!val[0]) return al::nullopt;
+
+ return al::make_optional(static_cast<int>(std::strtol(val, nullptr, 0)));
+}
+
+al::optional<unsigned int> ConfigValueUInt(const char *devName, const char *blockName, const char *keyName)
+{
+ const char *val = GetConfigValue(devName, blockName, keyName, "");
+ if(!val[0]) return al::nullopt;
+
+ return al::make_optional(static_cast<unsigned int>(std::strtoul(val, nullptr, 0)));
+}
+
+al::optional<float> ConfigValueFloat(const char *devName, const char *blockName, const char *keyName)
+{
+ const char *val = GetConfigValue(devName, blockName, keyName, "");
+ if(!val[0]) return al::nullopt;
+
+ return al::make_optional(std::strtof(val, nullptr));
+}
+
+al::optional<bool> ConfigValueBool(const char *devName, const char *blockName, const char *keyName)
+{
+ const char *val = GetConfigValue(devName, blockName, keyName, "");
+ if(!val[0]) return al::nullopt;
+
+ return al::make_optional(
+ strcasecmp(val, "true") == 0 || strcasecmp(val, "yes") == 0 ||
+ strcasecmp(val, "on") == 0 || atoi(val) != 0);
+}
+
+int GetConfigValueBool(const char *devName, const char *blockName, const char *keyName, int def)
+{
+ const char *val = GetConfigValue(devName, blockName, keyName, "");
+
+ if(!val[0]) return def != 0;
+ return (strcasecmp(val, "true") == 0 || strcasecmp(val, "yes") == 0 ||
+ strcasecmp(val, "on") == 0 || atoi(val) != 0);
+}
diff --git a/alc/alconfig.h b/alc/alconfig.h
new file mode 100644
index 00000000..ffc7adad
--- /dev/null
+++ b/alc/alconfig.h
@@ -0,0 +1,20 @@
+#ifndef ALCONFIG_H
+#define ALCONFIG_H
+
+#include <string>
+
+#include "aloptional.h"
+
+void ReadALConfig();
+
+int ConfigValueExists(const char *devName, const char *blockName, const char *keyName);
+const char *GetConfigValue(const char *devName, const char *blockName, const char *keyName, const char *def);
+int GetConfigValueBool(const char *devName, const char *blockName, const char *keyName, int def);
+
+al::optional<std::string> ConfigValueStr(const char *devName, const char *blockName, const char *keyName);
+al::optional<int> ConfigValueInt(const char *devName, const char *blockName, const char *keyName);
+al::optional<unsigned int> ConfigValueUInt(const char *devName, const char *blockName, const char *keyName);
+al::optional<float> ConfigValueFloat(const char *devName, const char *blockName, const char *keyName);
+al::optional<bool> ConfigValueBool(const char *devName, const char *blockName, const char *keyName);
+
+#endif /* ALCONFIG_H */
diff --git a/alc/alcontext.h b/alc/alcontext.h
new file mode 100644
index 00000000..cf956079
--- /dev/null
+++ b/alc/alcontext.h
@@ -0,0 +1,217 @@
+#ifndef ALCONTEXT_H
+#define ALCONTEXT_H
+
+#include <mutex>
+#include <atomic>
+#include <memory>
+#include <thread>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "AL/alext.h"
+#include "inprogext.h"
+
+#include "atomic.h"
+#include "vector.h"
+#include "threads.h"
+#include "almalloc.h"
+#include "alnumeric.h"
+
+#include "alListener.h"
+#include "alu.h"
+
+
+struct ALsource;
+struct ALeffectslot;
+struct ALcontextProps;
+struct ALlistenerProps;
+struct ALvoiceProps;
+struct ALeffectslotProps;
+struct RingBuffer;
+
+enum class DistanceModel {
+ InverseClamped = AL_INVERSE_DISTANCE_CLAMPED,
+ LinearClamped = AL_LINEAR_DISTANCE_CLAMPED,
+ ExponentClamped = AL_EXPONENT_DISTANCE_CLAMPED,
+ Inverse = AL_INVERSE_DISTANCE,
+ Linear = AL_LINEAR_DISTANCE,
+ Exponent = AL_EXPONENT_DISTANCE,
+ Disable = AL_NONE,
+
+ Default = InverseClamped
+};
+
+struct SourceSubList {
+ uint64_t FreeMask{~0_u64};
+ ALsource *Sources{nullptr}; /* 64 */
+
+ SourceSubList() noexcept = default;
+ SourceSubList(const SourceSubList&) = delete;
+ SourceSubList(SourceSubList&& rhs) noexcept : FreeMask{rhs.FreeMask}, Sources{rhs.Sources}
+ { rhs.FreeMask = ~0_u64; rhs.Sources = nullptr; }
+ ~SourceSubList();
+
+ SourceSubList& operator=(const SourceSubList&) = delete;
+ SourceSubList& operator=(SourceSubList&& rhs) noexcept
+ { std::swap(FreeMask, rhs.FreeMask); std::swap(Sources, rhs.Sources); return *this; }
+};
+
+struct EffectSlotSubList {
+ uint64_t FreeMask{~0_u64};
+ ALeffectslot *EffectSlots{nullptr}; /* 64 */
+
+ EffectSlotSubList() noexcept = default;
+ EffectSlotSubList(const EffectSlotSubList&) = delete;
+ EffectSlotSubList(EffectSlotSubList&& rhs) noexcept
+ : FreeMask{rhs.FreeMask}, EffectSlots{rhs.EffectSlots}
+ { rhs.FreeMask = ~0_u64; rhs.EffectSlots = nullptr; }
+ ~EffectSlotSubList();
+
+ EffectSlotSubList& operator=(const EffectSlotSubList&) = delete;
+ EffectSlotSubList& operator=(EffectSlotSubList&& rhs) noexcept
+ { std::swap(FreeMask, rhs.FreeMask); std::swap(EffectSlots, rhs.EffectSlots); return *this; }
+};
+
+struct ALCcontext {
+ RefCount ref{1u};
+
+ al::vector<SourceSubList> SourceList;
+ ALuint NumSources{0};
+ std::mutex SourceLock;
+
+ al::vector<EffectSlotSubList> EffectSlotList;
+ ALuint NumEffectSlots{0u};
+ std::mutex EffectSlotLock;
+
+ std::atomic<ALenum> LastError{AL_NO_ERROR};
+
+ DistanceModel mDistanceModel{DistanceModel::Default};
+ ALboolean SourceDistanceModel{AL_FALSE};
+
+ ALfloat DopplerFactor{1.0f};
+ ALfloat DopplerVelocity{1.0f};
+ ALfloat SpeedOfSound{};
+ ALfloat MetersPerUnit{1.0f};
+
+ std::atomic_flag PropsClean;
+ std::atomic<bool> DeferUpdates{false};
+
+ std::mutex PropLock;
+
+ /* Counter for the pre-mixing updates, in 31.1 fixed point (lowest bit
+ * indicates if updates are currently happening).
+ */
+ RefCount UpdateCount{0u};
+ std::atomic<bool> HoldUpdates{false};
+
+ ALfloat GainBoost{1.0f};
+
+ std::atomic<ALcontextProps*> Update{nullptr};
+
+ /* Linked lists of unused property containers, free to use for future
+ * updates.
+ */
+ std::atomic<ALcontextProps*> FreeContextProps{nullptr};
+ std::atomic<ALlistenerProps*> FreeListenerProps{nullptr};
+ std::atomic<ALvoiceProps*> FreeVoiceProps{nullptr};
+ std::atomic<ALeffectslotProps*> FreeEffectslotProps{nullptr};
+
+ std::unique_ptr<al::FlexArray<ALvoice>> Voices{nullptr};
+ std::atomic<ALuint> VoiceCount{0u};
+
+ using ALeffectslotArray = al::FlexArray<ALeffectslot*>;
+ std::atomic<ALeffectslotArray*> ActiveAuxSlots{nullptr};
+
+ std::thread EventThread;
+ al::semaphore EventSem;
+ std::unique_ptr<RingBuffer> AsyncEvents;
+ std::atomic<ALbitfieldSOFT> EnabledEvts{0u};
+ std::mutex EventCbLock;
+ ALEVENTPROCSOFT EventCb{};
+ void *EventParam{nullptr};
+
+ /* Default effect slot */
+ std::unique_ptr<ALeffectslot> DefaultSlot;
+
+ ALCdevice *const Device;
+ const ALCchar *ExtensionList{nullptr};
+
+ ALlistener Listener{};
+
+
+ ALCcontext(ALCdevice *device);
+ ALCcontext(const ALCcontext&) = delete;
+ ALCcontext& operator=(const ALCcontext&) = delete;
+ ~ALCcontext();
+
+ DEF_NEWDEL(ALCcontext)
+};
+
+void ALCcontext_DecRef(ALCcontext *context);
+
+void UpdateContextProps(ALCcontext *context);
+
+void ALCcontext_DeferUpdates(ALCcontext *context);
+void ALCcontext_ProcessUpdates(ALCcontext *context);
+
+
+/* Simple RAII context reference. Takes the reference of the provided
+ * ALCcontext, and decrements it when leaving scope. Movable (transfer
+ * reference) but not copyable (no new references).
+ */
+class ContextRef {
+ ALCcontext *mCtx{nullptr};
+
+ void reset() noexcept
+ {
+ if(mCtx)
+ ALCcontext_DecRef(mCtx);
+ mCtx = nullptr;
+ }
+
+public:
+ ContextRef() noexcept = default;
+ ContextRef(ContextRef&& rhs) noexcept : mCtx{rhs.mCtx}
+ { rhs.mCtx = nullptr; }
+ explicit ContextRef(ALCcontext *ctx) noexcept : mCtx(ctx) { }
+ ~ContextRef() { reset(); }
+
+ ContextRef& operator=(const ContextRef&) = delete;
+ ContextRef& operator=(ContextRef&& rhs) noexcept
+ { std::swap(mCtx, rhs.mCtx); return *this; }
+
+ operator bool() const noexcept { return mCtx != nullptr; }
+
+ ALCcontext* operator->() const noexcept { return mCtx; }
+ ALCcontext* get() const noexcept { return mCtx; }
+
+ ALCcontext* release() noexcept
+ {
+ ALCcontext *ret{mCtx};
+ mCtx = nullptr;
+ return ret;
+ }
+};
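+
+/* Editorial sketch (not part of the original source): typical use is to grab
+ * the current context and bail out if none is set, e.g.
+ *
+ *   ContextRef ctx{GetContextRef()};
+ *   if(!ctx) return;
+ *   ctx->LastError.store(AL_NO_ERROR);
+ */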
+
+inline bool operator==(const ContextRef &lhs, const ALCcontext *rhs) noexcept
+{ return lhs.get() == rhs; }
+inline bool operator!=(const ContextRef &lhs, const ALCcontext *rhs) noexcept
+{ return !(lhs == rhs); }
+inline bool operator<(const ContextRef &lhs, const ALCcontext *rhs) noexcept
+{ return lhs.get() < rhs; }
+
+ContextRef GetContextRef(void);
+
+
+struct ALcontextProps {
+ ALfloat DopplerFactor;
+ ALfloat DopplerVelocity;
+ ALfloat SpeedOfSound;
+ ALboolean SourceDistanceModel;
+ DistanceModel mDistanceModel;
+ ALfloat MetersPerUnit;
+
+ std::atomic<ALcontextProps*> next;
+};
+
+#endif /* ALCONTEXT_H */
diff --git a/alc/alu.cpp b/alc/alu.cpp
new file mode 100644
index 00000000..cc1a5a98
--- /dev/null
+++ b/alc/alu.cpp
@@ -0,0 +1,1798 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "alu.h"
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <cassert>
+#include <chrono>
+#include <climits>
+#include <cmath>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <numeric>
+#include <utility>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "AL/efx.h"
+
+#include "alAuxEffectSlot.h"
+#include "alBuffer.h"
+#include "alcmain.h"
+#include "alEffect.h"
+#include "alListener.h"
+#include "alcontext.h"
+#include "almalloc.h"
+#include "alnumeric.h"
+#include "alspan.h"
+#include "ambidefs.h"
+#include "atomic.h"
+#include "bformatdec.h"
+#include "bs2b.h"
+#include "cpu_caps.h"
+#include "effects/base.h"
+#include "filters/biquad.h"
+#include "filters/nfc.h"
+#include "filters/splitter.h"
+#include "fpu_modes.h"
+#include "hrtf.h"
+#include "inprogext.h"
+#include "mastering.h"
+#include "math_defs.h"
+#include "mixer/defs.h"
+#include "opthelpers.h"
+#include "ringbuffer.h"
+#include "threads.h"
+#include "uhjfilter.h"
+#include "vecmat.h"
+#include "vector.h"
+
+#include "bsinc_inc.h"
+
+
+namespace {
+
+using namespace std::placeholders;
+
+ALfloat InitConeScale()
+{
+ ALfloat ret{1.0f};
+ const char *str{getenv("__ALSOFT_HALF_ANGLE_CONES")};
+ if(str && (strcasecmp(str, "true") == 0 || strtol(str, nullptr, 0) == 1))
+ ret *= 0.5f;
+ return ret;
+}
+
+ALfloat InitZScale()
+{
+ ALfloat ret{1.0f};
+ const char *str{getenv("__ALSOFT_REVERSE_Z")};
+ if(str && (strcasecmp(str, "true") == 0 || strtol(str, nullptr, 0) == 1))
+ ret *= -1.0f;
+ return ret;
+}
+
+ALboolean InitReverbSOS()
+{
+ ALboolean ret{AL_FALSE};
+ const char *str{getenv("__ALSOFT_REVERB_IGNORES_SOUND_SPEED")};
+ if(str && (strcasecmp(str, "true") == 0 || strtol(str, nullptr, 0) == 1))
+ ret = AL_TRUE;
+ return ret;
+}
+
+} // namespace
+
+/* Cone scalar */
+const ALfloat ConeScale{InitConeScale()};
+
+/* Localized Z scalar for mono sources */
+const ALfloat ZScale{InitZScale()};
+
+/* Force default speed of sound for distance-related reverb decay. */
+const ALboolean OverrideReverbSpeedOfSound{InitReverbSOS()};
+
+
+namespace {
+
+void ClearArray(ALfloat (&f)[MAX_OUTPUT_CHANNELS])
+{
+ std::fill(std::begin(f), std::end(f), 0.0f);
+}
+
+struct ChanMap {
+ Channel channel;
+ ALfloat angle;
+ ALfloat elevation;
+};
+
+HrtfDirectMixerFunc MixDirectHrtf = MixDirectHrtf_<CTag>;
+inline HrtfDirectMixerFunc SelectHrtfMixer(void)
+{
+#ifdef HAVE_NEON
+ if((CPUCapFlags&CPU_CAP_NEON))
+ return MixDirectHrtf_<NEONTag>;
+#endif
+#ifdef HAVE_SSE
+ if((CPUCapFlags&CPU_CAP_SSE))
+ return MixDirectHrtf_<SSETag>;
+#endif
+
+ return MixDirectHrtf_<CTag>;
+}
+
+} // namespace
+
+void aluInit(void)
+{
+ MixDirectHrtf = SelectHrtfMixer();
+}
+
+
+void ProcessHrtf(ALCdevice *device, const ALsizei SamplesToDo)
+{
+ /* HRTF is stereo output only. */
+ const int lidx{device->RealOut.ChannelIndex[FrontLeft]};
+ const int ridx{device->RealOut.ChannelIndex[FrontRight]};
+ ASSUME(lidx >= 0 && ridx >= 0);
+
+ DirectHrtfState *state{device->mHrtfState.get()};
+ MixDirectHrtf(device->RealOut.Buffer[lidx], device->RealOut.Buffer[ridx], device->Dry.Buffer,
+ device->HrtfAccumData, state, SamplesToDo);
+}
+
+void ProcessAmbiDec(ALCdevice *device, const ALsizei SamplesToDo)
+{
+ BFormatDec *ambidec{device->AmbiDecoder.get()};
+ ambidec->process(device->RealOut.Buffer, device->Dry.Buffer.data(), SamplesToDo);
+}
+
+void ProcessUhj(ALCdevice *device, const ALsizei SamplesToDo)
+{
+ /* UHJ is stereo output only. */
+ const int lidx{device->RealOut.ChannelIndex[FrontLeft]};
+ const int ridx{device->RealOut.ChannelIndex[FrontRight]};
+ ASSUME(lidx >= 0 && ridx >= 0);
+
+ /* Encode to stereo-compatible 2-channel UHJ output. */
+ Uhj2Encoder *uhj2enc{device->Uhj_Encoder.get()};
+ uhj2enc->encode(device->RealOut.Buffer[lidx], device->RealOut.Buffer[ridx],
+ device->Dry.Buffer.data(), SamplesToDo);
+}
+
+void ProcessBs2b(ALCdevice *device, const ALsizei SamplesToDo)
+{
+ /* First, decode the ambisonic mix to the "real" output. */
+ BFormatDec *ambidec{device->AmbiDecoder.get()};
+ ambidec->process(device->RealOut.Buffer, device->Dry.Buffer.data(), SamplesToDo);
+
+ /* BS2B is stereo output only. */
+ const int lidx{device->RealOut.ChannelIndex[FrontLeft]};
+ const int ridx{device->RealOut.ChannelIndex[FrontRight]};
+ ASSUME(lidx >= 0 && ridx >= 0);
+
+ /* Now apply the BS2B binaural/crossfeed filter. */
+ bs2b_cross_feed(device->Bs2b.get(), device->RealOut.Buffer[lidx].data(),
+ device->RealOut.Buffer[ridx].data(), SamplesToDo);
+}
+
+
+/* Prepares the interpolator for a given rate (determined by increment).
+ *
+ * With a bit of work, and a trade of memory for CPU cost, this could be
+ * modified for use with an interpolated increment for buttery-smooth pitch
+ * changes.
+ */
+void BsincPrepare(const ALuint increment, BsincState *state, const BSincTable *table)
+{
+ ALsizei si{BSINC_SCALE_COUNT - 1};
+ ALfloat sf{0.0f};
+
+ if(increment > FRACTIONONE)
+ {
+ sf = static_cast<ALfloat>(FRACTIONONE) / increment;
+ sf = maxf(0.0f, (BSINC_SCALE_COUNT-1) * (sf-table->scaleBase) * table->scaleRange);
+ si = float2int(sf);
+ /* The interpolation factor is fit to this diagonally-symmetric curve
+ * to reduce the transition ripple caused by interpolating different
+ * scales of the sinc function.
+ */
+ sf = 1.0f - std::cos(std::asin(sf - si));
+ }
+
+ state->sf = sf;
+ state->m = table->m[si];
+ state->l = (state->m/2) - 1;
+ state->filter = table->Tab + table->filterOffset[si];
+}
+
+
+namespace {
+
+/* This RNG method was created based on the math found in opusdec. It's quick,
+ * and starting with a seed value of 22222, is suitable for generating white
+ * noise.
+ */
+inline ALuint dither_rng(ALuint *seed) noexcept
+{
+ *seed = (*seed * 96314165) + 907633515;
+ return *seed;
+}
+
+
+inline alu::Vector aluCrossproduct(const alu::Vector &in1, const alu::Vector &in2)
+{
+ return alu::Vector{
+ in1[1]*in2[2] - in1[2]*in2[1],
+ in1[2]*in2[0] - in1[0]*in2[2],
+ in1[0]*in2[1] - in1[1]*in2[0],
+ 0.0f
+ };
+}
+
+inline ALfloat aluDotproduct(const alu::Vector &vec1, const alu::Vector &vec2)
+{
+ return vec1[0]*vec2[0] + vec1[1]*vec2[1] + vec1[2]*vec2[2];
+}
+
+
+alu::Vector operator*(const alu::Matrix &mtx, const alu::Vector &vec) noexcept
+{
+ return alu::Vector{
+ vec[0]*mtx[0][0] + vec[1]*mtx[1][0] + vec[2]*mtx[2][0] + vec[3]*mtx[3][0],
+ vec[0]*mtx[0][1] + vec[1]*mtx[1][1] + vec[2]*mtx[2][1] + vec[3]*mtx[3][1],
+ vec[0]*mtx[0][2] + vec[1]*mtx[1][2] + vec[2]*mtx[2][2] + vec[3]*mtx[3][2],
+ vec[0]*mtx[0][3] + vec[1]*mtx[1][3] + vec[2]*mtx[2][3] + vec[3]*mtx[3][3]
+ };
+}
+
+
+bool CalcContextParams(ALCcontext *Context)
+{
+ ALcontextProps *props{Context->Update.exchange(nullptr, std::memory_order_acq_rel)};
+ if(!props) return false;
+
+ ALlistener &Listener = Context->Listener;
+ Listener.Params.MetersPerUnit = props->MetersPerUnit;
+
+ Listener.Params.DopplerFactor = props->DopplerFactor;
+ Listener.Params.SpeedOfSound = props->SpeedOfSound * props->DopplerVelocity;
+ if(!OverrideReverbSpeedOfSound)
+ Listener.Params.ReverbSpeedOfSound = Listener.Params.SpeedOfSound *
+ Listener.Params.MetersPerUnit;
+
+ Listener.Params.SourceDistanceModel = props->SourceDistanceModel;
+ Listener.Params.mDistanceModel = props->mDistanceModel;
+
+ AtomicReplaceHead(Context->FreeContextProps, props);
+ return true;
+}
+
+bool CalcListenerParams(ALCcontext *Context)
+{
+ ALlistener &Listener = Context->Listener;
+
+ ALlistenerProps *props{Listener.Update.exchange(nullptr, std::memory_order_acq_rel)};
+ if(!props) return false;
+
+ /* AT then UP */
+ alu::Vector N{props->OrientAt[0], props->OrientAt[1], props->OrientAt[2], 0.0f};
+ N.normalize();
+ alu::Vector V{props->OrientUp[0], props->OrientUp[1], props->OrientUp[2], 0.0f};
+ V.normalize();
+ /* Build and normalize right-vector */
+ alu::Vector U{aluCrossproduct(N, V)};
+ U.normalize();
+
+ Listener.Params.Matrix = alu::Matrix{
+ U[0], V[0], -N[0], 0.0f,
+ U[1], V[1], -N[1], 0.0f,
+ U[2], V[2], -N[2], 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+ };
+
+ const alu::Vector P{Listener.Params.Matrix *
+ alu::Vector{props->Position[0], props->Position[1], props->Position[2], 1.0f}};
+ Listener.Params.Matrix.setRow(3, -P[0], -P[1], -P[2], 1.0f);
+
+ const alu::Vector vel{props->Velocity[0], props->Velocity[1], props->Velocity[2], 0.0f};
+ Listener.Params.Velocity = Listener.Params.Matrix * vel;
+
+ Listener.Params.Gain = props->Gain * Context->GainBoost;
+
+ AtomicReplaceHead(Context->FreeListenerProps, props);
+ return true;
+}
+
+bool CalcEffectSlotParams(ALeffectslot *slot, ALCcontext *context, bool force)
+{
+ ALeffectslotProps *props{slot->Update.exchange(nullptr, std::memory_order_acq_rel)};
+ if(!props && !force) return false;
+
+ EffectState *state;
+ if(!props)
+ state = slot->Params.mEffectState;
+ else
+ {
+ slot->Params.Gain = props->Gain;
+ slot->Params.AuxSendAuto = props->AuxSendAuto;
+ slot->Params.Target = props->Target;
+ slot->Params.EffectType = props->Type;
+ slot->Params.mEffectProps = props->Props;
+ if(IsReverbEffect(props->Type))
+ {
+ slot->Params.RoomRolloff = props->Props.Reverb.RoomRolloffFactor;
+ slot->Params.DecayTime = props->Props.Reverb.DecayTime;
+ slot->Params.DecayLFRatio = props->Props.Reverb.DecayLFRatio;
+ slot->Params.DecayHFRatio = props->Props.Reverb.DecayHFRatio;
+ slot->Params.DecayHFLimit = props->Props.Reverb.DecayHFLimit;
+ slot->Params.AirAbsorptionGainHF = props->Props.Reverb.AirAbsorptionGainHF;
+ }
+ else
+ {
+ slot->Params.RoomRolloff = 0.0f;
+ slot->Params.DecayTime = 0.0f;
+ slot->Params.DecayLFRatio = 0.0f;
+ slot->Params.DecayHFRatio = 0.0f;
+ slot->Params.DecayHFLimit = AL_FALSE;
+ slot->Params.AirAbsorptionGainHF = 1.0f;
+ }
+
+ state = props->State;
+ props->State = nullptr;
+ EffectState *oldstate{slot->Params.mEffectState};
+ slot->Params.mEffectState = state;
+
+ /* Manually decrement the old effect state's refcount if it's greater
+ * than 1. We need to be a bit clever here to avoid the refcount
+ * reaching 0 since it can't be deleted in the mixer.
+ */
+ ALuint oldval{oldstate->mRef.load(std::memory_order_acquire)};
+ while(oldval > 1 && !oldstate->mRef.compare_exchange_weak(oldval, oldval-1,
+ std::memory_order_acq_rel, std::memory_order_acquire))
+ {
+ /* oldval was updated with the current value on failure, so just
+ * try again.
+ */
+ }
+
+ if(oldval < 2)
+ {
+ /* Otherwise, if it would be deleted, send it off with a release
+ * event.
+ */
+ RingBuffer *ring{context->AsyncEvents.get()};
+ auto evt_vec = ring->getWriteVector();
+ if(LIKELY(evt_vec.first.len > 0))
+ {
+ AsyncEvent *evt{new (evt_vec.first.buf) AsyncEvent{EventType_ReleaseEffectState}};
+ evt->u.mEffectState = oldstate;
+ ring->writeAdvance(1);
+ context->EventSem.post();
+ }
+ else
+ {
+ /* If writing the event failed, the queue was probably full.
+ * Store the old state in the property object where it can
+ * eventually be cleaned up sometime later (not ideal, but
+ * better than blocking or leaking).
+ */
+ props->State = oldstate;
+ }
+ }
+
+ AtomicReplaceHead(context->FreeEffectslotProps, props);
+ }
+
+ EffectTarget output;
+ if(ALeffectslot *target{slot->Params.Target})
+ output = EffectTarget{&target->Wet, nullptr};
+ else
+ {
+ ALCdevice *device{context->Device};
+ output = EffectTarget{&device->Dry, &device->RealOut};
+ }
+ state->update(context, slot, &slot->Params.mEffectProps, output);
+ return true;
+}
+
+
+/* Scales the given azimuth toward the side (+/- pi/2 radians) for positions in
+ * front.
+ */
+inline float ScaleAzimuthFront(float azimuth, float scale)
+{
+ const ALfloat abs_azi{std::fabs(azimuth)};
+ if(!(abs_azi > al::MathDefs<float>::Pi()*0.5f))
+ return minf(abs_azi*scale, al::MathDefs<float>::Pi()*0.5f) * std::copysign(1.0f, azimuth);
+ return azimuth;
+}
+
+void CalcPanningAndFilters(ALvoice *voice, const ALfloat xpos, const ALfloat ypos,
+ const ALfloat zpos, const ALfloat Distance, const ALfloat Spread, const ALfloat DryGain,
+ const ALfloat DryGainHF, const ALfloat DryGainLF, const ALfloat (&WetGain)[MAX_SENDS],
+ const ALfloat (&WetGainLF)[MAX_SENDS], const ALfloat (&WetGainHF)[MAX_SENDS],
+ ALeffectslot *(&SendSlots)[MAX_SENDS], const ALvoicePropsBase *props,
+ const ALlistener &Listener, const ALCdevice *Device)
+{
+ static constexpr ChanMap MonoMap[1]{
+ { FrontCenter, 0.0f, 0.0f }
+ }, RearMap[2]{
+ { BackLeft, Deg2Rad(-150.0f), Deg2Rad(0.0f) },
+ { BackRight, Deg2Rad( 150.0f), Deg2Rad(0.0f) }
+ }, QuadMap[4]{
+ { FrontLeft, Deg2Rad( -45.0f), Deg2Rad(0.0f) },
+ { FrontRight, Deg2Rad( 45.0f), Deg2Rad(0.0f) },
+ { BackLeft, Deg2Rad(-135.0f), Deg2Rad(0.0f) },
+ { BackRight, Deg2Rad( 135.0f), Deg2Rad(0.0f) }
+ }, X51Map[6]{
+ { FrontLeft, Deg2Rad( -30.0f), Deg2Rad(0.0f) },
+ { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
+ { FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
+ { LFE, 0.0f, 0.0f },
+ { SideLeft, Deg2Rad(-110.0f), Deg2Rad(0.0f) },
+ { SideRight, Deg2Rad( 110.0f), Deg2Rad(0.0f) }
+ }, X61Map[7]{
+ { FrontLeft, Deg2Rad(-30.0f), Deg2Rad(0.0f) },
+ { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
+ { FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
+ { LFE, 0.0f, 0.0f },
+ { BackCenter, Deg2Rad(180.0f), Deg2Rad(0.0f) },
+ { SideLeft, Deg2Rad(-90.0f), Deg2Rad(0.0f) },
+ { SideRight, Deg2Rad( 90.0f), Deg2Rad(0.0f) }
+ }, X71Map[8]{
+ { FrontLeft, Deg2Rad( -30.0f), Deg2Rad(0.0f) },
+ { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
+ { FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
+ { LFE, 0.0f, 0.0f },
+ { BackLeft, Deg2Rad(-150.0f), Deg2Rad(0.0f) },
+ { BackRight, Deg2Rad( 150.0f), Deg2Rad(0.0f) },
+ { SideLeft, Deg2Rad( -90.0f), Deg2Rad(0.0f) },
+ { SideRight, Deg2Rad( 90.0f), Deg2Rad(0.0f) }
+ };
+
+ ChanMap StereoMap[2]{
+ { FrontLeft, Deg2Rad(-30.0f), Deg2Rad(0.0f) },
+ { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) }
+ };
+
+ const auto Frequency = static_cast<ALfloat>(Device->Frequency);
+ const ALsizei NumSends{Device->NumAuxSends};
+ ASSUME(NumSends >= 0);
+
+ bool DirectChannels{props->DirectChannels != AL_FALSE};
+ const ChanMap *chans{nullptr};
+ ALsizei num_channels{0};
+ bool isbformat{false};
+ ALfloat downmix_gain{1.0f};
+ switch(voice->mFmtChannels)
+ {
+ case FmtMono:
+ chans = MonoMap;
+ num_channels = 1;
+ /* Mono buffers are never played direct. */
+ DirectChannels = false;
+ break;
+
+ case FmtStereo:
+ /* Convert counter-clockwise to clockwise. */
+ StereoMap[0].angle = -props->StereoPan[0];
+ StereoMap[1].angle = -props->StereoPan[1];
+
+ chans = StereoMap;
+ num_channels = 2;
+ downmix_gain = 1.0f / 2.0f;
+ break;
+
+ case FmtRear:
+ chans = RearMap;
+ num_channels = 2;
+ downmix_gain = 1.0f / 2.0f;
+ break;
+
+ case FmtQuad:
+ chans = QuadMap;
+ num_channels = 4;
+ downmix_gain = 1.0f / 4.0f;
+ break;
+
+ case FmtX51:
+ chans = X51Map;
+ num_channels = 6;
+ /* NOTE: Excludes LFE. */
+ downmix_gain = 1.0f / 5.0f;
+ break;
+
+ case FmtX61:
+ chans = X61Map;
+ num_channels = 7;
+ /* NOTE: Excludes LFE. */
+ downmix_gain = 1.0f / 6.0f;
+ break;
+
+ case FmtX71:
+ chans = X71Map;
+ num_channels = 8;
+ /* NOTE: Excludes LFE. */
+ downmix_gain = 1.0f / 7.0f;
+ break;
+
+ case FmtBFormat2D:
+ num_channels = 3;
+ isbformat = true;
+ DirectChannels = false;
+ break;
+
+ case FmtBFormat3D:
+ num_channels = 4;
+ isbformat = true;
+ DirectChannels = false;
+ break;
+ }
+ ASSUME(num_channels > 0);
+
+ std::for_each(voice->mChans.begin(), voice->mChans.begin()+num_channels,
+ [NumSends](ALvoice::ChannelData &chandata) -> void
+ {
+ chandata.mDryParams.Hrtf.Target = HrtfFilter{};
+ ClearArray(chandata.mDryParams.Gains.Target);
+ std::for_each(chandata.mWetParams.begin(), chandata.mWetParams.begin()+NumSends,
+ [](SendParams &params) -> void { ClearArray(params.Gains.Target); });
+ });
+
+ voice->mFlags &= ~(VOICE_HAS_HRTF | VOICE_HAS_NFC);
+ if(isbformat)
+ {
+ /* Special handling for B-Format sources. */
+
+ if(Distance > std::numeric_limits<float>::epsilon())
+ {
+ /* Panning a B-Format sound toward some direction is easy. Just pan
+ * the first (W) channel as a normal mono sound and silence the
+ * others.
+ */
+
+ if(Device->AvgSpeakerDist > 0.0f)
+ {
+ /* Clamp the distance for really close sources, to prevent
+ * excessive bass.
+ */
+ const ALfloat mdist{maxf(Distance, Device->AvgSpeakerDist/4.0f)};
+ const ALfloat w0{SPEEDOFSOUNDMETRESPERSEC / (mdist * Frequency)};
+
+ /* Only need to adjust the first channel of a B-Format source. */
+ voice->mChans[0].mDryParams.NFCtrlFilter.adjust(w0);
+
+ voice->mFlags |= VOICE_HAS_NFC;
+ }
+
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ if(Device->mRenderMode != StereoPair)
+ CalcDirectionCoeffs({xpos, ypos, zpos}, Spread, coeffs);
+ else
+ {
+ /* Clamp Y, in case rounding errors caused it to end up outside
+ * of -1...+1.
+ */
+ const ALfloat ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
+ /* Negate Z for right-handed coords with -Z in front. */
+ const ALfloat az{std::atan2(xpos, -zpos)};
+
+ /* A scalar of 1.5 for plain stereo results in +/-60 degrees
+ * being moved to +/-90 degrees for direct right and left
+ * speaker responses.
+ */
+ CalcAngleCoeffs(ScaleAzimuthFront(az, 1.5f), ev, Spread, coeffs);
+ }
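+            /* For illustration: a source directly to the listener's right,
+             * (x,y,z) = (1,0,0), yields ev = 0 and az = atan2(1, 0) = +90
+             * degrees here, while a source at the +60 degree stereo speaker
+             * angle is rescaled to +90 degrees by the 1.5 scalar noted above.
+             */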
+
+ /* NOTE: W needs to be scaled due to FuMa normalization. */
+ const ALfloat &scale0 = AmbiScale::FromFuMa[0];
+ ComputePanGains(&Device->Dry, coeffs, DryGain*scale0,
+ voice->mChans[0].mDryParams.Gains.Target);
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ if(const ALeffectslot *Slot{SendSlots[i]})
+ ComputePanGains(&Slot->Wet, coeffs, WetGain[i]*scale0,
+ voice->mChans[0].mWetParams[i].Gains.Target);
+ }
+ }
+ else
+ {
+ if(Device->AvgSpeakerDist > 0.0f)
+ {
+ /* NOTE: The NFCtrlFilters were created with a w0 of 0, which
+ * is what we want for FOA input. The first channel may have
+ * been previously re-adjusted if panned, so reset it.
+ */
+ voice->mChans[0].mDryParams.NFCtrlFilter.adjust(0.0f);
+
+ voice->mFlags |= VOICE_HAS_NFC;
+ }
+
+ /* Local B-Format sources have their XYZ channels rotated according
+ * to the orientation.
+ */
+ /* AT then UP */
+ alu::Vector N{props->OrientAt[0], props->OrientAt[1], props->OrientAt[2], 0.0f};
+ N.normalize();
+ alu::Vector V{props->OrientUp[0], props->OrientUp[1], props->OrientUp[2], 0.0f};
+ V.normalize();
+ if(!props->HeadRelative)
+ {
+ N = Listener.Params.Matrix * N;
+ V = Listener.Params.Matrix * V;
+ }
+ /* Build and normalize right-vector */
+ alu::Vector U{aluCrossproduct(N, V)};
+ U.normalize();
+
+ /* Build a rotate + conversion matrix (FuMa -> ACN+N3D). NOTE: This
+ * matrix is transposed, for the inputs to align on the rows and
+ * outputs on the columns.
+ */
+ const ALfloat &wscale = AmbiScale::FromFuMa[0];
+ const ALfloat &yscale = AmbiScale::FromFuMa[1];
+ const ALfloat &zscale = AmbiScale::FromFuMa[2];
+ const ALfloat &xscale = AmbiScale::FromFuMa[3];
+ const ALfloat matrix[4][MAX_AMBI_CHANNELS]{
+ // ACN0 ACN1 ACN2 ACN3
+ { wscale, 0.0f, 0.0f, 0.0f }, // FuMa W
+ { 0.0f, -N[0]*xscale, N[1]*xscale, -N[2]*xscale }, // FuMa X
+ { 0.0f, U[0]*yscale, -U[1]*yscale, U[2]*yscale }, // FuMa Y
+ { 0.0f, -V[0]*zscale, V[1]*zscale, -V[2]*zscale } // FuMa Z
+ };
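+            /* As a rough check, with the default orientation At = (0,0,-1)
+             * and Up = (0,1,0), the vectors come out as N = (0,0,-1),
+             * V = (0,1,0), U = (1,0,0), and this matrix reduces to the plain
+             * FuMa -> ACN reordering (W->ACN0, Y->ACN1, Z->ACN2, X->ACN3)
+             * scaled by the FromFuMa normalization factors.
+             */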
+
+ for(ALsizei c{0};c < num_channels;c++)
+ {
+ ComputePanGains(&Device->Dry, matrix[c], DryGain,
+ voice->mChans[c].mDryParams.Gains.Target);
+
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ if(const ALeffectslot *Slot{SendSlots[i]})
+ ComputePanGains(&Slot->Wet, matrix[c], WetGain[i],
+ voice->mChans[c].mWetParams[i].Gains.Target);
+ }
+ }
+ }
+ }
+ else if(DirectChannels)
+ {
+ /* Direct source channels always play local. Skip the virtual channels
+ * and write inputs to the matching real outputs.
+ */
+ voice->mDirect.Buffer = Device->RealOut.Buffer;
+
+ for(ALsizei c{0};c < num_channels;c++)
+ {
+ int idx{GetChannelIdxByName(Device->RealOut, chans[c].channel)};
+ if(idx != -1) voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain;
+ }
+
+ /* Auxiliary sends still use normal channel panning since they mix to
+ * B-Format, which can't channel-match.
+ */
+ for(ALsizei c{0};c < num_channels;c++)
+ {
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ CalcAngleCoeffs(chans[c].angle, chans[c].elevation, 0.0f, coeffs);
+
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ if(const ALeffectslot *Slot{SendSlots[i]})
+ ComputePanGains(&Slot->Wet, coeffs, WetGain[i],
+ voice->mChans[c].mWetParams[i].Gains.Target);
+ }
+ }
+ }
+ else if(Device->mRenderMode == HrtfRender)
+ {
+ /* Full HRTF rendering. Skip the virtual channels and render to the
+ * real outputs.
+ */
+ voice->mDirect.Buffer = Device->RealOut.Buffer;
+
+ if(Distance > std::numeric_limits<float>::epsilon())
+ {
+ const ALfloat ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
+ const ALfloat az{std::atan2(xpos, -zpos)};
+
+ /* Get the HRIR coefficients and delays just once, for the given
+ * source direction.
+ */
+ GetHrtfCoeffs(Device->mHrtf, ev, az, Distance, Spread,
+ voice->mChans[0].mDryParams.Hrtf.Target.Coeffs,
+ voice->mChans[0].mDryParams.Hrtf.Target.Delay);
+ voice->mChans[0].mDryParams.Hrtf.Target.Gain = DryGain * downmix_gain;
+
+ /* Remaining channels use the same results as the first. */
+ for(ALsizei c{1};c < num_channels;c++)
+ {
+ /* Skip LFE */
+ if(chans[c].channel == LFE) continue;
+ voice->mChans[c].mDryParams.Hrtf.Target = voice->mChans[0].mDryParams.Hrtf.Target;
+ }
+
+ /* Calculate the directional coefficients once, which apply to all
+ * input channels of the source sends.
+ */
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ CalcDirectionCoeffs({xpos, ypos, zpos}, Spread, coeffs);
+
+ for(ALsizei c{0};c < num_channels;c++)
+ {
+ /* Skip LFE */
+ if(chans[c].channel == LFE)
+ continue;
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ if(const ALeffectslot *Slot{SendSlots[i]})
+ ComputePanGains(&Slot->Wet, coeffs, WetGain[i] * downmix_gain,
+ voice->mChans[c].mWetParams[i].Gains.Target);
+ }
+ }
+ }
+ else
+ {
+ /* Local sources on HRTF play with each channel panned to its
+ * relative location around the listener, providing "virtual
+ * speaker" responses.
+ */
+ for(ALsizei c{0};c < num_channels;c++)
+ {
+ /* Skip LFE */
+ if(chans[c].channel == LFE)
+ continue;
+
+ /* Get the HRIR coefficients and delays for this channel
+ * position.
+ */
+ GetHrtfCoeffs(Device->mHrtf, chans[c].elevation, chans[c].angle,
+ std::numeric_limits<float>::infinity(), Spread,
+ voice->mChans[c].mDryParams.Hrtf.Target.Coeffs,
+ voice->mChans[c].mDryParams.Hrtf.Target.Delay);
+ voice->mChans[c].mDryParams.Hrtf.Target.Gain = DryGain;
+
+ /* Normal panning for auxiliary sends. */
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ CalcAngleCoeffs(chans[c].angle, chans[c].elevation, Spread, coeffs);
+
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ if(const ALeffectslot *Slot{SendSlots[i]})
+ ComputePanGains(&Slot->Wet, coeffs, WetGain[i],
+ voice->mChans[c].mWetParams[i].Gains.Target);
+ }
+ }
+ }
+
+ voice->mFlags |= VOICE_HAS_HRTF;
+ }
+ else
+ {
+ /* Non-HRTF rendering. Use normal panning to the output. */
+
+ if(Distance > std::numeric_limits<float>::epsilon())
+ {
+ /* Calculate NFC filter coefficient if needed. */
+ if(Device->AvgSpeakerDist > 0.0f)
+ {
+ /* Clamp the distance for really close sources, to prevent
+ * excessive bass.
+ */
+ const ALfloat mdist{maxf(Distance, Device->AvgSpeakerDist/4.0f)};
+ const ALfloat w0{SPEEDOFSOUNDMETRESPERSEC / (mdist * Frequency)};
+
+ /* Adjust NFC filters. */
+ for(ALsizei c{0};c < num_channels;c++)
+ voice->mChans[c].mDryParams.NFCtrlFilter.adjust(w0);
+
+ voice->mFlags |= VOICE_HAS_NFC;
+ }
+
+ /* Calculate the directional coefficients once, which apply to all
+ * input channels.
+ */
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ if(Device->mRenderMode != StereoPair)
+ CalcDirectionCoeffs({xpos, ypos, zpos}, Spread, coeffs);
+ else
+ {
+ const ALfloat ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
+ const ALfloat az{std::atan2(xpos, -zpos)};
+ CalcAngleCoeffs(ScaleAzimuthFront(az, 1.5f), ev, Spread, coeffs);
+ }
+
+ for(ALsizei c{0};c < num_channels;c++)
+ {
+ /* Special-case LFE */
+ if(chans[c].channel == LFE)
+ {
+ if(Device->Dry.Buffer.data() == Device->RealOut.Buffer.data())
+ {
+ int idx = GetChannelIdxByName(Device->RealOut, chans[c].channel);
+ if(idx != -1) voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain;
+ }
+ continue;
+ }
+
+ ComputePanGains(&Device->Dry, coeffs, DryGain * downmix_gain,
+ voice->mChans[c].mDryParams.Gains.Target);
+ }
+
+ for(ALsizei c{0};c < num_channels;c++)
+ {
+ /* Skip LFE */
+ if(chans[c].channel == LFE)
+ continue;
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ if(const ALeffectslot *Slot{SendSlots[i]})
+ ComputePanGains(&Slot->Wet, coeffs, WetGain[i] * downmix_gain,
+ voice->mChans[c].mWetParams[i].Gains.Target);
+ }
+ }
+ }
+ else
+ {
+ if(Device->AvgSpeakerDist > 0.0f)
+ {
+ /* If the source distance is 0, set w0 to w1 to act as a pass-
+ * through. We still want to pass the signal through the
+ * filters so they keep an appropriate history, in case the
+ * source moves away from the listener.
+ */
+ const ALfloat w0{SPEEDOFSOUNDMETRESPERSEC / (Device->AvgSpeakerDist * Frequency)};
+
+ for(ALsizei c{0};c < num_channels;c++)
+ voice->mChans[c].mDryParams.NFCtrlFilter.adjust(w0);
+
+ voice->mFlags |= VOICE_HAS_NFC;
+ }
+
+ for(ALsizei c{0};c < num_channels;c++)
+ {
+ /* Special-case LFE */
+ if(chans[c].channel == LFE)
+ {
+ if(Device->Dry.Buffer.data() == Device->RealOut.Buffer.data())
+ {
+ int idx = GetChannelIdxByName(Device->RealOut, chans[c].channel);
+ if(idx != -1) voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain;
+ }
+ continue;
+ }
+
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ CalcAngleCoeffs(
+ (Device->mRenderMode==StereoPair) ? ScaleAzimuthFront(chans[c].angle, 3.0f)
+ : chans[c].angle,
+ chans[c].elevation, Spread, coeffs
+ );
+
+ ComputePanGains(&Device->Dry, coeffs, DryGain,
+ voice->mChans[c].mDryParams.Gains.Target);
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ if(const ALeffectslot *Slot{SendSlots[i]})
+ ComputePanGains(&Slot->Wet, coeffs, WetGain[i],
+ voice->mChans[c].mWetParams[i].Gains.Target);
+ }
+ }
+ }
+ }
+
+ {
+ const ALfloat hfScale{props->Direct.HFReference / Frequency};
+ const ALfloat lfScale{props->Direct.LFReference / Frequency};
+ const ALfloat gainHF{maxf(DryGainHF, 0.001f)}; /* Limit -60dB */
+ const ALfloat gainLF{maxf(DryGainLF, 0.001f)};
+
+ voice->mDirect.FilterType = AF_None;
+ if(gainHF != 1.0f) voice->mDirect.FilterType |= AF_LowPass;
+ if(gainLF != 1.0f) voice->mDirect.FilterType |= AF_HighPass;
+ auto &lowpass = voice->mChans[0].mDryParams.LowPass;
+ auto &highpass = voice->mChans[0].mDryParams.HighPass;
+ lowpass.setParams(BiquadType::HighShelf, gainHF, hfScale,
+ lowpass.rcpQFromSlope(gainHF, 1.0f));
+ highpass.setParams(BiquadType::LowShelf, gainLF, lfScale,
+ highpass.rcpQFromSlope(gainLF, 1.0f));
+ for(ALsizei c{1};c < num_channels;c++)
+ {
+ voice->mChans[c].mDryParams.LowPass.copyParamsFrom(lowpass);
+ voice->mChans[c].mDryParams.HighPass.copyParamsFrom(highpass);
+ }
+ }
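+    /* For reference, the 0.001 floor above corresponds to -60dB
+     * (20*log10(0.001)), and hfScale/lfScale express the filter reference
+     * frequencies as a fraction of the device sample rate; e.g. a 5000Hz HF
+     * reference on a 44100Hz device gives an hfScale of roughly 0.113.
+     */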
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ const ALfloat hfScale{props->Send[i].HFReference / Frequency};
+ const ALfloat lfScale{props->Send[i].LFReference / Frequency};
+ const ALfloat gainHF{maxf(WetGainHF[i], 0.001f)};
+ const ALfloat gainLF{maxf(WetGainLF[i], 0.001f)};
+
+ voice->mSend[i].FilterType = AF_None;
+ if(gainHF != 1.0f) voice->mSend[i].FilterType |= AF_LowPass;
+ if(gainLF != 1.0f) voice->mSend[i].FilterType |= AF_HighPass;
+
+ auto &lowpass = voice->mChans[0].mWetParams[i].LowPass;
+ auto &highpass = voice->mChans[0].mWetParams[i].HighPass;
+ lowpass.setParams(BiquadType::HighShelf, gainHF, hfScale,
+ lowpass.rcpQFromSlope(gainHF, 1.0f));
+ highpass.setParams(BiquadType::LowShelf, gainLF, lfScale,
+ highpass.rcpQFromSlope(gainLF, 1.0f));
+ for(ALsizei c{1};c < num_channels;c++)
+ {
+ voice->mChans[c].mWetParams[i].LowPass.copyParamsFrom(lowpass);
+ voice->mChans[c].mWetParams[i].HighPass.copyParamsFrom(highpass);
+ }
+ }
+}
+
+void CalcNonAttnSourceParams(ALvoice *voice, const ALvoicePropsBase *props, const ALCcontext *ALContext)
+{
+ const ALCdevice *Device{ALContext->Device};
+ ALeffectslot *SendSlots[MAX_SENDS];
+
+ voice->mDirect.Buffer = Device->Dry.Buffer;
+ for(ALsizei i{0};i < Device->NumAuxSends;i++)
+ {
+ SendSlots[i] = props->Send[i].Slot;
+ if(!SendSlots[i] && i == 0)
+ SendSlots[i] = ALContext->DefaultSlot.get();
+ if(!SendSlots[i] || SendSlots[i]->Params.EffectType == AL_EFFECT_NULL)
+ {
+ SendSlots[i] = nullptr;
+ voice->mSend[i].Buffer = {};
+ }
+ else
+ voice->mSend[i].Buffer = SendSlots[i]->Wet.Buffer;
+ }
+
+ /* Calculate the stepping value */
+ const auto Pitch = static_cast<ALfloat>(voice->mFrequency) /
+ static_cast<ALfloat>(Device->Frequency) * props->Pitch;
+ if(Pitch > static_cast<ALfloat>(MAX_PITCH))
+ voice->mStep = MAX_PITCH<<FRACTIONBITS;
+ else
+ voice->mStep = maxi(fastf2i(Pitch * FRACTIONONE), 1);
+ if(props->mResampler == BSinc24Resampler)
+ BsincPrepare(voice->mStep, &voice->mResampleState.bsinc, &bsinc24);
+ else if(props->mResampler == BSinc12Resampler)
+ BsincPrepare(voice->mStep, &voice->mResampleState.bsinc, &bsinc12);
+ voice->mResampler = SelectResampler(props->mResampler);
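+    /* For illustration: with FRACTIONBITS = 12, FRACTIONONE is 4096, so a
+     * 22050Hz buffer at a pitch of 1.0 on a 44100Hz device gives Pitch = 0.5
+     * here and mStep = 2048, i.e. the resampler advances half an input
+     * sample per output sample.
+     */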
+
+ /* Calculate gains */
+ const ALlistener &Listener = ALContext->Listener;
+ ALfloat DryGain{clampf(props->Gain, props->MinGain, props->MaxGain)};
+ DryGain *= props->Direct.Gain * Listener.Params.Gain;
+ DryGain = minf(DryGain, GAIN_MIX_MAX);
+ ALfloat DryGainHF{props->Direct.GainHF};
+ ALfloat DryGainLF{props->Direct.GainLF};
+ ALfloat WetGain[MAX_SENDS], WetGainHF[MAX_SENDS], WetGainLF[MAX_SENDS];
+ for(ALsizei i{0};i < Device->NumAuxSends;i++)
+ {
+ WetGain[i] = clampf(props->Gain, props->MinGain, props->MaxGain);
+ WetGain[i] *= props->Send[i].Gain * Listener.Params.Gain;
+ WetGain[i] = minf(WetGain[i], GAIN_MIX_MAX);
+ WetGainHF[i] = props->Send[i].GainHF;
+ WetGainLF[i] = props->Send[i].GainLF;
+ }
+
+ CalcPanningAndFilters(voice, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, DryGain, DryGainHF, DryGainLF,
+ WetGain, WetGainLF, WetGainHF, SendSlots, props, Listener, Device);
+}
+
+void CalcAttnSourceParams(ALvoice *voice, const ALvoicePropsBase *props, const ALCcontext *ALContext)
+{
+ const ALCdevice *Device{ALContext->Device};
+ const ALsizei NumSends{Device->NumAuxSends};
+ const ALlistener &Listener = ALContext->Listener;
+
+ /* Set mixing buffers and get send parameters. */
+ voice->mDirect.Buffer = Device->Dry.Buffer;
+ ALeffectslot *SendSlots[MAX_SENDS];
+ ALfloat RoomRolloff[MAX_SENDS];
+ ALfloat DecayDistance[MAX_SENDS];
+ ALfloat DecayLFDistance[MAX_SENDS];
+ ALfloat DecayHFDistance[MAX_SENDS];
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ SendSlots[i] = props->Send[i].Slot;
+ if(!SendSlots[i] && i == 0)
+ SendSlots[i] = ALContext->DefaultSlot.get();
+ if(!SendSlots[i] || SendSlots[i]->Params.EffectType == AL_EFFECT_NULL)
+ {
+ SendSlots[i] = nullptr;
+ RoomRolloff[i] = 0.0f;
+ DecayDistance[i] = 0.0f;
+ DecayLFDistance[i] = 0.0f;
+ DecayHFDistance[i] = 0.0f;
+ }
+ else if(SendSlots[i]->Params.AuxSendAuto)
+ {
+ RoomRolloff[i] = SendSlots[i]->Params.RoomRolloff + props->RoomRolloffFactor;
+ /* Calculate the distances to where this effect's decay reaches
+ * -60dB.
+ */
+ DecayDistance[i] = SendSlots[i]->Params.DecayTime *
+ Listener.Params.ReverbSpeedOfSound;
+ DecayLFDistance[i] = DecayDistance[i] * SendSlots[i]->Params.DecayLFRatio;
+ DecayHFDistance[i] = DecayDistance[i] * SendSlots[i]->Params.DecayHFRatio;
+ if(SendSlots[i]->Params.DecayHFLimit)
+ {
+ ALfloat airAbsorption{SendSlots[i]->Params.AirAbsorptionGainHF};
+ if(airAbsorption < 1.0f)
+ {
+ /* Calculate the distance to where this effect's air
+ * absorption reaches -60dB, and limit the effect's HF
+ * decay distance (so it doesn't take any longer to decay
+ * than the air would allow).
+ */
+ ALfloat absorb_dist{std::log10(REVERB_DECAY_GAIN) / std::log10(airAbsorption)};
+ DecayHFDistance[i] = minf(absorb_dist, DecayHFDistance[i]);
+ }
+ }
+ }
+ else
+ {
+ /* If the slot's auxiliary send auto is off, the data sent to the
+ * effect slot is the same as the dry path, sans filter effects */
+ RoomRolloff[i] = props->RolloffFactor;
+ DecayDistance[i] = 0.0f;
+ DecayLFDistance[i] = 0.0f;
+ DecayHFDistance[i] = 0.0f;
+ }
+
+ if(!SendSlots[i])
+ voice->mSend[i].Buffer = {};
+ else
+ voice->mSend[i].Buffer = SendSlots[i]->Wet.Buffer;
+ }
+
+ /* Transform source to listener space (convert to head relative) */
+ alu::Vector Position{props->Position[0], props->Position[1], props->Position[2], 1.0f};
+ alu::Vector Velocity{props->Velocity[0], props->Velocity[1], props->Velocity[2], 0.0f};
+ alu::Vector Direction{props->Direction[0], props->Direction[1], props->Direction[2], 0.0f};
+ if(props->HeadRelative == AL_FALSE)
+ {
+ /* Transform source vectors */
+ Position = Listener.Params.Matrix * Position;
+ Velocity = Listener.Params.Matrix * Velocity;
+ Direction = Listener.Params.Matrix * Direction;
+ }
+ else
+ {
+ /* Offset the source velocity to be relative of the listener velocity */
+ Velocity += Listener.Params.Velocity;
+ }
+
+ const bool directional{Direction.normalize() > 0.0f};
+ alu::Vector ToSource{Position[0], Position[1], Position[2], 0.0f};
+ const ALfloat Distance{ToSource.normalize()};
+
+ /* Initial source gain */
+ ALfloat DryGain{props->Gain};
+ ALfloat DryGainHF{1.0f};
+ ALfloat DryGainLF{1.0f};
+ ALfloat WetGain[MAX_SENDS], WetGainHF[MAX_SENDS], WetGainLF[MAX_SENDS];
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ WetGain[i] = props->Gain;
+ WetGainHF[i] = 1.0f;
+ WetGainLF[i] = 1.0f;
+ }
+
+ /* Calculate distance attenuation */
+ ALfloat ClampedDist{Distance};
+
+ switch(Listener.Params.SourceDistanceModel ?
+ props->mDistanceModel : Listener.Params.mDistanceModel)
+ {
+ case DistanceModel::InverseClamped:
+ ClampedDist = clampf(ClampedDist, props->RefDistance, props->MaxDistance);
+ if(props->MaxDistance < props->RefDistance) break;
+ /*fall-through*/
+ case DistanceModel::Inverse:
+ if(!(props->RefDistance > 0.0f))
+ ClampedDist = props->RefDistance;
+ else
+ {
+ ALfloat dist = lerp(props->RefDistance, ClampedDist, props->RolloffFactor);
+ if(dist > 0.0f) DryGain *= props->RefDistance / dist;
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ dist = lerp(props->RefDistance, ClampedDist, RoomRolloff[i]);
+ if(dist > 0.0f) WetGain[i] *= props->RefDistance / dist;
+ }
+ }
+ break;
+
+ case DistanceModel::LinearClamped:
+ ClampedDist = clampf(ClampedDist, props->RefDistance, props->MaxDistance);
+ if(props->MaxDistance < props->RefDistance) break;
+ /*fall-through*/
+ case DistanceModel::Linear:
+ if(!(props->MaxDistance != props->RefDistance))
+ ClampedDist = props->RefDistance;
+ else
+ {
+ ALfloat attn = props->RolloffFactor * (ClampedDist-props->RefDistance) /
+ (props->MaxDistance-props->RefDistance);
+ DryGain *= maxf(1.0f - attn, 0.0f);
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ attn = RoomRolloff[i] * (ClampedDist-props->RefDistance) /
+ (props->MaxDistance-props->RefDistance);
+ WetGain[i] *= maxf(1.0f - attn, 0.0f);
+ }
+ }
+ break;
+
+ case DistanceModel::ExponentClamped:
+ ClampedDist = clampf(ClampedDist, props->RefDistance, props->MaxDistance);
+ if(props->MaxDistance < props->RefDistance) break;
+ /*fall-through*/
+ case DistanceModel::Exponent:
+ if(!(ClampedDist > 0.0f && props->RefDistance > 0.0f))
+ ClampedDist = props->RefDistance;
+ else
+ {
+ DryGain *= std::pow(ClampedDist/props->RefDistance, -props->RolloffFactor);
+ for(ALsizei i{0};i < NumSends;i++)
+ WetGain[i] *= std::pow(ClampedDist/props->RefDistance, -RoomRolloff[i]);
+ }
+ break;
+
+ case DistanceModel::Disable:
+ ClampedDist = props->RefDistance;
+ break;
+ }
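+    /* For illustration, under the Inverse model with RefDistance = 1,
+     * RolloffFactor = 1, and a source at distance 4, the lerp above gives
+     * dist = 4 and the dry gain is scaled by 1/4, roughly -12dB.
+     */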
+
+ /* Calculate directional soundcones */
+ if(directional && props->InnerAngle < 360.0f)
+ {
+ const ALfloat Angle{Rad2Deg(std::acos(-aluDotproduct(Direction, ToSource)) *
+ ConeScale * 2.0f)};
+
+ ALfloat ConeVolume, ConeHF;
+ if(!(Angle > props->InnerAngle))
+ {
+ ConeVolume = 1.0f;
+ ConeHF = 1.0f;
+ }
+ else if(Angle < props->OuterAngle)
+ {
+ ALfloat scale = ( Angle-props->InnerAngle) /
+ (props->OuterAngle-props->InnerAngle);
+ ConeVolume = lerp(1.0f, props->OuterGain, scale);
+ ConeHF = lerp(1.0f, props->OuterGainHF, scale);
+ }
+ else
+ {
+ ConeVolume = props->OuterGain;
+ ConeHF = props->OuterGainHF;
+ }
+
+ DryGain *= ConeVolume;
+ if(props->DryGainHFAuto)
+ DryGainHF *= ConeHF;
+ if(props->WetGainAuto)
+ std::transform(std::begin(WetGain), std::begin(WetGain)+NumSends, std::begin(WetGain),
+ [ConeVolume](ALfloat gain) noexcept -> ALfloat { return gain * ConeVolume; }
+ );
+ if(props->WetGainHFAuto)
+ std::transform(std::begin(WetGainHF), std::begin(WetGainHF)+NumSends,
+ std::begin(WetGainHF),
+ [ConeHF](ALfloat gain) noexcept -> ALfloat { return gain * ConeHF; }
+ );
+ }
+
+ /* Apply gain and frequency filters */
+ DryGain = clampf(DryGain, props->MinGain, props->MaxGain);
+ DryGain = minf(DryGain*props->Direct.Gain*Listener.Params.Gain, GAIN_MIX_MAX);
+ DryGainHF *= props->Direct.GainHF;
+ DryGainLF *= props->Direct.GainLF;
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ WetGain[i] = clampf(WetGain[i], props->MinGain, props->MaxGain);
+ WetGain[i] = minf(WetGain[i]*props->Send[i].Gain*Listener.Params.Gain, GAIN_MIX_MAX);
+ WetGainHF[i] *= props->Send[i].GainHF;
+ WetGainLF[i] *= props->Send[i].GainLF;
+ }
+
+ /* Distance-based air absorption and initial send decay. */
+ if(ClampedDist > props->RefDistance && props->RolloffFactor > 0.0f)
+ {
+ ALfloat meters_base{(ClampedDist-props->RefDistance) * props->RolloffFactor *
+ Listener.Params.MetersPerUnit};
+ if(props->AirAbsorptionFactor > 0.0f)
+ {
+ ALfloat hfattn{std::pow(AIRABSORBGAINHF, meters_base * props->AirAbsorptionFactor)};
+ DryGainHF *= hfattn;
+ std::transform(std::begin(WetGainHF), std::begin(WetGainHF)+NumSends,
+ std::begin(WetGainHF),
+ [hfattn](ALfloat gain) noexcept -> ALfloat { return gain * hfattn; }
+ );
+ }
+
+ if(props->WetGainAuto)
+ {
+ /* Apply a decay-time transformation to the wet path, based on the
+ * source distance in meters. The initial decay of the reverb
+ * effect is calculated and applied to the wet path.
+ */
+ for(ALsizei i{0};i < NumSends;i++)
+ {
+ if(!(DecayDistance[i] > 0.0f))
+ continue;
+
+ const ALfloat gain{std::pow(REVERB_DECAY_GAIN, meters_base/DecayDistance[i])};
+ WetGain[i] *= gain;
+ /* Yes, the wet path's air absorption is applied with
+ * WetGainAuto on, rather than WetGainHFAuto.
+ */
+ if(gain > 0.0f)
+ {
+ ALfloat gainhf{std::pow(REVERB_DECAY_GAIN, meters_base/DecayHFDistance[i])};
+ WetGainHF[i] *= minf(gainhf / gain, 1.0f);
+ ALfloat gainlf{std::pow(REVERB_DECAY_GAIN, meters_base/DecayLFDistance[i])};
+ WetGainLF[i] *= minf(gainlf / gain, 1.0f);
+ }
+ }
+ }
+ }
+
+
+ /* Initial source pitch */
+ ALfloat Pitch{props->Pitch};
+
+ /* Calculate velocity-based doppler effect */
+ ALfloat DopplerFactor{props->DopplerFactor * Listener.Params.DopplerFactor};
+ if(DopplerFactor > 0.0f)
+ {
+ const alu::Vector &lvelocity = Listener.Params.Velocity;
+ ALfloat vss{aluDotproduct(Velocity, ToSource) * -DopplerFactor};
+ ALfloat vls{aluDotproduct(lvelocity, ToSource) * -DopplerFactor};
+
+ const ALfloat SpeedOfSound{Listener.Params.SpeedOfSound};
+ if(!(vls < SpeedOfSound))
+ {
+ /* Listener moving away from the source at the speed of sound.
+ * Sound waves can't catch it.
+ */
+ Pitch = 0.0f;
+ }
+ else if(!(vss < SpeedOfSound))
+ {
+ /* Source moving toward the listener at the speed of sound. Sound
+ * waves bunch up to extreme frequencies.
+ */
+ Pitch = std::numeric_limits<float>::infinity();
+ }
+ else
+ {
+ /* Source and listener movement is nominal. Calculate the proper
+ * doppler shift.
+ */
+ Pitch *= (SpeedOfSound-vls) / (SpeedOfSound-vss);
+ }
+ }
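+    /* For illustration, assuming a combined doppler factor of 1: a
+     * stationary listener (vls = 0) and a source closing at a tenth of the
+     * speed of sound (vss = SpeedOfSound/10) scale the pitch by
+     * SpeedOfSound / (SpeedOfSound - vss) = 1/0.9, about 11% higher.
+     */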
+
+ /* Adjust pitch based on the buffer and output frequencies, and calculate
+ * fixed-point stepping value.
+ */
+ Pitch *= static_cast<ALfloat>(voice->mFrequency)/static_cast<ALfloat>(Device->Frequency);
+ if(Pitch > static_cast<ALfloat>(MAX_PITCH))
+ voice->mStep = MAX_PITCH<<FRACTIONBITS;
+ else
+ voice->mStep = maxi(fastf2i(Pitch * FRACTIONONE), 1);
+ if(props->mResampler == BSinc24Resampler)
+ BsincPrepare(voice->mStep, &voice->mResampleState.bsinc, &bsinc24);
+ else if(props->mResampler == BSinc12Resampler)
+ BsincPrepare(voice->mStep, &voice->mResampleState.bsinc, &bsinc12);
+ voice->mResampler = SelectResampler(props->mResampler);
+
+ ALfloat spread{0.0f};
+ if(props->Radius > Distance)
+ spread = al::MathDefs<float>::Tau() - Distance/props->Radius*al::MathDefs<float>::Pi();
+ else if(Distance > 0.0f)
+ spread = std::asin(props->Radius/Distance) * 2.0f;
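+    /* For illustration: a source with Radius 2 at Distance 4 gets a spread
+     * of 2*asin(0.5), or 60 degrees, while a listener inside the radius
+     * (Radius > Distance) widens the spread further, up to tau at a distance
+     * of 0.
+     */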
+
+ CalcPanningAndFilters(voice, ToSource[0], ToSource[1], ToSource[2]*ZScale,
+ Distance*Listener.Params.MetersPerUnit, spread, DryGain, DryGainHF, DryGainLF, WetGain,
+ WetGainLF, WetGainHF, SendSlots, props, Listener, Device);
+}
+
+void CalcSourceParams(ALvoice *voice, ALCcontext *context, bool force)
+{
+ ALvoiceProps *props{voice->mUpdate.exchange(nullptr, std::memory_order_acq_rel)};
+ if(!props && !force) return;
+
+ if(props)
+ {
+ voice->mProps = *props;
+
+ AtomicReplaceHead(context->FreeVoiceProps, props);
+ }
+
+ if((voice->mProps.mSpatializeMode == SpatializeAuto && voice->mFmtChannels == FmtMono) ||
+ voice->mProps.mSpatializeMode == SpatializeOn)
+ CalcAttnSourceParams(voice, &voice->mProps, context);
+ else
+ CalcNonAttnSourceParams(voice, &voice->mProps, context);
+}
+
+
+void ProcessParamUpdates(ALCcontext *ctx, const ALeffectslotArray *slots)
+{
+ IncrementRef(&ctx->UpdateCount);
+ if(LIKELY(!ctx->HoldUpdates.load(std::memory_order_acquire)))
+ {
+ bool cforce{CalcContextParams(ctx)};
+ bool force{CalcListenerParams(ctx) || cforce};
+ force = std::accumulate(slots->begin(), slots->end(), force,
+ [ctx,cforce](bool force, ALeffectslot *slot) -> bool
+ { return CalcEffectSlotParams(slot, ctx, cforce) | force; }
+ );
+
+ std::for_each(ctx->Voices->begin(),
+ ctx->Voices->begin() + ctx->VoiceCount.load(std::memory_order_acquire),
+ [ctx,force](ALvoice &voice) -> void
+ {
+ ALuint sid{voice.mSourceID.load(std::memory_order_acquire)};
+ if(sid) CalcSourceParams(&voice, ctx, force);
+ }
+ );
+ }
+ IncrementRef(&ctx->UpdateCount);
+}
+
+void ProcessContext(ALCcontext *ctx, const ALsizei SamplesToDo)
+{
+ ASSUME(SamplesToDo > 0);
+
+ const ALeffectslotArray *auxslots{ctx->ActiveAuxSlots.load(std::memory_order_acquire)};
+
+    /* Process pending property updates for objects on the context. */
+ ProcessParamUpdates(ctx, auxslots);
+
+ /* Clear auxiliary effect slot mixing buffers. */
+ std::for_each(auxslots->begin(), auxslots->end(),
+ [SamplesToDo](ALeffectslot *slot) -> void
+ {
+ for(auto &buffer : slot->MixBuffer)
+ std::fill_n(buffer.begin(), SamplesToDo, 0.0f);
+ }
+ );
+
+ /* Process voices that have a playing source. */
+ std::for_each(ctx->Voices->begin(),
+ ctx->Voices->begin() + ctx->VoiceCount.load(std::memory_order_acquire),
+ [SamplesToDo,ctx](ALvoice &voice) -> void
+ {
+ const ALvoice::State vstate{voice.mPlayState.load(std::memory_order_acquire)};
+ if(vstate == ALvoice::Stopped) return;
+ const ALuint sid{voice.mSourceID.load(std::memory_order_relaxed)};
+ if(voice.mStep < 1) return;
+
+ MixVoice(&voice, vstate, sid, ctx, SamplesToDo);
+ }
+ );
+
+ /* Process effects. */
+ if(auxslots->size() < 1) return;
+ auto slots = auxslots->data();
+ auto slots_end = slots + auxslots->size();
+
+ /* First sort the slots into scratch storage, so that effects come before
+ * their effect target (or their targets' target).
+ */
+ auto sorted_slots = const_cast<ALeffectslot**>(slots_end);
+ auto sorted_slots_end = sorted_slots;
+ auto in_chain = [](const ALeffectslot *slot1, const ALeffectslot *slot2) noexcept -> bool
+ {
+ while((slot1=slot1->Params.Target) != nullptr) {
+ if(slot1 == slot2) return true;
+ }
+ return false;
+ };
+
+ *sorted_slots_end = *slots;
+ ++sorted_slots_end;
+ while(++slots != slots_end)
+ {
+        /* If this effect slot targets an effect slot already in the list
+         * (i.e. it outputs to something in sorted_slots), directly or
+         * indirectly, insert it prior to that element.
+         */
+ auto checker = sorted_slots;
+ do {
+ if(in_chain(*slots, *checker)) break;
+ } while(++checker != sorted_slots_end);
+
+ checker = std::move_backward(checker, sorted_slots_end, sorted_slots_end+1);
+ *--checker = *slots;
+ ++sorted_slots_end;
+ }
+
+ std::for_each(sorted_slots, sorted_slots_end,
+ [SamplesToDo](const ALeffectslot *slot) -> void
+ {
+ EffectState *state{slot->Params.mEffectState};
+ state->process(SamplesToDo, slot->Wet.Buffer.data(),
+ static_cast<ALsizei>(slot->Wet.Buffer.size()), state->mOutTarget);
+ }
+ );
+}
+
+
+void ApplyStablizer(FrontStablizer *Stablizer, const al::span<FloatBufferLine> Buffer,
+ const ALuint lidx, const ALuint ridx, const ALuint cidx, const ALsizei SamplesToDo)
+{
+ ASSUME(SamplesToDo > 0);
+
+    /* Apply a delay to all channels, except the front-left and front-right,
+     * so they stay time-aligned with the delay the front channels pick up
+     * from the band-splitting below.
+     */
+ const size_t NumChannels{Buffer.size()};
+ for(size_t i{0u};i < NumChannels;i++)
+ {
+ if(i == lidx || i == ridx)
+ continue;
+
+ auto &DelayBuf = Stablizer->DelayBuf[i];
+ auto buffer_end = Buffer[i].begin() + SamplesToDo;
+ if(LIKELY(SamplesToDo >= ALsizei{FrontStablizer::DelayLength}))
+ {
+ auto delay_end = std::rotate(Buffer[i].begin(),
+ buffer_end - FrontStablizer::DelayLength, buffer_end);
+ std::swap_ranges(Buffer[i].begin(), delay_end, std::begin(DelayBuf));
+ }
+ else
+ {
+ auto delay_start = std::swap_ranges(Buffer[i].begin(), buffer_end,
+ std::begin(DelayBuf));
+ std::rotate(std::begin(DelayBuf), delay_start, std::end(DelayBuf));
+ }
+ }
+
+ ALfloat (&lsplit)[2][BUFFERSIZE] = Stablizer->LSplit;
+ ALfloat (&rsplit)[2][BUFFERSIZE] = Stablizer->RSplit;
+ auto &tmpbuf = Stablizer->TempBuf;
+
+ /* This applies the band-splitter, preserving phase at the cost of some
+ * delay. The shorter the delay, the more error seeps into the result.
+ */
+ auto apply_splitter = [&tmpbuf,SamplesToDo](const FloatBufferLine &Buffer,
+ ALfloat (&DelayBuf)[FrontStablizer::DelayLength], BandSplitter &Filter,
+ ALfloat (&splitbuf)[2][BUFFERSIZE]) -> void
+ {
+ /* Combine the delayed samples and the input samples into the temp
+ * buffer, in reverse. Then copy the final samples back into the delay
+ * buffer for next time. Note that the delay buffer's samples are
+ * stored backwards here.
+ */
+ auto tmpbuf_end = std::begin(tmpbuf) + SamplesToDo;
+ std::copy_n(std::begin(DelayBuf), FrontStablizer::DelayLength, tmpbuf_end);
+ std::reverse_copy(Buffer.begin(), Buffer.begin()+SamplesToDo, std::begin(tmpbuf));
+ std::copy_n(std::begin(tmpbuf), FrontStablizer::DelayLength, std::begin(DelayBuf));
+
+ /* Apply an all-pass on the reversed signal, then reverse the samples
+ * to get the forward signal with a reversed phase shift.
+ */
+ Filter.applyAllpass(tmpbuf, SamplesToDo+FrontStablizer::DelayLength);
+ std::reverse(std::begin(tmpbuf), tmpbuf_end+FrontStablizer::DelayLength);
+
+ /* Now apply the band-splitter, combining its phase shift with the
+ * reversed phase shift, restoring the original phase on the split
+ * signal.
+ */
+ Filter.process(splitbuf[1], splitbuf[0], tmpbuf, SamplesToDo);
+ };
+ apply_splitter(Buffer[lidx], Stablizer->DelayBuf[lidx], Stablizer->LFilter, lsplit);
+ apply_splitter(Buffer[ridx], Stablizer->DelayBuf[ridx], Stablizer->RFilter, rsplit);
+
+ for(ALsizei i{0};i < SamplesToDo;i++)
+ {
+ ALfloat lfsum{lsplit[0][i] + rsplit[0][i]};
+ ALfloat hfsum{lsplit[1][i] + rsplit[1][i]};
+ ALfloat s{lsplit[0][i] + lsplit[1][i] - rsplit[0][i] - rsplit[1][i]};
+
+ /* This pans the separate low- and high-frequency sums between being on
+ * the center channel and the left/right channels. The low-frequency
+ * sum is 1/3rd toward center (2/3rds on left/right) and the high-
+ * frequency sum is 1/4th toward center (3/4ths on left/right). These
+ * values can be tweaked.
+ */
+ ALfloat m{lfsum*std::cos(1.0f/3.0f * (al::MathDefs<float>::Pi()*0.5f)) +
+ hfsum*std::cos(1.0f/4.0f * (al::MathDefs<float>::Pi()*0.5f))};
+ ALfloat c{lfsum*std::sin(1.0f/3.0f * (al::MathDefs<float>::Pi()*0.5f)) +
+ hfsum*std::sin(1.0f/4.0f * (al::MathDefs<float>::Pi()*0.5f))};
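+        /* Numerically, these weights are cos(pi/6) ~ 0.866 and
+         * sin(pi/6) = 0.5 for the low-frequency sum, and cos(pi/8) ~ 0.924
+         * and sin(pi/8) ~ 0.383 for the high-frequency sum.
+         */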
+
+        /* The generated center channel signal adds to the existing signal,
+         * while the modified left and right channels replace theirs.
+         */
+ Buffer[lidx][i] = (m + s) * 0.5f;
+ Buffer[ridx][i] = (m - s) * 0.5f;
+ Buffer[cidx][i] += c * 0.5f;
+ }
+}
+
+void ApplyDistanceComp(const al::span<FloatBufferLine> Samples, const ALsizei SamplesToDo,
+ const DistanceComp::DistData *distcomp)
+{
+ ASSUME(SamplesToDo > 0);
+
+ for(auto &chanbuffer : Samples)
+ {
+ const ALfloat gain{distcomp->Gain};
+ const ALsizei base{distcomp->Length};
+ ALfloat *distbuf{al::assume_aligned<16>(distcomp->Buffer)};
+ ++distcomp;
+
+ if(base < 1)
+ continue;
+
+ ALfloat *inout{al::assume_aligned<16>(chanbuffer.data())};
+ auto inout_end = inout + SamplesToDo;
+ if(LIKELY(SamplesToDo >= base))
+ {
+ auto delay_end = std::rotate(inout, inout_end - base, inout_end);
+ std::swap_ranges(inout, delay_end, distbuf);
+ }
+ else
+ {
+ auto delay_start = std::swap_ranges(inout, inout_end, distbuf);
+ std::rotate(distbuf, delay_start, distbuf + base);
+ }
+ std::transform(inout, inout_end, inout, std::bind(std::multiplies<float>{}, _1, gain));
+ }
+}
+
+void ApplyDither(const al::span<FloatBufferLine> Samples, ALuint *dither_seed,
+ const ALfloat quant_scale, const ALsizei SamplesToDo)
+{
+    /* Dithering. Generate white noise (here the difference of two uniform
+     * random values, spanning -1 to +1) and add it to the sample values,
+     * after scaling up to the desired quantization depth and before rounding.
+     */
+ const ALfloat invscale{1.0f / quant_scale};
+ ALuint seed{*dither_seed};
+ auto dither_channel = [&seed,invscale,quant_scale,SamplesToDo](FloatBufferLine &input) -> void
+ {
+ ASSUME(SamplesToDo > 0);
+ auto dither_sample = [&seed,invscale,quant_scale](const ALfloat sample) noexcept -> ALfloat
+ {
+ ALfloat val{sample * quant_scale};
+ ALuint rng0{dither_rng(&seed)};
+ ALuint rng1{dither_rng(&seed)};
+ val += static_cast<ALfloat>(rng0*(1.0/UINT_MAX) - rng1*(1.0/UINT_MAX));
+ return fast_roundf(val) * invscale;
+ };
+ std::transform(input.begin(), input.begin()+SamplesToDo, input.begin(), dither_sample);
+ };
+ std::for_each(Samples.begin(), Samples.end(), dither_channel);
+ *dither_seed = seed;
+}
+
+
+/* Base template left undefined. Should be marked =delete, but Clang 3.8.1
+ * chokes on that given the inline specializations.
+ */
+template<typename T>
+inline T SampleConv(ALfloat) noexcept;
+
+template<> inline ALfloat SampleConv(ALfloat val) noexcept
+{ return val; }
+template<> inline ALint SampleConv(ALfloat val) noexcept
+{
+ /* Floats have a 23-bit mantissa, plus an implied 1 bit and a sign bit.
+ * This means a normalized float has at most 25 bits of signed precision.
+     * When scaling and clamping for a signed 32-bit integer, the following
+     * values are the best a float can give.
+ */
+ return fastf2i(clampf(val*2147483648.0f, -2147483648.0f, 2147483520.0f));
+}
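+/* Note that 2147483520 is 2^31 - 128, the largest float value that does not
+ * exceed INT32_MAX, so the clamp above keeps the conversion from overflowing.
+ */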
+template<> inline ALshort SampleConv(ALfloat val) noexcept
+{ return fastf2i(clampf(val*32768.0f, -32768.0f, 32767.0f)); }
+template<> inline ALbyte SampleConv(ALfloat val) noexcept
+{ return fastf2i(clampf(val*128.0f, -128.0f, 127.0f)); }
+
+/* Define unsigned output variations. */
+template<> inline ALuint SampleConv(ALfloat val) noexcept
+{ return SampleConv<ALint>(val) + 2147483648u; }
+template<> inline ALushort SampleConv(ALfloat val) noexcept
+{ return SampleConv<ALshort>(val) + 32768; }
+template<> inline ALubyte SampleConv(ALfloat val) noexcept
+{ return SampleConv<ALbyte>(val) + 128; }
+
+template<DevFmtType T>
+void Write(const al::span<const FloatBufferLine> InBuffer, ALvoid *OutBuffer, const size_t Offset,
+ const ALsizei SamplesToDo)
+{
+ using SampleType = typename DevFmtTypeTraits<T>::Type;
+
+ const size_t numchans{InBuffer.size()};
+ ASSUME(numchans > 0);
+
+ SampleType *outbase = static_cast<SampleType*>(OutBuffer) + Offset*numchans;
+ auto conv_channel = [&outbase,SamplesToDo,numchans](const FloatBufferLine &inbuf) -> void
+ {
+ ASSUME(SamplesToDo > 0);
+ SampleType *out{outbase++};
+ auto conv_sample = [numchans,&out](const ALfloat s) noexcept -> void
+ {
+ *out = SampleConv<SampleType>(s);
+ out += numchans;
+ };
+ std::for_each(inbuf.begin(), inbuf.begin()+SamplesToDo, conv_sample);
+ };
+ std::for_each(InBuffer.cbegin(), InBuffer.cend(), conv_channel);
+}
+
+} // namespace
+
+void aluMixData(ALCdevice *device, ALvoid *OutBuffer, ALsizei NumSamples)
+{
+ FPUCtl mixer_mode{};
+ for(ALsizei SamplesDone{0};SamplesDone < NumSamples;)
+ {
+ const ALsizei SamplesToDo{mini(NumSamples-SamplesDone, BUFFERSIZE)};
+
+ /* Clear main mixing buffers. */
+ std::for_each(device->MixBuffer.begin(), device->MixBuffer.end(),
+ [SamplesToDo](std::array<ALfloat,BUFFERSIZE> &buffer) -> void
+ { std::fill_n(buffer.begin(), SamplesToDo, 0.0f); }
+ );
+
+ /* Increment the mix count at the start (lsb should now be 1). */
+ IncrementRef(&device->MixCount);
+
+ /* For each context on this device, process and mix its sources and
+ * effects.
+ */
+ for(ALCcontext *ctx : *device->mContexts.load(std::memory_order_acquire))
+ ProcessContext(ctx, SamplesToDo);
+
+ /* Increment the clock time. Every second's worth of samples is
+ * converted and added to clock base so that large sample counts don't
+ * overflow during conversion. This also guarantees a stable
+ * conversion.
+ */
+ device->SamplesDone += SamplesToDo;
+ device->ClockBase += std::chrono::seconds{device->SamplesDone / device->Frequency};
+ device->SamplesDone %= device->Frequency;
+
+ /* Increment the mix count at the end (lsb should now be 0). */
+ IncrementRef(&device->MixCount);
+
+ /* Apply any needed post-process for finalizing the Dry mix to the
+ * RealOut (Ambisonic decode, UHJ encode, etc).
+ */
+ if(LIKELY(device->PostProcess))
+ device->PostProcess(device, SamplesToDo);
+ const al::span<FloatBufferLine> RealOut{device->RealOut.Buffer};
+
+        /* Apply front image stabilization for surround sound, if applicable. */
+ if(device->Stablizer)
+ {
+ const int lidx{GetChannelIdxByName(device->RealOut, FrontLeft)};
+ const int ridx{GetChannelIdxByName(device->RealOut, FrontRight)};
+ const int cidx{GetChannelIdxByName(device->RealOut, FrontCenter)};
+ assert(lidx >= 0 && ridx >= 0 && cidx >= 0);
+
+ ApplyStablizer(device->Stablizer.get(), RealOut, lidx, ridx, cidx, SamplesToDo);
+ }
+
+ /* Apply compression, limiting sample amplitude if needed or desired. */
+ if(Compressor *comp{device->Limiter.get()})
+ comp->process(SamplesToDo, RealOut.data());
+
+ /* Apply delays and attenuation for mismatched speaker distances. */
+ ApplyDistanceComp(RealOut, SamplesToDo, device->ChannelDelay.as_span().cbegin());
+
+ /* Apply dithering. The compressor should have left enough headroom for
+ * the dither noise to not saturate.
+ */
+ if(device->DitherDepth > 0.0f)
+ ApplyDither(RealOut, &device->DitherSeed, device->DitherDepth, SamplesToDo);
+
+ if(LIKELY(OutBuffer))
+ {
+ /* Finally, interleave and convert samples, writing to the device's
+ * output buffer.
+ */
+ switch(device->FmtType)
+ {
+#define HANDLE_WRITE(T) case T: \
+ Write<T>(RealOut, OutBuffer, SamplesDone, SamplesToDo); break;
+ HANDLE_WRITE(DevFmtByte)
+ HANDLE_WRITE(DevFmtUByte)
+ HANDLE_WRITE(DevFmtShort)
+ HANDLE_WRITE(DevFmtUShort)
+ HANDLE_WRITE(DevFmtInt)
+ HANDLE_WRITE(DevFmtUInt)
+ HANDLE_WRITE(DevFmtFloat)
+#undef HANDLE_WRITE
+ }
+ }
+
+ SamplesDone += SamplesToDo;
+ }
+}
+
+
+void aluHandleDisconnect(ALCdevice *device, const char *msg, ...)
+{
+ if(!device->Connected.exchange(false, std::memory_order_acq_rel))
+ return;
+
+ AsyncEvent evt{EventType_Disconnected};
+ evt.u.user.type = AL_EVENT_TYPE_DISCONNECTED_SOFT;
+ evt.u.user.id = 0;
+ evt.u.user.param = 0;
+
+ va_list args;
+ va_start(args, msg);
+ int msglen{vsnprintf(evt.u.user.msg, sizeof(evt.u.user.msg), msg, args)};
+ va_end(args);
+
+ if(msglen < 0 || static_cast<size_t>(msglen) >= sizeof(evt.u.user.msg))
+ evt.u.user.msg[sizeof(evt.u.user.msg)-1] = 0;
+
+ for(ALCcontext *ctx : *device->mContexts.load())
+ {
+ const ALbitfieldSOFT enabledevt{ctx->EnabledEvts.load(std::memory_order_acquire)};
+ if((enabledevt&EventType_Disconnected))
+ {
+ RingBuffer *ring{ctx->AsyncEvents.get()};
+ auto evt_data = ring->getWriteVector().first;
+ if(evt_data.len > 0)
+ {
+ new (evt_data.buf) AsyncEvent{evt};
+ ring->writeAdvance(1);
+ ctx->EventSem.post();
+ }
+ }
+
+ auto stop_voice = [](ALvoice &voice) -> void
+ {
+ voice.mCurrentBuffer.store(nullptr, std::memory_order_relaxed);
+ voice.mLoopBuffer.store(nullptr, std::memory_order_relaxed);
+ voice.mSourceID.store(0u, std::memory_order_relaxed);
+ voice.mPlayState.store(ALvoice::Stopped, std::memory_order_release);
+ };
+ std::for_each(ctx->Voices->begin(),
+ ctx->Voices->begin() + ctx->VoiceCount.load(std::memory_order_acquire),
+ stop_voice);
+ }
+}
diff --git a/alc/alu.h b/alc/alu.h
new file mode 100644
index 00000000..9acf904a
--- /dev/null
+++ b/alc/alu.h
@@ -0,0 +1,466 @@
+#ifndef _ALU_H_
+#define _ALU_H_
+
+#include <array>
+#include <atomic>
+#include <cmath>
+#include <cstddef>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "AL/alext.h"
+
+#include "alBuffer.h"
+#include "alcmain.h"
+#include "almalloc.h"
+#include "alspan.h"
+#include "ambidefs.h"
+#include "filters/biquad.h"
+#include "filters/nfc.h"
+#include "filters/splitter.h"
+#include "hrtf.h"
+#include "logging.h"
+
+struct ALbufferlistitem;
+struct ALeffectslot;
+struct BSincTable;
+
+
+enum class DistanceModel;
+
+#define MAX_PITCH 255
+#define MAX_SENDS 16
+
+
+#define DITHER_RNG_SEED 22222
+
+
+enum SpatializeMode {
+ SpatializeOff = AL_FALSE,
+ SpatializeOn = AL_TRUE,
+ SpatializeAuto = AL_AUTO_SOFT
+};
+
+enum Resampler {
+ PointResampler,
+ LinearResampler,
+ FIR4Resampler,
+ BSinc12Resampler,
+ BSinc24Resampler,
+
+ ResamplerMax = BSinc24Resampler
+};
+extern Resampler ResamplerDefault;
+
+/* The number of distinct scale and phase intervals within the bsinc filter
+ * table.
+ */
+#define BSINC_SCALE_BITS 4
+#define BSINC_SCALE_COUNT (1<<BSINC_SCALE_BITS)
+#define BSINC_PHASE_BITS 4
+#define BSINC_PHASE_COUNT (1<<BSINC_PHASE_BITS)
+
+/* Interpolator state. Kind of a misnomer since the interpolator itself is
+ * stateless. This just keeps it from having to recompute scale-related
+ * mappings for every sample.
+ */
+struct BsincState {
+ ALfloat sf; /* Scale interpolation factor. */
+ ALsizei m; /* Coefficient count. */
+ ALsizei l; /* Left coefficient offset. */
+ /* Filter coefficients, followed by the scale, phase, and scale-phase
+ * delta coefficients. Starting at phase index 0, each subsequent phase
+ * index follows contiguously.
+ */
+ const ALfloat *filter;
+};
+
+union InterpState {
+ BsincState bsinc;
+};
+
+using ResamplerFunc = const ALfloat*(*)(const InterpState *state,
+ const ALfloat *RESTRICT src, ALsizei frac, ALint increment,
+ ALfloat *RESTRICT dst, ALsizei dstlen);
+
+void BsincPrepare(const ALuint increment, BsincState *state, const BSincTable *table);
+
+extern const BSincTable bsinc12;
+extern const BSincTable bsinc24;
+
+
+enum {
+ AF_None = 0,
+ AF_LowPass = 1,
+ AF_HighPass = 2,
+ AF_BandPass = AF_LowPass | AF_HighPass
+};
+
+
+struct MixHrtfFilter {
+ const HrirArray<ALfloat> *Coeffs;
+ ALsizei Delay[2];
+ ALfloat Gain;
+ ALfloat GainStep;
+};
+
+
+struct DirectParams {
+ BiquadFilter LowPass;
+ BiquadFilter HighPass;
+
+ NfcFilter NFCtrlFilter;
+
+ struct {
+ HrtfFilter Old;
+ HrtfFilter Target;
+ HrtfState State;
+ } Hrtf;
+
+ struct {
+ ALfloat Current[MAX_OUTPUT_CHANNELS];
+ ALfloat Target[MAX_OUTPUT_CHANNELS];
+ } Gains;
+};
+
+struct SendParams {
+ BiquadFilter LowPass;
+ BiquadFilter HighPass;
+
+ struct {
+ ALfloat Current[MAX_OUTPUT_CHANNELS];
+ ALfloat Target[MAX_OUTPUT_CHANNELS];
+ } Gains;
+};
+
+
+struct ALvoicePropsBase {
+ ALfloat Pitch;
+ ALfloat Gain;
+ ALfloat OuterGain;
+ ALfloat MinGain;
+ ALfloat MaxGain;
+ ALfloat InnerAngle;
+ ALfloat OuterAngle;
+ ALfloat RefDistance;
+ ALfloat MaxDistance;
+ ALfloat RolloffFactor;
+ std::array<ALfloat,3> Position;
+ std::array<ALfloat,3> Velocity;
+ std::array<ALfloat,3> Direction;
+ std::array<ALfloat,3> OrientAt;
+ std::array<ALfloat,3> OrientUp;
+ ALboolean HeadRelative;
+ DistanceModel mDistanceModel;
+ Resampler mResampler;
+ ALboolean DirectChannels;
+ SpatializeMode mSpatializeMode;
+
+ ALboolean DryGainHFAuto;
+ ALboolean WetGainAuto;
+ ALboolean WetGainHFAuto;
+ ALfloat OuterGainHF;
+
+ ALfloat AirAbsorptionFactor;
+ ALfloat RoomRolloffFactor;
+ ALfloat DopplerFactor;
+
+ std::array<ALfloat,2> StereoPan;
+
+ ALfloat Radius;
+
+ /** Direct filter and auxiliary send info. */
+ struct {
+ ALfloat Gain;
+ ALfloat GainHF;
+ ALfloat HFReference;
+ ALfloat GainLF;
+ ALfloat LFReference;
+ } Direct;
+ struct SendData {
+ ALeffectslot *Slot;
+ ALfloat Gain;
+ ALfloat GainHF;
+ ALfloat HFReference;
+ ALfloat GainLF;
+ ALfloat LFReference;
+ } Send[MAX_SENDS];
+};
+
+struct ALvoiceProps : public ALvoicePropsBase {
+ std::atomic<ALvoiceProps*> next{nullptr};
+
+ DEF_NEWDEL(ALvoiceProps)
+};
+
+#define VOICE_IS_STATIC (1u<<0)
+#define VOICE_IS_FADING (1u<<1) /* Fading sources use gain stepping for smooth transitions. */
+#define VOICE_IS_AMBISONIC (1u<<2) /* Voice needs HF scaling for ambisonic upsampling. */
+#define VOICE_HAS_HRTF (1u<<3)
+#define VOICE_HAS_NFC (1u<<4)
+
+struct ALvoice {
+ enum State {
+ Stopped = 0,
+ Playing = 1,
+ Stopping = 2
+ };
+
+ std::atomic<ALvoiceProps*> mUpdate{nullptr};
+
+ std::atomic<ALuint> mSourceID{0u};
+ std::atomic<State> mPlayState{Stopped};
+
+ ALvoicePropsBase mProps;
+
+ /**
+ * Source offset in samples, relative to the currently playing buffer, NOT
+ * the whole queue.
+ */
+ std::atomic<ALuint> mPosition;
+ /** Fractional (fixed-point) offset to the next sample. */
+ std::atomic<ALsizei> mPositionFrac;
+
+ /* Current buffer queue item being played. */
+ std::atomic<ALbufferlistitem*> mCurrentBuffer;
+
+ /* Buffer queue item to loop to at end of queue (will be NULL for non-
+ * looping voices).
+ */
+ std::atomic<ALbufferlistitem*> mLoopBuffer;
+
+ /* Properties for the attached buffer(s). */
+ FmtChannels mFmtChannels;
+ ALuint mFrequency;
+ ALsizei mNumChannels;
+ ALsizei mSampleSize;
+
+ /** Current target parameters used for mixing. */
+ ALint mStep;
+
+ ResamplerFunc mResampler;
+
+ InterpState mResampleState;
+
+ ALuint mFlags;
+
+ struct DirectData {
+ int FilterType;
+ al::span<FloatBufferLine> Buffer;
+ };
+ DirectData mDirect;
+
+ struct SendData {
+ int FilterType;
+ al::span<FloatBufferLine> Buffer;
+ };
+ std::array<SendData,MAX_SENDS> mSend;
+
+ struct ChannelData {
+ alignas(16) std::array<ALfloat,MAX_RESAMPLE_PADDING*2> mPrevSamples;
+
+ ALfloat mAmbiScale;
+ BandSplitter mAmbiSplitter;
+
+ DirectParams mDryParams;
+ std::array<SendParams,MAX_SENDS> mWetParams;
+ };
+ std::array<ChannelData,MAX_INPUT_CHANNELS> mChans;
+
+ ALvoice() = default;
+ ALvoice(const ALvoice&) = delete;
+ ~ALvoice() { delete mUpdate.exchange(nullptr, std::memory_order_acq_rel); }
+ ALvoice& operator=(const ALvoice&) = delete;
+ ALvoice& operator=(ALvoice&& rhs) noexcept
+ {
+ ALvoiceProps *old_update{mUpdate.load(std::memory_order_relaxed)};
+ mUpdate.store(rhs.mUpdate.exchange(old_update, std::memory_order_relaxed),
+ std::memory_order_relaxed);
+
+ mSourceID.store(rhs.mSourceID.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ mPlayState.store(rhs.mPlayState.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+
+ mProps = rhs.mProps;
+
+ mPosition.store(rhs.mPosition.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ mPositionFrac.store(rhs.mPositionFrac.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+
+ mCurrentBuffer.store(rhs.mCurrentBuffer.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+ mLoopBuffer.store(rhs.mLoopBuffer.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+
+ mFmtChannels = rhs.mFmtChannels;
+ mFrequency = rhs.mFrequency;
+ mNumChannels = rhs.mNumChannels;
+ mSampleSize = rhs.mSampleSize;
+
+ mStep = rhs.mStep;
+ mResampler = rhs.mResampler;
+
+ mResampleState = rhs.mResampleState;
+
+ mFlags = rhs.mFlags;
+
+ mDirect = rhs.mDirect;
+ mSend = rhs.mSend;
+ mChans = rhs.mChans;
+
+ return *this;
+ }
+};
+
+
+using MixerFunc = void(*)(const ALfloat *data, const al::span<FloatBufferLine> OutBuffer,
+ ALfloat *CurrentGains, const ALfloat *TargetGains, const ALsizei Counter, const ALsizei OutPos,
+ const ALsizei BufferSize);
+using RowMixerFunc = void(*)(FloatBufferLine &OutBuffer, const ALfloat *gains,
+ const al::span<const FloatBufferLine> InSamples, const ALsizei InPos,
+ const ALsizei BufferSize);
+using HrtfMixerFunc = void(*)(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize,
+ MixHrtfFilter *hrtfparams, const ALsizei BufferSize);
+using HrtfMixerBlendFunc = void(*)(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize,
+ const HrtfFilter *oldparams, MixHrtfFilter *newparams, const ALsizei BufferSize);
+using HrtfDirectMixerFunc = void(*)(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const al::span<const FloatBufferLine> InSamples, float2 *AccumSamples, DirectHrtfState *State,
+ const ALsizei BufferSize);
+
+
+#define GAIN_MIX_MAX (1000.0f) /* +60dB */
+
+#define GAIN_SILENCE_THRESHOLD (0.00001f) /* -100dB */
+
+#define SPEEDOFSOUNDMETRESPERSEC (343.3f)
+#define AIRABSORBGAINHF (0.99426f) /* -0.05dB */
+
+/* Target gain for the reverb decay feedback reaching the decay time. */
+#define REVERB_DECAY_GAIN (0.001f) /* -60 dB */
+
+#define FRACTIONBITS (12)
+#define FRACTIONONE (1<<FRACTIONBITS)
+#define FRACTIONMASK (FRACTIONONE-1)
+
+
+inline ALfloat lerp(ALfloat val1, ALfloat val2, ALfloat mu) noexcept
+{ return val1 + (val2-val1)*mu; }
+inline ALfloat cubic(ALfloat val1, ALfloat val2, ALfloat val3, ALfloat val4, ALfloat mu) noexcept
+{
+ ALfloat mu2 = mu*mu, mu3 = mu2*mu;
+ ALfloat a0 = -0.5f*mu3 + mu2 + -0.5f*mu;
+ ALfloat a1 = 1.5f*mu3 + -2.5f*mu2 + 1.0f;
+ ALfloat a2 = -1.5f*mu3 + 2.0f*mu2 + 0.5f*mu;
+ ALfloat a3 = 0.5f*mu3 + -0.5f*mu2;
+ return val1*a0 + val2*a1 + val3*a2 + val4*a3;
+}
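+/* These are the Catmull-Rom basis polynomials; the curve passes through val2
+ * at mu=0 and val3 at mu=1, using val1 and val4 only to shape the tangents.
+ */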
+
+
+enum HrtfRequestMode {
+ Hrtf_Default = 0,
+ Hrtf_Enable = 1,
+ Hrtf_Disable = 2,
+};
+
+void aluInit(void);
+
+void aluInitMixer(void);
+
+ResamplerFunc SelectResampler(Resampler resampler);
+
+/* aluInitRenderer
+ *
+ * Set up the appropriate panning method and mixing method given the device
+ * properties.
+ */
+void aluInitRenderer(ALCdevice *device, ALint hrtf_id, HrtfRequestMode hrtf_appreq, HrtfRequestMode hrtf_userreq);
+
+void aluInitEffectPanning(ALeffectslot *slot, ALCdevice *device);
+
+void ProcessHrtf(ALCdevice *device, const ALsizei SamplesToDo);
+void ProcessAmbiDec(ALCdevice *device, const ALsizei SamplesToDo);
+void ProcessUhj(ALCdevice *device, const ALsizei SamplesToDo);
+void ProcessBs2b(ALCdevice *device, const ALsizei SamplesToDo);
+
+/**
+ * Calculates ambisonic encoder coefficients using the X, Y, and Z direction
+ * components, which must represent a normalized (unit length) vector, and the
+ * spread is the angular width of the sound (0...tau).
+ *
+ * NOTE: The components use ambisonic coordinates. As a result:
+ *
+ * Ambisonic Y = OpenAL -X
+ * Ambisonic Z = OpenAL Y
+ * Ambisonic X = OpenAL -Z
+ *
+ * The components are ordered such that OpenAL's X, Y, and Z are the first,
+ * second, and third parameters respectively -- simply negate X and Z.
+ */
+void CalcAmbiCoeffs(const ALfloat y, const ALfloat z, const ALfloat x, const ALfloat spread,
+ ALfloat (&coeffs)[MAX_AMBI_CHANNELS]);
+
+/**
+ * CalcDirectionCoeffs
+ *
+ * Calculates ambisonic coefficients based on an OpenAL direction vector. The
+ * vector must be normalized (unit length), and the spread is the angular width
+ * of the sound (0...tau).
+ */
+inline void CalcDirectionCoeffs(const ALfloat (&dir)[3], ALfloat spread, ALfloat (&coeffs)[MAX_AMBI_CHANNELS])
+{
+ /* Convert from OpenAL coords to Ambisonics. */
+ CalcAmbiCoeffs(-dir[0], dir[1], -dir[2], spread, coeffs);
+}
+
+/**
+ * CalcAngleCoeffs
+ *
+ * Calculates ambisonic coefficients based on azimuth and elevation. The
+ * azimuth and elevation parameters are in radians, going right and up
+ * respectively.
+ */
+inline void CalcAngleCoeffs(ALfloat azimuth, ALfloat elevation, ALfloat spread, ALfloat (&coeffs)[MAX_AMBI_CHANNELS])
+{
+ ALfloat x = -std::sin(azimuth) * std::cos(elevation);
+ ALfloat y = std::sin(elevation);
+ ALfloat z = std::cos(azimuth) * std::cos(elevation);
+
+ CalcAmbiCoeffs(x, y, z, spread, coeffs);
+}
+
+
+/**
+ * ComputePanGains
+ *
+ * Computes panning gains using the given channel decoder coefficients and the
+ * pre-calculated direction or angle coefficients. For B-Format sources, the
+ * coeffs are a 'slice' of a transform matrix for the input channel, used to
+ * scale and orient the sound samples.
+ */
+void ComputePanGains(const MixParams *mix, const ALfloat*RESTRICT coeffs, ALfloat ingain, ALfloat (&gains)[MAX_OUTPUT_CHANNELS]);
+
+
+inline std::array<ALfloat,MAX_AMBI_CHANNELS> GetAmbiIdentityRow(size_t i) noexcept
+{
+ std::array<ALfloat,MAX_AMBI_CHANNELS> ret{};
+ ret[i] = 1.0f;
+ return ret;
+}
+
+
+void MixVoice(ALvoice *voice, ALvoice::State vstate, const ALuint SourceID, ALCcontext *Context, const ALsizei SamplesToDo);
+
+void aluMixData(ALCdevice *device, ALvoid *OutBuffer, ALsizei NumSamples);
+/* Caller must lock the device state, and the mixer must not be running. */
+void aluHandleDisconnect(ALCdevice *device, const char *msg, ...) DECL_FORMAT(printf, 2, 3);
+
+extern MixerFunc MixSamples;
+extern RowMixerFunc MixRowSamples;
+
+extern const ALfloat ConeScale;
+extern const ALfloat ZScale;
+extern const ALboolean OverrideReverbSpeedOfSound;
+
+#endif
diff --git a/alc/ambdec.cpp b/alc/ambdec.cpp
new file mode 100644
index 00000000..0991cfc5
--- /dev/null
+++ b/alc/ambdec.cpp
@@ -0,0 +1,436 @@
+
+#include "config.h"
+
+#include "ambdec.h"
+
+#include <cctype>
+#include <cstring>
+#include <algorithm>
+
+#include <limits>
+#include <string>
+#include <fstream>
+#include <sstream>
+
+#include "logging.h"
+#include "compat.h"
+
+
+namespace {
+
+template<typename T, std::size_t N>
+constexpr inline std::size_t size(const T(&)[N]) noexcept
+{ return N; }
+
+int readline(std::istream &f, std::string &output)
+{
+ while(f.good() && f.peek() == '\n')
+ f.ignore();
+
+ return std::getline(f, output) && !output.empty();
+}
+
+bool read_clipped_line(std::istream &f, std::string &buffer)
+{
+ while(readline(f, buffer))
+ {
+ std::size_t pos{0};
+ while(pos < buffer.length() && std::isspace(buffer[pos]))
+ pos++;
+ buffer.erase(0, pos);
+
+ std::size_t cmtpos{buffer.find_first_of('#')};
+ if(cmtpos < buffer.length())
+ buffer.resize(cmtpos);
+ while(!buffer.empty() && std::isspace(buffer.back()))
+ buffer.pop_back();
+
+ if(!buffer.empty())
+ return true;
+ }
+ return false;
+}
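+/* For example, a hypothetical config line like
+ *   "  add_spkr  LF  1.0  -30.0  0.0   # front left"
+ * would be returned as "add_spkr  LF  1.0  -30.0  0.0".
+ */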
+
+
+std::string read_word(std::istream &f)
+{
+ std::string ret;
+ f >> ret;
+ return ret;
+}
+
+bool is_at_end(const std::string &buffer, std::size_t endpos)
+{
+ while(endpos < buffer.length() && std::isspace(buffer[endpos]))
+ ++endpos;
+ return !(endpos < buffer.length());
+}
+
+
+bool load_ambdec_speakers(al::vector<AmbDecConf::SpeakerConf> &spkrs, const std::size_t num_speakers, std::istream &f, std::string &buffer)
+{
+ while(spkrs.size() < num_speakers)
+ {
+ std::istringstream istr{buffer};
+
+ std::string cmd{read_word(istr)};
+ if(cmd.empty())
+ {
+ if(!read_clipped_line(f, buffer))
+ {
+ ERR("Unexpected end of file\n");
+ return false;
+ }
+ continue;
+ }
+
+ if(cmd == "add_spkr")
+ {
+ spkrs.emplace_back();
+ AmbDecConf::SpeakerConf &spkr = spkrs.back();
+ const size_t spkr_num{spkrs.size()};
+
+ istr >> spkr.Name;
+ if(istr.fail()) WARN("Name not specified for speaker %zu\n", spkr_num);
+ istr >> spkr.Distance;
+ if(istr.fail()) WARN("Distance not specified for speaker %zu\n", spkr_num);
+ istr >> spkr.Azimuth;
+ if(istr.fail()) WARN("Azimuth not specified for speaker %zu\n", spkr_num);
+ istr >> spkr.Elevation;
+ if(istr.fail()) WARN("Elevation not specified for speaker %zu\n", spkr_num);
+ istr >> spkr.Connection;
+ if(istr.fail()) TRACE("Connection not specified for speaker %zu\n", spkr_num);
+ }
+ else
+ {
+ ERR("Unexpected speakers command: %s\n", cmd.c_str());
+ return false;
+ }
+
+ istr.clear();
+ const auto endpos = static_cast<std::size_t>(istr.tellg());
+ if(!is_at_end(buffer, endpos))
+ {
+ ERR("Unexpected junk on line: %s\n", buffer.c_str()+endpos);
+ return false;
+ }
+ buffer.clear();
+ }
+
+ return true;
+}
+
+bool load_ambdec_matrix(float (&gains)[MAX_AMBI_ORDER+1], al::vector<AmbDecConf::CoeffArray> &matrix, const std::size_t maxrow, std::istream &f, std::string &buffer)
+{
+ bool gotgains{false};
+ std::size_t cur{0u};
+ while(cur < maxrow)
+ {
+ std::istringstream istr{buffer};
+
+ std::string cmd{read_word(istr)};
+ if(cmd.empty())
+ {
+ if(!read_clipped_line(f, buffer))
+ {
+ ERR("Unexpected end of file\n");
+ return false;
+ }
+ continue;
+ }
+
+ if(cmd == "order_gain")
+ {
+ std::size_t curgain{0u};
+ float value;
+ while(istr.good())
+ {
+ istr >> value;
+ if(istr.fail()) break;
+ if(!istr.eof() && !std::isspace(istr.peek()))
+ {
+ ERR("Extra junk on gain %zu: %s\n", curgain+1,
+ buffer.c_str()+static_cast<std::size_t>(istr.tellg()));
+ return false;
+ }
+ if(curgain < size(gains))
+ gains[curgain++] = value;
+ }
+ std::fill(std::begin(gains)+curgain, std::end(gains), 0.0f);
+ gotgains = true;
+ }
+ else if(cmd == "add_row")
+ {
+ matrix.emplace_back();
+ AmbDecConf::CoeffArray &mtxrow = matrix.back();
+ std::size_t curidx{0u};
+ float value{};
+ while(istr.good())
+ {
+ istr >> value;
+ if(istr.fail()) break;
+ if(!istr.eof() && !std::isspace(istr.peek()))
+ {
+ ERR("Extra junk on matrix element %zux%zu: %s\n", curidx,
+ matrix.size(), buffer.c_str()+static_cast<std::size_t>(istr.tellg()));
+ matrix.pop_back();
+ return false;
+ }
+ if(curidx < mtxrow.size())
+ mtxrow[curidx++] = value;
+ }
+ std::fill(mtxrow.begin()+curidx, mtxrow.end(), 0.0f);
+ cur++;
+ }
+ else
+ {
+ ERR("Unexpected matrix command: %s\n", cmd.c_str());
+ return false;
+ }
+
+ istr.clear();
+ const auto endpos = static_cast<std::size_t>(istr.tellg());
+ if(!is_at_end(buffer, endpos))
+ {
+ ERR("Unexpected junk on line: %s\n", buffer.c_str()+endpos);
+ return false;
+ }
+ buffer.clear();
+ }
+
+ if(!gotgains)
+ {
+ ERR("Matrix order_gain not specified\n");
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace
+
+int AmbDecConf::load(const char *fname) noexcept
+{
+ al::ifstream f{fname};
+ if(!f.is_open())
+ {
+ ERR("Failed to open: %s\n", fname);
+ return 0;
+ }
+
+ std::size_t num_speakers{0u};
+ std::string buffer;
+ while(read_clipped_line(f, buffer))
+ {
+ std::istringstream istr{buffer};
+
+ std::string command{read_word(istr)};
+ if(command.empty())
+ {
+ ERR("Malformed line: %s\n", buffer.c_str());
+ return 0;
+ }
+
+ if(command == "/description")
+ istr >> Description;
+ else if(command == "/version")
+ {
+ istr >> Version;
+ if(!istr.eof() && !std::isspace(istr.peek()))
+ {
+ ERR("Extra junk after version: %s\n",
+ buffer.c_str()+static_cast<std::size_t>(istr.tellg()));
+ return 0;
+ }
+ if(Version != 3)
+ {
+ ERR("Unsupported version: %u\n", Version);
+ return 0;
+ }
+ }
+ else if(command == "/dec/chan_mask")
+ {
+ istr >> std::hex >> ChanMask >> std::dec;
+ if(!istr.eof() && !std::isspace(istr.peek()))
+ {
+ ERR("Extra junk after mask: %s\n",
+ buffer.c_str()+static_cast<std::size_t>(istr.tellg()));
+ return 0;
+ }
+ }
+ else if(command == "/dec/freq_bands")
+ {
+ istr >> FreqBands;
+ if(!istr.eof() && !std::isspace(istr.peek()))
+ {
+ ERR("Extra junk after freq_bands: %s\n",
+ buffer.c_str()+static_cast<std::size_t>(istr.tellg()));
+ return 0;
+ }
+ if(FreqBands != 1 && FreqBands != 2)
+ {
+ ERR("Invalid freq_bands value: %u\n", FreqBands);
+ return 0;
+ }
+ }
+ else if(command == "/dec/speakers")
+ {
+ istr >> num_speakers;
+ if(!istr.eof() && !std::isspace(istr.peek()))
+ {
+ ERR("Extra junk after speakers: %s\n",
+ buffer.c_str()+static_cast<std::size_t>(istr.tellg()));
+ return 0;
+ }
+ Speakers.reserve(num_speakers);
+ LFMatrix.reserve(num_speakers);
+ HFMatrix.reserve(num_speakers);
+ }
+ else if(command == "/dec/coeff_scale")
+ {
+ std::string scale = read_word(istr);
+ if(scale == "n3d") CoeffScale = AmbDecScale::N3D;
+ else if(scale == "sn3d") CoeffScale = AmbDecScale::SN3D;
+ else if(scale == "fuma") CoeffScale = AmbDecScale::FuMa;
+ else
+ {
+ ERR("Unsupported coeff scale: %s\n", scale.c_str());
+ return 0;
+ }
+ }
+ else if(command == "/opt/xover_freq")
+ {
+ istr >> XOverFreq;
+ if(!istr.eof() && !std::isspace(istr.peek()))
+ {
+ ERR("Extra junk after xover_freq: %s\n",
+ buffer.c_str()+static_cast<std::size_t>(istr.tellg()));
+ return 0;
+ }
+ }
+ else if(command == "/opt/xover_ratio")
+ {
+ istr >> XOverRatio;
+ if(!istr.eof() && !std::isspace(istr.peek()))
+ {
+ ERR("Extra junk after xover_ratio: %s\n",
+ buffer.c_str()+static_cast<std::size_t>(istr.tellg()));
+ return 0;
+ }
+ }
+ else if(command == "/opt/input_scale" || command == "/opt/nfeff_comp" ||
+ command == "/opt/delay_comp" || command == "/opt/level_comp")
+ {
+ /* Unused */
+ read_word(istr);
+ }
+ else if(command == "/speakers/{")
+ {
+ const auto endpos = static_cast<std::size_t>(istr.tellg());
+ if(!is_at_end(buffer, endpos))
+ {
+ ERR("Unexpected junk on line: %s\n", buffer.c_str()+endpos);
+ return 0;
+ }
+ buffer.clear();
+
+ if(!load_ambdec_speakers(Speakers, num_speakers, f, buffer))
+ return 0;
+
+ if(!read_clipped_line(f, buffer))
+ {
+ ERR("Unexpected end of file\n");
+ return 0;
+ }
+ std::istringstream istr2{buffer};
+ std::string endmark{read_word(istr2)};
+ if(endmark != "/}")
+ {
+ ERR("Expected /} after speaker definitions, got %s\n", endmark.c_str());
+ return 0;
+ }
+ istr.swap(istr2);
+ }
+ else if(command == "/lfmatrix/{" || command == "/hfmatrix/{" || command == "/matrix/{")
+ {
+ const auto endpos = static_cast<std::size_t>(istr.tellg());
+ if(!is_at_end(buffer, endpos))
+ {
+ ERR("Unexpected junk on line: %s\n", buffer.c_str()+endpos);
+ return 0;
+ }
+ buffer.clear();
+
+ if(FreqBands == 1)
+ {
+ if(command != "/matrix/{")
+ {
+ ERR("Unexpected \"%s\" type for a single-band decoder\n", command.c_str());
+ return 0;
+ }
+ if(!load_ambdec_matrix(HFOrderGain, HFMatrix, num_speakers, f, buffer))
+ return 0;
+ }
+ else
+ {
+ if(command == "/lfmatrix/{")
+ {
+ if(!load_ambdec_matrix(LFOrderGain, LFMatrix, num_speakers, f, buffer))
+ return 0;
+ }
+ else if(command == "/hfmatrix/{")
+ {
+ if(!load_ambdec_matrix(HFOrderGain, HFMatrix, num_speakers, f, buffer))
+ return 0;
+ }
+ else
+ {
+ ERR("Unexpected \"%s\" type for a dual-band decoder\n", command.c_str());
+ return 0;
+ }
+ }
+
+ if(!read_clipped_line(f, buffer))
+ {
+ ERR("Unexpected end of file\n");
+ return 0;
+ }
+ std::istringstream istr2{buffer};
+ std::string endmark{read_word(istr2)};
+ if(endmark != "/}")
+ {
+ ERR("Expected /} after matrix definitions, got %s\n", endmark.c_str());
+ return 0;
+ }
+ istr.swap(istr2);
+ }
+ else if(command == "/end")
+ {
+ const auto endpos = static_cast<std::size_t>(istr.tellg());
+ if(!is_at_end(buffer, endpos))
+ {
+ ERR("Unexpected junk on end: %s\n", buffer.c_str()+endpos);
+ return 0;
+ }
+
+ return 1;
+ }
+ else
+ {
+ ERR("Unexpected command: %s\n", command.c_str());
+ return 0;
+ }
+
+ istr.clear();
+ const auto endpos = static_cast<std::size_t>(istr.tellg());
+ if(!is_at_end(buffer, endpos))
+ {
+ ERR("Unexpected junk on line: %s\n", buffer.c_str()+endpos);
+ return 0;
+ }
+ buffer.clear();
+ }
+ ERR("Unexpected end of file\n");
+
+ return 0;
+}
diff --git a/alc/ambdec.h b/alc/ambdec.h
new file mode 100644
index 00000000..ff7b71ee
--- /dev/null
+++ b/alc/ambdec.h
@@ -0,0 +1,48 @@
+#ifndef AMBDEC_H
+#define AMBDEC_H
+
+#include <array>
+#include <string>
+
+#include "ambidefs.h"
+#include "vector.h"
+
+/* Helpers to read .ambdec configuration files. */
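+/* Roughly, AmbDecConf::load() below expects a file along these lines (the
+ * values here are made up purely for illustration):
+ *
+ *   /description      example
+ *   /version          3
+ *   /dec/chan_mask    b
+ *   /dec/freq_bands   1
+ *   /dec/speakers     2
+ *   /dec/coeff_scale  n3d
+ *   /speakers/{
+ *     add_spkr  LS  1.0   45.0  0.0
+ *     add_spkr  RS  1.0  -45.0  0.0
+ *   /}
+ *   /matrix/{
+ *     order_gain  1.0  1.0  0.0  0.0
+ *     add_row   0.5   0.5  0.0  0.5
+ *     add_row   0.5  -0.5  0.0  0.5
+ *   /}
+ *   /end
+ */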
+
+enum class AmbDecScale {
+ N3D,
+ SN3D,
+ FuMa,
+};
+struct AmbDecConf {
+ std::string Description;
+ int Version{0}; /* Must be 3 */
+
+ unsigned int ChanMask{0u};
+ unsigned int FreqBands{0u}; /* Must be 1 or 2 */
+ AmbDecScale CoeffScale{};
+
+ float XOverFreq{0.0f};
+ float XOverRatio{0.0f};
+
+ struct SpeakerConf {
+ std::string Name;
+ float Distance{0.0f};
+ float Azimuth{0.0f};
+ float Elevation{0.0f};
+ std::string Connection;
+ };
+ al::vector<SpeakerConf> Speakers;
+
+ using CoeffArray = std::array<float,MAX_AMBI_CHANNELS>;
+ /* Unused when FreqBands == 1 */
+ float LFOrderGain[MAX_AMBI_ORDER+1]{};
+ al::vector<CoeffArray> LFMatrix;
+
+ float HFOrderGain[MAX_AMBI_ORDER+1]{};
+ al::vector<CoeffArray> HFMatrix;
+
+ int load(const char *fname) noexcept;
+};
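+/* A minimal usage sketch (the file name is hypothetical):
+ *
+ *   AmbDecConf conf;
+ *   if(conf.load("surround.ambdec"))
+ *   {
+ *       // conf.Speakers, conf.HFMatrix, etc. are now filled in; load()
+ *       // returns 1 on success and 0 on failure.
+ *   }
+ */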
+
+#endif /* AMBDEC_H */
diff --git a/alc/ambidefs.h b/alc/ambidefs.h
new file mode 100644
index 00000000..17a9815b
--- /dev/null
+++ b/alc/ambidefs.h
@@ -0,0 +1,119 @@
+#ifndef AMBIDEFS_H
+#define AMBIDEFS_H
+
+#include <array>
+
+/* The maximum number of Ambisonics channels. For a given order (o), the size
+ * needed will be (o+1)**2, thus zero-order has 1, first-order has 4, second-
+ * order has 9, third-order has 16, and fourth-order has 25.
+ */
+#define MAX_AMBI_ORDER 3
+constexpr inline size_t AmbiChannelsFromOrder(size_t order) noexcept
+{ return (order+1) * (order+1); }
+#define MAX_AMBI_CHANNELS AmbiChannelsFromOrder(MAX_AMBI_ORDER)
+
+/* A bitmask of ambisonic channels for 0 to 4th order. This only specifies up
+ * to 4th order, which is the highest order a 32-bit mask value can specify (a
+ * 64-bit mask could handle up to 7th order).
+ */
+#define AMBI_0ORDER_MASK 0x00000001
+#define AMBI_1ORDER_MASK 0x0000000f
+#define AMBI_2ORDER_MASK 0x000001ff
+#define AMBI_3ORDER_MASK 0x0000ffff
+#define AMBI_4ORDER_MASK 0x01ffffff
+
+/* A bitmask of ambisonic channels with height information. If none of these
+ * channels are used/needed, there's no height (e.g. with most surround sound
+ * speaker setups). This is ACN ordering, with bit 0 being ACN 0, etc.
+ */
+#define AMBI_PERIPHONIC_MASK (0xfe7ce4)
+
+/* The maximum number of ambisonic channels for 2D (non-periphonic)
+ * representation. This is two channels per order above zero, plus one for
+ * zero-order; or simply, o*2 + 1.
+ */
+constexpr inline size_t Ambi2DChannelsFromOrder(size_t order) noexcept
+{ return order*2 + 1; }
+#define MAX_AMBI2D_CHANNELS Ambi2DChannelsFromOrder(MAX_AMBI_ORDER)
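+/* Purely as a sanity check of the helpers above (these follow directly from
+ * the (o+1)^2 and o*2 + 1 formulas at MAX_AMBI_ORDER == 3):
+ */
+static_assert(AmbiChannelsFromOrder(MAX_AMBI_ORDER) == 16, "Unexpected max 3D channel count");
+static_assert(Ambi2DChannelsFromOrder(MAX_AMBI_ORDER) == 7, "Unexpected max 2D channel count");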
+
+
+/* NOTE: These are scale factors as applied to Ambisonics content. Decoder
+ * coefficients should be divided by these values to get proper scalings.
+ */
+struct AmbiScale {
+ static constexpr std::array<float,MAX_AMBI_CHANNELS> FromN3D{{
+ 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f
+ }};
+ static constexpr std::array<float,MAX_AMBI_CHANNELS> FromSN3D{{
+ 1.000000000f, /* ACN 0, sqrt(1) */
+ 1.732050808f, /* ACN 1, sqrt(3) */
+ 1.732050808f, /* ACN 2, sqrt(3) */
+ 1.732050808f, /* ACN 3, sqrt(3) */
+ 2.236067978f, /* ACN 4, sqrt(5) */
+ 2.236067978f, /* ACN 5, sqrt(5) */
+ 2.236067978f, /* ACN 6, sqrt(5) */
+ 2.236067978f, /* ACN 7, sqrt(5) */
+ 2.236067978f, /* ACN 8, sqrt(5) */
+ 2.645751311f, /* ACN 9, sqrt(7) */
+ 2.645751311f, /* ACN 10, sqrt(7) */
+ 2.645751311f, /* ACN 11, sqrt(7) */
+ 2.645751311f, /* ACN 12, sqrt(7) */
+ 2.645751311f, /* ACN 13, sqrt(7) */
+ 2.645751311f, /* ACN 14, sqrt(7) */
+ 2.645751311f, /* ACN 15, sqrt(7) */
+ }};
+ static constexpr std::array<float,MAX_AMBI_CHANNELS> FromFuMa{{
+ 1.414213562f, /* ACN 0 (W), sqrt(2) */
+ 1.732050808f, /* ACN 1 (Y), sqrt(3) */
+ 1.732050808f, /* ACN 2 (Z), sqrt(3) */
+ 1.732050808f, /* ACN 3 (X), sqrt(3) */
+ 1.936491673f, /* ACN 4 (V), sqrt(15)/2 */
+ 1.936491673f, /* ACN 5 (T), sqrt(15)/2 */
+ 2.236067978f, /* ACN 6 (R), sqrt(5) */
+ 1.936491673f, /* ACN 7 (S), sqrt(15)/2 */
+ 1.936491673f, /* ACN 8 (U), sqrt(15)/2 */
+ 2.091650066f, /* ACN 9 (Q), sqrt(35/8) */
+ 1.972026594f, /* ACN 10 (O), sqrt(35)/3 */
+ 2.231093404f, /* ACN 11 (M), sqrt(224/45) */
+ 2.645751311f, /* ACN 12 (K), sqrt(7) */
+ 2.231093404f, /* ACN 13 (L), sqrt(224/45) */
+ 1.972026594f, /* ACN 14 (N), sqrt(35)/3 */
+ 2.091650066f, /* ACN 15 (P), sqrt(35/8) */
+ }};
+};
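+/* For example, following the note above, a decoder coefficient taken from a
+ * configuration whose CoeffScale is SN3D would be rescaled as
+ * conf.HFMatrix[spkr][acn] / AmbiScale::FromSN3D[acn] (the variable names
+ * here are purely illustrative).
+ */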
+
+struct AmbiIndex {
+ static constexpr std::array<int,MAX_AMBI_CHANNELS> FromFuMa{{
+ 0, /* W */
+ 3, /* X */
+ 1, /* Y */
+ 2, /* Z */
+ 6, /* R */
+ 7, /* S */
+ 5, /* T */
+ 8, /* U */
+ 4, /* V */
+ 12, /* K */
+ 13, /* L */
+ 11, /* M */
+ 14, /* N */
+ 10, /* O */
+ 15, /* P */
+ 9, /* Q */
+ }};
+ static constexpr std::array<int,MAX_AMBI_CHANNELS> FromACN{{
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15
+ }};
+
+ static constexpr std::array<int,MAX_AMBI2D_CHANNELS> From2D{{
+ 0, 1,3, 4,8, 9,15
+ }};
+ static constexpr std::array<int,MAX_AMBI_CHANNELS> From3D{{
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15
+ }};
+};
+
+#endif /* AMBIDEFS_H */
diff --git a/alc/backends/alsa.cpp b/alc/backends/alsa.cpp
new file mode 100644
index 00000000..c133df68
--- /dev/null
+++ b/alc/backends/alsa.cpp
@@ -0,0 +1,1288 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/alsa.h"
+
+#include <algorithm>
+#include <atomic>
+#include <cassert>
+#include <cerrno>
+#include <chrono>
+#include <cstring>
+#include <exception>
+#include <functional>
+#include <memory>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "AL/al.h"
+
+#include "albyte.h"
+#include "alcmain.h"
+#include "alconfig.h"
+#include "almalloc.h"
+#include "alnumeric.h"
+#include "aloptional.h"
+#include "alu.h"
+#include "compat.h"
+#include "logging.h"
+#include "ringbuffer.h"
+#include "threads.h"
+#include "vector.h"
+
+#include <alsa/asoundlib.h>
+
+
+namespace {
+
+constexpr ALCchar alsaDevice[] = "ALSA Default";
+
+
+#ifdef HAVE_DYNLOAD
+#define ALSA_FUNCS(MAGIC) \
+ MAGIC(snd_strerror); \
+ MAGIC(snd_pcm_open); \
+ MAGIC(snd_pcm_close); \
+ MAGIC(snd_pcm_nonblock); \
+ MAGIC(snd_pcm_frames_to_bytes); \
+ MAGIC(snd_pcm_bytes_to_frames); \
+ MAGIC(snd_pcm_hw_params_malloc); \
+ MAGIC(snd_pcm_hw_params_free); \
+ MAGIC(snd_pcm_hw_params_any); \
+ MAGIC(snd_pcm_hw_params_current); \
+ MAGIC(snd_pcm_hw_params_set_access); \
+ MAGIC(snd_pcm_hw_params_set_format); \
+ MAGIC(snd_pcm_hw_params_set_channels); \
+ MAGIC(snd_pcm_hw_params_set_periods_near); \
+ MAGIC(snd_pcm_hw_params_set_rate_near); \
+ MAGIC(snd_pcm_hw_params_set_rate); \
+ MAGIC(snd_pcm_hw_params_set_rate_resample); \
+ MAGIC(snd_pcm_hw_params_set_buffer_time_near); \
+ MAGIC(snd_pcm_hw_params_set_period_time_near); \
+ MAGIC(snd_pcm_hw_params_set_buffer_size_near); \
+ MAGIC(snd_pcm_hw_params_set_period_size_near); \
+ MAGIC(snd_pcm_hw_params_set_buffer_size_min); \
+ MAGIC(snd_pcm_hw_params_get_buffer_time_min); \
+ MAGIC(snd_pcm_hw_params_get_buffer_time_max); \
+ MAGIC(snd_pcm_hw_params_get_period_time_min); \
+ MAGIC(snd_pcm_hw_params_get_period_time_max); \
+ MAGIC(snd_pcm_hw_params_get_buffer_size); \
+ MAGIC(snd_pcm_hw_params_get_period_size); \
+ MAGIC(snd_pcm_hw_params_get_access); \
+ MAGIC(snd_pcm_hw_params_get_periods); \
+ MAGIC(snd_pcm_hw_params_test_format); \
+ MAGIC(snd_pcm_hw_params_test_channels); \
+ MAGIC(snd_pcm_hw_params); \
+ MAGIC(snd_pcm_sw_params_malloc); \
+ MAGIC(snd_pcm_sw_params_current); \
+ MAGIC(snd_pcm_sw_params_set_avail_min); \
+ MAGIC(snd_pcm_sw_params_set_stop_threshold); \
+ MAGIC(snd_pcm_sw_params); \
+ MAGIC(snd_pcm_sw_params_free); \
+ MAGIC(snd_pcm_prepare); \
+ MAGIC(snd_pcm_start); \
+ MAGIC(snd_pcm_resume); \
+ MAGIC(snd_pcm_reset); \
+ MAGIC(snd_pcm_wait); \
+ MAGIC(snd_pcm_delay); \
+ MAGIC(snd_pcm_state); \
+ MAGIC(snd_pcm_avail_update); \
+ MAGIC(snd_pcm_areas_silence); \
+ MAGIC(snd_pcm_mmap_begin); \
+ MAGIC(snd_pcm_mmap_commit); \
+ MAGIC(snd_pcm_readi); \
+ MAGIC(snd_pcm_writei); \
+ MAGIC(snd_pcm_drain); \
+ MAGIC(snd_pcm_drop); \
+ MAGIC(snd_pcm_recover); \
+ MAGIC(snd_pcm_info_malloc); \
+ MAGIC(snd_pcm_info_free); \
+ MAGIC(snd_pcm_info_set_device); \
+ MAGIC(snd_pcm_info_set_subdevice); \
+ MAGIC(snd_pcm_info_set_stream); \
+ MAGIC(snd_pcm_info_get_name); \
+ MAGIC(snd_ctl_pcm_next_device); \
+ MAGIC(snd_ctl_pcm_info); \
+ MAGIC(snd_ctl_open); \
+ MAGIC(snd_ctl_close); \
+ MAGIC(snd_ctl_card_info_malloc); \
+ MAGIC(snd_ctl_card_info_free); \
+ MAGIC(snd_ctl_card_info); \
+ MAGIC(snd_ctl_card_info_get_name); \
+ MAGIC(snd_ctl_card_info_get_id); \
+ MAGIC(snd_card_next); \
+ MAGIC(snd_config_update_free_global)
+
+static void *alsa_handle;
+#define MAKE_FUNC(f) decltype(f) * p##f
+ALSA_FUNCS(MAKE_FUNC);
+#undef MAKE_FUNC
+
+#ifndef IN_IDE_PARSER
+#define snd_strerror psnd_strerror
+#define snd_pcm_open psnd_pcm_open
+#define snd_pcm_close psnd_pcm_close
+#define snd_pcm_nonblock psnd_pcm_nonblock
+#define snd_pcm_frames_to_bytes psnd_pcm_frames_to_bytes
+#define snd_pcm_bytes_to_frames psnd_pcm_bytes_to_frames
+#define snd_pcm_hw_params_malloc psnd_pcm_hw_params_malloc
+#define snd_pcm_hw_params_free psnd_pcm_hw_params_free
+#define snd_pcm_hw_params_any psnd_pcm_hw_params_any
+#define snd_pcm_hw_params_current psnd_pcm_hw_params_current
+#define snd_pcm_hw_params_set_access psnd_pcm_hw_params_set_access
+#define snd_pcm_hw_params_set_format psnd_pcm_hw_params_set_format
+#define snd_pcm_hw_params_set_channels psnd_pcm_hw_params_set_channels
+#define snd_pcm_hw_params_set_periods_near psnd_pcm_hw_params_set_periods_near
+#define snd_pcm_hw_params_set_rate_near psnd_pcm_hw_params_set_rate_near
+#define snd_pcm_hw_params_set_rate psnd_pcm_hw_params_set_rate
+#define snd_pcm_hw_params_set_rate_resample psnd_pcm_hw_params_set_rate_resample
+#define snd_pcm_hw_params_set_buffer_time_near psnd_pcm_hw_params_set_buffer_time_near
+#define snd_pcm_hw_params_set_period_time_near psnd_pcm_hw_params_set_period_time_near
+#define snd_pcm_hw_params_set_buffer_size_near psnd_pcm_hw_params_set_buffer_size_near
+#define snd_pcm_hw_params_set_period_size_near psnd_pcm_hw_params_set_period_size_near
+#define snd_pcm_hw_params_set_buffer_size_min psnd_pcm_hw_params_set_buffer_size_min
+#define snd_pcm_hw_params_get_buffer_time_min psnd_pcm_hw_params_get_buffer_time_min
+#define snd_pcm_hw_params_get_buffer_time_max psnd_pcm_hw_params_get_buffer_time_max
+#define snd_pcm_hw_params_get_period_time_min psnd_pcm_hw_params_get_period_time_min
+#define snd_pcm_hw_params_get_period_time_max psnd_pcm_hw_params_get_period_time_max
+#define snd_pcm_hw_params_get_buffer_size psnd_pcm_hw_params_get_buffer_size
+#define snd_pcm_hw_params_get_period_size psnd_pcm_hw_params_get_period_size
+#define snd_pcm_hw_params_get_access psnd_pcm_hw_params_get_access
+#define snd_pcm_hw_params_get_periods psnd_pcm_hw_params_get_periods
+#define snd_pcm_hw_params_test_format psnd_pcm_hw_params_test_format
+#define snd_pcm_hw_params_test_channels psnd_pcm_hw_params_test_channels
+#define snd_pcm_hw_params psnd_pcm_hw_params
+#define snd_pcm_sw_params_malloc psnd_pcm_sw_params_malloc
+#define snd_pcm_sw_params_current psnd_pcm_sw_params_current
+#define snd_pcm_sw_params_set_avail_min psnd_pcm_sw_params_set_avail_min
+#define snd_pcm_sw_params_set_stop_threshold psnd_pcm_sw_params_set_stop_threshold
+#define snd_pcm_sw_params psnd_pcm_sw_params
+#define snd_pcm_sw_params_free psnd_pcm_sw_params_free
+#define snd_pcm_prepare psnd_pcm_prepare
+#define snd_pcm_start psnd_pcm_start
+#define snd_pcm_resume psnd_pcm_resume
+#define snd_pcm_reset psnd_pcm_reset
+#define snd_pcm_wait psnd_pcm_wait
+#define snd_pcm_delay psnd_pcm_delay
+#define snd_pcm_state psnd_pcm_state
+#define snd_pcm_avail_update psnd_pcm_avail_update
+#define snd_pcm_areas_silence psnd_pcm_areas_silence
+#define snd_pcm_mmap_begin psnd_pcm_mmap_begin
+#define snd_pcm_mmap_commit psnd_pcm_mmap_commit
+#define snd_pcm_readi psnd_pcm_readi
+#define snd_pcm_writei psnd_pcm_writei
+#define snd_pcm_drain psnd_pcm_drain
+#define snd_pcm_drop psnd_pcm_drop
+#define snd_pcm_recover psnd_pcm_recover
+#define snd_pcm_info_malloc psnd_pcm_info_malloc
+#define snd_pcm_info_free psnd_pcm_info_free
+#define snd_pcm_info_set_device psnd_pcm_info_set_device
+#define snd_pcm_info_set_subdevice psnd_pcm_info_set_subdevice
+#define snd_pcm_info_set_stream psnd_pcm_info_set_stream
+#define snd_pcm_info_get_name psnd_pcm_info_get_name
+#define snd_ctl_pcm_next_device psnd_ctl_pcm_next_device
+#define snd_ctl_pcm_info psnd_ctl_pcm_info
+#define snd_ctl_open psnd_ctl_open
+#define snd_ctl_close psnd_ctl_close
+#define snd_ctl_card_info_malloc psnd_ctl_card_info_malloc
+#define snd_ctl_card_info_free psnd_ctl_card_info_free
+#define snd_ctl_card_info psnd_ctl_card_info
+#define snd_ctl_card_info_get_name psnd_ctl_card_info_get_name
+#define snd_ctl_card_info_get_id psnd_ctl_card_info_get_id
+#define snd_card_next psnd_card_next
+#define snd_config_update_free_global psnd_config_update_free_global
+#endif
+#endif
+
+
+struct DevMap {
+ std::string name;
+ std::string device_name;
+};
+
+al::vector<DevMap> PlaybackDevices;
+al::vector<DevMap> CaptureDevices;
+
+
+const char *prefix_name(snd_pcm_stream_t stream)
+{
+ assert(stream == SND_PCM_STREAM_PLAYBACK || stream == SND_PCM_STREAM_CAPTURE);
+ return (stream==SND_PCM_STREAM_PLAYBACK) ? "device-prefix" : "capture-prefix";
+}
+
+al::vector<DevMap> probe_devices(snd_pcm_stream_t stream)
+{
+ al::vector<DevMap> devlist;
+
+ snd_ctl_card_info_t *info;
+ snd_ctl_card_info_malloc(&info);
+ snd_pcm_info_t *pcminfo;
+ snd_pcm_info_malloc(&pcminfo);
+
+ devlist.emplace_back(DevMap{alsaDevice,
+ GetConfigValue(nullptr, "alsa", (stream==SND_PCM_STREAM_PLAYBACK) ? "device" : "capture",
+ "default")});
+
+ if(stream == SND_PCM_STREAM_PLAYBACK)
+ {
+ const char *customdevs;
+ const char *next{GetConfigValue(nullptr, "alsa", "custom-devices", "")};
+ while((customdevs=next) != nullptr && customdevs[0])
+ {
+ next = strchr(customdevs, ';');
+ const char *sep{strchr(customdevs, '=')};
+ if(!sep)
+ {
+ std::string spec{next ? std::string(customdevs, next++) : std::string(customdevs)};
+ ERR("Invalid ALSA device specification \"%s\"\n", spec.c_str());
+ continue;
+ }
+
+ const char *oldsep{sep++};
+ devlist.emplace_back(DevMap{std::string(customdevs, oldsep),
+ next ? std::string(sep, next++) : std::string(sep)});
+ const auto &entry = devlist.back();
+ TRACE("Got device \"%s\", \"%s\"\n", entry.name.c_str(), entry.device_name.c_str());
+ }
+ }
+
+ const std::string main_prefix{
+ ConfigValueStr(nullptr, "alsa", prefix_name(stream)).value_or("plughw:")};
+
+ int card{-1};
+ int err{snd_card_next(&card)};
+ for(;err >= 0 && card >= 0;err = snd_card_next(&card))
+ {
+ std::string name{"hw:" + std::to_string(card)};
+
+ snd_ctl_t *handle;
+ if((err=snd_ctl_open(&handle, name.c_str(), 0)) < 0)
+ {
+ ERR("control open (hw:%d): %s\n", card, snd_strerror(err));
+ continue;
+ }
+ if((err=snd_ctl_card_info(handle, info)) < 0)
+ {
+ ERR("control hardware info (hw:%d): %s\n", card, snd_strerror(err));
+ snd_ctl_close(handle);
+ continue;
+ }
+
+ const char *cardname{snd_ctl_card_info_get_name(info)};
+ const char *cardid{snd_ctl_card_info_get_id(info)};
+ name = prefix_name(stream);
+ name += '-';
+ name += cardid;
+ const std::string card_prefix{
+ ConfigValueStr(nullptr, "alsa", name.c_str()).value_or(main_prefix)};
+
+ int dev{-1};
+ while(1)
+ {
+ if(snd_ctl_pcm_next_device(handle, &dev) < 0)
+ ERR("snd_ctl_pcm_next_device failed\n");
+ if(dev < 0) break;
+
+ snd_pcm_info_set_device(pcminfo, dev);
+ snd_pcm_info_set_subdevice(pcminfo, 0);
+ snd_pcm_info_set_stream(pcminfo, stream);
+ if((err=snd_ctl_pcm_info(handle, pcminfo)) < 0)
+ {
+ if(err != -ENOENT)
+ ERR("control digital audio info (hw:%d): %s\n", card, snd_strerror(err));
+ continue;
+ }
+
+ /* "prefix-cardid-dev" */
+ name = prefix_name(stream);
+ name += '-';
+ name += cardid;
+ name += '-';
+ name += std::to_string(dev);
+ const std::string device_prefix{
+ ConfigValueStr(nullptr, "alsa", name.c_str()).value_or(card_prefix)};
+
+ /* "CardName, PcmName (CARD=cardid,DEV=dev)" */
+ name = cardname;
+ name += ", ";
+ name += snd_pcm_info_get_name(pcminfo);
+ name += " (CARD=";
+ name += cardid;
+ name += ",DEV=";
+ name += std::to_string(dev);
+ name += ')';
+
+ /* "devprefixCARD=cardid,DEV=dev" */
+ std::string device{device_prefix};
+ device += "CARD=";
+ device += cardid;
+ device += ",DEV=";
+ device += std::to_string(dev);
+
+ devlist.emplace_back(DevMap{std::move(name), std::move(device)});
+ const auto &entry = devlist.back();
+ TRACE("Got device \"%s\", \"%s\"\n", entry.name.c_str(), entry.device_name.c_str());
+ }
+ snd_ctl_close(handle);
+ }
+ if(err < 0)
+ ERR("snd_card_next failed: %s\n", snd_strerror(err));
+
+ snd_pcm_info_free(pcminfo);
+ snd_ctl_card_info_free(info);
+
+ return devlist;
+}
+
+
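+/* Checks the PCM state and, on an underrun or suspend, tries to recover.
+ * Returns the (non-negative) current state on success, or a negative errno-
+ * style value on failure (e.g. -ENODEV once the device is disconnected).
+ */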
+int verify_state(snd_pcm_t *handle)
+{
+ snd_pcm_state_t state{snd_pcm_state(handle)};
+
+ int err;
+ switch(state)
+ {
+ case SND_PCM_STATE_OPEN:
+ case SND_PCM_STATE_SETUP:
+ case SND_PCM_STATE_PREPARED:
+ case SND_PCM_STATE_RUNNING:
+ case SND_PCM_STATE_DRAINING:
+ case SND_PCM_STATE_PAUSED:
+ /* All Okay */
+ break;
+
+ case SND_PCM_STATE_XRUN:
+ if((err=snd_pcm_recover(handle, -EPIPE, 1)) < 0)
+ return err;
+ break;
+ case SND_PCM_STATE_SUSPENDED:
+ if((err=snd_pcm_recover(handle, -ESTRPIPE, 1)) < 0)
+ return err;
+ break;
+ case SND_PCM_STATE_DISCONNECTED:
+ return -ENODEV;
+ }
+
+ return state;
+}
+
+
+struct AlsaPlayback final : public BackendBase {
+ AlsaPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~AlsaPlayback() override;
+
+ int mixerProc();
+ int mixerNoMMapProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ ClockLatency getClockLatency() override;
+
+ snd_pcm_t *mPcmHandle{nullptr};
+
+ al::vector<char> mBuffer;
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(AlsaPlayback)
+};
+
+AlsaPlayback::~AlsaPlayback()
+{
+ if(mPcmHandle)
+ snd_pcm_close(mPcmHandle);
+ mPcmHandle = nullptr;
+}
+
+
+int AlsaPlayback::mixerProc()
+{
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ const snd_pcm_uframes_t update_size{mDevice->UpdateSize};
+ const snd_pcm_uframes_t num_updates{mDevice->BufferSize / update_size};
+ while(!mKillNow.load(std::memory_order_acquire))
+ {
+ int state{verify_state(mPcmHandle)};
+ if(state < 0)
+ {
+ ERR("Invalid state detected: %s\n", snd_strerror(state));
+ aluHandleDisconnect(mDevice, "Bad state: %s", snd_strerror(state));
+ break;
+ }
+
+ snd_pcm_sframes_t avail{snd_pcm_avail_update(mPcmHandle)};
+ if(avail < 0)
+ {
+ ERR("available update failed: %s\n", snd_strerror(avail));
+ continue;
+ }
+
+ if(static_cast<snd_pcm_uframes_t>(avail) > update_size*(num_updates+1))
+ {
+ WARN("available samples exceeds the buffer size\n");
+ snd_pcm_reset(mPcmHandle);
+ continue;
+ }
+
+ // make sure there's frames to process
+ if(static_cast<snd_pcm_uframes_t>(avail) < update_size)
+ {
+ if(state != SND_PCM_STATE_RUNNING)
+ {
+ int err{snd_pcm_start(mPcmHandle)};
+ if(err < 0)
+ {
+ ERR("start failed: %s\n", snd_strerror(err));
+ continue;
+ }
+ }
+ if(snd_pcm_wait(mPcmHandle, 1000) == 0)
+ ERR("Wait timeout... buffer size too low?\n");
+ continue;
+ }
+ avail -= avail%update_size;
+
+ // it is possible that contiguous areas are smaller, thus we use a loop
+ lock();
+ while(avail > 0)
+ {
+ snd_pcm_uframes_t frames{static_cast<snd_pcm_uframes_t>(avail)};
+
+ const snd_pcm_channel_area_t *areas{};
+ snd_pcm_uframes_t offset{};
+ int err{snd_pcm_mmap_begin(mPcmHandle, &areas, &offset, &frames)};
+ if(err < 0)
+ {
+ ERR("mmap begin error: %s\n", snd_strerror(err));
+ break;
+ }
+
+ char *WritePtr{static_cast<char*>(areas->addr) + (offset * areas->step / 8)};
+ aluMixData(mDevice, WritePtr, frames);
+
+ snd_pcm_sframes_t commitres{snd_pcm_mmap_commit(mPcmHandle, offset, frames)};
+ if(commitres < 0 || (commitres-frames) != 0)
+ {
+ ERR("mmap commit error: %s\n",
+ snd_strerror(commitres >= 0 ? -EPIPE : commitres));
+ break;
+ }
+
+ avail -= frames;
+ }
+ unlock();
+ }
+
+ return 0;
+}
+
+int AlsaPlayback::mixerNoMMapProc()
+{
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ const snd_pcm_uframes_t update_size{mDevice->UpdateSize};
+ const snd_pcm_uframes_t buffer_size{mDevice->BufferSize};
+ while(!mKillNow.load(std::memory_order_acquire))
+ {
+ int state{verify_state(mPcmHandle)};
+ if(state < 0)
+ {
+ ERR("Invalid state detected: %s\n", snd_strerror(state));
+ aluHandleDisconnect(mDevice, "Bad state: %s", snd_strerror(state));
+ break;
+ }
+
+ snd_pcm_sframes_t avail{snd_pcm_avail_update(mPcmHandle)};
+ if(avail < 0)
+ {
+ ERR("available update failed: %s\n", snd_strerror(avail));
+ continue;
+ }
+
+ if(static_cast<snd_pcm_uframes_t>(avail) > buffer_size)
+ {
+ WARN("available samples exceeds the buffer size\n");
+ snd_pcm_reset(mPcmHandle);
+ continue;
+ }
+
+ if(static_cast<snd_pcm_uframes_t>(avail) < update_size)
+ {
+ if(state != SND_PCM_STATE_RUNNING)
+ {
+ int err{snd_pcm_start(mPcmHandle)};
+ if(err < 0)
+ {
+ ERR("start failed: %s\n", snd_strerror(err));
+ continue;
+ }
+ }
+ if(snd_pcm_wait(mPcmHandle, 1000) == 0)
+ ERR("Wait timeout... buffer size too low?\n");
+ continue;
+ }
+
+ lock();
+ char *WritePtr{mBuffer.data()};
+ avail = snd_pcm_bytes_to_frames(mPcmHandle, mBuffer.size());
+ aluMixData(mDevice, WritePtr, avail);
+ while(avail > 0)
+ {
+ snd_pcm_sframes_t ret{snd_pcm_writei(mPcmHandle, WritePtr, avail)};
+ switch(ret)
+ {
+ case -EAGAIN:
+ continue;
+#if ESTRPIPE != EPIPE
+ case -ESTRPIPE:
+#endif
+ case -EPIPE:
+ case -EINTR:
+ ret = snd_pcm_recover(mPcmHandle, ret, 1);
+ if(ret < 0)
+ avail = 0;
+ break;
+ default:
+ if(ret >= 0)
+ {
+ WritePtr += snd_pcm_frames_to_bytes(mPcmHandle, ret);
+ avail -= ret;
+ }
+ break;
+ }
+ if(ret < 0)
+ {
+ ret = snd_pcm_prepare(mPcmHandle);
+ if(ret < 0) break;
+ }
+ }
+ unlock();
+ }
+
+ return 0;
+}
+
+
+ALCenum AlsaPlayback::open(const ALCchar *name)
+{
+ const char *driver{};
+ if(name)
+ {
+ if(PlaybackDevices.empty())
+ PlaybackDevices = probe_devices(SND_PCM_STREAM_PLAYBACK);
+
+ auto iter = std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
+ [name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ );
+ if(iter == PlaybackDevices.cend())
+ return ALC_INVALID_VALUE;
+ driver = iter->device_name.c_str();
+ }
+ else
+ {
+ name = alsaDevice;
+ driver = GetConfigValue(nullptr, "alsa", "device", "default");
+ }
+
+ TRACE("Opening device \"%s\"\n", driver);
+ int err{snd_pcm_open(&mPcmHandle, driver, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)};
+ if(err < 0)
+ {
+ ERR("Could not open playback device '%s': %s\n", driver, snd_strerror(err));
+ return ALC_OUT_OF_MEMORY;
+ }
+
+ /* Free alsa's global config tree. Otherwise valgrind reports a ton of leaks. */
+ snd_config_update_free_global();
+
+ mDevice->DeviceName = name;
+
+ return ALC_NO_ERROR;
+}
+
+ALCboolean AlsaPlayback::reset()
+{
+ snd_pcm_format_t format{SND_PCM_FORMAT_UNKNOWN};
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ format = SND_PCM_FORMAT_S8;
+ break;
+ case DevFmtUByte:
+ format = SND_PCM_FORMAT_U8;
+ break;
+ case DevFmtShort:
+ format = SND_PCM_FORMAT_S16;
+ break;
+ case DevFmtUShort:
+ format = SND_PCM_FORMAT_U16;
+ break;
+ case DevFmtInt:
+ format = SND_PCM_FORMAT_S32;
+ break;
+ case DevFmtUInt:
+ format = SND_PCM_FORMAT_U32;
+ break;
+ case DevFmtFloat:
+ format = SND_PCM_FORMAT_FLOAT;
+ break;
+ }
+
+ bool allowmmap{!!GetConfigValueBool(mDevice->DeviceName.c_str(), "alsa", "mmap", 1)};
+ ALuint periodLen{static_cast<ALuint>(mDevice->UpdateSize * 1000000_u64 / mDevice->Frequency)};
+ ALuint bufferLen{static_cast<ALuint>(mDevice->BufferSize * 1000000_u64 / mDevice->Frequency)};
+ ALuint rate{mDevice->Frequency};
+
+ snd_pcm_uframes_t periodSizeInFrames{};
+ snd_pcm_uframes_t bufferSizeInFrames{};
+ snd_pcm_sw_params_t *sp{};
+ snd_pcm_hw_params_t *hp{};
+ snd_pcm_access_t access{};
+ const char *funcerr{};
+ int err{};
+
+ snd_pcm_hw_params_malloc(&hp);
+#define CHECK(x) if((funcerr=#x),(err=(x)) < 0) goto error
+ CHECK(snd_pcm_hw_params_any(mPcmHandle, hp));
+ /* set interleaved access */
+ if(!allowmmap || snd_pcm_hw_params_set_access(mPcmHandle, hp, SND_PCM_ACCESS_MMAP_INTERLEAVED) < 0)
+ {
+ /* No mmap */
+ CHECK(snd_pcm_hw_params_set_access(mPcmHandle, hp, SND_PCM_ACCESS_RW_INTERLEAVED));
+ }
+ /* test and set format (implicitly sets sample bits) */
+ if(snd_pcm_hw_params_test_format(mPcmHandle, hp, format) < 0)
+ {
+ static const struct {
+ snd_pcm_format_t format;
+ DevFmtType fmttype;
+ } formatlist[] = {
+ { SND_PCM_FORMAT_FLOAT, DevFmtFloat },
+ { SND_PCM_FORMAT_S32, DevFmtInt },
+ { SND_PCM_FORMAT_U32, DevFmtUInt },
+ { SND_PCM_FORMAT_S16, DevFmtShort },
+ { SND_PCM_FORMAT_U16, DevFmtUShort },
+ { SND_PCM_FORMAT_S8, DevFmtByte },
+ { SND_PCM_FORMAT_U8, DevFmtUByte },
+ };
+
+ for(const auto &fmt : formatlist)
+ {
+ format = fmt.format;
+ if(snd_pcm_hw_params_test_format(mPcmHandle, hp, format) >= 0)
+ {
+ mDevice->FmtType = fmt.fmttype;
+ break;
+ }
+ }
+ }
+ CHECK(snd_pcm_hw_params_set_format(mPcmHandle, hp, format));
+ /* test and set channels (implicitly sets frame bits) */
+ if(snd_pcm_hw_params_test_channels(mPcmHandle, hp, mDevice->channelsFromFmt()) < 0)
+ {
+ static const DevFmtChannels channellist[] = {
+ DevFmtStereo,
+ DevFmtQuad,
+ DevFmtX51,
+ DevFmtX71,
+ DevFmtMono,
+ };
+
+ for(const auto &chan : channellist)
+ {
+ if(snd_pcm_hw_params_test_channels(mPcmHandle, hp, ChannelsFromDevFmt(chan, 0)) >= 0)
+ {
+ mDevice->FmtChans = chan;
+ mDevice->mAmbiOrder = 0;
+ break;
+ }
+ }
+ }
+ CHECK(snd_pcm_hw_params_set_channels(mPcmHandle, hp, mDevice->channelsFromFmt()));
+ /* set rate (implicitly constrains period/buffer parameters) */
+ if(!GetConfigValueBool(mDevice->DeviceName.c_str(), "alsa", "allow-resampler", 0) ||
+ !mDevice->Flags.get<FrequencyRequest>())
+ {
+ if(snd_pcm_hw_params_set_rate_resample(mPcmHandle, hp, 0) < 0)
+ ERR("Failed to disable ALSA resampler\n");
+ }
+ else if(snd_pcm_hw_params_set_rate_resample(mPcmHandle, hp, 1) < 0)
+ ERR("Failed to enable ALSA resampler\n");
+ CHECK(snd_pcm_hw_params_set_rate_near(mPcmHandle, hp, &rate, nullptr));
+ /* set period time (implicitly constrains period/buffer parameters) */
+ if((err=snd_pcm_hw_params_set_period_time_near(mPcmHandle, hp, &periodLen, nullptr)) < 0)
+ ERR("snd_pcm_hw_params_set_period_time_near failed: %s\n", snd_strerror(err));
+ /* set buffer time (implicitly sets buffer size/bytes/time and period size/bytes) */
+ if((err=snd_pcm_hw_params_set_buffer_time_near(mPcmHandle, hp, &bufferLen, nullptr)) < 0)
+ ERR("snd_pcm_hw_params_set_buffer_time_near failed: %s\n", snd_strerror(err));
+ /* install and prepare hardware configuration */
+ CHECK(snd_pcm_hw_params(mPcmHandle, hp));
+
+ /* retrieve configuration info */
+ CHECK(snd_pcm_hw_params_get_access(hp, &access));
+ CHECK(snd_pcm_hw_params_get_period_size(hp, &periodSizeInFrames, nullptr));
+ CHECK(snd_pcm_hw_params_get_buffer_size(hp, &bufferSizeInFrames));
+ snd_pcm_hw_params_free(hp);
+ hp = nullptr;
+
+ snd_pcm_sw_params_malloc(&sp);
+ CHECK(snd_pcm_sw_params_current(mPcmHandle, sp));
+ CHECK(snd_pcm_sw_params_set_avail_min(mPcmHandle, sp, periodSizeInFrames));
+ CHECK(snd_pcm_sw_params_set_stop_threshold(mPcmHandle, sp, bufferSizeInFrames));
+ CHECK(snd_pcm_sw_params(mPcmHandle, sp));
+#undef CHECK
+ snd_pcm_sw_params_free(sp);
+ sp = nullptr;
+
+ mDevice->BufferSize = bufferSizeInFrames;
+ mDevice->UpdateSize = periodSizeInFrames;
+ mDevice->Frequency = rate;
+
+ SetDefaultChannelOrder(mDevice);
+
+ return ALC_TRUE;
+
+error:
+ ERR("%s failed: %s\n", funcerr, snd_strerror(err));
+ if(hp) snd_pcm_hw_params_free(hp);
+ if(sp) snd_pcm_sw_params_free(sp);
+ return ALC_FALSE;
+}
+
+ALCboolean AlsaPlayback::start()
+{
+ snd_pcm_hw_params_t *hp{};
+ snd_pcm_access_t access;
+ const char *funcerr;
+ int err;
+
+ snd_pcm_hw_params_malloc(&hp);
+#define CHECK(x) if((funcerr=#x),(err=(x)) < 0) goto error
+ CHECK(snd_pcm_hw_params_current(mPcmHandle, hp));
+ /* retrieve configuration info */
+ CHECK(snd_pcm_hw_params_get_access(hp, &access));
+#undef CHECK
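+ /* Not reached in normal flow; a failed CHECK() above jumps to the error label inside. */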
+ if(0)
+ {
+ error:
+ ERR("%s failed: %s\n", funcerr, snd_strerror(err));
+ if(hp) snd_pcm_hw_params_free(hp);
+ return ALC_FALSE;
+ }
+ snd_pcm_hw_params_free(hp);
+ hp = nullptr;
+
+ int (AlsaPlayback::*thread_func)(){};
+ if(access == SND_PCM_ACCESS_RW_INTERLEAVED)
+ {
+ mBuffer.resize(snd_pcm_frames_to_bytes(mPcmHandle, mDevice->UpdateSize));
+ thread_func = &AlsaPlayback::mixerNoMMapProc;
+ }
+ else
+ {
+ err = snd_pcm_prepare(mPcmHandle);
+ if(err < 0)
+ {
+ ERR("snd_pcm_prepare(data->mPcmHandle) failed: %s\n", snd_strerror(err));
+ return ALC_FALSE;
+ }
+ thread_func = &AlsaPlayback::mixerProc;
+ }
+
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(thread_func), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Could not create playback thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ mBuffer.clear();
+ return ALC_FALSE;
+}
+
+void AlsaPlayback::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+
+ mBuffer.clear();
+}
+
+ClockLatency AlsaPlayback::getClockLatency()
+{
+ ClockLatency ret;
+
+ lock();
+ ret.ClockTime = GetDeviceClockTime(mDevice);
+ snd_pcm_sframes_t delay{};
+ int err{snd_pcm_delay(mPcmHandle, &delay)};
+ if(err < 0)
+ {
+ ERR("Failed to get pcm delay: %s\n", snd_strerror(err));
+ delay = 0;
+ }
+ ret.Latency = std::chrono::seconds{std::max<snd_pcm_sframes_t>(0, delay)};
+ ret.Latency /= mDevice->Frequency;
+ unlock();
+
+ return ret;
+}
+
+
+struct AlsaCapture final : public BackendBase {
+ AlsaCapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~AlsaCapture() override;
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(ALCvoid *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+ ClockLatency getClockLatency() override;
+
+ snd_pcm_t *mPcmHandle{nullptr};
+
+ al::vector<char> mBuffer;
+
+ bool mDoCapture{false};
+ RingBufferPtr mRing{nullptr};
+
+ snd_pcm_sframes_t mLastAvail{0};
+
+ DEF_NEWDEL(AlsaCapture)
+};
+
+AlsaCapture::~AlsaCapture()
+{
+ if(mPcmHandle)
+ snd_pcm_close(mPcmHandle);
+ mPcmHandle = nullptr;
+}
+
+
+ALCenum AlsaCapture::open(const ALCchar *name)
+{
+ const char *driver{};
+ if(name)
+ {
+ if(CaptureDevices.empty())
+ CaptureDevices = probe_devices(SND_PCM_STREAM_CAPTURE);
+
+ auto iter = std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
+ [name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ );
+ if(iter == CaptureDevices.cend())
+ return ALC_INVALID_VALUE;
+ driver = iter->device_name.c_str();
+ }
+ else
+ {
+ name = alsaDevice;
+ driver = GetConfigValue(nullptr, "alsa", "capture", "default");
+ }
+
+ TRACE("Opening device \"%s\"\n", driver);
+ int err{snd_pcm_open(&mPcmHandle, driver, SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK)};
+ if(err < 0)
+ {
+ ERR("Could not open capture device '%s': %s\n", driver, snd_strerror(err));
+ return ALC_INVALID_VALUE;
+ }
+
+ /* Free alsa's global config tree. Otherwise valgrind reports a ton of leaks. */
+ snd_config_update_free_global();
+
+ snd_pcm_format_t format{SND_PCM_FORMAT_UNKNOWN};
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ format = SND_PCM_FORMAT_S8;
+ break;
+ case DevFmtUByte:
+ format = SND_PCM_FORMAT_U8;
+ break;
+ case DevFmtShort:
+ format = SND_PCM_FORMAT_S16;
+ break;
+ case DevFmtUShort:
+ format = SND_PCM_FORMAT_U16;
+ break;
+ case DevFmtInt:
+ format = SND_PCM_FORMAT_S32;
+ break;
+ case DevFmtUInt:
+ format = SND_PCM_FORMAT_U32;
+ break;
+ case DevFmtFloat:
+ format = SND_PCM_FORMAT_FLOAT;
+ break;
+ }
+
+ snd_pcm_uframes_t bufferSizeInFrames{maxu(mDevice->BufferSize, 100*mDevice->Frequency/1000)};
+ snd_pcm_uframes_t periodSizeInFrames{minu(bufferSizeInFrames, 25*mDevice->Frequency/1000)};
+
+ bool needring{false};
+ const char *funcerr{};
+ snd_pcm_hw_params_t *hp{};
+ snd_pcm_hw_params_malloc(&hp);
+#define CHECK(x) if((funcerr=#x),(err=(x)) < 0) goto error
+ CHECK(snd_pcm_hw_params_any(mPcmHandle, hp));
+ /* set interleaved access */
+ CHECK(snd_pcm_hw_params_set_access(mPcmHandle, hp, SND_PCM_ACCESS_RW_INTERLEAVED));
+ /* set format (implicitly sets sample bits) */
+ CHECK(snd_pcm_hw_params_set_format(mPcmHandle, hp, format));
+ /* set channels (implicitly sets frame bits) */
+ CHECK(snd_pcm_hw_params_set_channels(mPcmHandle, hp, mDevice->channelsFromFmt()));
+ /* set rate (implicitly constrains period/buffer parameters) */
+ CHECK(snd_pcm_hw_params_set_rate(mPcmHandle, hp, mDevice->Frequency, 0));
+ /* set buffer size in frame units (implicitly sets period size/bytes/time and buffer time/bytes) */
+ if(snd_pcm_hw_params_set_buffer_size_min(mPcmHandle, hp, &bufferSizeInFrames) < 0)
+ {
+ TRACE("Buffer too large, using intermediate ring buffer\n");
+ needring = true;
+ CHECK(snd_pcm_hw_params_set_buffer_size_near(mPcmHandle, hp, &bufferSizeInFrames));
+ }
+ /* set period size in frame units (implicitly sets period bytes/time) */
+ CHECK(snd_pcm_hw_params_set_period_size_near(mPcmHandle, hp, &periodSizeInFrames, nullptr));
+ /* install and prepare hardware configuration */
+ CHECK(snd_pcm_hw_params(mPcmHandle, hp));
+ /* retrieve configuration info */
+ CHECK(snd_pcm_hw_params_get_period_size(hp, &periodSizeInFrames, nullptr));
+#undef CHECK
+ snd_pcm_hw_params_free(hp);
+ hp = nullptr;
+
+ if(needring)
+ {
+ mRing = CreateRingBuffer(mDevice->BufferSize, mDevice->frameSizeFromFmt(), false);
+ if(!mRing)
+ {
+ ERR("ring buffer create failed\n");
+ goto error2;
+ }
+ }
+
+ mDevice->DeviceName = name;
+
+ return ALC_NO_ERROR;
+
+error:
+ ERR("%s failed: %s\n", funcerr, snd_strerror(err));
+ if(hp) snd_pcm_hw_params_free(hp);
+
+error2:
+ mRing = nullptr;
+ snd_pcm_close(mPcmHandle);
+ mPcmHandle = nullptr;
+
+ return ALC_INVALID_VALUE;
+}
+
+
+ALCboolean AlsaCapture::start()
+{
+ int err{snd_pcm_prepare(mPcmHandle)};
+ if(err < 0)
+ ERR("prepare failed: %s\n", snd_strerror(err));
+ else
+ {
+ err = snd_pcm_start(mPcmHandle);
+ if(err < 0)
+ ERR("start failed: %s\n", snd_strerror(err));
+ }
+ if(err < 0)
+ {
+ aluHandleDisconnect(mDevice, "Capture state failure: %s", snd_strerror(err));
+ return ALC_FALSE;
+ }
+
+ mDoCapture = true;
+ return ALC_TRUE;
+}
+
+void AlsaCapture::stop()
+{
+ /* OpenAL requires access to unread audio after stopping, but ALSA's
+ * snd_pcm_drain is unreliable and snd_pcm_drop drops it. Capture what's
+ * available now so it'll be available later after the drop.
+ */
+ ALCuint avail{availableSamples()};
+ if(!mRing && avail > 0)
+ {
+ /* The ring buffer implicitly captures when checking availability.
+ * Direct access needs to explicitly capture it into temp storage. */
+ al::vector<char> temp(snd_pcm_frames_to_bytes(mPcmHandle, avail));
+ captureSamples(temp.data(), avail);
+ mBuffer = std::move(temp);
+ }
+ int err{snd_pcm_drop(mPcmHandle)};
+ if(err < 0)
+ ERR("drop failed: %s\n", snd_strerror(err));
+ mDoCapture = false;
+}
+
+ALCenum AlsaCapture::captureSamples(ALCvoid *buffer, ALCuint samples)
+{
+ if(mRing)
+ {
+ mRing->read(buffer, samples);
+ return ALC_NO_ERROR;
+ }
+
+ mLastAvail -= samples;
+ while(mDevice->Connected.load(std::memory_order_acquire) && samples > 0)
+ {
+ snd_pcm_sframes_t amt{0};
+
+ if(!mBuffer.empty())
+ {
+ /* First get any data stored from the last stop */
+ amt = snd_pcm_bytes_to_frames(mPcmHandle, mBuffer.size());
+ if(static_cast<snd_pcm_uframes_t>(amt) > samples) amt = samples;
+
+ amt = snd_pcm_frames_to_bytes(mPcmHandle, amt);
+ memcpy(buffer, mBuffer.data(), amt);
+
+ mBuffer.erase(mBuffer.begin(), mBuffer.begin()+amt);
+ amt = snd_pcm_bytes_to_frames(mPcmHandle, amt);
+ }
+ else if(mDoCapture)
+ amt = snd_pcm_readi(mPcmHandle, buffer, samples);
+ if(amt < 0)
+ {
+ ERR("read error: %s\n", snd_strerror(amt));
+
+ if(amt == -EAGAIN)
+ continue;
+ if((amt=snd_pcm_recover(mPcmHandle, amt, 1)) >= 0)
+ {
+ amt = snd_pcm_start(mPcmHandle);
+ if(amt >= 0)
+ amt = snd_pcm_avail_update(mPcmHandle);
+ }
+ if(amt < 0)
+ {
+ ERR("restore error: %s\n", snd_strerror(amt));
+ aluHandleDisconnect(mDevice, "Capture recovery failure: %s", snd_strerror(amt));
+ break;
+ }
+ /* If the amount available is less than what's asked, we lost it
+ * during recovery. So just give silence instead. */
+ if(static_cast<snd_pcm_uframes_t>(amt) < samples)
+ break;
+ continue;
+ }
+
+ buffer = static_cast<ALbyte*>(buffer) + amt;
+ samples -= amt;
+ }
+ if(samples > 0)
+ memset(buffer, ((mDevice->FmtType == DevFmtUByte) ? 0x80 : 0),
+ snd_pcm_frames_to_bytes(mPcmHandle, samples));
+
+ return ALC_NO_ERROR;
+}
+
+ALCuint AlsaCapture::availableSamples()
+{
+ snd_pcm_sframes_t avail{0};
+ if(mDevice->Connected.load(std::memory_order_acquire) && mDoCapture)
+ avail = snd_pcm_avail_update(mPcmHandle);
+ if(avail < 0)
+ {
+ ERR("avail update failed: %s\n", snd_strerror(avail));
+
+ if((avail=snd_pcm_recover(mPcmHandle, avail, 1)) >= 0)
+ {
+ if(mDoCapture)
+ avail = snd_pcm_start(mPcmHandle);
+ if(avail >= 0)
+ avail = snd_pcm_avail_update(mPcmHandle);
+ }
+ if(avail < 0)
+ {
+ ERR("restore error: %s\n", snd_strerror(avail));
+ aluHandleDisconnect(mDevice, "Capture recovery failure: %s", snd_strerror(avail));
+ }
+ }
+
+ if(!mRing)
+ {
+ if(avail < 0) avail = 0;
+ avail += snd_pcm_bytes_to_frames(mPcmHandle, mBuffer.size());
+ if(avail > mLastAvail) mLastAvail = avail;
+ return mLastAvail;
+ }
+
+ while(avail > 0)
+ {
+ auto vec = mRing->getWriteVector();
+ if(vec.first.len == 0) break;
+
+ snd_pcm_sframes_t amt{std::min<snd_pcm_sframes_t>(vec.first.len, avail)};
+ amt = snd_pcm_readi(mPcmHandle, vec.first.buf, amt);
+ if(amt < 0)
+ {
+ ERR("read error: %s\n", snd_strerror(amt));
+
+ if(amt == -EAGAIN)
+ continue;
+ if((amt=snd_pcm_recover(mPcmHandle, amt, 1)) >= 0)
+ {
+ if(mDoCapture)
+ amt = snd_pcm_start(mPcmHandle);
+ if(amt >= 0)
+ amt = snd_pcm_avail_update(mPcmHandle);
+ }
+ if(amt < 0)
+ {
+ ERR("restore error: %s\n", snd_strerror(amt));
+ aluHandleDisconnect(mDevice, "Capture recovery failure: %s", snd_strerror(amt));
+ break;
+ }
+ avail = amt;
+ continue;
+ }
+
+ mRing->writeAdvance(amt);
+ avail -= amt;
+ }
+
+ return mRing->readSpace();
+}
+
+ClockLatency AlsaCapture::getClockLatency()
+{
+ ClockLatency ret;
+
+ lock();
+ ret.ClockTime = GetDeviceClockTime(mDevice);
+ snd_pcm_sframes_t delay{};
+ int err{snd_pcm_delay(mPcmHandle, &delay)};
+ if(err < 0)
+ {
+ ERR("Failed to get pcm delay: %s\n", snd_strerror(err));
+ delay = 0;
+ }
+ ret.Latency = std::chrono::seconds{std::max<snd_pcm_sframes_t>(0, delay)};
+ ret.Latency /= mDevice->Frequency;
+ unlock();
+
+ return ret;
+}
+
+} // namespace
+
+
+bool AlsaBackendFactory::init()
+{
+ bool error{false};
+
+#ifdef HAVE_DYNLOAD
+ if(!alsa_handle)
+ {
+ std::string missing_funcs;
+
+ alsa_handle = LoadLib("libasound.so.2");
+ if(!alsa_handle)
+ {
+ WARN("Failed to load %s\n", "libasound.so.2");
+ return false;
+ }
+
+ error = false;
+#define LOAD_FUNC(f) do { \
+ p##f = reinterpret_cast<decltype(p##f)>(GetSymbol(alsa_handle, #f)); \
+ if(p##f == nullptr) { \
+ error = true; \
+ missing_funcs += "\n" #f; \
+ } \
+} while(0)
+ ALSA_FUNCS(LOAD_FUNC);
+#undef LOAD_FUNC
+
+ if(error)
+ {
+ WARN("Missing expected functions:%s\n", missing_funcs.c_str());
+ CloseLib(alsa_handle);
+ alsa_handle = nullptr;
+ }
+ }
+#endif
+
+ return !error;
+}
+
+bool AlsaBackendFactory::querySupport(BackendType type)
+{ return (type == BackendType::Playback || type == BackendType::Capture); }
+
+void AlsaBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ auto add_device = [outnames](const DevMap &entry) -> void
+ {
+ /* +1 to also append the null char, to ensure a null-separated,
+ * double-null terminated list.
+ */
+ outnames->append(entry.name.c_str(), entry.name.length()+1);
+ };
+ switch(type)
+ {
+ case DevProbe::Playback:
+ PlaybackDevices = probe_devices(SND_PCM_STREAM_PLAYBACK);
+ std::for_each(PlaybackDevices.cbegin(), PlaybackDevices.cend(), add_device);
+ break;
+
+ case DevProbe::Capture:
+ CaptureDevices = probe_devices(SND_PCM_STREAM_CAPTURE);
+ std::for_each(CaptureDevices.cbegin(), CaptureDevices.cend(), add_device);
+ break;
+ }
+}
+
+BackendPtr AlsaBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new AlsaPlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new AlsaCapture{device}};
+ return nullptr;
+}
+
+BackendFactory &AlsaBackendFactory::getFactory()
+{
+ static AlsaBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/alsa.h b/alc/backends/alsa.h
new file mode 100644
index 00000000..fb9de006
--- /dev/null
+++ b/alc/backends/alsa.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_ALSA_H
+#define BACKENDS_ALSA_H
+
+#include "backends/base.h"
+
+struct AlsaBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_ALSA_H */
diff --git a/alc/backends/base.cpp b/alc/backends/base.cpp
new file mode 100644
index 00000000..a7d47c6d
--- /dev/null
+++ b/alc/backends/base.cpp
@@ -0,0 +1,58 @@
+
+#include "config.h"
+
+#include <cstdlib>
+
+#include <thread>
+
+#include "alcmain.h"
+#include "alu.h"
+
+#include "backends/base.h"
+
+
+ClockLatency GetClockLatency(ALCdevice *device)
+{
+ BackendBase *backend{device->Backend.get()};
+ ClockLatency ret{backend->getClockLatency()};
+ ret.Latency += device->FixedLatency;
+ return ret;
+}
+
+
+/* BackendBase method implementations. */
+BackendBase::BackendBase(ALCdevice *device) noexcept : mDevice{device}
+{ }
+
+BackendBase::~BackendBase() = default;
+
+ALCboolean BackendBase::reset()
+{ return ALC_FALSE; }
+
+ALCenum BackendBase::captureSamples(void*, ALCuint)
+{ return ALC_INVALID_DEVICE; }
+
+ALCuint BackendBase::availableSamples()
+{ return 0; }
+
+ClockLatency BackendBase::getClockLatency()
+{
+ ClockLatency ret;
+
+ ALuint refcount;
+ do {
+ while(((refcount=mDevice->MixCount.load(std::memory_order_acquire))&1))
+ std::this_thread::yield();
+ ret.ClockTime = GetDeviceClockTime(mDevice);
+ std::atomic_thread_fence(std::memory_order_acquire);
+ } while(refcount != mDevice->MixCount.load(std::memory_order_relaxed));
+
+ /* NOTE: The device will generally have all but one period of its buffer filled at
+ * any given time during playback. Without a more accurate measurement from
+ * the output, this is an okay approximation.
+ */
+ ret.Latency = std::chrono::seconds{maxi(mDevice->BufferSize-mDevice->UpdateSize, 0)};
+ ret.Latency /= mDevice->Frequency;
+
+ return ret;
+}
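+/* As a worked example of the approximation above: with a 2048-frame buffer, a
+ * 512-frame update size and a 48kHz device, the reported latency would be
+ * (2048-512)/48000 seconds, or 32ms (numbers chosen only for illustration).
+ */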
diff --git a/alc/backends/base.h b/alc/backends/base.h
new file mode 100644
index 00000000..437e31d9
--- /dev/null
+++ b/alc/backends/base.h
@@ -0,0 +1,78 @@
+#ifndef ALC_BACKENDS_BASE_H
+#define ALC_BACKENDS_BASE_H
+
+#include <memory>
+#include <chrono>
+#include <string>
+#include <mutex>
+
+#include "alcmain.h"
+
+
+struct ClockLatency {
+ std::chrono::nanoseconds ClockTime;
+ std::chrono::nanoseconds Latency;
+};
+
+/* Helper to get the current clock time from the device's ClockBase, plus
+ * SamplesDone converted to a time offset using the sample rate.
+ */
+inline std::chrono::nanoseconds GetDeviceClockTime(ALCdevice *device)
+{
+ using std::chrono::seconds;
+ using std::chrono::nanoseconds;
+
+ auto ns = nanoseconds{seconds{device->SamplesDone}} / device->Frequency;
+ return device->ClockBase + ns;
+}
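+/* For instance, with ClockBase at zero, 24000 samples done and a 48kHz
+ * device, this reports 500000000ns, i.e. half a second (numbers chosen only
+ * for illustration).
+ */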
+
+ClockLatency GetClockLatency(ALCdevice *device);
+
+struct BackendBase {
+ virtual ALCenum open(const ALCchar *name) = 0;
+
+ virtual ALCboolean reset();
+ virtual ALCboolean start() = 0;
+ virtual void stop() = 0;
+
+ virtual ALCenum captureSamples(void *buffer, ALCuint samples);
+ virtual ALCuint availableSamples();
+
+ virtual ClockLatency getClockLatency();
+
+ virtual void lock() { mMutex.lock(); }
+ virtual void unlock() { mMutex.unlock(); }
+
+ ALCdevice *mDevice;
+
+ std::recursive_mutex mMutex;
+
+ BackendBase(ALCdevice *device) noexcept;
+ virtual ~BackendBase();
+};
+using BackendPtr = std::unique_ptr<BackendBase>;
+using BackendUniqueLock = std::unique_lock<BackendBase>;
+using BackendLockGuard = std::lock_guard<BackendBase>;
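+/* Since BackendBase exposes lock()/unlock(), the helpers above can be used
+ * directly, e.g. (illustrative): BackendLockGuard _{*device->Backend};
+ */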
+
+enum class BackendType {
+ Playback,
+ Capture
+};
+
+enum class DevProbe {
+ Playback,
+ Capture
+};
+
+
+struct BackendFactory {
+ virtual bool init() = 0;
+
+ virtual bool querySupport(BackendType type) = 0;
+
+ virtual void probe(DevProbe type, std::string *outnames) = 0;
+
+ virtual BackendPtr createBackend(ALCdevice *device, BackendType type) = 0;
+};
+
+#endif /* ALC_BACKENDS_BASE_H */
diff --git a/alc/backends/coreaudio.cpp b/alc/backends/coreaudio.cpp
new file mode 100644
index 00000000..b4b46382
--- /dev/null
+++ b/alc/backends/coreaudio.cpp
@@ -0,0 +1,709 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/coreaudio.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "ringbuffer.h"
+#include "converter.h"
+#include "backends/base.h"
+
+#include <unistd.h>
+#include <AudioUnit/AudioUnit.h>
+#include <AudioToolbox/AudioToolbox.h>
+
+
+namespace {
+
+static const ALCchar ca_device[] = "CoreAudio Default";
+
+
+struct CoreAudioPlayback final : public BackendBase {
+ CoreAudioPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~CoreAudioPlayback() override;
+
+ static OSStatus MixerProcC(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
+ AudioBufferList *ioData);
+ OSStatus MixerProc(AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
+ AudioBufferList *ioData);
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ AudioUnit mAudioUnit;
+
+ ALuint mFrameSize{0u};
+ AudioStreamBasicDescription mFormat{}; // This is the OpenAL format as a CoreAudio ASBD
+
+ DEF_NEWDEL(CoreAudioPlayback)
+};
+
+CoreAudioPlayback::~CoreAudioPlayback()
+{
+ AudioUnitUninitialize(mAudioUnit);
+ AudioComponentInstanceDispose(mAudioUnit);
+}
+
+
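+/* Static trampoline: CoreAudio calls this C-style render callback with the
+ * CoreAudioPlayback instance passed through inRefCon, and it simply forwards
+ * to the member MixerProc() defined next.
+ */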
+OSStatus CoreAudioPlayback::MixerProcC(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData)
+{
+ return static_cast<CoreAudioPlayback*>(inRefCon)->MixerProc(ioActionFlags, inTimeStamp,
+ inBusNumber, inNumberFrames, ioData);
+}
+
+OSStatus CoreAudioPlayback::MixerProc(AudioUnitRenderActionFlags*,
+ const AudioTimeStamp*, UInt32, UInt32, AudioBufferList *ioData)
+{
+ lock();
+ aluMixData(mDevice, ioData->mBuffers[0].mData, ioData->mBuffers[0].mDataByteSize/mFrameSize);
+ unlock();
+ return noErr;
+}
+
+
+ALCenum CoreAudioPlayback::open(const ALCchar *name)
+{
+ if(!name)
+ name = ca_device;
+ else if(strcmp(name, ca_device) != 0)
+ return ALC_INVALID_VALUE;
+
+ /* open the default output unit */
+ AudioComponentDescription desc{};
+ desc.componentType = kAudioUnitType_Output;
+#if TARGET_OS_IOS
+ desc.componentSubType = kAudioUnitSubType_RemoteIO;
+#else
+ desc.componentSubType = kAudioUnitSubType_DefaultOutput;
+#endif
+ desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ desc.componentFlags = 0;
+ desc.componentFlagsMask = 0;
+
+ AudioComponent comp{AudioComponentFindNext(NULL, &desc)};
+ if(comp == nullptr)
+ {
+ ERR("AudioComponentFindNext failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ OSStatus err{AudioComponentInstanceNew(comp, &mAudioUnit)};
+ if(err != noErr)
+ {
+ ERR("AudioComponentInstanceNew failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ /* init and start the default audio unit... */
+ err = AudioUnitInitialize(mAudioUnit);
+ if(err != noErr)
+ {
+ ERR("AudioUnitInitialize failed\n");
+ AudioComponentInstanceDispose(mAudioUnit);
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean CoreAudioPlayback::reset()
+{
+ OSStatus err{AudioUnitUninitialize(mAudioUnit)};
+ if(err != noErr)
+ ERR("-- AudioUnitUninitialize failed.\n");
+
+ /* retrieve default output unit's properties (output side) */
+ AudioStreamBasicDescription streamFormat{};
+ auto size = static_cast<UInt32>(sizeof(AudioStreamBasicDescription));
+ err = AudioUnitGetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
+ 0, &streamFormat, &size);
+ if(err != noErr || size != sizeof(AudioStreamBasicDescription))
+ {
+ ERR("AudioUnitGetProperty failed\n");
+ return ALC_FALSE;
+ }
+
+#if 0
+ TRACE("Output streamFormat of default output unit -\n");
+ TRACE(" streamFormat.mFramesPerPacket = %d\n", streamFormat.mFramesPerPacket);
+ TRACE(" streamFormat.mChannelsPerFrame = %d\n", streamFormat.mChannelsPerFrame);
+ TRACE(" streamFormat.mBitsPerChannel = %d\n", streamFormat.mBitsPerChannel);
+ TRACE(" streamFormat.mBytesPerPacket = %d\n", streamFormat.mBytesPerPacket);
+ TRACE(" streamFormat.mBytesPerFrame = %d\n", streamFormat.mBytesPerFrame);
+ TRACE(" streamFormat.mSampleRate = %5.0f\n", streamFormat.mSampleRate);
+#endif
+
+ /* set default output unit's input side to match output side */
+ err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
+ 0, &streamFormat, size);
+ if(err != noErr)
+ {
+ ERR("AudioUnitSetProperty failed\n");
+ return ALC_FALSE;
+ }
+
+ if(mDevice->Frequency != streamFormat.mSampleRate)
+ {
+ mDevice->BufferSize = static_cast<ALuint>(uint64_t{mDevice->BufferSize} *
+ streamFormat.mSampleRate / mDevice->Frequency);
+ mDevice->Frequency = streamFormat.mSampleRate;
+ }
+
+ /* FIXME: How to tell which channels are which in the output device, and how
+ * to specify what we're giving? e.g. 6.0 vs 5.1 */
+ switch(streamFormat.mChannelsPerFrame)
+ {
+ case 1:
+ mDevice->FmtChans = DevFmtMono;
+ break;
+ case 2:
+ mDevice->FmtChans = DevFmtStereo;
+ break;
+ case 4:
+ mDevice->FmtChans = DevFmtQuad;
+ break;
+ case 6:
+ mDevice->FmtChans = DevFmtX51;
+ break;
+ case 7:
+ mDevice->FmtChans = DevFmtX61;
+ break;
+ case 8:
+ mDevice->FmtChans = DevFmtX71;
+ break;
+ default:
+ ERR("Unhandled channel count (%u), using Stereo\n", streamFormat.mChannelsPerFrame);
+ mDevice->FmtChans = DevFmtStereo;
+ streamFormat.mChannelsPerFrame = 2;
+ break;
+ }
+ SetDefaultWFXChannelOrder(mDevice);
+
+ /* use channel count and sample rate from the default output unit's current
+ * parameters, but reset everything else */
+ streamFormat.mFramesPerPacket = 1;
+ streamFormat.mFormatFlags = 0;
+ switch(mDevice->FmtType)
+ {
+ case DevFmtUByte:
+ mDevice->FmtType = DevFmtByte;
+ /* fall-through */
+ case DevFmtByte:
+ streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
+ streamFormat.mBitsPerChannel = 8;
+ break;
+ case DevFmtUShort:
+ mDevice->FmtType = DevFmtShort;
+ /* fall-through */
+ case DevFmtShort:
+ streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
+ streamFormat.mBitsPerChannel = 16;
+ break;
+ case DevFmtUInt:
+ mDevice->FmtType = DevFmtInt;
+ /* fall-through */
+ case DevFmtInt:
+ streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
+ streamFormat.mBitsPerChannel = 32;
+ break;
+ case DevFmtFloat:
+ streamFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
+ streamFormat.mBitsPerChannel = 32;
+ break;
+ }
+ streamFormat.mBytesPerFrame = streamFormat.mChannelsPerFrame *
+ streamFormat.mBitsPerChannel / 8;
+ streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame;
+ streamFormat.mFormatID = kAudioFormatLinearPCM;
+ streamFormat.mFormatFlags |= kAudioFormatFlagsNativeEndian |
+ kLinearPCMFormatFlagIsPacked;
+
+ err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
+ 0, &streamFormat, sizeof(AudioStreamBasicDescription));
+ if(err != noErr)
+ {
+ ERR("AudioUnitSetProperty failed\n");
+ return ALC_FALSE;
+ }
+
+ /* setup callback */
+ mFrameSize = mDevice->frameSizeFromFmt();
+ AURenderCallbackStruct input{};
+ input.inputProc = CoreAudioPlayback::MixerProcC;
+ input.inputProcRefCon = this;
+
+ err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Input, 0, &input, sizeof(AURenderCallbackStruct));
+ if(err != noErr)
+ {
+ ERR("AudioUnitSetProperty failed\n");
+ return ALC_FALSE;
+ }
+
+ /* init the default audio unit... */
+ err = AudioUnitInitialize(mAudioUnit);
+ if(err != noErr)
+ {
+ ERR("AudioUnitInitialize failed\n");
+ return ALC_FALSE;
+ }
+
+ return ALC_TRUE;
+}
+
+ALCboolean CoreAudioPlayback::start()
+{
+ OSStatus err{AudioOutputUnitStart(mAudioUnit)};
+ if(err != noErr)
+ {
+ ERR("AudioOutputUnitStart failed\n");
+ return ALC_FALSE;
+ }
+ return ALC_TRUE;
+}
+
+void CoreAudioPlayback::stop()
+{
+ OSStatus err{AudioOutputUnitStop(mAudioUnit)};
+ if(err != noErr)
+ ERR("AudioOutputUnitStop failed\n");
+}
+
+
+struct CoreAudioCapture final : public BackendBase {
+ CoreAudioCapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~CoreAudioCapture() override;
+
+ static OSStatus RecordProcC(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
+ AudioBufferList *ioData);
+ OSStatus RecordProc(AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
+ UInt32 inNumberFrames, AudioBufferList *ioData);
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(void *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+
+ AudioUnit mAudioUnit{0};
+
+ ALuint mFrameSize{0u};
+ AudioStreamBasicDescription mFormat{}; // This is the OpenAL format as a CoreAudio ASBD
+
+ SampleConverterPtr mConverter;
+
+ RingBufferPtr mRing{nullptr};
+
+ DEF_NEWDEL(CoreAudioCapture)
+};
+
+CoreAudioCapture::~CoreAudioCapture()
+{
+ if(mAudioUnit)
+ AudioComponentInstanceDispose(mAudioUnit);
+ mAudioUnit = 0;
+}
+
+
+OSStatus CoreAudioCapture::RecordProcC(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData)
+{
+ return static_cast<CoreAudioCapture*>(inRefCon)->RecordProc(ioActionFlags, inTimeStamp,
+ inBusNumber, inNumberFrames, ioData);
+}
+
+OSStatus CoreAudioCapture::RecordProc(AudioUnitRenderActionFlags*,
+ const AudioTimeStamp *inTimeStamp, UInt32, UInt32 inNumberFrames,
+ AudioBufferList*)
+{
+ AudioUnitRenderActionFlags flags = 0;
+ union {
+ ALbyte _[sizeof(AudioBufferList) + sizeof(AudioBuffer)*2];
+ AudioBufferList list;
+ } audiobuf = { { 0 } };
+
+ auto rec_vec = mRing->getWriteVector();
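+ // Only render as many frames as the ring buffer currently has room for.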
+ inNumberFrames = minz(inNumberFrames, rec_vec.first.len+rec_vec.second.len);
+
+ // Fill the ringbuffer's two segments with data from the input device
+ if(rec_vec.first.len >= inNumberFrames)
+ {
+ audiobuf.list.mNumberBuffers = 1;
+ audiobuf.list.mBuffers[0].mNumberChannels = mFormat.mChannelsPerFrame;
+ audiobuf.list.mBuffers[0].mData = rec_vec.first.buf;
+ audiobuf.list.mBuffers[0].mDataByteSize = inNumberFrames * mFormat.mBytesPerFrame;
+ }
+ else
+ {
+ const size_t remaining{inNumberFrames-rec_vec.first.len};
+ audiobuf.list.mNumberBuffers = 2;
+ audiobuf.list.mBuffers[0].mNumberChannels = mFormat.mChannelsPerFrame;
+ audiobuf.list.mBuffers[0].mData = rec_vec.first.buf;
+ audiobuf.list.mBuffers[0].mDataByteSize = rec_vec.first.len * mFormat.mBytesPerFrame;
+ audiobuf.list.mBuffers[1].mNumberChannels = mFormat.mChannelsPerFrame;
+ audiobuf.list.mBuffers[1].mData = rec_vec.second.buf;
+ audiobuf.list.mBuffers[1].mDataByteSize = remaining * mFormat.mBytesPerFrame;
+ }
+ OSStatus err{AudioUnitRender(mAudioUnit, &flags, inTimeStamp, audiobuf.list.mNumberBuffers,
+ inNumberFrames, &audiobuf.list)};
+ if(err != noErr)
+ {
+ ERR("AudioUnitRender error: %d\n", err);
+ return err;
+ }
+
+ mRing->writeAdvance(inNumberFrames);
+ return noErr;
+}
+
+
+ALCenum CoreAudioCapture::open(const ALCchar *name)
+{
+ AudioStreamBasicDescription requestedFormat; // The application requested format
+ AudioStreamBasicDescription hardwareFormat; // The hardware format
+ AudioStreamBasicDescription outputFormat; // The AudioUnit output format
+ AURenderCallbackStruct input;
+ AudioComponentDescription desc;
+ UInt32 outputFrameCount;
+ UInt32 propertySize;
+ AudioObjectPropertyAddress propertyAddress;
+ UInt32 enableIO;
+ AudioComponent comp;
+ OSStatus err;
+
+ if(!name)
+ name = ca_device;
+ else if(strcmp(name, ca_device) != 0)
+ return ALC_INVALID_VALUE;
+
+ desc.componentType = kAudioUnitType_Output;
+#if TARGET_OS_IOS
+ desc.componentSubType = kAudioUnitSubType_RemoteIO;
+#else
+ desc.componentSubType = kAudioUnitSubType_HALOutput;
+#endif
+ desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ desc.componentFlags = 0;
+ desc.componentFlagsMask = 0;
+
+ // Search for component with given description
+ comp = AudioComponentFindNext(NULL, &desc);
+ if(comp == NULL)
+ {
+ ERR("AudioComponentFindNext failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ // Open the component
+ err = AudioComponentInstanceNew(comp, &mAudioUnit);
+ if(err != noErr)
+ {
+ ERR("AudioComponentInstanceNew failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ // Turn off AudioUnit output
+ enableIO = 0;
+ err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint));
+ if(err != noErr)
+ {
+ ERR("AudioUnitSetProperty failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ // Turn on AudioUnit input
+ enableIO = 1;
+ err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint));
+ if(err != noErr)
+ {
+ ERR("AudioUnitSetProperty failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+#if !TARGET_OS_IOS
+ {
+ // Get the default input device
+ AudioDeviceID inputDevice = kAudioDeviceUnknown;
+
+ propertySize = sizeof(AudioDeviceID);
+ propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ propertyAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propertyAddress, 0, NULL, &propertySize, &inputDevice);
+ if(err != noErr)
+ {
+ ERR("AudioObjectGetPropertyData failed\n");
+ return ALC_INVALID_VALUE;
+ }
+ if(inputDevice == kAudioDeviceUnknown)
+ {
+ ERR("No input device found\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ // Track the input device
+ err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_CurrentDevice,
+ kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID));
+ if(err != noErr)
+ {
+ ERR("AudioUnitSetProperty failed\n");
+ return ALC_INVALID_VALUE;
+ }
+ }
+#endif
+
+ // set capture callback
+ input.inputProc = CoreAudioCapture::RecordProcC;
+ input.inputProcRefCon = this;
+
+ err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct));
+ if(err != noErr)
+ {
+ ERR("AudioUnitSetProperty failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ // Initialize the device
+ err = AudioUnitInitialize(mAudioUnit);
+ if(err != noErr)
+ {
+ ERR("AudioUnitInitialize failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ // Get the hardware format
+ propertySize = sizeof(AudioStreamBasicDescription);
+ err = AudioUnitGetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
+ 1, &hardwareFormat, &propertySize);
+ if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription))
+ {
+ ERR("AudioUnitGetProperty failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ // Set up the requested format description
+ switch(mDevice->FmtType)
+ {
+ case DevFmtUByte:
+ requestedFormat.mBitsPerChannel = 8;
+ requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
+ break;
+ case DevFmtShort:
+ requestedFormat.mBitsPerChannel = 16;
+ requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
+ break;
+ case DevFmtInt:
+ requestedFormat.mBitsPerChannel = 32;
+ requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
+ break;
+ case DevFmtFloat:
+ requestedFormat.mBitsPerChannel = 32;
+ requestedFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
+ break;
+ case DevFmtByte:
+ case DevFmtUShort:
+ case DevFmtUInt:
+ ERR("%s samples not supported\n", DevFmtTypeString(mDevice->FmtType));
+ return ALC_INVALID_VALUE;
+ }
+
+ switch(mDevice->FmtChans)
+ {
+ case DevFmtMono:
+ requestedFormat.mChannelsPerFrame = 1;
+ break;
+ case DevFmtStereo:
+ requestedFormat.mChannelsPerFrame = 2;
+ break;
+
+ case DevFmtQuad:
+ case DevFmtX51:
+ case DevFmtX51Rear:
+ case DevFmtX61:
+ case DevFmtX71:
+ case DevFmtAmbi3D:
+ ERR("%s not supported\n", DevFmtChannelsString(mDevice->FmtChans));
+ return ALC_INVALID_VALUE;
+ }
+
+ requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
+ requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
+ requestedFormat.mSampleRate = mDevice->Frequency;
+ requestedFormat.mFormatID = kAudioFormatLinearPCM;
+ requestedFormat.mReserved = 0;
+ requestedFormat.mFramesPerPacket = 1;
+
+ // save requested format description for later use
+ mFormat = requestedFormat;
+ mFrameSize = mDevice->frameSizeFromFmt();
+
+ // Use intermediate format for sample rate conversion (outputFormat)
+ // Set sample rate to the same as hardware for resampling later
+ outputFormat = requestedFormat;
+ outputFormat.mSampleRate = hardwareFormat.mSampleRate;
+
+ // The AudioUnit output format should be the requested format, but at the
+ // hardware sample rate: the AudioUnit converts the sample type and channel
+ // count itself, but does not do sample rate conversion.
+ err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
+ 1, &outputFormat, sizeof(outputFormat));
+ if(err != noErr)
+ {
+ ERR("AudioUnitSetProperty failed\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ // Set the AudioUnit output format frame count
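+ // (The update size is scaled to the hardware rate, rounding up, plus room
+ // for resampler padding on both ends.)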
+ uint64_t FrameCount64{mDevice->UpdateSize};
+ FrameCount64 = (FrameCount64*outputFormat.mSampleRate + mDevice->Frequency-1) /
+ mDevice->Frequency;
+ FrameCount64 += MAX_RESAMPLE_PADDING*2;
+ if(FrameCount64 > std::numeric_limits<uint32_t>::max()/2)
+ {
+ ERR("FrameCount too large\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ outputFrameCount = static_cast<uint32_t>(FrameCount64);
+ err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_MaximumFramesPerSlice,
+ kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount));
+ if(err != noErr)
+ {
+ ERR("AudioUnitSetProperty failed: %d\n", err);
+ return ALC_INVALID_VALUE;
+ }
+
+ // Set up sample converter if needed
+ if(outputFormat.mSampleRate != mDevice->Frequency)
+ mConverter = CreateSampleConverter(mDevice->FmtType, mDevice->FmtType,
+ mFormat.mChannelsPerFrame, hardwareFormat.mSampleRate, mDevice->Frequency,
+ BSinc24Resampler);
+
+ mRing = CreateRingBuffer(outputFrameCount, mFrameSize, false);
+ if(!mRing) return ALC_INVALID_VALUE;
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+
+ALCboolean CoreAudioCapture::start()
+{
+ OSStatus err{AudioOutputUnitStart(mAudioUnit)};
+ if(err != noErr)
+ {
+ ERR("AudioOutputUnitStart failed\n");
+ return ALC_FALSE;
+ }
+ return ALC_TRUE;
+}
+
+void CoreAudioCapture::stop()
+{
+ OSStatus err{AudioOutputUnitStop(mAudioUnit)};
+ if(err != noErr)
+ ERR("AudioOutputUnitStop failed\n");
+}
+
+ALCenum CoreAudioCapture::captureSamples(void *buffer, ALCuint samples)
+{
+ if(!mConverter)
+ {
+ mRing->read(buffer, samples);
+ return ALC_NO_ERROR;
+ }
+
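+ /* Read from the ring buffer in up to two segments, converting from the
+ * hardware sample rate to the requested device rate as we go. */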
+ auto rec_vec = mRing->getReadVector();
+ const void *src0{rec_vec.first.buf};
+ auto src0len = static_cast<ALsizei>(rec_vec.first.len);
+ auto got = static_cast<ALuint>(mConverter->convert(&src0, &src0len, buffer, samples));
+ size_t total_read{rec_vec.first.len - src0len};
+ if(got < samples && !src0len && rec_vec.second.len > 0)
+ {
+ const void *src1{rec_vec.second.buf};
+ auto src1len = static_cast<ALsizei>(rec_vec.second.len);
+ got += static_cast<ALuint>(mConverter->convert(&src1, &src1len,
+ static_cast<char*>(buffer)+got, samples-got));
+ total_read += rec_vec.second.len - src1len;
+ }
+
+ mRing->readAdvance(total_read);
+ return ALC_NO_ERROR;
+}
+
+ALCuint CoreAudioCapture::availableSamples()
+{
+ if(!mConverter) return mRing->readSpace();
+ return mConverter->availableOut(mRing->readSpace());
+}
+
+} // namespace
+
+BackendFactory &CoreAudioBackendFactory::getFactory()
+{
+ static CoreAudioBackendFactory factory{};
+ return factory;
+}
+
+bool CoreAudioBackendFactory::init() { return true; }
+
+bool CoreAudioBackendFactory::querySupport(BackendType type)
+{ return type == BackendType::Playback || type == BackendType::Capture; }
+
+void CoreAudioBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ switch(type)
+ {
+ case DevProbe::Playback:
+ case DevProbe::Capture:
+ /* Includes null char. */
+ outnames->append(ca_device, sizeof(ca_device));
+ break;
+ }
+}
+
+BackendPtr CoreAudioBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new CoreAudioPlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new CoreAudioCapture{device}};
+ return nullptr;
+}
diff --git a/alc/backends/coreaudio.h b/alc/backends/coreaudio.h
new file mode 100644
index 00000000..37b9ebe5
--- /dev/null
+++ b/alc/backends/coreaudio.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_COREAUDIO_H
+#define BACKENDS_COREAUDIO_H
+
+#include "backends/base.h"
+
+struct CoreAudioBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_COREAUDIO_H */
diff --git a/alc/backends/dsound.cpp b/alc/backends/dsound.cpp
new file mode 100644
index 00000000..5a156d54
--- /dev/null
+++ b/alc/backends/dsound.cpp
@@ -0,0 +1,938 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/dsound.h"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <memory.h>
+
+#include <cguid.h>
+#include <mmreg.h>
+#ifndef _WAVEFORMATEXTENSIBLE_
+#include <ks.h>
+#include <ksmedia.h>
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <thread>
+#include <string>
+#include <vector>
+#include <algorithm>
+#include <functional>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "ringbuffer.h"
+#include "compat.h"
+#include "threads.h"
+
+/* MinGW-w64 needs this for some unknown reason now. */
+using LPCWAVEFORMATEX = const WAVEFORMATEX*;
+#include <dsound.h>
+
+
+#ifndef DSSPEAKER_5POINT1
+# define DSSPEAKER_5POINT1 0x00000006
+#endif
+#ifndef DSSPEAKER_5POINT1_BACK
+# define DSSPEAKER_5POINT1_BACK 0x00000006
+#endif
+#ifndef DSSPEAKER_7POINT1
+# define DSSPEAKER_7POINT1 0x00000007
+#endif
+#ifndef DSSPEAKER_7POINT1_SURROUND
+# define DSSPEAKER_7POINT1_SURROUND 0x00000008
+#endif
+#ifndef DSSPEAKER_5POINT1_SURROUND
+# define DSSPEAKER_5POINT1_SURROUND 0x00000009
+#endif
+
+
+/* Some headers seem to define these as macros for __uuidof, which is annoying
+ * since some headers don't declare them at all. Hopefully the ifdef is enough
+ * to tell if they need to be declared.
+ */
+#ifndef KSDATAFORMAT_SUBTYPE_PCM
+DEFINE_GUID(KSDATAFORMAT_SUBTYPE_PCM, 0x00000001, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71);
+#endif
+#ifndef KSDATAFORMAT_SUBTYPE_IEEE_FLOAT
+DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, 0x00000003, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71);
+#endif
+
+namespace {
+
+#define DEVNAME_HEAD "OpenAL Soft on "
+
+
+#ifdef HAVE_DYNLOAD
+void *ds_handle;
+HRESULT (WINAPI *pDirectSoundCreate)(const GUID *pcGuidDevice, IDirectSound **ppDS, IUnknown *pUnkOuter);
+HRESULT (WINAPI *pDirectSoundEnumerateW)(LPDSENUMCALLBACKW pDSEnumCallback, void *pContext);
+HRESULT (WINAPI *pDirectSoundCaptureCreate)(const GUID *pcGuidDevice, IDirectSoundCapture **ppDSC, IUnknown *pUnkOuter);
+HRESULT (WINAPI *pDirectSoundCaptureEnumerateW)(LPDSENUMCALLBACKW pDSEnumCallback, void *pContext);
+
+#ifndef IN_IDE_PARSER
+#define DirectSoundCreate pDirectSoundCreate
+#define DirectSoundEnumerateW pDirectSoundEnumerateW
+#define DirectSoundCaptureCreate pDirectSoundCaptureCreate
+#define DirectSoundCaptureEnumerateW pDirectSoundCaptureEnumerateW
+#endif
+#endif
+
+
+#define MAX_UPDATES 128
+
+struct DevMap {
+ std::string name;
+ GUID guid;
+
+ template<typename T0, typename T1>
+ DevMap(T0&& name_, T1&& guid_)
+ : name{std::forward<T0>(name_)}, guid{std::forward<T1>(guid_)}
+ { }
+};
+
+al::vector<DevMap> PlaybackDevices;
+al::vector<DevMap> CaptureDevices;
+
+bool checkName(const al::vector<DevMap> &list, const std::string &name)
+{
+ return std::find_if(list.cbegin(), list.cend(),
+ [&name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ ) != list.cend();
+}
+
+BOOL CALLBACK DSoundEnumDevices(GUID *guid, const WCHAR *desc, const WCHAR*, void *data)
+{
+ if(!guid)
+ return TRUE;
+
+ auto& devices = *static_cast<al::vector<DevMap>*>(data);
+ const std::string basename{DEVNAME_HEAD + wstr_to_utf8(desc)};
+
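+ /* Append " #2", " #3", etc, as needed to make duplicate device names unique. */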
+ int count{1};
+ std::string newname{basename};
+ while(checkName(devices, newname))
+ {
+ newname = basename;
+ newname += " #";
+ newname += std::to_string(++count);
+ }
+ devices.emplace_back(std::move(newname), *guid);
+ const DevMap &newentry = devices.back();
+
+ OLECHAR *guidstr{nullptr};
+ HRESULT hr{StringFromCLSID(*guid, &guidstr)};
+ if(SUCCEEDED(hr))
+ {
+ TRACE("Got device \"%s\", GUID \"%ls\"\n", newentry.name.c_str(), guidstr);
+ CoTaskMemFree(guidstr);
+ }
+
+ return TRUE;
+}
+
+
+struct DSoundPlayback final : public BackendBase {
+ DSoundPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~DSoundPlayback() override;
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ IDirectSound *mDS{nullptr};
+ IDirectSoundBuffer *mPrimaryBuffer{nullptr};
+ IDirectSoundBuffer *mBuffer{nullptr};
+ IDirectSoundNotify *mNotifies{nullptr};
+ HANDLE mNotifyEvent{nullptr};
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(DSoundPlayback)
+};
+
+DSoundPlayback::~DSoundPlayback()
+{
+ if(mNotifies)
+ mNotifies->Release();
+ mNotifies = nullptr;
+ if(mBuffer)
+ mBuffer->Release();
+ mBuffer = nullptr;
+ if(mPrimaryBuffer)
+ mPrimaryBuffer->Release();
+ mPrimaryBuffer = nullptr;
+
+ if(mDS)
+ mDS->Release();
+ mDS = nullptr;
+ if(mNotifyEvent)
+ CloseHandle(mNotifyEvent);
+ mNotifyEvent = nullptr;
+}
+
+
+FORCE_ALIGN int DSoundPlayback::mixerProc()
+{
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ DSBCAPS DSBCaps{};
+ DSBCaps.dwSize = sizeof(DSBCaps);
+ HRESULT err{mBuffer->GetCaps(&DSBCaps)};
+ if(FAILED(err))
+ {
+ ERR("Failed to get buffer caps: 0x%lx\n", err);
+ aluHandleDisconnect(mDevice, "Failure retrieving playback buffer info: 0x%lx", err);
+ return 1;
+ }
+
+ ALsizei FrameSize{mDevice->frameSizeFromFmt()};
+ DWORD FragSize{mDevice->UpdateSize * FrameSize};
+
+ bool Playing{false};
+ DWORD LastCursor{0u};
+ mBuffer->GetCurrentPosition(&LastCursor, nullptr);
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ // Get current play cursor
+ DWORD PlayCursor;
+ mBuffer->GetCurrentPosition(&PlayCursor, nullptr);
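+ // Bytes writable between the last written position and the current play cursor.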
+ DWORD avail = (PlayCursor-LastCursor+DSBCaps.dwBufferBytes) % DSBCaps.dwBufferBytes;
+
+ if(avail < FragSize)
+ {
+ if(!Playing)
+ {
+ err = mBuffer->Play(0, 0, DSBPLAY_LOOPING);
+ if(FAILED(err))
+ {
+ ERR("Failed to play buffer: 0x%lx\n", err);
+ aluHandleDisconnect(mDevice, "Failure starting playback: 0x%lx", err);
+ return 1;
+ }
+ Playing = true;
+ }
+
+ avail = WaitForSingleObjectEx(mNotifyEvent, 2000, FALSE);
+ if(avail != WAIT_OBJECT_0)
+ ERR("WaitForSingleObjectEx error: 0x%lx\n", avail);
+ continue;
+ }
+ avail -= avail%FragSize;
+
+ // Lock output buffer
+ void *WritePtr1, *WritePtr2;
+ DWORD WriteCnt1{0u}, WriteCnt2{0u};
+ err = mBuffer->Lock(LastCursor, avail, &WritePtr1, &WriteCnt1, &WritePtr2, &WriteCnt2, 0);
+
+ // If the buffer is lost, restore it and lock
+ if(err == DSERR_BUFFERLOST)
+ {
+ WARN("Buffer lost, restoring...\n");
+ err = mBuffer->Restore();
+ if(SUCCEEDED(err))
+ {
+ Playing = false;
+ LastCursor = 0;
+ err = mBuffer->Lock(0, DSBCaps.dwBufferBytes, &WritePtr1, &WriteCnt1,
+ &WritePtr2, &WriteCnt2, 0);
+ }
+ }
+
+ if(SUCCEEDED(err))
+ {
+ lock();
+ aluMixData(mDevice, WritePtr1, WriteCnt1/FrameSize);
+ if(WriteCnt2 > 0)
+ aluMixData(mDevice, WritePtr2, WriteCnt2/FrameSize);
+ unlock();
+
+ mBuffer->Unlock(WritePtr1, WriteCnt1, WritePtr2, WriteCnt2);
+ }
+ else
+ {
+ ERR("Buffer lock error: %#lx\n", err);
+ aluHandleDisconnect(mDevice, "Failed to lock output buffer: 0x%lx", err);
+ return 1;
+ }
+
+ // Update old write cursor location
+ LastCursor += WriteCnt1+WriteCnt2;
+ LastCursor %= DSBCaps.dwBufferBytes;
+ }
+
+ return 0;
+}
+
+ALCenum DSoundPlayback::open(const ALCchar *name)
+{
+ HRESULT hr;
+ if(PlaybackDevices.empty())
+ {
+ /* Initialize COM to prevent name truncation */
+ HRESULT hrcom{CoInitialize(nullptr)};
+ hr = DirectSoundEnumerateW(DSoundEnumDevices, &PlaybackDevices);
+ if(FAILED(hr))
+ ERR("Error enumerating DirectSound devices (0x%lx)!\n", hr);
+ if(SUCCEEDED(hrcom))
+ CoUninitialize();
+ }
+
+ const GUID *guid{nullptr};
+ if(!name && !PlaybackDevices.empty())
+ {
+ name = PlaybackDevices[0].name.c_str();
+ guid = &PlaybackDevices[0].guid;
+ }
+ else
+ {
+ auto iter = std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
+ [name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ );
+ if(iter == PlaybackDevices.cend())
+ return ALC_INVALID_VALUE;
+ guid = &iter->guid;
+ }
+
+ hr = DS_OK;
+ mNotifyEvent = CreateEventW(nullptr, FALSE, FALSE, nullptr);
+ if(!mNotifyEvent) hr = E_FAIL;
+
+ //DirectSound Init code
+ if(SUCCEEDED(hr))
+ hr = DirectSoundCreate(guid, &mDS, nullptr);
+ if(SUCCEEDED(hr))
+ hr = mDS->SetCooperativeLevel(GetForegroundWindow(), DSSCL_PRIORITY);
+ if(FAILED(hr))
+ {
+ ERR("Device init failed: 0x%08lx\n", hr);
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean DSoundPlayback::reset()
+{
+ if(mNotifies)
+ mNotifies->Release();
+ mNotifies = nullptr;
+ if(mBuffer)
+ mBuffer->Release();
+ mBuffer = nullptr;
+ if(mPrimaryBuffer)
+ mPrimaryBuffer->Release();
+ mPrimaryBuffer = nullptr;
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ mDevice->FmtType = DevFmtUByte;
+ break;
+ case DevFmtFloat:
+ if(mDevice->Flags.get<SampleTypeRequest>())
+ break;
+ /* fall-through */
+ case DevFmtUShort:
+ mDevice->FmtType = DevFmtShort;
+ break;
+ case DevFmtUInt:
+ mDevice->FmtType = DevFmtInt;
+ break;
+ case DevFmtUByte:
+ case DevFmtShort:
+ case DevFmtInt:
+ break;
+ }
+
+ WAVEFORMATEXTENSIBLE OutputType{};
+ DWORD speakers;
+ HRESULT hr{mDS->GetSpeakerConfig(&speakers)};
+ if(SUCCEEDED(hr))
+ {
+ speakers = DSSPEAKER_CONFIG(speakers);
+ if(!mDevice->Flags.get<ChannelsRequest>())
+ {
+ if(speakers == DSSPEAKER_MONO)
+ mDevice->FmtChans = DevFmtMono;
+ else if(speakers == DSSPEAKER_STEREO || speakers == DSSPEAKER_HEADPHONE)
+ mDevice->FmtChans = DevFmtStereo;
+ else if(speakers == DSSPEAKER_QUAD)
+ mDevice->FmtChans = DevFmtQuad;
+ else if(speakers == DSSPEAKER_5POINT1_SURROUND)
+ mDevice->FmtChans = DevFmtX51;
+ else if(speakers == DSSPEAKER_5POINT1_BACK)
+ mDevice->FmtChans = DevFmtX51Rear;
+ else if(speakers == DSSPEAKER_7POINT1 || speakers == DSSPEAKER_7POINT1_SURROUND)
+ mDevice->FmtChans = DevFmtX71;
+ else
+ ERR("Unknown system speaker config: 0x%lx\n", speakers);
+ }
+ mDevice->IsHeadphones = (mDevice->FmtChans == DevFmtStereo &&
+ speakers == DSSPEAKER_HEADPHONE);
+
+ switch(mDevice->FmtChans)
+ {
+ case DevFmtMono:
+ OutputType.dwChannelMask = SPEAKER_FRONT_CENTER;
+ break;
+ case DevFmtAmbi3D:
+ mDevice->FmtChans = DevFmtStereo;
+ /*fall-through*/
+ case DevFmtStereo:
+ OutputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT;
+ break;
+ case DevFmtQuad:
+ OutputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_BACK_LEFT |
+ SPEAKER_BACK_RIGHT;
+ break;
+ case DevFmtX51:
+ OutputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_FRONT_CENTER |
+ SPEAKER_LOW_FREQUENCY |
+ SPEAKER_SIDE_LEFT |
+ SPEAKER_SIDE_RIGHT;
+ break;
+ case DevFmtX51Rear:
+ OutputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_FRONT_CENTER |
+ SPEAKER_LOW_FREQUENCY |
+ SPEAKER_BACK_LEFT |
+ SPEAKER_BACK_RIGHT;
+ break;
+ case DevFmtX61:
+ OutputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_FRONT_CENTER |
+ SPEAKER_LOW_FREQUENCY |
+ SPEAKER_BACK_CENTER |
+ SPEAKER_SIDE_LEFT |
+ SPEAKER_SIDE_RIGHT;
+ break;
+ case DevFmtX71:
+ OutputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_FRONT_CENTER |
+ SPEAKER_LOW_FREQUENCY |
+ SPEAKER_BACK_LEFT |
+ SPEAKER_BACK_RIGHT |
+ SPEAKER_SIDE_LEFT |
+ SPEAKER_SIDE_RIGHT;
+ break;
+ }
+
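+ /* If creating the output buffer fails with float samples below, the sample
+ * type is dropped to 16-bit and this setup is retried from here. */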
+retry_open:
+ hr = S_OK;
+ OutputType.Format.wFormatTag = WAVE_FORMAT_PCM;
+ OutputType.Format.nChannels = mDevice->channelsFromFmt();
+ OutputType.Format.wBitsPerSample = mDevice->bytesFromFmt() * 8;
+ OutputType.Format.nBlockAlign = OutputType.Format.nChannels*OutputType.Format.wBitsPerSample/8;
+ OutputType.Format.nSamplesPerSec = mDevice->Frequency;
+ OutputType.Format.nAvgBytesPerSec = OutputType.Format.nSamplesPerSec*OutputType.Format.nBlockAlign;
+ OutputType.Format.cbSize = 0;
+ }
+
+ if(OutputType.Format.nChannels > 2 || mDevice->FmtType == DevFmtFloat)
+ {
+ OutputType.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ OutputType.Samples.wValidBitsPerSample = OutputType.Format.wBitsPerSample;
+ OutputType.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+ if(mDevice->FmtType == DevFmtFloat)
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ else
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+
+ if(mPrimaryBuffer)
+ mPrimaryBuffer->Release();
+ mPrimaryBuffer = nullptr;
+ }
+ else
+ {
+ if(SUCCEEDED(hr) && !mPrimaryBuffer)
+ {
+ DSBUFFERDESC DSBDescription{};
+ DSBDescription.dwSize = sizeof(DSBDescription);
+ DSBDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
+ hr = mDS->CreateSoundBuffer(&DSBDescription, &mPrimaryBuffer, nullptr);
+ }
+ if(SUCCEEDED(hr))
+ hr = mPrimaryBuffer->SetFormat(&OutputType.Format);
+ }
+
+ if(SUCCEEDED(hr))
+ {
+ ALuint num_updates{mDevice->BufferSize / mDevice->UpdateSize};
+ if(num_updates > MAX_UPDATES)
+ num_updates = MAX_UPDATES;
+ mDevice->BufferSize = mDevice->UpdateSize * num_updates;
+
+ DSBUFFERDESC DSBDescription{};
+ DSBDescription.dwSize = sizeof(DSBDescription);
+ DSBDescription.dwFlags = DSBCAPS_CTRLPOSITIONNOTIFY | DSBCAPS_GETCURRENTPOSITION2 |
+ DSBCAPS_GLOBALFOCUS;
+ DSBDescription.dwBufferBytes = mDevice->BufferSize * OutputType.Format.nBlockAlign;
+ DSBDescription.lpwfxFormat = &OutputType.Format;
+
+ hr = mDS->CreateSoundBuffer(&DSBDescription, &mBuffer, nullptr);
+ if(FAILED(hr) && mDevice->FmtType == DevFmtFloat)
+ {
+ mDevice->FmtType = DevFmtShort;
+ goto retry_open;
+ }
+ }
+
+ if(SUCCEEDED(hr))
+ {
+ void *ptr;
+ hr = mBuffer->QueryInterface(IID_IDirectSoundNotify, &ptr);
+ if(SUCCEEDED(hr))
+ {
+ auto Notifies = static_cast<IDirectSoundNotify*>(ptr);
+ mNotifies = Notifies;
+
+ ALuint num_updates{mDevice->BufferSize / mDevice->UpdateSize};
+ assert(num_updates <= MAX_UPDATES);
+
+ std::array<DSBPOSITIONNOTIFY,MAX_UPDATES> nots;
+ for(ALuint i{0};i < num_updates;++i)
+ {
+ nots[i].dwOffset = i * mDevice->UpdateSize * OutputType.Format.nBlockAlign;
+ nots[i].hEventNotify = mNotifyEvent;
+ }
+ if(Notifies->SetNotificationPositions(num_updates, nots.data()) != DS_OK)
+ hr = E_FAIL;
+ }
+ }
+
+ if(FAILED(hr))
+ {
+ if(mNotifies)
+ mNotifies->Release();
+ mNotifies = nullptr;
+ if(mBuffer)
+ mBuffer->Release();
+ mBuffer = nullptr;
+ if(mPrimaryBuffer)
+ mPrimaryBuffer->Release();
+ mPrimaryBuffer = nullptr;
+ return ALC_FALSE;
+ }
+
+ ResetEvent(mNotifyEvent);
+ SetDefaultWFXChannelOrder(mDevice);
+
+ return ALC_TRUE;
+}
+
+ALCboolean DSoundPlayback::start()
+{
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&DSoundPlayback::mixerProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Failed to start mixing thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+void DSoundPlayback::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+
+ mBuffer->Stop();
+}
+
+
+struct DSoundCapture final : public BackendBase {
+ DSoundCapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~DSoundCapture() override;
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(void *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+
+ IDirectSoundCapture *mDSC{nullptr};
+ IDirectSoundCaptureBuffer *mDSCbuffer{nullptr};
+ DWORD mBufferBytes{0u};
+ DWORD mCursor{0u};
+
+ RingBufferPtr mRing;
+
+ DEF_NEWDEL(DSoundCapture)
+};
+
+DSoundCapture::~DSoundCapture()
+{
+ if(mDSCbuffer)
+ {
+ mDSCbuffer->Stop();
+ mDSCbuffer->Release();
+ mDSCbuffer = nullptr;
+ }
+
+ if(mDSC)
+ mDSC->Release();
+ mDSC = nullptr;
+}
+
+
+ALCenum DSoundCapture::open(const ALCchar *name)
+{
+ HRESULT hr;
+ if(CaptureDevices.empty())
+ {
+ /* Initialize COM to prevent name truncation */
+ HRESULT hrcom{CoInitialize(nullptr)};
+ hr = DirectSoundCaptureEnumerateW(DSoundEnumDevices, &CaptureDevices);
+ if(FAILED(hr))
+ ERR("Error enumerating DirectSound devices (0x%lx)!\n", hr);
+ if(SUCCEEDED(hrcom))
+ CoUninitialize();
+ }
+
+ const GUID *guid{nullptr};
+ if(!name && !CaptureDevices.empty())
+ {
+ name = CaptureDevices[0].name.c_str();
+ guid = &CaptureDevices[0].guid;
+ }
+ else
+ {
+ auto iter = std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
+ [name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ );
+ if(iter == CaptureDevices.cend())
+ return ALC_INVALID_VALUE;
+ guid = &iter->guid;
+ }
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ case DevFmtUShort:
+ case DevFmtUInt:
+ WARN("%s capture samples not supported\n", DevFmtTypeString(mDevice->FmtType));
+ return ALC_INVALID_ENUM;
+
+ case DevFmtUByte:
+ case DevFmtShort:
+ case DevFmtInt:
+ case DevFmtFloat:
+ break;
+ }
+
+ WAVEFORMATEXTENSIBLE InputType{};
+ switch(mDevice->FmtChans)
+ {
+ case DevFmtMono:
+ InputType.dwChannelMask = SPEAKER_FRONT_CENTER;
+ break;
+ case DevFmtStereo:
+ InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT;
+ break;
+ case DevFmtQuad:
+ InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_BACK_LEFT |
+ SPEAKER_BACK_RIGHT;
+ break;
+ case DevFmtX51:
+ InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_FRONT_CENTER |
+ SPEAKER_LOW_FREQUENCY |
+ SPEAKER_SIDE_LEFT |
+ SPEAKER_SIDE_RIGHT;
+ break;
+ case DevFmtX51Rear:
+ InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_FRONT_CENTER |
+ SPEAKER_LOW_FREQUENCY |
+ SPEAKER_BACK_LEFT |
+ SPEAKER_BACK_RIGHT;
+ break;
+ case DevFmtX61:
+ InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_FRONT_CENTER |
+ SPEAKER_LOW_FREQUENCY |
+ SPEAKER_BACK_CENTER |
+ SPEAKER_SIDE_LEFT |
+ SPEAKER_SIDE_RIGHT;
+ break;
+ case DevFmtX71:
+ InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
+ SPEAKER_FRONT_RIGHT |
+ SPEAKER_FRONT_CENTER |
+ SPEAKER_LOW_FREQUENCY |
+ SPEAKER_BACK_LEFT |
+ SPEAKER_BACK_RIGHT |
+ SPEAKER_SIDE_LEFT |
+ SPEAKER_SIDE_RIGHT;
+ break;
+ case DevFmtAmbi3D:
+ WARN("%s capture not supported\n", DevFmtChannelsString(mDevice->FmtChans));
+ return ALC_INVALID_ENUM;
+ }
+
+ InputType.Format.wFormatTag = WAVE_FORMAT_PCM;
+ InputType.Format.nChannels = mDevice->channelsFromFmt();
+ InputType.Format.wBitsPerSample = mDevice->bytesFromFmt() * 8;
+ InputType.Format.nBlockAlign = InputType.Format.nChannels*InputType.Format.wBitsPerSample/8;
+ InputType.Format.nSamplesPerSec = mDevice->Frequency;
+ InputType.Format.nAvgBytesPerSec = InputType.Format.nSamplesPerSec*InputType.Format.nBlockAlign;
+ InputType.Format.cbSize = 0;
+ InputType.Samples.wValidBitsPerSample = InputType.Format.wBitsPerSample;
+ if(mDevice->FmtType == DevFmtFloat)
+ InputType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ else
+ InputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+
+ if(InputType.Format.nChannels > 2 || mDevice->FmtType == DevFmtFloat)
+ {
+ InputType.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ InputType.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
+ }
+
+ ALuint samples{mDevice->BufferSize};
+ samples = maxu(samples, 100 * mDevice->Frequency / 1000);
+
+ DSCBUFFERDESC DSCBDescription{};
+ DSCBDescription.dwSize = sizeof(DSCBDescription);
+ DSCBDescription.dwFlags = 0;
+ DSCBDescription.dwBufferBytes = samples * InputType.Format.nBlockAlign;
+ DSCBDescription.lpwfxFormat = &InputType.Format;
+
+ //DirectSoundCapture Init code
+ hr = DirectSoundCaptureCreate(guid, &mDSC, nullptr);
+ if(SUCCEEDED(hr))
+ hr = mDSC->CreateCaptureBuffer(&DSCBDescription, &mDSCbuffer, nullptr);
+ if(SUCCEEDED(hr))
+ {
+ mRing = CreateRingBuffer(mDevice->BufferSize, InputType.Format.nBlockAlign, false);
+ if(!mRing) hr = DSERR_OUTOFMEMORY;
+ }
+
+ if(FAILED(hr))
+ {
+ ERR("Device init failed: 0x%08lx\n", hr);
+
+ mRing = nullptr;
+ if(mDSCbuffer)
+ mDSCbuffer->Release();
+ mDSCbuffer = nullptr;
+ if(mDSC)
+ mDSC->Release();
+ mDSC = nullptr;
+
+ return ALC_INVALID_VALUE;
+ }
+
+ mBufferBytes = DSCBDescription.dwBufferBytes;
+ SetDefaultWFXChannelOrder(mDevice);
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean DSoundCapture::start()
+{
+ HRESULT hr{mDSCbuffer->Start(DSCBSTART_LOOPING)};
+ if(FAILED(hr))
+ {
+ ERR("start failed: 0x%08lx\n", hr);
+ aluHandleDisconnect(mDevice, "Failure starting capture: 0x%lx", hr);
+ return ALC_FALSE;
+ }
+ return ALC_TRUE;
+}
+
+void DSoundCapture::stop()
+{
+ HRESULT hr{mDSCbuffer->Stop()};
+ if(FAILED(hr))
+ {
+ ERR("stop failed: 0x%08lx\n", hr);
+ aluHandleDisconnect(mDevice, "Failure stopping capture: 0x%lx", hr);
+ }
+}
+
+ALCenum DSoundCapture::captureSamples(void *buffer, ALCuint samples)
+{
+ mRing->read(buffer, samples);
+ return ALC_NO_ERROR;
+}
+
+ALCuint DSoundCapture::availableSamples()
+{
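+ /* Pull any newly captured data from the DirectSound buffer into the ring
+ * buffer, then report how many sample frames are available to read. */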
+ if(!mDevice->Connected.load(std::memory_order_acquire))
+ return static_cast<ALCuint>(mRing->readSpace());
+
+ ALsizei FrameSize{mDevice->frameSizeFromFmt()};
+ DWORD BufferBytes{mBufferBytes};
+ DWORD LastCursor{mCursor};
+
+ DWORD ReadCursor;
+ void *ReadPtr1, *ReadPtr2;
+ DWORD ReadCnt1, ReadCnt2;
+ HRESULT hr{mDSCbuffer->GetCurrentPosition(nullptr, &ReadCursor)};
+ if(SUCCEEDED(hr))
+ {
+ DWORD NumBytes{(ReadCursor-LastCursor + BufferBytes) % BufferBytes};
+ if(!NumBytes) return static_cast<ALCuint>(mRing->readSpace());
+ hr = mDSCbuffer->Lock(LastCursor, NumBytes, &ReadPtr1, &ReadCnt1, &ReadPtr2, &ReadCnt2, 0);
+ }
+ if(SUCCEEDED(hr))
+ {
+ mRing->write(ReadPtr1, ReadCnt1/FrameSize);
+ if(ReadPtr2 != nullptr && ReadCnt2 > 0)
+ mRing->write(ReadPtr2, ReadCnt2/FrameSize);
+ hr = mDSCbuffer->Unlock(ReadPtr1, ReadCnt1, ReadPtr2, ReadCnt2);
+ mCursor = (LastCursor+ReadCnt1+ReadCnt2) % BufferBytes;
+ }
+
+ if(FAILED(hr))
+ {
+ ERR("update failed: 0x%08lx\n", hr);
+ aluHandleDisconnect(mDevice, "Failure retrieving capture data: 0x%lx", hr);
+ }
+
+ return static_cast<ALCuint>(mRing->readSpace());
+}
+
+} // namespace
+
+
+BackendFactory &DSoundBackendFactory::getFactory()
+{
+ static DSoundBackendFactory factory{};
+ return factory;
+}
+
+bool DSoundBackendFactory::init()
+{
+#ifdef HAVE_DYNLOAD
+ if(!ds_handle)
+ {
+ ds_handle = LoadLib("dsound.dll");
+ if(!ds_handle)
+ {
+ ERR("Failed to load dsound.dll\n");
+ return false;
+ }
+
+#define LOAD_FUNC(f) do { \
+ p##f = reinterpret_cast<decltype(p##f)>(GetSymbol(ds_handle, #f)); \
+ if(!p##f) \
+ { \
+ CloseLib(ds_handle); \
+ ds_handle = nullptr; \
+ return false; \
+ } \
+} while(0)
+ LOAD_FUNC(DirectSoundCreate);
+ LOAD_FUNC(DirectSoundEnumerateW);
+ LOAD_FUNC(DirectSoundCaptureCreate);
+ LOAD_FUNC(DirectSoundCaptureEnumerateW);
+#undef LOAD_FUNC
+ }
+#endif
+ return true;
+}
+
+bool DSoundBackendFactory::querySupport(BackendType type)
+{ return (type == BackendType::Playback || type == BackendType::Capture); }
+
+void DSoundBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ auto add_device = [outnames](const DevMap &entry) -> void
+ {
+ /* +1 to also append the null char (to ensure a null-separated and
+ * double-null terminated list).
+ */
+ outnames->append(entry.name.c_str(), entry.name.length()+1);
+ };
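+ /* e.g. two devices would yield "OpenAL Soft on Speakers\0OpenAL Soft on
+ * Headphones\0" (device names here are illustrative). */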
+
+ /* Initialize COM to prevent name truncation */
+ HRESULT hr;
+ HRESULT hrcom{CoInitialize(nullptr)};
+ switch(type)
+ {
+ case DevProbe::Playback:
+ PlaybackDevices.clear();
+ hr = DirectSoundEnumerateW(DSoundEnumDevices, &PlaybackDevices);
+ if(FAILED(hr))
+ ERR("Error enumerating DirectSound playback devices (0x%lx)!\n", hr);
+ std::for_each(PlaybackDevices.cbegin(), PlaybackDevices.cend(), add_device);
+ break;
+
+ case DevProbe::Capture:
+ CaptureDevices.clear();
+ hr = DirectSoundCaptureEnumerateW(DSoundEnumDevices, &CaptureDevices);
+ if(FAILED(hr))
+ ERR("Error enumerating DirectSound capture devices (0x%lx)!\n", hr);
+ std::for_each(CaptureDevices.cbegin(), CaptureDevices.cend(), add_device);
+ break;
+ }
+ if(SUCCEEDED(hrcom))
+ CoUninitialize();
+}
+
+BackendPtr DSoundBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new DSoundPlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new DSoundCapture{device}};
+ return nullptr;
+}
diff --git a/alc/backends/dsound.h b/alc/backends/dsound.h
new file mode 100644
index 00000000..6bef0bfc
--- /dev/null
+++ b/alc/backends/dsound.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_DSOUND_H
+#define BACKENDS_DSOUND_H
+
+#include "backends/base.h"
+
+struct DSoundBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_DSOUND_H */
diff --git a/alc/backends/jack.cpp b/alc/backends/jack.cpp
new file mode 100644
index 00000000..3f81d08c
--- /dev/null
+++ b/alc/backends/jack.cpp
@@ -0,0 +1,562 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/jack.h"
+
+#include <cstdlib>
+#include <cstdio>
+#include <memory.h>
+
+#include <thread>
+#include <functional>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "alconfig.h"
+#include "ringbuffer.h"
+#include "threads.h"
+#include "compat.h"
+
+#include <jack/jack.h>
+#include <jack/ringbuffer.h>
+
+
+namespace {
+
+constexpr ALCchar jackDevice[] = "JACK Default";
+
+
+#ifdef HAVE_DYNLOAD
+#define JACK_FUNCS(MAGIC) \
+ MAGIC(jack_client_open); \
+ MAGIC(jack_client_close); \
+ MAGIC(jack_client_name_size); \
+ MAGIC(jack_get_client_name); \
+ MAGIC(jack_connect); \
+ MAGIC(jack_activate); \
+ MAGIC(jack_deactivate); \
+ MAGIC(jack_port_register); \
+ MAGIC(jack_port_unregister); \
+ MAGIC(jack_port_get_buffer); \
+ MAGIC(jack_port_name); \
+ MAGIC(jack_get_ports); \
+ MAGIC(jack_free); \
+ MAGIC(jack_get_sample_rate); \
+ MAGIC(jack_set_error_function); \
+ MAGIC(jack_set_process_callback); \
+ MAGIC(jack_set_buffer_size_callback); \
+ MAGIC(jack_set_buffer_size); \
+ MAGIC(jack_get_buffer_size);
+
+void *jack_handle;
+#define MAKE_FUNC(f) decltype(f) * p##f
+JACK_FUNCS(MAKE_FUNC);
+decltype(jack_error_callback) * pjack_error_callback;
+#undef MAKE_FUNC
+
+#ifndef IN_IDE_PARSER
+#define jack_client_open pjack_client_open
+#define jack_client_close pjack_client_close
+#define jack_client_name_size pjack_client_name_size
+#define jack_get_client_name pjack_get_client_name
+#define jack_connect pjack_connect
+#define jack_activate pjack_activate
+#define jack_deactivate pjack_deactivate
+#define jack_port_register pjack_port_register
+#define jack_port_unregister pjack_port_unregister
+#define jack_port_get_buffer pjack_port_get_buffer
+#define jack_port_name pjack_port_name
+#define jack_get_ports pjack_get_ports
+#define jack_free pjack_free
+#define jack_get_sample_rate pjack_get_sample_rate
+#define jack_set_error_function pjack_set_error_function
+#define jack_set_process_callback pjack_set_process_callback
+#define jack_set_buffer_size_callback pjack_set_buffer_size_callback
+#define jack_set_buffer_size pjack_set_buffer_size
+#define jack_get_buffer_size pjack_get_buffer_size
+#define jack_error_callback (*pjack_error_callback)
+#endif
+#endif
+
+
+jack_options_t ClientOptions = JackNullOption;
+
+ALCboolean jack_load()
+{
+ ALCboolean error = ALC_FALSE;
+
+#ifdef HAVE_DYNLOAD
+ if(!jack_handle)
+ {
+ std::string missing_funcs;
+
+#ifdef _WIN32
+#define JACKLIB "libjack.dll"
+#else
+#define JACKLIB "libjack.so.0"
+#endif
+ jack_handle = LoadLib(JACKLIB);
+ if(!jack_handle)
+ {
+ WARN("Failed to load %s\n", JACKLIB);
+ return ALC_FALSE;
+ }
+
+ error = ALC_FALSE;
+#define LOAD_FUNC(f) do { \
+ p##f = reinterpret_cast<decltype(p##f)>(GetSymbol(jack_handle, #f)); \
+ if(p##f == nullptr) { \
+ error = ALC_TRUE; \
+ missing_funcs += "\n" #f; \
+ } \
+} while(0)
+ JACK_FUNCS(LOAD_FUNC);
+#undef LOAD_FUNC
+ /* Optional symbols. These don't exist in all versions of JACK. */
+#define LOAD_SYM(f) p##f = reinterpret_cast<decltype(p##f)>(GetSymbol(jack_handle, #f))
+ LOAD_SYM(jack_error_callback);
+#undef LOAD_SYM
+
+ if(error)
+ {
+ WARN("Missing expected functions:%s\n", missing_funcs.c_str());
+ CloseLib(jack_handle);
+ jack_handle = nullptr;
+ }
+ }
+#endif
+
+ return !error;
+}
+
+
+struct JackPlayback final : public BackendBase {
+ JackPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~JackPlayback() override;
+
+ static int bufferSizeNotifyC(jack_nframes_t numframes, void *arg);
+ int bufferSizeNotify(jack_nframes_t numframes);
+
+ static int processC(jack_nframes_t numframes, void *arg);
+ int process(jack_nframes_t numframes);
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+ ClockLatency getClockLatency() override;
+
+ jack_client_t *mClient{nullptr};
+ jack_port_t *mPort[MAX_OUTPUT_CHANNELS]{};
+
+ RingBufferPtr mRing;
+ al::semaphore mSem;
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(JackPlayback)
+};
+
+JackPlayback::~JackPlayback()
+{
+ if(!mClient)
+ return;
+
+ std::for_each(std::begin(mPort), std::end(mPort),
+ [this](jack_port_t *port) -> void
+ { if(port) jack_port_unregister(mClient, port); }
+ );
+ std::fill(std::begin(mPort), std::end(mPort), nullptr);
+ jack_client_close(mClient);
+ mClient = nullptr;
+}
+
+
+int JackPlayback::bufferSizeNotifyC(jack_nframes_t numframes, void *arg)
+{ return static_cast<JackPlayback*>(arg)->bufferSizeNotify(numframes); }
+
+int JackPlayback::bufferSizeNotify(jack_nframes_t numframes)
+{
+ std::lock_guard<std::mutex> _{mDevice->StateLock};
+ mDevice->UpdateSize = numframes;
+ mDevice->BufferSize = numframes*2;
+
+ const char *devname{mDevice->DeviceName.c_str()};
+ ALuint bufsize{ConfigValueUInt(devname, "jack", "buffer-size").value_or(mDevice->UpdateSize)};
+ bufsize = maxu(NextPowerOf2(bufsize), mDevice->UpdateSize);
+ mDevice->BufferSize = bufsize + mDevice->UpdateSize;
+
+ TRACE("%u / %u buffer\n", mDevice->UpdateSize, mDevice->BufferSize);
+
+ mRing = nullptr;
+ mRing = CreateRingBuffer(bufsize, mDevice->frameSizeFromFmt(), true);
+ if(!mRing)
+ {
+ ERR("Failed to reallocate ringbuffer\n");
+ aluHandleDisconnect(mDevice, "Failed to reallocate %u-sample buffer", bufsize);
+ }
+ return 0;
+}
+
+
+int JackPlayback::processC(jack_nframes_t numframes, void *arg)
+{ return static_cast<JackPlayback*>(arg)->process(numframes); }
+
+int JackPlayback::process(jack_nframes_t numframes)
+{
+ jack_default_audio_sample_t *out[MAX_OUTPUT_CHANNELS];
+ ALsizei numchans{0};
+ for(auto port : mPort)
+ {
+ if(!port) break;
+ out[numchans++] = static_cast<float*>(jack_port_get_buffer(port, numframes));
+ }
+
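+ /* The ring buffer holds interleaved float samples; de-interleave each
+ * channel into its JACK port buffer, in up to two segments. */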
+ auto data = mRing->getReadVector();
+ jack_nframes_t todo{minu(numframes, data.first.len)};
+ std::transform(out, out+numchans, out,
+ [&data,numchans,todo](ALfloat *outbuf) -> ALfloat*
+ {
+ const ALfloat *RESTRICT in = reinterpret_cast<ALfloat*>(data.first.buf);
+ std::generate_n(outbuf, todo,
+ [&in,numchans]() noexcept -> ALfloat
+ {
+ ALfloat ret{*in};
+ in += numchans;
+ return ret;
+ }
+ );
+ data.first.buf += sizeof(ALfloat);
+ return outbuf + todo;
+ }
+ );
+ jack_nframes_t total{todo};
+
+ todo = minu(numframes-total, data.second.len);
+ if(todo > 0)
+ {
+ std::transform(out, out+numchans, out,
+ [&data,numchans,todo](ALfloat *outbuf) -> ALfloat*
+ {
+ const ALfloat *RESTRICT in = reinterpret_cast<ALfloat*>(data.second.buf);
+ std::generate_n(outbuf, todo,
+ [&in,numchans]() noexcept -> ALfloat
+ {
+ ALfloat ret{*in};
+ in += numchans;
+ return ret;
+ }
+ );
+ data.second.buf += sizeof(ALfloat);
+ return outbuf + todo;
+ }
+ );
+ total += todo;
+ }
+
+ mRing->readAdvance(total);
+ mSem.post();
+
+ if(numframes > total)
+ {
+ todo = numframes-total;
+ std::transform(out, out+numchans, out,
+ [todo](ALfloat *outbuf) -> ALfloat*
+ {
+ std::fill_n(outbuf, todo, 0.0f);
+ return outbuf + todo;
+ }
+ );
+ }
+
+ return 0;
+}
+
+int JackPlayback::mixerProc()
+{
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ lock();
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ if(mRing->writeSpace() < mDevice->UpdateSize)
+ {
+ unlock();
+ mSem.wait();
+ lock();
+ continue;
+ }
+
+ auto data = mRing->getWriteVector();
+ auto todo = static_cast<ALuint>(data.first.len + data.second.len);
+ todo -= todo%mDevice->UpdateSize;
+
+ ALuint len1{minu(data.first.len, todo)};
+ ALuint len2{minu(data.second.len, todo-len1)};
+
+ aluMixData(mDevice, data.first.buf, len1);
+ if(len2 > 0)
+ aluMixData(mDevice, data.second.buf, len2);
+ mRing->writeAdvance(todo);
+ }
+ unlock();
+
+ return 0;
+}
+
+
+ALCenum JackPlayback::open(const ALCchar *name)
+{
+ if(!name)
+ name = jackDevice;
+ else if(strcmp(name, jackDevice) != 0)
+ return ALC_INVALID_VALUE;
+
+ const char *client_name{"alsoft"};
+ jack_status_t status;
+ mClient = jack_client_open(client_name, ClientOptions, &status, nullptr);
+ if(mClient == nullptr)
+ {
+ ERR("jack_client_open() failed, status = 0x%02x\n", status);
+ return ALC_INVALID_VALUE;
+ }
+ if((status&JackServerStarted))
+ TRACE("JACK server started\n");
+ if((status&JackNameNotUnique))
+ {
+ client_name = jack_get_client_name(mClient);
+ TRACE("Client name not unique, got `%s' instead\n", client_name);
+ }
+
+ jack_set_process_callback(mClient, &JackPlayback::processC, this);
+ jack_set_buffer_size_callback(mClient, &JackPlayback::bufferSizeNotifyC, this);
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean JackPlayback::reset()
+{
+ std::for_each(std::begin(mPort), std::end(mPort),
+ [this](jack_port_t *port) -> void
+ { if(port) jack_port_unregister(mClient, port); }
+ );
+ std::fill(std::begin(mPort), std::end(mPort), nullptr);
+
+ /* Ignore the requested buffer metrics and just keep one JACK-sized buffer
+ * ready for when requested.
+ */
+ mDevice->Frequency = jack_get_sample_rate(mClient);
+ mDevice->UpdateSize = jack_get_buffer_size(mClient);
+ mDevice->BufferSize = mDevice->UpdateSize * 2;
+
+ const char *devname{mDevice->DeviceName.c_str()};
+ ALuint bufsize{ConfigValueUInt(devname, "jack", "buffer-size").value_or(mDevice->UpdateSize)};
+ bufsize = maxu(NextPowerOf2(bufsize), mDevice->UpdateSize);
+ mDevice->BufferSize = bufsize + mDevice->UpdateSize;
+
+ /* Force 32-bit float output. */
+ mDevice->FmtType = DevFmtFloat;
+
+ ALsizei numchans{mDevice->channelsFromFmt()};
+ auto ports_end = std::begin(mPort) + numchans;
+ auto bad_port = std::find_if_not(std::begin(mPort), ports_end,
+ [this](jack_port_t *&port) -> bool
+ {
+ std::string name{"channel_" + std::to_string(&port - mPort + 1)};
+ port = jack_port_register(mClient, name.c_str(), JACK_DEFAULT_AUDIO_TYPE,
+ JackPortIsOutput, 0);
+ return port != nullptr;
+ }
+ );
+ if(bad_port != ports_end)
+ {
+ ERR("Not enough JACK ports available for %s output\n", DevFmtChannelsString(mDevice->FmtChans));
+ if(bad_port == std::begin(mPort)) return ALC_FALSE;
+
+ if(bad_port == std::begin(mPort)+1)
+ mDevice->FmtChans = DevFmtMono;
+ else
+ {
+ ports_end = mPort+2;
+ while(bad_port != ports_end)
+ {
+ jack_port_unregister(mClient, *(--bad_port));
+ *bad_port = nullptr;
+ }
+ mDevice->FmtChans = DevFmtStereo;
+ }
+ numchans = std::distance(std::begin(mPort), bad_port);
+ }
+
+ mRing = nullptr;
+ mRing = CreateRingBuffer(bufsize, mDevice->frameSizeFromFmt(), true);
+ if(!mRing)
+ {
+ ERR("Failed to allocate ringbuffer\n");
+ return ALC_FALSE;
+ }
+
+ SetDefaultChannelOrder(mDevice);
+
+ return ALC_TRUE;
+}
+
+ALCboolean JackPlayback::start()
+{
+ if(jack_activate(mClient))
+ {
+ ERR("Failed to activate client\n");
+ return ALC_FALSE;
+ }
+
+ const char **ports{jack_get_ports(mClient, nullptr, nullptr,
+ JackPortIsPhysical|JackPortIsInput)};
+ if(ports == nullptr)
+ {
+ ERR("No physical playback ports found\n");
+ jack_deactivate(mClient);
+ return ALC_FALSE;
+ }
+ std::mismatch(std::begin(mPort), std::end(mPort), ports,
+ [this](const jack_port_t *port, const char *pname) -> bool
+ {
+ if(!port) return false;
+ if(!pname)
+ {
+ ERR("No physical playback port for \"%s\"\n", jack_port_name(port));
+ return false;
+ }
+ if(jack_connect(mClient, jack_port_name(port), pname))
+ ERR("Failed to connect output port \"%s\" to \"%s\"\n", jack_port_name(port),
+ pname);
+ return true;
+ }
+ );
+ jack_free(ports);
+
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&JackPlayback::mixerProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Could not create playback thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ jack_deactivate(mClient);
+ return ALC_FALSE;
+}
+
+void JackPlayback::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+
+ mSem.post();
+ mThread.join();
+
+ jack_deactivate(mClient);
+}
+
+
+ClockLatency JackPlayback::getClockLatency()
+{
+ ClockLatency ret;
+
+ lock();
+ ret.ClockTime = GetDeviceClockTime(mDevice);
+ ret.Latency = std::chrono::seconds{mRing->readSpace()};
+ ret.Latency /= mDevice->Frequency;
+ unlock();
+
+ return ret;
+}
+
+
+void jack_msg_handler(const char *message)
+{
+ WARN("%s\n", message);
+}
+
+} // namespace
+
+bool JackBackendFactory::init()
+{
+ if(!jack_load())
+ return false;
+
+ if(!GetConfigValueBool(nullptr, "jack", "spawn-server", 0))
+ ClientOptions = static_cast<jack_options_t>(ClientOptions | JackNoStartServer);
+
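+ /* Probe for a running JACK server, routing JACK's error messages through
+ * our own logger while doing so. */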
+ void (*old_error_cb)(const char*){&jack_error_callback ? jack_error_callback : nullptr};
+ jack_set_error_function(jack_msg_handler);
+ jack_status_t status;
+ jack_client_t *client{jack_client_open("alsoft", ClientOptions, &status, nullptr)};
+ jack_set_error_function(old_error_cb);
+ if(!client)
+ {
+ WARN("jack_client_open() failed, 0x%02x\n", status);
+ if((status&JackServerFailed) && !(ClientOptions&JackNoStartServer))
+ ERR("Unable to connect to JACK server\n");
+ return false;
+ }
+
+ jack_client_close(client);
+ return true;
+}
+
+bool JackBackendFactory::querySupport(BackendType type)
+{ return (type == BackendType::Playback); }
+
+void JackBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ switch(type)
+ {
+ case DevProbe::Playback:
+ /* Includes null char. */
+ outnames->append(jackDevice, sizeof(jackDevice));
+ break;
+
+ case DevProbe::Capture:
+ break;
+ }
+}
+
+BackendPtr JackBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new JackPlayback{device}};
+ return nullptr;
+}
+
+BackendFactory &JackBackendFactory::getFactory()
+{
+ static JackBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/jack.h b/alc/backends/jack.h
new file mode 100644
index 00000000..10beebfb
--- /dev/null
+++ b/alc/backends/jack.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_JACK_H
+#define BACKENDS_JACK_H
+
+#include "backends/base.h"
+
+struct JackBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_JACK_H */
diff --git a/alc/backends/loopback.cpp b/alc/backends/loopback.cpp
new file mode 100644
index 00000000..4a1c641a
--- /dev/null
+++ b/alc/backends/loopback.cpp
@@ -0,0 +1,80 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2011 by Chris Robinson
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/loopback.h"
+
+#include "alcmain.h"
+#include "alu.h"
+
+
+namespace {
+
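+/* The loopback device never touches real audio hardware; the application
+ * pulls rendered samples itself (via the ALC_SOFT_loopback extension), so
+ * start() and stop() have nothing to do and reset() only needs to fix the
+ * channel order.
+ */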
+struct LoopbackBackend final : public BackendBase {
+ LoopbackBackend(ALCdevice *device) noexcept : BackendBase{device} { }
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ DEF_NEWDEL(LoopbackBackend)
+};
+
+
+ALCenum LoopbackBackend::open(const ALCchar *name)
+{
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean LoopbackBackend::reset()
+{
+ SetDefaultWFXChannelOrder(mDevice);
+ return ALC_TRUE;
+}
+
+ALCboolean LoopbackBackend::start()
+{ return ALC_TRUE; }
+
+void LoopbackBackend::stop()
+{ }
+
+} // namespace
+
+
+bool LoopbackBackendFactory::init()
+{ return true; }
+
+bool LoopbackBackendFactory::querySupport(BackendType)
+{ return true; }
+
+void LoopbackBackendFactory::probe(DevProbe, std::string*)
+{ }
+
+BackendPtr LoopbackBackendFactory::createBackend(ALCdevice *device, BackendType)
+{ return BackendPtr{new LoopbackBackend{device}}; }
+
+BackendFactory &LoopbackBackendFactory::getFactory()
+{
+ static LoopbackBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/loopback.h b/alc/backends/loopback.h
new file mode 100644
index 00000000..09c085b8
--- /dev/null
+++ b/alc/backends/loopback.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_LOOPBACK_H
+#define BACKENDS_LOOPBACK_H
+
+#include "backends/base.h"
+
+struct LoopbackBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_LOOPBACK_H */
diff --git a/alc/backends/null.cpp b/alc/backends/null.cpp
new file mode 100644
index 00000000..ae58cb8b
--- /dev/null
+++ b/alc/backends/null.cpp
@@ -0,0 +1,184 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2010 by Chris Robinson
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/null.h"
+
+#include <exception>
+#include <atomic>
+#include <chrono>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+#include <thread>
+
+#include "alcmain.h"
+#include "almalloc.h"
+#include "alu.h"
+#include "logging.h"
+#include "threads.h"
+
+
+namespace {
+
+using std::chrono::seconds;
+using std::chrono::milliseconds;
+using std::chrono::nanoseconds;
+
+constexpr ALCchar nullDevice[] = "No Output";
+
+
+struct NullBackend final : public BackendBase {
+ NullBackend(ALCdevice *device) noexcept : BackendBase{device} { }
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(NullBackend)
+};
+
+int NullBackend::mixerProc()
+{
+ const milliseconds restTime{mDevice->UpdateSize*1000/mDevice->Frequency / 2};
+
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ int64_t done{0};
+ auto start = std::chrono::steady_clock::now();
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ auto now = std::chrono::steady_clock::now();
+
+ /* This converts from nanoseconds to nanosamples, then to samples. */
+ int64_t avail{std::chrono::duration_cast<seconds>((now-start) * mDevice->Frequency).count()};
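+ /* For example, at 48kHz with 20ms elapsed: 20,000,000ns * 48000 gives
+ * 960,000,000,000 "nano-samples", and the cast to seconds divides by
+ * 1e9, leaving 960 samples available to mix.
+ */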
+ if(avail-done < mDevice->UpdateSize)
+ {
+ std::this_thread::sleep_for(restTime);
+ continue;
+ }
+ while(avail-done >= mDevice->UpdateSize)
+ {
+ lock();
+ aluMixData(mDevice, nullptr, mDevice->UpdateSize);
+ unlock();
+ done += mDevice->UpdateSize;
+ }
+
+ /* For every completed second, increment the start time and reduce the
+ * samples done. This prevents the difference between the start time
+ * and current time from growing too large, while maintaining the
+ * correct number of samples to render.
+ */
+ if(done >= mDevice->Frequency)
+ {
+ seconds s{done/mDevice->Frequency};
+ start += s;
+ done -= mDevice->Frequency*s.count();
+ }
+ }
+
+ return 0;
+}
+
+
+ALCenum NullBackend::open(const ALCchar *name)
+{
+ if(!name)
+ name = nullDevice;
+ else if(strcmp(name, nullDevice) != 0)
+ return ALC_INVALID_VALUE;
+
+ mDevice->DeviceName = name;
+
+ return ALC_NO_ERROR;
+}
+
+ALCboolean NullBackend::reset()
+{
+ SetDefaultWFXChannelOrder(mDevice);
+ return ALC_TRUE;
+}
+
+ALCboolean NullBackend::start()
+{
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&NullBackend::mixerProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Failed to start mixing thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+void NullBackend::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+}
+
+} // namespace
+
+
+bool NullBackendFactory::init()
+{ return true; }
+
+bool NullBackendFactory::querySupport(BackendType type)
+{ return (type == BackendType::Playback); }
+
+void NullBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ switch(type)
+ {
+ case DevProbe::Playback:
+ /* Includes null char. */
+ outnames->append(nullDevice, sizeof(nullDevice));
+ break;
+ case DevProbe::Capture:
+ break;
+ }
+}
+
+BackendPtr NullBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new NullBackend{device}};
+ return nullptr;
+}
+
+BackendFactory &NullBackendFactory::getFactory()
+{
+ static NullBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/null.h b/alc/backends/null.h
new file mode 100644
index 00000000..f19d5b4d
--- /dev/null
+++ b/alc/backends/null.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_NULL_H
+#define BACKENDS_NULL_H
+
+#include "backends/base.h"
+
+struct NullBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_NULL_H */
diff --git a/alc/backends/opensl.cpp b/alc/backends/opensl.cpp
new file mode 100644
index 00000000..b34dc0cb
--- /dev/null
+++ b/alc/backends/opensl.cpp
@@ -0,0 +1,936 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This is an OpenAL backend for Android using the native audio APIs based on
+ * OpenSL ES 1.0.1. It is based on source code for the native-audio sample app
+ * bundled with NDK.
+ */
+
+#include "config.h"
+
+#include "backends/opensl.h"
+
+#include <stdlib.h>
+#include <jni.h>
+
+#include <new>
+#include <array>
+#include <thread>
+#include <functional>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "ringbuffer.h"
+#include "threads.h"
+#include "compat.h"
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+
+namespace {
+
+/* Helper macros */
+#define EXTRACT_VCALL_ARGS(...) __VA_ARGS__))
+#define VCALL(obj, func) ((*(obj))->func((obj), EXTRACT_VCALL_ARGS
+#define VCALL0(obj, func) ((*(obj))->func((obj) EXTRACT_VCALL_ARGS
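+/* These wrap OpenSL ES's COM-like calling convention. For example,
+ * VCALL(mEngineObj,GetInterface)(SL_IID_ENGINE, &mEngine)
+ * expands to
+ * ((*(mEngineObj))->GetInterface((mEngineObj), SL_IID_ENGINE, &mEngine))
+ * i.e. the object is dereferenced for its vtable and passed back as the
+ * first argument. VCALL0 is the zero-argument form.
+ */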
+
+
+constexpr ALCchar opensl_device[] = "OpenSL";
+
+
+SLuint32 GetChannelMask(DevFmtChannels chans)
+{
+ switch(chans)
+ {
+ case DevFmtMono: return SL_SPEAKER_FRONT_CENTER;
+ case DevFmtStereo: return SL_SPEAKER_FRONT_LEFT|SL_SPEAKER_FRONT_RIGHT;
+ case DevFmtQuad: return SL_SPEAKER_FRONT_LEFT|SL_SPEAKER_FRONT_RIGHT|
+ SL_SPEAKER_BACK_LEFT|SL_SPEAKER_BACK_RIGHT;
+ case DevFmtX51: return SL_SPEAKER_FRONT_LEFT|SL_SPEAKER_FRONT_RIGHT|
+ SL_SPEAKER_FRONT_CENTER|SL_SPEAKER_LOW_FREQUENCY|
+ SL_SPEAKER_SIDE_LEFT|SL_SPEAKER_SIDE_RIGHT;
+ case DevFmtX51Rear: return SL_SPEAKER_FRONT_LEFT|SL_SPEAKER_FRONT_RIGHT|
+ SL_SPEAKER_FRONT_CENTER|SL_SPEAKER_LOW_FREQUENCY|
+ SL_SPEAKER_BACK_LEFT|SL_SPEAKER_BACK_RIGHT;
+ case DevFmtX61: return SL_SPEAKER_FRONT_LEFT|SL_SPEAKER_FRONT_RIGHT|
+ SL_SPEAKER_FRONT_CENTER|SL_SPEAKER_LOW_FREQUENCY|
+ SL_SPEAKER_BACK_CENTER|
+ SL_SPEAKER_SIDE_LEFT|SL_SPEAKER_SIDE_RIGHT;
+ case DevFmtX71: return SL_SPEAKER_FRONT_LEFT|SL_SPEAKER_FRONT_RIGHT|
+ SL_SPEAKER_FRONT_CENTER|SL_SPEAKER_LOW_FREQUENCY|
+ SL_SPEAKER_BACK_LEFT|SL_SPEAKER_BACK_RIGHT|
+ SL_SPEAKER_SIDE_LEFT|SL_SPEAKER_SIDE_RIGHT;
+ case DevFmtAmbi3D:
+ break;
+ }
+ return 0;
+}
+
+#ifdef SL_ANDROID_DATAFORMAT_PCM_EX
+SLuint32 GetTypeRepresentation(DevFmtType type)
+{
+ switch(type)
+ {
+ case DevFmtUByte:
+ case DevFmtUShort:
+ case DevFmtUInt:
+ return SL_ANDROID_PCM_REPRESENTATION_UNSIGNED_INT;
+ case DevFmtByte:
+ case DevFmtShort:
+ case DevFmtInt:
+ return SL_ANDROID_PCM_REPRESENTATION_SIGNED_INT;
+ case DevFmtFloat:
+ return SL_ANDROID_PCM_REPRESENTATION_FLOAT;
+ }
+ return 0;
+}
+#endif
+
+const char *res_str(SLresult result)
+{
+ switch(result)
+ {
+ case SL_RESULT_SUCCESS: return "Success";
+ case SL_RESULT_PRECONDITIONS_VIOLATED: return "Preconditions violated";
+ case SL_RESULT_PARAMETER_INVALID: return "Parameter invalid";
+ case SL_RESULT_MEMORY_FAILURE: return "Memory failure";
+ case SL_RESULT_RESOURCE_ERROR: return "Resource error";
+ case SL_RESULT_RESOURCE_LOST: return "Resource lost";
+ case SL_RESULT_IO_ERROR: return "I/O error";
+ case SL_RESULT_BUFFER_INSUFFICIENT: return "Buffer insufficient";
+ case SL_RESULT_CONTENT_CORRUPTED: return "Content corrupted";
+ case SL_RESULT_CONTENT_UNSUPPORTED: return "Content unsupported";
+ case SL_RESULT_CONTENT_NOT_FOUND: return "Content not found";
+ case SL_RESULT_PERMISSION_DENIED: return "Permission denied";
+ case SL_RESULT_FEATURE_UNSUPPORTED: return "Feature unsupported";
+ case SL_RESULT_INTERNAL_ERROR: return "Internal error";
+ case SL_RESULT_UNKNOWN_ERROR: return "Unknown error";
+ case SL_RESULT_OPERATION_ABORTED: return "Operation aborted";
+ case SL_RESULT_CONTROL_LOST: return "Control lost";
+#ifdef SL_RESULT_READONLY
+ case SL_RESULT_READONLY: return "ReadOnly";
+#endif
+#ifdef SL_RESULT_ENGINEOPTION_UNSUPPORTED
+ case SL_RESULT_ENGINEOPTION_UNSUPPORTED: return "Engine option unsupported";
+#endif
+#ifdef SL_RESULT_SOURCE_SINK_INCOMPATIBLE
+ case SL_RESULT_SOURCE_SINK_INCOMPATIBLE: return "Source/Sink incompatible";
+#endif
+ }
+ return "Unknown error code";
+}
+
+#define PRINTERR(x, s) do { \
+ if(UNLIKELY((x) != SL_RESULT_SUCCESS)) \
+ ERR("%s: %s\n", (s), res_str((x))); \
+} while(0)
+
+
+struct OpenSLPlayback final : public BackendBase {
+ OpenSLPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~OpenSLPlayback() override;
+
+ static void processC(SLAndroidSimpleBufferQueueItf bq, void *context);
+ void process(SLAndroidSimpleBufferQueueItf bq);
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+ ClockLatency getClockLatency() override;
+
+ /* engine interfaces */
+ SLObjectItf mEngineObj{nullptr};
+ SLEngineItf mEngine{nullptr};
+
+ /* output mix interfaces */
+ SLObjectItf mOutputMix{nullptr};
+
+ /* buffer queue player interfaces */
+ SLObjectItf mBufferQueueObj{nullptr};
+
+ RingBufferPtr mRing{nullptr};
+ al::semaphore mSem;
+
+ ALsizei mFrameSize{0};
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(OpenSLPlayback)
+};
+
+OpenSLPlayback::~OpenSLPlayback()
+{
+ if(mBufferQueueObj)
+ VCALL0(mBufferQueueObj,Destroy)();
+ mBufferQueueObj = nullptr;
+
+ if(mOutputMix)
+ VCALL0(mOutputMix,Destroy)();
+ mOutputMix = nullptr;
+
+ if(mEngineObj)
+ VCALL0(mEngineObj,Destroy)();
+ mEngineObj = nullptr;
+ mEngine = nullptr;
+}
+
+
+/* this callback handler is called every time a buffer finishes playing */
+void OpenSLPlayback::processC(SLAndroidSimpleBufferQueueItf bq, void *context)
+{ static_cast<OpenSLPlayback*>(context)->process(bq); }
+
+void OpenSLPlayback::process(SLAndroidSimpleBufferQueueItf)
+{
+ /* A note on the ringbuffer usage: The buffer queue seems to hold on to the
+ * pointer passed to the Enqueue method, rather than copying the audio.
+ * Consequently, the ringbuffer contains the audio that is currently queued
+ * and waiting to play. This process() callback is called when a buffer is
+ * finished, so we simply move the read pointer up to indicate the space is
+ * available for writing again, and wake up the mixer thread to mix and
+ * queue more audio.
+ */
+ mRing->readAdvance(1);
+
+ mSem.post();
+}
+
+int OpenSLPlayback::mixerProc()
+{
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ SLPlayItf player;
+ SLAndroidSimpleBufferQueueItf bufferQueue;
+ SLresult result{VCALL(mBufferQueueObj,GetInterface)(SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+ &bufferQueue)};
+ PRINTERR(result, "bufferQueue->GetInterface SL_IID_ANDROIDSIMPLEBUFFERQUEUE");
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mBufferQueueObj,GetInterface)(SL_IID_PLAY, &player);
+ PRINTERR(result, "bufferQueue->GetInterface SL_IID_PLAY");
+ }
+
+ lock();
+ if(SL_RESULT_SUCCESS != result)
+ aluHandleDisconnect(mDevice, "Failed to get playback buffer: 0x%08x", result);
+
+ while(SL_RESULT_SUCCESS == result && !mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ if(mRing->writeSpace() == 0)
+ {
+ SLuint32 state{0};
+
+ result = VCALL(player,GetPlayState)(&state);
+ PRINTERR(result, "player->GetPlayState");
+ if(SL_RESULT_SUCCESS == result && state != SL_PLAYSTATE_PLAYING)
+ {
+ result = VCALL(player,SetPlayState)(SL_PLAYSTATE_PLAYING);
+ PRINTERR(result, "player->SetPlayState");
+ }
+ if(SL_RESULT_SUCCESS != result)
+ {
+ aluHandleDisconnect(mDevice, "Failed to start platback: 0x%08x", result);
+ break;
+ }
+
+ if(mRing->writeSpace() == 0)
+ {
+ unlock();
+ mSem.wait();
+ lock();
+ continue;
+ }
+ }
+
+ auto data = mRing->getWriteVector();
+ aluMixData(mDevice, data.first.buf, data.first.len*mDevice->UpdateSize);
+ if(data.second.len > 0)
+ aluMixData(mDevice, data.second.buf, data.second.len*mDevice->UpdateSize);
+
+ size_t todo{data.first.len + data.second.len};
+ mRing->writeAdvance(todo);
+
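+ /* Enqueue each newly mixed update. When the first segment of the write
+ * vector is exhausted, continue with the wrapped second segment.
+ */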
+ for(size_t i{0};i < todo;i++)
+ {
+ if(!data.first.len)
+ {
+ data.first = data.second;
+ data.second.buf = nullptr;
+ data.second.len = 0;
+ }
+
+ result = VCALL(bufferQueue,Enqueue)(data.first.buf, mDevice->UpdateSize*mFrameSize);
+ PRINTERR(result, "bufferQueue->Enqueue");
+ if(SL_RESULT_SUCCESS != result)
+ {
+ aluHandleDisconnect(mDevice, "Failed to queue audio: 0x%08x", result);
+ break;
+ }
+
+ data.first.len--;
+ data.first.buf += mDevice->UpdateSize*mFrameSize;
+ }
+ }
+ unlock();
+
+ return 0;
+}
+
+
+ALCenum OpenSLPlayback::open(const ALCchar *name)
+{
+ if(!name)
+ name = opensl_device;
+ else if(strcmp(name, opensl_device) != 0)
+ return ALC_INVALID_VALUE;
+
+ // create engine
+ SLresult result{slCreateEngine(&mEngineObj, 0, nullptr, 0, nullptr, nullptr)};
+ PRINTERR(result, "slCreateEngine");
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mEngineObj,Realize)(SL_BOOLEAN_FALSE);
+ PRINTERR(result, "engine->Realize");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mEngineObj,GetInterface)(SL_IID_ENGINE, &mEngine);
+ PRINTERR(result, "engine->GetInterface");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mEngine,CreateOutputMix)(&mOutputMix, 0, nullptr, nullptr);
+ PRINTERR(result, "engine->CreateOutputMix");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mOutputMix,Realize)(SL_BOOLEAN_FALSE);
+ PRINTERR(result, "outputMix->Realize");
+ }
+
+ if(SL_RESULT_SUCCESS != result)
+ {
+ if(mOutputMix)
+ VCALL0(mOutputMix,Destroy)();
+ mOutputMix = nullptr;
+
+ if(mEngineObj)
+ VCALL0(mEngineObj,Destroy)();
+ mEngineObj = nullptr;
+ mEngine = nullptr;
+
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean OpenSLPlayback::reset()
+{
+ SLDataLocator_AndroidSimpleBufferQueue loc_bufq;
+ SLDataLocator_OutputMix loc_outmix;
+ SLDataSource audioSrc;
+ SLDataSink audioSnk;
+ SLresult result;
+
+ if(mBufferQueueObj)
+ VCALL0(mBufferQueueObj,Destroy)();
+ mBufferQueueObj = nullptr;
+
+ mRing = nullptr;
+
+#if 0
+ if(!mDevice->Flags.get<FrequencyRequest>())
+ {
+ /* FIXME: Disabled until I figure out how to get the Context needed for
+ * the getSystemService call.
+ */
+ JNIEnv *env = Android_GetJNIEnv();
+ jobject jctx = Android_GetContext();
+
+ /* Get necessary stuff for using java.lang.Integer,
+ * android.content.Context, and android.media.AudioManager.
+ */
+ jclass int_cls = JCALL(env,FindClass)("java/lang/Integer");
+ jmethodID int_parseint = JCALL(env,GetStaticMethodID)(int_cls,
+ "parseInt", "(Ljava/lang/String;)I"
+ );
+ TRACE("Integer: %p, parseInt: %p\n", int_cls, int_parseint);
+
+ jclass ctx_cls = JCALL(env,FindClass)("android/content/Context");
+ jfieldID ctx_audsvc = JCALL(env,GetStaticFieldID)(ctx_cls,
+ "AUDIO_SERVICE", "Ljava/lang/String;"
+ );
+ jmethodID ctx_getSysSvc = JCALL(env,GetMethodID)(ctx_cls,
+ "getSystemService", "(Ljava/lang/String;)Ljava/lang/Object;"
+ );
+ TRACE("Context: %p, AUDIO_SERVICE: %p, getSystemService: %p\n",
+ ctx_cls, ctx_audsvc, ctx_getSysSvc);
+
+ jclass audmgr_cls = JCALL(env,FindClass)("android/media/AudioManager");
+ jfieldID audmgr_prop_out_srate = JCALL(env,GetStaticFieldID)(audmgr_cls,
+ "PROPERTY_OUTPUT_SAMPLE_RATE", "Ljava/lang/String;"
+ );
+ jmethodID audmgr_getproperty = JCALL(env,GetMethodID)(audmgr_cls,
+ "getProperty", "(Ljava/lang/String;)Ljava/lang/String;"
+ );
+ TRACE("AudioManager: %p, PROPERTY_OUTPUT_SAMPLE_RATE: %p, getProperty: %p\n",
+ audmgr_cls, audmgr_prop_out_srate, audmgr_getproperty);
+
+ const char *strchars;
+ jstring strobj;
+
+ /* Now make the calls. */
+ //AudioManager audMgr = (AudioManager)getSystemService(Context.AUDIO_SERVICE);
+ strobj = JCALL(env,GetStaticObjectField)(ctx_cls, ctx_audsvc);
+ jobject audMgr = JCALL(env,CallObjectMethod)(jctx, ctx_getSysSvc, strobj);
+ strchars = JCALL(env,GetStringUTFChars)(strobj, nullptr);
+ TRACE("Context.getSystemService(%s) = %p\n", strchars, audMgr);
+ JCALL(env,ReleaseStringUTFChars)(strobj, strchars);
+
+ //String srateStr = audMgr.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+ strobj = JCALL(env,GetStaticObjectField)(audmgr_cls, audmgr_prop_out_srate);
+ jstring srateStr = JCALL(env,CallObjectMethod)(audMgr, audmgr_getproperty, strobj);
+ strchars = JCALL(env,GetStringUTFChars)(strobj, nullptr);
+ TRACE("audMgr.getProperty(%s) = %p\n", strchars, srateStr);
+ JCALL(env,ReleaseStringUTFChars)(strobj, strchars);
+
+ //int sampleRate = Integer.parseInt(srateStr);
+ sampleRate = JCALL(env,CallStaticIntMethod)(int_cls, int_parseint, srateStr);
+
+ strchars = JCALL(env,GetStringUTFChars)(srateStr, nullptr);
+ TRACE("Got system sample rate %uhz (%s)\n", sampleRate, strchars);
+ JCALL(env,ReleaseStringUTFChars)(srateStr, strchars);
+
+ if(!sampleRate) sampleRate = device->Frequency;
+ else sampleRate = maxu(sampleRate, MIN_OUTPUT_RATE);
+ }
+#endif
+
+ mDevice->FmtChans = DevFmtStereo;
+ mDevice->FmtType = DevFmtShort;
+
+ SetDefaultWFXChannelOrder(mDevice);
+ mFrameSize = mDevice->frameSizeFromFmt();
+
+
+ const std::array<SLInterfaceID,2> ids{{ SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION }};
+ const std::array<SLboolean,2> reqs{{ SL_BOOLEAN_TRUE, SL_BOOLEAN_FALSE }};
+
+ loc_bufq.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
+ loc_bufq.numBuffers = mDevice->BufferSize / mDevice->UpdateSize;
+
+#ifdef SL_ANDROID_DATAFORMAT_PCM_EX
+ SLAndroidDataFormat_PCM_EX format_pcm{};
+ format_pcm.formatType = SL_ANDROID_DATAFORMAT_PCM_EX;
+ format_pcm.numChannels = mDevice->channelsFromFmt();
+ format_pcm.sampleRate = mDevice->Frequency * 1000;
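+ /* (OpenSL ES expresses sample rates in milliHertz, hence the *1000.) */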
+ format_pcm.bitsPerSample = mDevice->bytesFromFmt() * 8;
+ format_pcm.containerSize = format_pcm.bitsPerSample;
+ format_pcm.channelMask = GetChannelMask(mDevice->FmtChans);
+ format_pcm.endianness = IS_LITTLE_ENDIAN ? SL_BYTEORDER_LITTLEENDIAN :
+ SL_BYTEORDER_BIGENDIAN;
+ format_pcm.representation = GetTypeRepresentation(mDevice->FmtType);
+#else
+ SLDataFormat_PCM format_pcm{};
+ format_pcm.formatType = SL_DATAFORMAT_PCM;
+ format_pcm.numChannels = mDevice->channelsFromFmt();
+ format_pcm.samplesPerSec = mDevice->Frequency * 1000;
+ format_pcm.bitsPerSample = mDevice->bytesFromFmt() * 8;
+ format_pcm.containerSize = format_pcm.bitsPerSample;
+ format_pcm.channelMask = GetChannelMask(mDevice->FmtChans);
+ format_pcm.endianness = IS_LITTLE_ENDIAN ? SL_BYTEORDER_LITTLEENDIAN :
+ SL_BYTEORDER_BIGENDIAN;
+#endif
+
+ audioSrc.pLocator = &loc_bufq;
+ audioSrc.pFormat = &format_pcm;
+
+ loc_outmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
+ loc_outmix.outputMix = mOutputMix;
+ audioSnk.pLocator = &loc_outmix;
+ audioSnk.pFormat = nullptr;
+
+
+ result = VCALL(mEngine,CreateAudioPlayer)(&mBufferQueueObj, &audioSrc, &audioSnk, ids.size(),
+ ids.data(), reqs.data());
+ PRINTERR(result, "engine->CreateAudioPlayer");
+ if(SL_RESULT_SUCCESS == result)
+ {
+ /* Set the stream type to "media" (games, music, etc), if possible. */
+ SLAndroidConfigurationItf config;
+ result = VCALL(mBufferQueueObj,GetInterface)(SL_IID_ANDROIDCONFIGURATION, &config);
+ PRINTERR(result, "bufferQueue->GetInterface SL_IID_ANDROIDCONFIGURATION");
+ if(SL_RESULT_SUCCESS == result)
+ {
+ SLint32 streamType = SL_ANDROID_STREAM_MEDIA;
+ result = VCALL(config,SetConfiguration)(SL_ANDROID_KEY_STREAM_TYPE, &streamType,
+ sizeof(streamType));
+ PRINTERR(result, "config->SetConfiguration");
+ }
+
+ /* Clear any error since this was optional. */
+ result = SL_RESULT_SUCCESS;
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mBufferQueueObj,Realize)(SL_BOOLEAN_FALSE);
+ PRINTERR(result, "bufferQueue->Realize");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ const ALuint num_updates{mDevice->BufferSize / mDevice->UpdateSize};
+ try {
+ mRing = CreateRingBuffer(num_updates, mFrameSize*mDevice->UpdateSize, true);
+ }
+ catch(std::exception& e) {
+ ERR("Failed allocating ring buffer %ux%ux%u: %s\n", mDevice->UpdateSize,
+ num_updates, mFrameSize, e.what());
+ result = SL_RESULT_MEMORY_FAILURE;
+ }
+ }
+
+ if(SL_RESULT_SUCCESS != result)
+ {
+ if(mBufferQueueObj)
+ VCALL0(mBufferQueueObj,Destroy)();
+ mBufferQueueObj = nullptr;
+
+ return ALC_FALSE;
+ }
+
+ return ALC_TRUE;
+}
+
+ALCboolean OpenSLPlayback::start()
+{
+ mRing->reset();
+
+ SLAndroidSimpleBufferQueueItf bufferQueue;
+ SLresult result{VCALL(mBufferQueueObj,GetInterface)(SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
+ &bufferQueue)};
+ PRINTERR(result, "bufferQueue->GetInterface");
+ if(SL_RESULT_SUCCESS != result)
+ return ALC_FALSE;
+
+ result = VCALL(bufferQueue,RegisterCallback)(&OpenSLPlayback::processC, this);
+ PRINTERR(result, "bufferQueue->RegisterCallback");
+ if(SL_RESULT_SUCCESS != result) return ALC_FALSE;
+
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread(std::mem_fn(&OpenSLPlayback::mixerProc), this);
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Could not create playback thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+void OpenSLPlayback::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+
+ mSem.post();
+ mThread.join();
+
+ SLPlayItf player;
+ SLresult result{VCALL(mBufferQueueObj,GetInterface)(SL_IID_PLAY, &player)};
+ PRINTERR(result, "bufferQueue->GetInterface");
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(player,SetPlayState)(SL_PLAYSTATE_STOPPED);
+ PRINTERR(result, "player->SetPlayState");
+ }
+
+ SLAndroidSimpleBufferQueueItf bufferQueue;
+ result = VCALL(mBufferQueueObj,GetInterface)(SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &bufferQueue);
+ PRINTERR(result, "bufferQueue->GetInterface");
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL0(bufferQueue,Clear)();
+ PRINTERR(result, "bufferQueue->Clear");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(bufferQueue,RegisterCallback)(nullptr, nullptr);
+ PRINTERR(result, "bufferQueue->RegisterCallback");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ SLAndroidSimpleBufferQueueState state;
+ do {
+ std::this_thread::yield();
+ result = VCALL(bufferQueue,GetState)(&state);
+ } while(SL_RESULT_SUCCESS == result && state.count > 0);
+ PRINTERR(result, "bufferQueue->GetState");
+ }
+}
+
+ClockLatency OpenSLPlayback::getClockLatency()
+{
+ ClockLatency ret;
+
+ lock();
+ ret.ClockTime = GetDeviceClockTime(mDevice);
+ ret.Latency = std::chrono::seconds{mRing->readSpace() * mDevice->UpdateSize};
+ ret.Latency /= mDevice->Frequency;
+ unlock();
+
+ return ret;
+}
+
+
+struct OpenSLCapture final : public BackendBase {
+ OpenSLCapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~OpenSLCapture() override;
+
+ static void processC(SLAndroidSimpleBufferQueueItf bq, void *context);
+ void process(SLAndroidSimpleBufferQueueItf bq);
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(void *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+
+ /* engine interfaces */
+ SLObjectItf mEngineObj{nullptr};
+ SLEngineItf mEngine{nullptr};
+
+ /* recording interfaces */
+ SLObjectItf mRecordObj{nullptr};
+
+ RingBufferPtr mRing{nullptr};
+ ALCuint mSplOffset{0u};
+
+ ALsizei mFrameSize{0};
+
+ DEF_NEWDEL(OpenSLCapture)
+};
+
+OpenSLCapture::~OpenSLCapture()
+{
+ if(mRecordObj)
+ VCALL0(mRecordObj,Destroy)();
+ mRecordObj = nullptr;
+
+ if(mEngineObj)
+ VCALL0(mEngineObj,Destroy)();
+ mEngineObj = nullptr;
+ mEngine = nullptr;
+}
+
+
+void OpenSLCapture::processC(SLAndroidSimpleBufferQueueItf bq, void *context)
+{ static_cast<OpenSLCapture*>(context)->process(bq); }
+
+void OpenSLCapture::process(SLAndroidSimpleBufferQueueItf)
+{
+ /* A new chunk has been written into the ring buffer, advance it. */
+ mRing->writeAdvance(1);
+}
+
+
+ALCenum OpenSLCapture::open(const ALCchar* name)
+{
+ if(!name)
+ name = opensl_device;
+ else if(strcmp(name, opensl_device) != 0)
+ return ALC_INVALID_VALUE;
+
+ SLresult result{slCreateEngine(&mEngineObj, 0, nullptr, 0, nullptr, nullptr)};
+ PRINTERR(result, "slCreateEngine");
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mEngineObj,Realize)(SL_BOOLEAN_FALSE);
+ PRINTERR(result, "engine->Realize");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mEngineObj,GetInterface)(SL_IID_ENGINE, &mEngine);
+ PRINTERR(result, "engine->GetInterface");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ mFrameSize = mDevice->frameSizeFromFmt();
+ /* Ensure the total length is at least 100ms */
+ ALsizei length{maxi(mDevice->BufferSize, mDevice->Frequency/10)};
+ /* Ensure the per-chunk length is at least 10ms, and no more than 50ms. */
+ ALsizei update_len{clampi(mDevice->BufferSize/3, mDevice->Frequency/100,
+ mDevice->Frequency/100*5)};
+ ALsizei num_updates{(length+update_len-1) / update_len};
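+ /* For example, at 44100Hz with a small requested buffer: length is at
+ * least 4410 samples, update_len is clamped to [441, 2205] samples, and
+ * num_updates rounds the total up to a whole number of chunks.
+ */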
+
+ try {
+ mRing = CreateRingBuffer(num_updates, update_len*mFrameSize, false);
+
+ mDevice->UpdateSize = update_len;
+ mDevice->BufferSize = mRing->writeSpace() * update_len;
+ }
+ catch(std::exception& e) {
+ ERR("Failed to allocate ring buffer: %s\n", e.what());
+ result = SL_RESULT_MEMORY_FAILURE;
+ }
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ const std::array<SLInterfaceID,2> ids{{ SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION }};
+ const std::array<SLboolean,2> reqs{{ SL_BOOLEAN_TRUE, SL_BOOLEAN_FALSE }};
+
+ SLDataLocator_IODevice loc_dev{};
+ loc_dev.locatorType = SL_DATALOCATOR_IODEVICE;
+ loc_dev.deviceType = SL_IODEVICE_AUDIOINPUT;
+ loc_dev.deviceID = SL_DEFAULTDEVICEID_AUDIOINPUT;
+ loc_dev.device = nullptr;
+
+ SLDataSource audioSrc{};
+ audioSrc.pLocator = &loc_dev;
+ audioSrc.pFormat = nullptr;
+
+ SLDataLocator_AndroidSimpleBufferQueue loc_bq{};
+ loc_bq.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
+ loc_bq.numBuffers = mDevice->BufferSize / mDevice->UpdateSize;
+
+#ifdef SL_ANDROID_DATAFORMAT_PCM_EX
+ SLAndroidDataFormat_PCM_EX format_pcm{};
+ format_pcm.formatType = SL_ANDROID_DATAFORMAT_PCM_EX;
+ format_pcm.numChannels = mDevice->channelsFromFmt();
+ format_pcm.sampleRate = mDevice->Frequency * 1000;
+ format_pcm.bitsPerSample = mDevice->bytesFromFmt() * 8;
+ format_pcm.containerSize = format_pcm.bitsPerSample;
+ format_pcm.channelMask = GetChannelMask(mDevice->FmtChans);
+ format_pcm.endianness = IS_LITTLE_ENDIAN ? SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN;
+ format_pcm.representation = GetTypeRepresentation(mDevice->FmtType);
+#else
+ SLDataFormat_PCM format_pcm{};
+ format_pcm.formatType = SL_DATAFORMAT_PCM;
+ format_pcm.numChannels = mDevice->channelsFromFmt();
+ format_pcm.samplesPerSec = mDevice->Frequency * 1000;
+ format_pcm.bitsPerSample = mDevice->bytesFromFmt() * 8;
+ format_pcm.containerSize = format_pcm.bitsPerSample;
+ format_pcm.channelMask = GetChannelMask(mDevice->FmtChans);
+ format_pcm.endianness = IS_LITTLE_ENDIAN ? SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN;
+#endif
+
+ SLDataSink audioSnk{};
+ audioSnk.pLocator = &loc_bq;
+ audioSnk.pFormat = &format_pcm;
+
+ result = VCALL(mEngine,CreateAudioRecorder)(&mRecordObj, &audioSrc, &audioSnk,
+ ids.size(), ids.data(), reqs.data());
+ PRINTERR(result, "engine->CreateAudioRecorder");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ /* Set the record preset to "generic", if possible. */
+ SLAndroidConfigurationItf config;
+ result = VCALL(mRecordObj,GetInterface)(SL_IID_ANDROIDCONFIGURATION, &config);
+ PRINTERR(result, "recordObj->GetInterface SL_IID_ANDROIDCONFIGURATION");
+ if(SL_RESULT_SUCCESS == result)
+ {
+ SLuint32 preset = SL_ANDROID_RECORDING_PRESET_GENERIC;
+ result = VCALL(config,SetConfiguration)(SL_ANDROID_KEY_RECORDING_PRESET, &preset,
+ sizeof(preset));
+ PRINTERR(result, "config->SetConfiguration");
+ }
+
+ /* Clear any error since this was optional. */
+ result = SL_RESULT_SUCCESS;
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mRecordObj,Realize)(SL_BOOLEAN_FALSE);
+ PRINTERR(result, "recordObj->Realize");
+ }
+
+ SLAndroidSimpleBufferQueueItf bufferQueue;
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(mRecordObj,GetInterface)(SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &bufferQueue);
+ PRINTERR(result, "recordObj->GetInterface");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(bufferQueue,RegisterCallback)(&OpenSLCapture::processC, this);
+ PRINTERR(result, "bufferQueue->RegisterCallback");
+ }
+ if(SL_RESULT_SUCCESS == result)
+ {
+ const ALuint chunk_size{mDevice->UpdateSize * mFrameSize};
+
+ auto data = mRing->getWriteVector();
+ for(size_t i{0u};i < data.first.len && SL_RESULT_SUCCESS == result;i++)
+ {
+ result = VCALL(bufferQueue,Enqueue)(data.first.buf + chunk_size*i, chunk_size);
+ PRINTERR(result, "bufferQueue->Enqueue");
+ }
+ for(size_t i{0u};i < data.second.len && SL_RESULT_SUCCESS == result;i++)
+ {
+ result = VCALL(bufferQueue,Enqueue)(data.second.buf + chunk_size*i, chunk_size);
+ PRINTERR(result, "bufferQueue->Enqueue");
+ }
+ }
+
+ if(SL_RESULT_SUCCESS != result)
+ {
+ if(mRecordObj)
+ VCALL0(mRecordObj,Destroy)();
+ mRecordObj = nullptr;
+
+ if(mEngineObj)
+ VCALL0(mEngineObj,Destroy)();
+ mEngineObj = nullptr;
+ mEngine = nullptr;
+
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean OpenSLCapture::start()
+{
+ SLRecordItf record;
+ SLresult result{VCALL(mRecordObj,GetInterface)(SL_IID_RECORD, &record)};
+ PRINTERR(result, "recordObj->GetInterface");
+
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(record,SetRecordState)(SL_RECORDSTATE_RECORDING);
+ PRINTERR(result, "record->SetRecordState");
+ }
+
+ if(SL_RESULT_SUCCESS != result)
+ {
+ aluHandleDisconnect(mDevice, "Failed to start capture: 0x%08x", result);
+ return ALC_FALSE;
+ }
+
+ return ALC_TRUE;
+}
+
+void OpenSLCapture::stop()
+{
+ SLRecordItf record;
+ SLresult result{VCALL(mRecordObj,GetInterface)(SL_IID_RECORD, &record)};
+ PRINTERR(result, "recordObj->GetInterface");
+
+ if(SL_RESULT_SUCCESS == result)
+ {
+ result = VCALL(record,SetRecordState)(SL_RECORDSTATE_PAUSED);
+ PRINTERR(result, "record->SetRecordState");
+ }
+}
+
+ALCenum OpenSLCapture::captureSamples(void* buffer, ALCuint samples)
+{
+ ALsizei chunk_size = mDevice->UpdateSize * mFrameSize;
+ SLAndroidSimpleBufferQueueItf bufferQueue;
+ SLresult result;
+ ALCuint i;
+
+ result = VCALL(mRecordObj,GetInterface)(SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &bufferQueue);
+ PRINTERR(result, "recordObj->GetInterface");
+
+ /* Read the desired samples from the ring buffer then advance its read
+ * pointer.
+ */
+ auto data = mRing->getReadVector();
+ for(i = 0;i < samples;)
+ {
+ ALCuint rem{minu(samples - i, mDevice->UpdateSize - mSplOffset)};
+ memcpy((ALCbyte*)buffer + i*mFrameSize, data.first.buf + mSplOffset*mFrameSize,
+ rem * mFrameSize);
+
+ mSplOffset += rem;
+ if(mSplOffset == mDevice->UpdateSize)
+ {
+ /* Finished a chunk, reset the offset and advance the read pointer. */
+ mSplOffset = 0;
+
+ mRing->readAdvance(1);
+ result = VCALL(bufferQueue,Enqueue)(data.first.buf, chunk_size);
+ PRINTERR(result, "bufferQueue->Enqueue");
+ if(SL_RESULT_SUCCESS != result) break;
+
+ data.first.len--;
+ if(!data.first.len)
+ data.first = data.second;
+ else
+ data.first.buf += chunk_size;
+ }
+
+ i += rem;
+ }
+
+ if(SL_RESULT_SUCCESS != result)
+ {
+ aluHandleDisconnect(mDevice, "Failed to update capture buffer: 0x%08x", result);
+ return ALC_INVALID_DEVICE;
+ }
+
+ return ALC_NO_ERROR;
+}
+
+ALCuint OpenSLCapture::availableSamples()
+{ return mRing->readSpace()*mDevice->UpdateSize - mSplOffset; }
+
+} // namespace
+
+bool OSLBackendFactory::init() { return true; }
+
+bool OSLBackendFactory::querySupport(BackendType type)
+{ return (type == BackendType::Playback || type == BackendType::Capture); }
+
+void OSLBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ switch(type)
+ {
+ case DevProbe::Playback:
+ case DevProbe::Capture:
+ /* Includes null char. */
+ outnames->append(opensl_device, sizeof(opensl_device));
+ break;
+ }
+}
+
+BackendPtr OSLBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new OpenSLPlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new OpenSLCapture{device}};
+ return nullptr;
+}
+
+BackendFactory &OSLBackendFactory::getFactory()
+{
+ static OSLBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/opensl.h b/alc/backends/opensl.h
new file mode 100644
index 00000000..809aa339
--- /dev/null
+++ b/alc/backends/opensl.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_OSL_H
+#define BACKENDS_OSL_H
+
+#include "backends/base.h"
+
+struct OSLBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_OSL_H */
diff --git a/alc/backends/oss.cpp b/alc/backends/oss.cpp
new file mode 100644
index 00000000..8cfe9e96
--- /dev/null
+++ b/alc/backends/oss.cpp
@@ -0,0 +1,751 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/oss.h"
+
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstdio>
+#include <cstring>
+#include <exception>
+#include <functional>
+#include <memory>
+#include <new>
+#include <string>
+#include <thread>
+#include <utility>
+
+#include "AL/al.h"
+
+#include "alcmain.h"
+#include "alconfig.h"
+#include "almalloc.h"
+#include "alnumeric.h"
+#include "aloptional.h"
+#include "alu.h"
+#include "logging.h"
+#include "ringbuffer.h"
+#include "threads.h"
+#include "vector.h"
+
+#include <sys/soundcard.h>
+
+/*
+ * The OSS documentation talks about SOUND_MIXER_READ, but the header
+ * only contains MIXER_READ. Play safe. Same for WRITE.
+ */
+#ifndef SOUND_MIXER_READ
+#define SOUND_MIXER_READ MIXER_READ
+#endif
+#ifndef SOUND_MIXER_WRITE
+#define SOUND_MIXER_WRITE MIXER_WRITE
+#endif
+
+#if defined(SOUND_VERSION) && (SOUND_VERSION < 0x040000)
+#define ALC_OSS_COMPAT
+#endif
+#ifndef SNDCTL_AUDIOINFO
+#define ALC_OSS_COMPAT
+#endif
+
+/*
+ * FreeBSD strongly discourages the use of specific devices,
+ * such as those returned in oss_audioinfo.devnode
+ */
+#ifdef __FreeBSD__
+#define ALC_OSS_DEVNODE_TRUC
+#endif
+
+namespace {
+
+constexpr char DefaultName[] = "OSS Default";
+std::string DefaultPlayback{"/dev/dsp"};
+std::string DefaultCapture{"/dev/dsp"};
+
+struct DevMap {
+ std::string name;
+ std::string device_name;
+};
+
+bool checkName(const al::vector<DevMap> &list, const std::string &name)
+{
+ return std::find_if(list.cbegin(), list.cend(),
+ [&name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ ) != list.cend();
+}
+
+al::vector<DevMap> PlaybackDevices;
+al::vector<DevMap> CaptureDevices;
+
+
+#ifdef ALC_OSS_COMPAT
+
+#define DSP_CAP_OUTPUT 0x00020000
+#define DSP_CAP_INPUT 0x00010000
+void ALCossListPopulate(al::vector<DevMap> *devlist, int type)
+{
+ devlist->emplace_back(DevMap{DefaultName, (type==DSP_CAP_INPUT) ? DefaultCapture : DefaultPlayback});
+}
+
+#else
+
+void ALCossListAppend(al::vector<DevMap> *list, const char *handle, size_t hlen, const char *path, size_t plen)
+{
+#ifdef ALC_OSS_DEVNODE_TRUC
+ for(size_t i{0};i < plen;i++)
+ {
+ if(path[i] == '.')
+ {
+ if(strncmp(path + i, handle + hlen + i - plen, plen - i) == 0)
+ hlen = hlen + i - plen;
+ plen = i;
+ }
+ }
+#endif
+ if(handle[0] == '\0')
+ {
+ handle = path;
+ hlen = plen;
+ }
+
+ std::string basename{handle, hlen};
+ basename.erase(std::find(basename.begin(), basename.end(), '\0'), basename.end());
+ std::string devname{path, plen};
+ devname.erase(std::find(devname.begin(), devname.end(), '\0'), devname.end());
+
+ auto iter = std::find_if(list->cbegin(), list->cend(),
+ [&devname](const DevMap &entry) -> bool
+ { return entry.device_name == devname; }
+ );
+ if(iter != list->cend())
+ return;
+
+ int count{1};
+ std::string newname{basename};
+ while(checkName(PlaybackDevices, newname))
+ {
+ newname = basename;
+ newname += " #";
+ newname += std::to_string(++count);
+ }
+
+ list->emplace_back(DevMap{std::move(newname), std::move(devname)});
+ const DevMap &entry = list->back();
+
+ TRACE("Got device \"%s\", \"%s\"\n", entry.name.c_str(), entry.device_name.c_str());
+}
+
+void ALCossListPopulate(al::vector<DevMap> *devlist, int type_flag)
+{
+ int fd{open("/dev/mixer", O_RDONLY)};
+ if(fd < 0)
+ {
+ TRACE("Could not open /dev/mixer: %s\n", strerror(errno));
+ goto done;
+ }
+
+ oss_sysinfo si;
+ if(ioctl(fd, SNDCTL_SYSINFO, &si) == -1)
+ {
+ TRACE("SNDCTL_SYSINFO failed: %s\n", strerror(errno));
+ goto done;
+ }
+
+ for(int i{0};i < si.numaudios;i++)
+ {
+ oss_audioinfo ai;
+ ai.dev = i;
+ if(ioctl(fd, SNDCTL_AUDIOINFO, &ai) == -1)
+ {
+ ERR("SNDCTL_AUDIOINFO (%d) failed: %s\n", i, strerror(errno));
+ continue;
+ }
+ if(!(ai.caps&type_flag) || ai.devnode[0] == '\0')
+ continue;
+
+ const char *handle;
+ size_t len;
+ if(ai.handle[0] != '\0')
+ {
+ len = strnlen(ai.handle, sizeof(ai.handle));
+ handle = ai.handle;
+ }
+ else
+ {
+ len = strnlen(ai.name, sizeof(ai.name));
+ handle = ai.name;
+ }
+
+ ALCossListAppend(devlist, handle, len, ai.devnode,
+ strnlen(ai.devnode, sizeof(ai.devnode)));
+ }
+
+done:
+ if(fd >= 0)
+ close(fd);
+ fd = -1;
+
+ const char *defdev{((type_flag==DSP_CAP_INPUT) ? DefaultCapture : DefaultPlayback).c_str()};
+ auto iter = std::find_if(devlist->cbegin(), devlist->cend(),
+ [defdev](const DevMap &entry) -> bool
+ { return entry.device_name == defdev; }
+ );
+ if(iter == devlist->cend())
+ devlist->insert(devlist->begin(), DevMap{DefaultName, defdev});
+ else
+ {
+ DevMap entry{std::move(*iter)};
+ devlist->erase(iter);
+ devlist->insert(devlist->begin(), std::move(entry));
+ }
+ devlist->shrink_to_fit();
+}
+
+#endif
+
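+/* Returns the floor of the base-2 logarithm, e.g. log2i(2048) == 11. Used
+ * below to build the fragment-size selector for SNDCTL_DSP_SETFRAGMENT.
+ */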
+int log2i(ALCuint x)
+{
+ int y = 0;
+ while (x > 1)
+ {
+ x >>= 1;
+ y++;
+ }
+ return y;
+}
+
+
+struct OSSPlayback final : public BackendBase {
+ OSSPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~OSSPlayback() override;
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ int mFd{-1};
+
+ al::vector<ALubyte> mMixData;
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(OSSPlayback)
+};
+
+OSSPlayback::~OSSPlayback()
+{
+ if(mFd != -1)
+ close(mFd);
+ mFd = -1;
+}
+
+
+int OSSPlayback::mixerProc()
+{
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ const int frame_size{mDevice->frameSizeFromFmt()};
+
+ lock();
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ pollfd pollitem{};
+ pollitem.fd = mFd;
+ pollitem.events = POLLOUT;
+
+ unlock();
+ int pret{poll(&pollitem, 1, 1000)};
+ lock();
+ if(pret < 0)
+ {
+ if(errno == EINTR || errno == EAGAIN)
+ continue;
+ ERR("poll failed: %s\n", strerror(errno));
+ aluHandleDisconnect(mDevice, "Failed waiting for playback buffer: %s", strerror(errno));
+ break;
+ }
+ else if(pret == 0)
+ {
+ WARN("poll timeout\n");
+ continue;
+ }
+
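+ /* Mix a full buffer's worth of audio, then write it to the device in as
+ * many pieces as needed; short writes and EINTR/EAGAIN are retried.
+ */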
+ ALubyte *write_ptr{mMixData.data()};
+ size_t to_write{mMixData.size()};
+ aluMixData(mDevice, write_ptr, to_write/frame_size);
+ while(to_write > 0 && !mKillNow.load(std::memory_order_acquire))
+ {
+ ssize_t wrote{write(mFd, write_ptr, to_write)};
+ if(wrote < 0)
+ {
+ if(errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
+ continue;
+ ERR("write failed: %s\n", strerror(errno));
+ aluHandleDisconnect(mDevice, "Failed writing playback samples: %s",
+ strerror(errno));
+ break;
+ }
+
+ to_write -= wrote;
+ write_ptr += wrote;
+ }
+ }
+ unlock();
+
+ return 0;
+}
+
+
+ALCenum OSSPlayback::open(const ALCchar *name)
+{
+ const char *devname{DefaultPlayback.c_str()};
+ if(!name)
+ name = DefaultName;
+ else
+ {
+ if(PlaybackDevices.empty())
+ ALCossListPopulate(&PlaybackDevices, DSP_CAP_OUTPUT);
+
+ auto iter = std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
+ [&name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ );
+ if(iter == PlaybackDevices.cend())
+ return ALC_INVALID_VALUE;
+ devname = iter->device_name.c_str();
+ }
+
+ mFd = ::open(devname, O_WRONLY);
+ if(mFd == -1)
+ {
+ ERR("Could not open %s: %s\n", devname, strerror(errno));
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean OSSPlayback::reset()
+{
+ int numFragmentsLogSize;
+ int log2FragmentSize;
+ unsigned int periods;
+ audio_buf_info info;
+ ALuint frameSize;
+ int numChannels;
+ int ossFormat;
+ int ossSpeed;
+ const char *err;
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ ossFormat = AFMT_S8;
+ break;
+ case DevFmtUByte:
+ ossFormat = AFMT_U8;
+ break;
+ case DevFmtUShort:
+ case DevFmtInt:
+ case DevFmtUInt:
+ case DevFmtFloat:
+ mDevice->FmtType = DevFmtShort;
+ /* fall-through */
+ case DevFmtShort:
+ ossFormat = AFMT_S16_NE;
+ break;
+ }
+
+ periods = mDevice->BufferSize / mDevice->UpdateSize;
+ numChannels = mDevice->channelsFromFmt();
+ ossSpeed = mDevice->Frequency;
+ frameSize = numChannels * mDevice->bytesFromFmt();
+ /* According to the OSS spec, 16 bytes (log2(16)) is the minimum. */
+ log2FragmentSize = maxi(log2i(mDevice->UpdateSize*frameSize), 4);
+ numFragmentsLogSize = (periods << 16) | log2FragmentSize;
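+ /* SNDCTL_DSP_SETFRAGMENT packs the requested fragment count into the
+ * high 16 bits and log2 of the fragment size (in bytes) into the low 16
+ * bits.
+ */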
+
+#define CHECKERR(func) if((func) < 0) { \
+ err = #func; \
+ goto err; \
+}
+ /* Don't fail if SETFRAGMENT fails. We can handle just about anything
+ * that's reported back via GETOSPACE */
+ ioctl(mFd, SNDCTL_DSP_SETFRAGMENT, &numFragmentsLogSize);
+ CHECKERR(ioctl(mFd, SNDCTL_DSP_SETFMT, &ossFormat));
+ CHECKERR(ioctl(mFd, SNDCTL_DSP_CHANNELS, &numChannels));
+ CHECKERR(ioctl(mFd, SNDCTL_DSP_SPEED, &ossSpeed));
+ CHECKERR(ioctl(mFd, SNDCTL_DSP_GETOSPACE, &info));
+ if(0)
+ {
+ err:
+ ERR("%s failed: %s\n", err, strerror(errno));
+ return ALC_FALSE;
+ }
+#undef CHECKERR
+
+ if(mDevice->channelsFromFmt() != numChannels)
+ {
+ ERR("Failed to set %s, got %d channels instead\n", DevFmtChannelsString(mDevice->FmtChans),
+ numChannels);
+ return ALC_FALSE;
+ }
+
+ if(!((ossFormat == AFMT_S8 && mDevice->FmtType == DevFmtByte) ||
+ (ossFormat == AFMT_U8 && mDevice->FmtType == DevFmtUByte) ||
+ (ossFormat == AFMT_S16_NE && mDevice->FmtType == DevFmtShort)))
+ {
+ ERR("Failed to set %s samples, got OSS format %#x\n", DevFmtTypeString(mDevice->FmtType),
+ ossFormat);
+ return ALC_FALSE;
+ }
+
+ mDevice->Frequency = ossSpeed;
+ mDevice->UpdateSize = info.fragsize / frameSize;
+ mDevice->BufferSize = info.fragments * mDevice->UpdateSize;
+
+ SetDefaultChannelOrder(mDevice);
+
+ mMixData.resize(mDevice->UpdateSize * mDevice->frameSizeFromFmt());
+
+ return ALC_TRUE;
+}
+
+ALCboolean OSSPlayback::start()
+{
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&OSSPlayback::mixerProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Could not create playback thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+void OSSPlayback::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+
+ if(ioctl(mFd, SNDCTL_DSP_RESET) != 0)
+ ERR("Error resetting device: %s\n", strerror(errno));
+}
+
+
+struct OSScapture final : public BackendBase {
+ OSScapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~OSScapture() override;
+
+ int recordProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(ALCvoid *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+
+ int mFd{-1};
+
+ RingBufferPtr mRing{nullptr};
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(OSScapture)
+};
+
+OSScapture::~OSScapture()
+{
+ if(mFd != -1)
+ close(mFd);
+ mFd = -1;
+}
+
+
+int OSScapture::recordProc()
+{
+ SetRTPriority();
+ althrd_setname(RECORD_THREAD_NAME);
+
+ const int frame_size{mDevice->frameSizeFromFmt()};
+ while(!mKillNow.load(std::memory_order_acquire))
+ {
+ pollfd pollitem{};
+ pollitem.fd = mFd;
+ pollitem.events = POLLIN;
+
+ int sret{poll(&pollitem, 1, 1000)};
+ if(sret < 0)
+ {
+ if(errno == EINTR || errno == EAGAIN)
+ continue;
+ ERR("poll failed: %s\n", strerror(errno));
+ aluHandleDisconnect(mDevice, "Failed to check capture samples: %s", strerror(errno));
+ break;
+ }
+ else if(sret == 0)
+ {
+ WARN("poll timeout\n");
+ continue;
+ }
+
+ auto vec = mRing->getWriteVector();
+ if(vec.first.len > 0)
+ {
+ ssize_t amt{read(mFd, vec.first.buf, vec.first.len*frame_size)};
+ if(amt < 0)
+ {
+ ERR("read failed: %s\n", strerror(errno));
+ aluHandleDisconnect(mDevice, "Failed reading capture samples: %s",
+ strerror(errno));
+ break;
+ }
+ mRing->writeAdvance(amt/frame_size);
+ }
+ }
+
+ return 0;
+}
+
+
+ALCenum OSScapture::open(const ALCchar *name)
+{
+ const char *devname{DefaultCapture.c_str()};
+ if(!name)
+ name = DefaultName;
+ else
+ {
+ if(CaptureDevices.empty())
+ ALCossListPopulate(&CaptureDevices, DSP_CAP_INPUT);
+
+ auto iter = std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
+ [&name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ );
+ if(iter == CaptureDevices.cend())
+ return ALC_INVALID_VALUE;
+ devname = iter->device_name.c_str();
+ }
+
+ mFd = ::open(devname, O_RDONLY);
+ if(mFd == -1)
+ {
+ ERR("Could not open %s: %s\n", devname, strerror(errno));
+ return ALC_INVALID_VALUE;
+ }
+
+ int ossFormat{};
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ ossFormat = AFMT_S8;
+ break;
+ case DevFmtUByte:
+ ossFormat = AFMT_U8;
+ break;
+ case DevFmtShort:
+ ossFormat = AFMT_S16_NE;
+ break;
+ case DevFmtUShort:
+ case DevFmtInt:
+ case DevFmtUInt:
+ case DevFmtFloat:
+ ERR("%s capture samples not supported\n", DevFmtTypeString(mDevice->FmtType));
+ return ALC_INVALID_VALUE;
+ }
+
+ int periods{4};
+ int numChannels{mDevice->channelsFromFmt()};
+ int frameSize{numChannels * mDevice->bytesFromFmt()};
+ int ossSpeed{static_cast<int>(mDevice->Frequency)};
+ int log2FragmentSize{log2i(mDevice->BufferSize * frameSize / periods)};
+
+ /* According to the OSS spec, 16 bytes is the minimum. */
+ log2FragmentSize = std::max(log2FragmentSize, 4);
+ int numFragmentsLogSize{(periods << 16) | log2FragmentSize};
+
+ audio_buf_info info;
+ const char *err;
+#define CHECKERR(func) if((func) < 0) { \
+ err = #func; \
+ goto err; \
+}
+ CHECKERR(ioctl(mFd, SNDCTL_DSP_SETFRAGMENT, &numFragmentsLogSize));
+ CHECKERR(ioctl(mFd, SNDCTL_DSP_SETFMT, &ossFormat));
+ CHECKERR(ioctl(mFd, SNDCTL_DSP_CHANNELS, &numChannels));
+ CHECKERR(ioctl(mFd, SNDCTL_DSP_SPEED, &ossSpeed));
+ CHECKERR(ioctl(mFd, SNDCTL_DSP_GETISPACE, &info));
+ if(0)
+ {
+ err:
+ ERR("%s failed: %s\n", err, strerror(errno));
+ close(mFd);
+ mFd = -1;
+ return ALC_INVALID_VALUE;
+ }
+#undef CHECKERR
+
+ if(mDevice->channelsFromFmt() != numChannels)
+ {
+ ERR("Failed to set %s, got %d channels instead\n", DevFmtChannelsString(mDevice->FmtChans),
+ numChannels);
+ close(mFd);
+ mFd = -1;
+ return ALC_INVALID_VALUE;
+ }
+
+ if(!((ossFormat == AFMT_S8 && mDevice->FmtType == DevFmtByte) ||
+ (ossFormat == AFMT_U8 && mDevice->FmtType == DevFmtUByte) ||
+ (ossFormat == AFMT_S16_NE && mDevice->FmtType == DevFmtShort)))
+ {
+ ERR("Failed to set %s samples, got OSS format %#x\n", DevFmtTypeString(mDevice->FmtType), ossFormat);
+ close(mFd);
+ mFd = -1;
+ return ALC_INVALID_VALUE;
+ }
+
+ mRing = CreateRingBuffer(mDevice->BufferSize, frameSize, false);
+ if(!mRing)
+ {
+ ERR("Ring buffer create failed\n");
+ close(mFd);
+ mFd = -1;
+ return ALC_OUT_OF_MEMORY;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean OSScapture::start()
+{
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&OSScapture::recordProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Could not create record thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+void OSScapture::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+
+ if(ioctl(mFd, SNDCTL_DSP_RESET) != 0)
+ ERR("Error resetting device: %s\n", strerror(errno));
+}
+
+ALCenum OSScapture::captureSamples(ALCvoid *buffer, ALCuint samples)
+{
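+ /* The ALC layer is expected to request no more than availableSamples(),
+ * so a short read isn't checked for here.
+ */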
+ mRing->read(buffer, samples);
+ return ALC_NO_ERROR;
+}
+
+ALCuint OSScapture::availableSamples()
+{ return mRing->readSpace(); }
+
+} // namespace
+
+
+BackendFactory &OSSBackendFactory::getFactory()
+{
+ static OSSBackendFactory factory{};
+ return factory;
+}
+
+bool OSSBackendFactory::init()
+{
+ if(auto devopt = ConfigValueStr(nullptr, "oss", "device"))
+ DefaultPlayback = std::move(*devopt);
+ if(auto capopt = ConfigValueStr(nullptr, "oss", "capture"))
+ DefaultCapture = std::move(*capopt);
+
+ return true;
+}
+
+bool OSSBackendFactory::querySupport(BackendType type)
+{ return (type == BackendType::Playback || type == BackendType::Capture); }
+
+void OSSBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ auto add_device = [outnames](const DevMap &entry) -> void
+ {
+#ifdef HAVE_STAT
+ struct stat buf;
+ if(stat(entry.device_name.c_str(), &buf) == 0)
+#endif
+ {
+ /* Includes null char. */
+ outnames->append(entry.name.c_str(), entry.name.length()+1);
+ }
+ };
+
+ switch(type)
+ {
+ case DevProbe::Playback:
+ PlaybackDevices.clear();
+ ALCossListPopulate(&PlaybackDevices, DSP_CAP_OUTPUT);
+ std::for_each(PlaybackDevices.cbegin(), PlaybackDevices.cend(), add_device);
+ break;
+
+ case DevProbe::Capture:
+ CaptureDevices.clear();
+ ALCossListPopulate(&CaptureDevices, DSP_CAP_INPUT);
+ std::for_each(CaptureDevices.cbegin(), CaptureDevices.cend(), add_device);
+ break;
+ }
+}
+
+BackendPtr OSSBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new OSSPlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new OSScapture{device}};
+ return nullptr;
+}
diff --git a/alc/backends/oss.h b/alc/backends/oss.h
new file mode 100644
index 00000000..9e63d7b6
--- /dev/null
+++ b/alc/backends/oss.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_OSS_H
+#define BACKENDS_OSS_H
+
+#include "backends/base.h"
+
+struct OSSBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_OSS_H */
diff --git a/alc/backends/portaudio.cpp b/alc/backends/portaudio.cpp
new file mode 100644
index 00000000..73e972c5
--- /dev/null
+++ b/alc/backends/portaudio.cpp
@@ -0,0 +1,463 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/portaudio.h"
+
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "alconfig.h"
+#include "ringbuffer.h"
+#include "compat.h"
+
+#include <portaudio.h>
+
+
+namespace {
+
+constexpr ALCchar pa_device[] = "PortAudio Default";
+
+
+#ifdef HAVE_DYNLOAD
+void *pa_handle;
+#define MAKE_FUNC(x) decltype(x) * p##x
+MAKE_FUNC(Pa_Initialize);
+MAKE_FUNC(Pa_Terminate);
+MAKE_FUNC(Pa_GetErrorText);
+MAKE_FUNC(Pa_StartStream);
+MAKE_FUNC(Pa_StopStream);
+MAKE_FUNC(Pa_OpenStream);
+MAKE_FUNC(Pa_CloseStream);
+MAKE_FUNC(Pa_GetDefaultOutputDevice);
+MAKE_FUNC(Pa_GetDefaultInputDevice);
+MAKE_FUNC(Pa_GetStreamInfo);
+#undef MAKE_FUNC
+
+#ifndef IN_IDE_PARSER
+#define Pa_Initialize pPa_Initialize
+#define Pa_Terminate pPa_Terminate
+#define Pa_GetErrorText pPa_GetErrorText
+#define Pa_StartStream pPa_StartStream
+#define Pa_StopStream pPa_StopStream
+#define Pa_OpenStream pPa_OpenStream
+#define Pa_CloseStream pPa_CloseStream
+#define Pa_GetDefaultOutputDevice pPa_GetDefaultOutputDevice
+#define Pa_GetDefaultInputDevice pPa_GetDefaultInputDevice
+#define Pa_GetStreamInfo pPa_GetStreamInfo
+#endif
+#endif
+
+
+struct PortPlayback final : public BackendBase {
+ PortPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~PortPlayback() override;
+
+ static int writeCallbackC(const void *inputBuffer, void *outputBuffer,
+ unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo *timeInfo,
+ const PaStreamCallbackFlags statusFlags, void *userData);
+ int writeCallback(const void *inputBuffer, void *outputBuffer, unsigned long framesPerBuffer,
+ const PaStreamCallbackTimeInfo *timeInfo, const PaStreamCallbackFlags statusFlags);
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ PaStream *mStream{nullptr};
+ PaStreamParameters mParams{};
+ ALuint mUpdateSize{0u};
+
+ DEF_NEWDEL(PortPlayback)
+};
+
+PortPlayback::~PortPlayback()
+{
+ PaError err{mStream ? Pa_CloseStream(mStream) : paNoError};
+ if(err != paNoError)
+ ERR("Error closing stream: %s\n", Pa_GetErrorText(err));
+ mStream = nullptr;
+}
+
+
+int PortPlayback::writeCallbackC(const void *inputBuffer, void *outputBuffer,
+ unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo *timeInfo,
+ const PaStreamCallbackFlags statusFlags, void *userData)
+{
+ return static_cast<PortPlayback*>(userData)->writeCallback(inputBuffer, outputBuffer,
+ framesPerBuffer, timeInfo, statusFlags);
+}
+
+int PortPlayback::writeCallback(const void*, void *outputBuffer,
+ unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo*,
+ const PaStreamCallbackFlags)
+{
+ lock();
+ aluMixData(mDevice, outputBuffer, framesPerBuffer);
+ unlock();
+ return 0;
+}
+
+
+ALCenum PortPlayback::open(const ALCchar *name)
+{
+ if(!name)
+ name = pa_device;
+ else if(strcmp(name, pa_device) != 0)
+ return ALC_INVALID_VALUE;
+
+ mUpdateSize = mDevice->UpdateSize;
+
+ auto devidopt = ConfigValueInt(nullptr, "port", "device");
+ if(devidopt && *devidopt >= 0) mParams.device = *devidopt;
+ else mParams.device = Pa_GetDefaultOutputDevice();
+ mParams.suggestedLatency = mDevice->BufferSize / static_cast<double>(mDevice->Frequency);
+ mParams.hostApiSpecificStreamInfo = nullptr;
+
+ mParams.channelCount = ((mDevice->FmtChans == DevFmtMono) ? 1 : 2);
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ mParams.sampleFormat = paInt8;
+ break;
+ case DevFmtUByte:
+ mParams.sampleFormat = paUInt8;
+ break;
+ case DevFmtUShort:
+ /* fall-through */
+ case DevFmtShort:
+ mParams.sampleFormat = paInt16;
+ break;
+ case DevFmtUInt:
+ /* fall-through */
+ case DevFmtInt:
+ mParams.sampleFormat = paInt32;
+ break;
+ case DevFmtFloat:
+ mParams.sampleFormat = paFloat32;
+ break;
+ }
+
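+ /* If opening fails with float samples, retry once with 16-bit samples
+ * before giving up. */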
+retry_open:
+ PaError err{Pa_OpenStream(&mStream, nullptr, &mParams, mDevice->Frequency, mDevice->UpdateSize,
+ paNoFlag, &PortPlayback::writeCallbackC, this)};
+ if(err != paNoError)
+ {
+ if(mParams.sampleFormat == paFloat32)
+ {
+ mParams.sampleFormat = paInt16;
+ goto retry_open;
+ }
+ ERR("Pa_OpenStream() returned an error: %s\n", Pa_GetErrorText(err));
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+
+}
+
+ALCboolean PortPlayback::reset()
+{
+ const PaStreamInfo *streamInfo{Pa_GetStreamInfo(mStream)};
+ mDevice->Frequency = streamInfo->sampleRate;
+ mDevice->UpdateSize = mUpdateSize;
+
+ if(mParams.sampleFormat == paInt8)
+ mDevice->FmtType = DevFmtByte;
+ else if(mParams.sampleFormat == paUInt8)
+ mDevice->FmtType = DevFmtUByte;
+ else if(mParams.sampleFormat == paInt16)
+ mDevice->FmtType = DevFmtShort;
+ else if(mParams.sampleFormat == paInt32)
+ mDevice->FmtType = DevFmtInt;
+ else if(mParams.sampleFormat == paFloat32)
+ mDevice->FmtType = DevFmtFloat;
+ else
+ {
+ ERR("Unexpected sample format: 0x%lx\n", mParams.sampleFormat);
+ return ALC_FALSE;
+ }
+
+ if(mParams.channelCount == 2)
+ mDevice->FmtChans = DevFmtStereo;
+ else if(mParams.channelCount == 1)
+ mDevice->FmtChans = DevFmtMono;
+ else
+ {
+ ERR("Unexpected channel count: %u\n", mParams.channelCount);
+ return ALC_FALSE;
+ }
+ SetDefaultChannelOrder(mDevice);
+
+ return ALC_TRUE;
+}
+
+ALCboolean PortPlayback::start()
+{
+ PaError err{Pa_StartStream(mStream)};
+ if(err != paNoError)
+ {
+ ERR("Pa_StartStream() returned an error: %s\n", Pa_GetErrorText(err));
+ return ALC_FALSE;
+ }
+ return ALC_TRUE;
+}
+
+void PortPlayback::stop()
+{
+ PaError err{Pa_StopStream(mStream)};
+ if(err != paNoError)
+ ERR("Error stopping stream: %s\n", Pa_GetErrorText(err));
+}
+
+
+struct PortCapture final : public BackendBase {
+ PortCapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~PortCapture() override;
+
+ static int readCallbackC(const void *inputBuffer, void *outputBuffer,
+ unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo *timeInfo,
+ const PaStreamCallbackFlags statusFlags, void *userData);
+ int readCallback(const void *inputBuffer, void *outputBuffer, unsigned long framesPerBuffer,
+ const PaStreamCallbackTimeInfo *timeInfo, const PaStreamCallbackFlags statusFlags);
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(ALCvoid *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+
+ PaStream *mStream{nullptr};
+ PaStreamParameters mParams;
+
+ RingBufferPtr mRing{nullptr};
+
+ DEF_NEWDEL(PortCapture)
+};
+
+PortCapture::~PortCapture()
+{
+ PaError err{mStream ? Pa_CloseStream(mStream) : paNoError};
+ if(err != paNoError)
+ ERR("Error closing stream: %s\n", Pa_GetErrorText(err));
+ mStream = nullptr;
+}
+
+
+int PortCapture::readCallbackC(const void *inputBuffer, void *outputBuffer,
+ unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo *timeInfo,
+ const PaStreamCallbackFlags statusFlags, void* userData)
+{
+ return static_cast<PortCapture*>(userData)->readCallback(inputBuffer, outputBuffer,
+ framesPerBuffer, timeInfo, statusFlags);
+}
+
+int PortCapture::readCallback(const void *inputBuffer, void*,
+ unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo*,
+ const PaStreamCallbackFlags)
+{
+ mRing->write(inputBuffer, framesPerBuffer);
+ return 0;
+}
+
+
+ALCenum PortCapture::open(const ALCchar *name)
+{
+ if(!name)
+ name = pa_device;
+ else if(strcmp(name, pa_device) != 0)
+ return ALC_INVALID_VALUE;
+
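+ /* Use the requested buffer size, but keep at least 100ms of capture
+ * buffering (Frequency/10 frames) for the ring buffer. */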
+ ALuint samples{mDevice->BufferSize};
+ samples = maxu(samples, 100 * mDevice->Frequency / 1000);
+ ALsizei frame_size{mDevice->frameSizeFromFmt()};
+
+ mRing = CreateRingBuffer(samples, frame_size, false);
+ if(!mRing) return ALC_INVALID_VALUE;
+
+ auto devidopt = ConfigValueInt(nullptr, "port", "capture");
+ if(devidopt && *devidopt >= 0) mParams.device = *devidopt;
+ else mParams.device = Pa_GetDefaultInputDevice();
+ mParams.suggestedLatency = 0.0f;
+ mParams.hostApiSpecificStreamInfo = nullptr;
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ mParams.sampleFormat = paInt8;
+ break;
+ case DevFmtUByte:
+ mParams.sampleFormat = paUInt8;
+ break;
+ case DevFmtShort:
+ mParams.sampleFormat = paInt16;
+ break;
+ case DevFmtInt:
+ mParams.sampleFormat = paInt32;
+ break;
+ case DevFmtFloat:
+ mParams.sampleFormat = paFloat32;
+ break;
+ case DevFmtUInt:
+ case DevFmtUShort:
+ ERR("%s samples not supported\n", DevFmtTypeString(mDevice->FmtType));
+ return ALC_INVALID_VALUE;
+ }
+ mParams.channelCount = mDevice->channelsFromFmt();
+
+ PaError err{Pa_OpenStream(&mStream, &mParams, nullptr, mDevice->Frequency,
+ paFramesPerBufferUnspecified, paNoFlag, &PortCapture::readCallbackC, this)};
+ if(err != paNoError)
+ {
+ ERR("Pa_OpenStream() returned an error: %s\n", Pa_GetErrorText(err));
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+
+ALCboolean PortCapture::start()
+{
+ PaError err{Pa_StartStream(mStream)};
+ if(err != paNoError)
+ {
+ ERR("Error starting stream: %s\n", Pa_GetErrorText(err));
+ return ALC_FALSE;
+ }
+ return ALC_TRUE;
+}
+
+void PortCapture::stop()
+{
+ PaError err{Pa_StopStream(mStream)};
+ if(err != paNoError)
+ ERR("Error stopping stream: %s\n", Pa_GetErrorText(err));
+}
+
+
+ALCuint PortCapture::availableSamples()
+{ return mRing->readSpace(); }
+
+ALCenum PortCapture::captureSamples(ALCvoid *buffer, ALCuint samples)
+{
+ mRing->read(buffer, samples);
+ return ALC_NO_ERROR;
+}
+
+} // namespace
+
+
+bool PortBackendFactory::init()
+{
+ PaError err;
+
+#ifdef HAVE_DYNLOAD
+ if(!pa_handle)
+ {
+#ifdef _WIN32
+# define PALIB "portaudio.dll"
+#elif defined(__APPLE__) && defined(__MACH__)
+# define PALIB "libportaudio.2.dylib"
+#elif defined(__OpenBSD__)
+# define PALIB "libportaudio.so"
+#else
+# define PALIB "libportaudio.so.2"
+#endif
+
+ pa_handle = LoadLib(PALIB);
+ if(!pa_handle)
+ return false;
+
+#define LOAD_FUNC(f) do { \
+ p##f = reinterpret_cast<decltype(p##f)>(GetSymbol(pa_handle, #f)); \
+ if(p##f == nullptr) \
+ { \
+ CloseLib(pa_handle); \
+ pa_handle = nullptr; \
+ return false; \
+ } \
+} while(0)
+ LOAD_FUNC(Pa_Initialize);
+ LOAD_FUNC(Pa_Terminate);
+ LOAD_FUNC(Pa_GetErrorText);
+ LOAD_FUNC(Pa_StartStream);
+ LOAD_FUNC(Pa_StopStream);
+ LOAD_FUNC(Pa_OpenStream);
+ LOAD_FUNC(Pa_CloseStream);
+ LOAD_FUNC(Pa_GetDefaultOutputDevice);
+ LOAD_FUNC(Pa_GetDefaultInputDevice);
+ LOAD_FUNC(Pa_GetStreamInfo);
+#undef LOAD_FUNC
+
+ if((err=Pa_Initialize()) != paNoError)
+ {
+ ERR("Pa_Initialize() returned an error: %s\n", Pa_GetErrorText(err));
+ CloseLib(pa_handle);
+ pa_handle = nullptr;
+ return false;
+ }
+ }
+#else
+ if((err=Pa_Initialize()) != paNoError)
+ {
+ ERR("Pa_Initialize() returned an error: %s\n", Pa_GetErrorText(err));
+ return false;
+ }
+#endif
+ return true;
+}
+
+bool PortBackendFactory::querySupport(BackendType type)
+{ return (type == BackendType::Playback || type == BackendType::Capture); }
+
+void PortBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ switch(type)
+ {
+ case DevProbe::Playback:
+ case DevProbe::Capture:
+ /* Includes null char. */
+ outnames->append(pa_device, sizeof(pa_device));
+ break;
+ }
+}
+
+BackendPtr PortBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new PortPlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new PortCapture{device}};
+ return nullptr;
+}
+
+BackendFactory &PortBackendFactory::getFactory()
+{
+ static PortBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/portaudio.h b/alc/backends/portaudio.h
new file mode 100644
index 00000000..082e9020
--- /dev/null
+++ b/alc/backends/portaudio.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_PORTAUDIO_H
+#define BACKENDS_PORTAUDIO_H
+
+#include "backends/base.h"
+
+struct PortBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_PORTAUDIO_H */
diff --git a/alc/backends/pulseaudio.cpp b/alc/backends/pulseaudio.cpp
new file mode 100644
index 00000000..da209c8d
--- /dev/null
+++ b/alc/backends/pulseaudio.cpp
@@ -0,0 +1,1532 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2009 by Konstantinos Natsakis <[email protected]>
+ * Copyright (C) 2010 by Chris Robinson <[email protected]>
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/pulseaudio.h"
+
+#include <poll.h>
+#include <cstring>
+
+#include <array>
+#include <string>
+#include <vector>
+#include <atomic>
+#include <thread>
+#include <algorithm>
+#include <condition_variable>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "alconfig.h"
+#include "compat.h"
+#include "alexcpt.h"
+
+#include <pulse/pulseaudio.h>
+
+
+namespace {
+
+#ifdef HAVE_DYNLOAD
+#define PULSE_FUNCS(MAGIC) \
+ MAGIC(pa_mainloop_new); \
+ MAGIC(pa_mainloop_free); \
+ MAGIC(pa_mainloop_set_poll_func); \
+ MAGIC(pa_mainloop_run); \
+ MAGIC(pa_mainloop_get_api); \
+ MAGIC(pa_context_new); \
+ MAGIC(pa_context_unref); \
+ MAGIC(pa_context_get_state); \
+ MAGIC(pa_context_disconnect); \
+ MAGIC(pa_context_set_state_callback); \
+ MAGIC(pa_context_errno); \
+ MAGIC(pa_context_connect); \
+ MAGIC(pa_context_get_server_info); \
+ MAGIC(pa_context_get_sink_info_by_name); \
+ MAGIC(pa_context_get_sink_info_list); \
+ MAGIC(pa_context_get_source_info_by_name); \
+ MAGIC(pa_context_get_source_info_list); \
+ MAGIC(pa_stream_new); \
+ MAGIC(pa_stream_unref); \
+ MAGIC(pa_stream_drop); \
+ MAGIC(pa_stream_get_state); \
+ MAGIC(pa_stream_peek); \
+ MAGIC(pa_stream_write); \
+ MAGIC(pa_stream_connect_record); \
+ MAGIC(pa_stream_connect_playback); \
+ MAGIC(pa_stream_readable_size); \
+ MAGIC(pa_stream_writable_size); \
+ MAGIC(pa_stream_is_corked); \
+ MAGIC(pa_stream_cork); \
+ MAGIC(pa_stream_is_suspended); \
+ MAGIC(pa_stream_get_device_name); \
+ MAGIC(pa_stream_get_latency); \
+ MAGIC(pa_stream_set_write_callback); \
+ MAGIC(pa_stream_set_buffer_attr); \
+ MAGIC(pa_stream_get_buffer_attr); \
+ MAGIC(pa_stream_get_sample_spec); \
+ MAGIC(pa_stream_get_time); \
+ MAGIC(pa_stream_set_read_callback); \
+ MAGIC(pa_stream_set_state_callback); \
+ MAGIC(pa_stream_set_moved_callback); \
+ MAGIC(pa_stream_set_underflow_callback); \
+ MAGIC(pa_stream_new_with_proplist); \
+ MAGIC(pa_stream_disconnect); \
+ MAGIC(pa_stream_set_buffer_attr_callback); \
+ MAGIC(pa_stream_begin_write); \
+ MAGIC(pa_channel_map_init_auto); \
+ MAGIC(pa_channel_map_parse); \
+ MAGIC(pa_channel_map_snprint); \
+ MAGIC(pa_channel_map_equal); \
+ MAGIC(pa_channel_map_superset); \
+ MAGIC(pa_operation_get_state); \
+ MAGIC(pa_operation_unref); \
+ MAGIC(pa_sample_spec_valid); \
+ MAGIC(pa_frame_size); \
+ MAGIC(pa_strerror); \
+ MAGIC(pa_path_get_filename); \
+ MAGIC(pa_get_binary_name); \
+ MAGIC(pa_xmalloc); \
+ MAGIC(pa_xfree);
+
+void *pulse_handle;
+#define MAKE_FUNC(x) decltype(x) * p##x
+PULSE_FUNCS(MAKE_FUNC)
+#undef MAKE_FUNC
+
+#ifndef IN_IDE_PARSER
+#define pa_mainloop_new ppa_mainloop_new
+#define pa_mainloop_free ppa_mainloop_free
+#define pa_mainloop_set_poll_func ppa_mainloop_set_poll_func
+#define pa_mainloop_run ppa_mainloop_run
+#define pa_mainloop_get_api ppa_mainloop_get_api
+#define pa_context_new ppa_context_new
+#define pa_context_unref ppa_context_unref
+#define pa_context_get_state ppa_context_get_state
+#define pa_context_disconnect ppa_context_disconnect
+#define pa_context_set_state_callback ppa_context_set_state_callback
+#define pa_context_errno ppa_context_errno
+#define pa_context_connect ppa_context_connect
+#define pa_context_get_server_info ppa_context_get_server_info
+#define pa_context_get_sink_info_by_name ppa_context_get_sink_info_by_name
+#define pa_context_get_sink_info_list ppa_context_get_sink_info_list
+#define pa_context_get_source_info_by_name ppa_context_get_source_info_by_name
+#define pa_context_get_source_info_list ppa_context_get_source_info_list
+#define pa_stream_new ppa_stream_new
+#define pa_stream_unref ppa_stream_unref
+#define pa_stream_disconnect ppa_stream_disconnect
+#define pa_stream_drop ppa_stream_drop
+#define pa_stream_set_write_callback ppa_stream_set_write_callback
+#define pa_stream_set_buffer_attr ppa_stream_set_buffer_attr
+#define pa_stream_get_buffer_attr ppa_stream_get_buffer_attr
+#define pa_stream_get_sample_spec ppa_stream_get_sample_spec
+#define pa_stream_get_time ppa_stream_get_time
+#define pa_stream_set_read_callback ppa_stream_set_read_callback
+#define pa_stream_set_state_callback ppa_stream_set_state_callback
+#define pa_stream_set_moved_callback ppa_stream_set_moved_callback
+#define pa_stream_set_underflow_callback ppa_stream_set_underflow_callback
+#define pa_stream_connect_record ppa_stream_connect_record
+#define pa_stream_connect_playback ppa_stream_connect_playback
+#define pa_stream_readable_size ppa_stream_readable_size
+#define pa_stream_writable_size ppa_stream_writable_size
+#define pa_stream_is_corked ppa_stream_is_corked
+#define pa_stream_cork ppa_stream_cork
+#define pa_stream_is_suspended ppa_stream_is_suspended
+#define pa_stream_get_device_name ppa_stream_get_device_name
+#define pa_stream_get_latency ppa_stream_get_latency
+#define pa_stream_set_buffer_attr_callback ppa_stream_set_buffer_attr_callback
+#define pa_stream_begin_write ppa_stream_begin_write
+#define pa_channel_map_init_auto ppa_channel_map_init_auto
+#define pa_channel_map_parse ppa_channel_map_parse
+#define pa_channel_map_snprint ppa_channel_map_snprint
+#define pa_channel_map_equal ppa_channel_map_equal
+#define pa_channel_map_superset ppa_channel_map_superset
+#define pa_operation_get_state ppa_operation_get_state
+#define pa_operation_unref ppa_operation_unref
+#define pa_sample_spec_valid ppa_sample_spec_valid
+#define pa_frame_size ppa_frame_size
+#define pa_strerror ppa_strerror
+#define pa_stream_get_state ppa_stream_get_state
+#define pa_stream_peek ppa_stream_peek
+#define pa_stream_write ppa_stream_write
+#define pa_xfree ppa_xfree
+#define pa_path_get_filename ppa_path_get_filename
+#define pa_get_binary_name ppa_get_binary_name
+#define pa_xmalloc ppa_xmalloc
+#endif /* IN_IDE_PARSER */
+
+#endif
+
+
+constexpr pa_channel_map MonoChanMap{
+ 1, {PA_CHANNEL_POSITION_MONO}
+}, StereoChanMap{
+ 2, {PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT}
+}, QuadChanMap{
+ 4, {
+ PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_REAR_LEFT, PA_CHANNEL_POSITION_REAR_RIGHT
+ }
+}, X51ChanMap{
+ 6, {
+ PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_FRONT_CENTER, PA_CHANNEL_POSITION_LFE,
+ PA_CHANNEL_POSITION_SIDE_LEFT, PA_CHANNEL_POSITION_SIDE_RIGHT
+ }
+}, X51RearChanMap{
+ 6, {
+ PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_FRONT_CENTER, PA_CHANNEL_POSITION_LFE,
+ PA_CHANNEL_POSITION_REAR_LEFT, PA_CHANNEL_POSITION_REAR_RIGHT
+ }
+}, X61ChanMap{
+ 7, {
+ PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_FRONT_CENTER, PA_CHANNEL_POSITION_LFE,
+ PA_CHANNEL_POSITION_REAR_CENTER,
+ PA_CHANNEL_POSITION_SIDE_LEFT, PA_CHANNEL_POSITION_SIDE_RIGHT
+ }
+}, X71ChanMap{
+ 8, {
+ PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_FRONT_CENTER, PA_CHANNEL_POSITION_LFE,
+ PA_CHANNEL_POSITION_REAR_LEFT, PA_CHANNEL_POSITION_REAR_RIGHT,
+ PA_CHANNEL_POSITION_SIDE_LEFT, PA_CHANNEL_POSITION_SIDE_RIGHT
+ }
+};
+
+size_t ChannelFromPulse(pa_channel_position_t chan)
+{
+ switch(chan)
+ {
+ case PA_CHANNEL_POSITION_INVALID: break;
+ case PA_CHANNEL_POSITION_MONO: return FrontCenter;
+ case PA_CHANNEL_POSITION_FRONT_LEFT: return FrontLeft;
+ case PA_CHANNEL_POSITION_FRONT_RIGHT: return FrontRight;
+ case PA_CHANNEL_POSITION_FRONT_CENTER: return FrontCenter;
+ case PA_CHANNEL_POSITION_REAR_CENTER: return BackCenter;
+ case PA_CHANNEL_POSITION_REAR_LEFT: return BackLeft;
+ case PA_CHANNEL_POSITION_REAR_RIGHT: return BackRight;
+ case PA_CHANNEL_POSITION_LFE: return LFE;
+ case PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER: break;
+ case PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER: break;
+ case PA_CHANNEL_POSITION_SIDE_LEFT: return SideLeft;
+ case PA_CHANNEL_POSITION_SIDE_RIGHT: return SideRight;
+ case PA_CHANNEL_POSITION_AUX0: return Aux0;
+ case PA_CHANNEL_POSITION_AUX1: return Aux1;
+ case PA_CHANNEL_POSITION_AUX2: return Aux2;
+ case PA_CHANNEL_POSITION_AUX3: return Aux3;
+ case PA_CHANNEL_POSITION_AUX4: return Aux4;
+ case PA_CHANNEL_POSITION_AUX5: return Aux5;
+ case PA_CHANNEL_POSITION_AUX6: return Aux6;
+ case PA_CHANNEL_POSITION_AUX7: return Aux7;
+ case PA_CHANNEL_POSITION_AUX8: return Aux8;
+ case PA_CHANNEL_POSITION_AUX9: return Aux9;
+ case PA_CHANNEL_POSITION_AUX10: return Aux10;
+ case PA_CHANNEL_POSITION_AUX11: return Aux11;
+ case PA_CHANNEL_POSITION_AUX12: return Aux12;
+ case PA_CHANNEL_POSITION_AUX13: return Aux13;
+ case PA_CHANNEL_POSITION_AUX14: return Aux14;
+ case PA_CHANNEL_POSITION_AUX15: return Aux15;
+ case PA_CHANNEL_POSITION_AUX16: break;
+ case PA_CHANNEL_POSITION_AUX17: break;
+ case PA_CHANNEL_POSITION_AUX18: break;
+ case PA_CHANNEL_POSITION_AUX19: break;
+ case PA_CHANNEL_POSITION_AUX20: break;
+ case PA_CHANNEL_POSITION_AUX21: break;
+ case PA_CHANNEL_POSITION_AUX22: break;
+ case PA_CHANNEL_POSITION_AUX23: break;
+ case PA_CHANNEL_POSITION_AUX24: break;
+ case PA_CHANNEL_POSITION_AUX25: break;
+ case PA_CHANNEL_POSITION_AUX26: break;
+ case PA_CHANNEL_POSITION_AUX27: break;
+ case PA_CHANNEL_POSITION_AUX28: break;
+ case PA_CHANNEL_POSITION_AUX29: break;
+ case PA_CHANNEL_POSITION_AUX30: break;
+ case PA_CHANNEL_POSITION_AUX31: break;
+ case PA_CHANNEL_POSITION_TOP_CENTER: break;
+ case PA_CHANNEL_POSITION_TOP_FRONT_LEFT: return UpperFrontLeft;
+ case PA_CHANNEL_POSITION_TOP_FRONT_RIGHT: return UpperFrontRight;
+ case PA_CHANNEL_POSITION_TOP_FRONT_CENTER: break;
+ case PA_CHANNEL_POSITION_TOP_REAR_LEFT: return UpperBackLeft;
+ case PA_CHANNEL_POSITION_TOP_REAR_RIGHT: return UpperBackRight;
+ case PA_CHANNEL_POSITION_TOP_REAR_CENTER: break;
+ case PA_CHANNEL_POSITION_MAX: break;
+ }
+ throw al::backend_exception{ALC_INVALID_VALUE, "Unexpected channel enum %d", chan};
+}
+
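+/* Fills the device's real-output channel index map from the given PulseAudio
+ * channel map, leaving unused channels at -1. */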
+void SetChannelOrderFromMap(ALCdevice *device, const pa_channel_map &chanmap)
+{
+ device->RealOut.ChannelIndex.fill(-1);
+ for(int i{0};i < chanmap.channels;++i)
+ device->RealOut.ChannelIndex[ChannelFromPulse(chanmap.map[i])] = i;
+}
+
+
+/* *grumble* Don't use enums for bitflags. */
+inline pa_stream_flags_t operator|(pa_stream_flags_t lhs, pa_stream_flags_t rhs)
+{ return pa_stream_flags_t(int(lhs) | int(rhs)); }
+inline pa_stream_flags_t& operator|=(pa_stream_flags_t &lhs, pa_stream_flags_t rhs)
+{
+ lhs = pa_stream_flags_t(int(lhs) | int(rhs));
+ return lhs;
+}
+inline pa_context_flags_t& operator|=(pa_context_flags_t &lhs, pa_context_flags_t rhs)
+{
+ lhs = pa_context_flags_t(int(lhs) | int(rhs));
+ return lhs;
+}
+
+inline pa_stream_flags_t& operator&=(pa_stream_flags_t &lhs, int rhs)
+{
+ lhs = pa_stream_flags_t(int(lhs) & rhs);
+ return lhs;
+}
+
+
+/* Global flags and properties */
+pa_context_flags_t pulse_ctx_flags;
+
+pa_mainloop *pulse_mainloop{nullptr};
+
+std::mutex pulse_lock;
+std::condition_variable pulse_condvar;
+
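+/* Poll function for the shared mainloop: drops the global pulse lock while
+ * poll() blocks so other threads can make PulseAudio calls, then re-locks it
+ * before callbacks are dispatched. */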
+int pulse_poll_func(struct pollfd *ufds, unsigned long nfds, int timeout, void *userdata)
+{
+ auto plock = static_cast<std::unique_lock<std::mutex>*>(userdata);
+ plock->unlock();
+ int r{poll(ufds, nfds, timeout)};
+ plock->lock();
+ return r;
+}
+
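+/* Runs the single shared PulseAudio mainloop. Started on demand by
+ * connect_context(), it signals pulse_condvar once the mainloop object is
+ * available. */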
+int pulse_mainloop_thread()
+{
+ SetRTPriority();
+
+ std::unique_lock<std::mutex> plock{pulse_lock};
+ pulse_mainloop = pa_mainloop_new();
+
+ pa_mainloop_set_poll_func(pulse_mainloop, pulse_poll_func, &plock);
+ pulse_condvar.notify_all();
+
+ int ret{};
+ pa_mainloop_run(pulse_mainloop, &ret);
+
+ pa_mainloop_free(pulse_mainloop);
+ pulse_mainloop = nullptr;
+
+ return ret;
+}
+
+
+/* PulseAudio Event Callbacks */
+void context_state_callback(pa_context *context, void* /*pdata*/)
+{
+ pa_context_state_t state{pa_context_get_state(context)};
+ if(state == PA_CONTEXT_READY || !PA_CONTEXT_IS_GOOD(state))
+ pulse_condvar.notify_all();
+}
+
+void stream_state_callback(pa_stream *stream, void* /*pdata*/)
+{
+ pa_stream_state_t state{pa_stream_get_state(stream)};
+ if(state == PA_STREAM_READY || !PA_STREAM_IS_GOOD(state))
+ pulse_condvar.notify_all();
+}
+
+void stream_success_callback(pa_stream* /*stream*/, int /*success*/, void* /*pdata*/)
+{
+ pulse_condvar.notify_all();
+}
+
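+/* Waits (with pulse_lock held via plock) until the given operation completes,
+ * then releases the operation reference. */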
+void wait_for_operation(pa_operation *op, std::unique_lock<std::mutex> &plock)
+{
+ if(op)
+ {
+ while(pa_operation_get_state(op) == PA_OPERATION_RUNNING)
+ pulse_condvar.wait(plock);
+ pa_operation_unref(op);
+ }
+}
+
+
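+/* Creates a context on the shared mainloop, starting the mainloop thread if
+ * needed, and waits until it reaches the ready state. Throws a
+ * backend_exception on failure. */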
+pa_context *connect_context(std::unique_lock<std::mutex> &plock)
+{
+ const char *name{"OpenAL Soft"};
+
+ const PathNamePair &binname = GetProcBinary();
+ if(!binname.fname.empty())
+ name = binname.fname.c_str();
+
+ if(UNLIKELY(!pulse_mainloop))
+ {
+ std::thread{pulse_mainloop_thread}.detach();
+ while(!pulse_mainloop)
+ pulse_condvar.wait(plock);
+ }
+
+ pa_context *context{pa_context_new(pa_mainloop_get_api(pulse_mainloop), name)};
+ if(!context) throw al::backend_exception{ALC_OUT_OF_MEMORY, "pa_context_new() failed"};
+
+ pa_context_set_state_callback(context, context_state_callback, nullptr);
+
+ int err;
+ if((err=pa_context_connect(context, nullptr, pulse_ctx_flags, nullptr)) >= 0)
+ {
+ pa_context_state_t state;
+ while((state=pa_context_get_state(context)) != PA_CONTEXT_READY)
+ {
+ if(!PA_CONTEXT_IS_GOOD(state))
+ {
+ err = pa_context_errno(context);
+ if(err > 0) err = -err;
+ break;
+ }
+
+ pulse_condvar.wait(plock);
+ }
+ }
+ pa_context_set_state_callback(context, nullptr, nullptr);
+
+ if(err < 0)
+ {
+ pa_context_unref(context);
+ throw al::backend_exception{ALC_INVALID_VALUE, "Context did not connect (%s)",
+ pa_strerror(err)};
+ }
+
+ return context;
+}
+
+
+void pulse_close(pa_context *context, pa_stream *stream)
+{
+ std::lock_guard<std::mutex> _{pulse_lock};
+ if(stream)
+ {
+ pa_stream_set_state_callback(stream, nullptr, nullptr);
+ pa_stream_set_moved_callback(stream, nullptr, nullptr);
+ pa_stream_set_write_callback(stream, nullptr, nullptr);
+ pa_stream_set_buffer_attr_callback(stream, nullptr, nullptr);
+ pa_stream_disconnect(stream);
+ pa_stream_unref(stream);
+ }
+
+ pa_context_disconnect(context);
+ pa_context_unref(context);
+}
+
+
+struct DevMap {
+ std::string name;
+ std::string device_name;
+};
+
+bool checkName(const al::vector<DevMap> &list, const std::string &name)
+{
+ return std::find_if(list.cbegin(), list.cend(),
+ [&name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ ) != list.cend();
+}
+
+al::vector<DevMap> PlaybackDevices;
+al::vector<DevMap> CaptureDevices;
+
+
+pa_stream *pulse_connect_stream(const char *device_name, std::unique_lock<std::mutex> &plock,
+ pa_context *context, pa_stream_flags_t flags, pa_buffer_attr *attr, pa_sample_spec *spec,
+ pa_channel_map *chanmap, BackendType type)
+{
+ const char *stream_id{(type==BackendType::Playback) ? "Playback Stream" : "Capture Stream"};
+ pa_stream *stream{pa_stream_new(context, stream_id, spec, chanmap)};
+ if(!stream)
+ throw al::backend_exception{ALC_OUT_OF_MEMORY, "pa_stream_new() failed (%s)",
+ pa_strerror(pa_context_errno(context))};
+
+ pa_stream_set_state_callback(stream, stream_state_callback, nullptr);
+
+ int err{(type==BackendType::Playback) ?
+ pa_stream_connect_playback(stream, device_name, attr, flags, nullptr, nullptr) :
+ pa_stream_connect_record(stream, device_name, attr, flags)};
+ if(err < 0)
+ {
+ pa_stream_unref(stream);
+ throw al::backend_exception{ALC_INVALID_VALUE, "%s did not connect (%s)", stream_id,
+ pa_strerror(err)};
+ }
+
+ pa_stream_state_t state;
+ while((state=pa_stream_get_state(stream)) != PA_STREAM_READY)
+ {
+ if(!PA_STREAM_IS_GOOD(state))
+ {
+ int err{pa_context_errno(context)};
+ pa_stream_unref(stream);
+ throw al::backend_exception{ALC_INVALID_VALUE, "%s did not get ready (%s)", stream_id,
+ pa_strerror(err)};
+ }
+
+ pulse_condvar.wait(plock);
+ }
+ pa_stream_set_state_callback(stream, nullptr, nullptr);
+
+ return stream;
+}
+
+
+void device_sink_callback(pa_context*, const pa_sink_info *info, int eol, void*)
+{
+ if(eol)
+ {
+ pulse_condvar.notify_all();
+ return;
+ }
+
+ /* Skip this device if it's already in the list. */
+ if(std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
+ [info](const DevMap &entry) -> bool
+ { return entry.device_name == info->name; }
+ ) != PlaybackDevices.cend())
+ return;
+
+ /* Make sure the display name (description) is unique. Append a number
+ * counter as needed.
+ */
+ int count{1};
+ std::string newname{info->description};
+ while(checkName(PlaybackDevices, newname))
+ {
+ newname = info->description;
+ newname += " #";
+ newname += std::to_string(++count);
+ }
+ PlaybackDevices.emplace_back(DevMap{std::move(newname), info->name});
+ DevMap &newentry = PlaybackDevices.back();
+
+ TRACE("Got device \"%s\", \"%s\"\n", newentry.name.c_str(), newentry.device_name.c_str());
+}
+
+void probePlaybackDevices()
+{
+ PlaybackDevices.clear();
+
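+ /* Open a temporary stream on the default sink so its info is queried (and
+ * listed) first, then enumerate the remaining sinks. */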
+ try {
+ std::unique_lock<std::mutex> plock{pulse_lock};
+
+ pa_context *context{connect_context(plock)};
+
+ const pa_stream_flags_t flags{PA_STREAM_FIX_FORMAT | PA_STREAM_FIX_RATE |
+ PA_STREAM_FIX_CHANNELS | PA_STREAM_DONT_MOVE};
+
+ pa_sample_spec spec{};
+ spec.format = PA_SAMPLE_S16NE;
+ spec.rate = 44100;
+ spec.channels = 2;
+
+ pa_stream *stream{pulse_connect_stream(nullptr, plock, context, flags, nullptr, &spec,
+ nullptr, BackendType::Playback)};
+ pa_operation *op{pa_context_get_sink_info_by_name(context,
+ pa_stream_get_device_name(stream), device_sink_callback, nullptr)};
+ wait_for_operation(op, plock);
+
+ pa_stream_disconnect(stream);
+ pa_stream_unref(stream);
+ stream = nullptr;
+
+ op = pa_context_get_sink_info_list(context, device_sink_callback, nullptr);
+ wait_for_operation(op, plock);
+
+ pa_context_disconnect(context);
+ pa_context_unref(context);
+ }
+ catch(std::exception &e) {
+ ERR("Error enumerating devices: %s\n", e.what());
+ }
+}
+
+
+void device_source_callback(pa_context*, const pa_source_info *info, int eol, void*)
+{
+ if(eol)
+ {
+ pulse_condvar.notify_all();
+ return;
+ }
+
+ /* Skip this device if it's already in the list. */
+ if(std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
+ [info](const DevMap &entry) -> bool
+ { return entry.device_name == info->name; }
+ ) != CaptureDevices.cend())
+ return;
+
+ /* Make sure the display name (description) is unique. Append a number
+ * counter as needed.
+ */
+ int count{1};
+ std::string newname{info->description};
+ while(checkName(CaptureDevices, newname))
+ {
+ newname = info->description;
+ newname += " #";
+ newname += std::to_string(++count);
+ }
+ CaptureDevices.emplace_back(DevMap{std::move(newname), info->name});
+ DevMap &newentry = CaptureDevices.back();
+
+ TRACE("Got device \"%s\", \"%s\"\n", newentry.name.c_str(), newentry.device_name.c_str());
+}
+
+void probeCaptureDevices()
+{
+ CaptureDevices.clear();
+
+ try {
+ std::unique_lock<std::mutex> plock{pulse_lock};
+
+ pa_context *context{connect_context(plock)};
+
+ const pa_stream_flags_t flags{PA_STREAM_FIX_FORMAT | PA_STREAM_FIX_RATE |
+ PA_STREAM_FIX_CHANNELS | PA_STREAM_DONT_MOVE};
+
+ pa_sample_spec spec{};
+ spec.format = PA_SAMPLE_S16NE;
+ spec.rate = 44100;
+ spec.channels = 1;
+
+ pa_stream *stream{pulse_connect_stream(nullptr, plock, context, flags, nullptr, &spec, nullptr,
+ BackendType::Capture)};
+ pa_operation *op{pa_context_get_source_info_by_name(context,
+ pa_stream_get_device_name(stream), device_source_callback, nullptr)};
+ wait_for_operation(op, plock);
+
+ pa_stream_disconnect(stream);
+ pa_stream_unref(stream);
+ stream = nullptr;
+
+ op = pa_context_get_source_info_list(context, device_source_callback, nullptr);
+ wait_for_operation(op, plock);
+
+ pa_context_disconnect(context);
+ pa_context_unref(context);
+ }
+ catch(std::exception &e) {
+ ERR("Error enumerating devices: %s\n", e.what());
+ }
+}
+
+
+struct PulsePlayback final : public BackendBase {
+ PulsePlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~PulsePlayback() override;
+
+ static void bufferAttrCallbackC(pa_stream *stream, void *pdata);
+ void bufferAttrCallback(pa_stream *stream);
+
+ static void contextStateCallbackC(pa_context *context, void *pdata);
+ void contextStateCallback(pa_context *context);
+
+ static void streamStateCallbackC(pa_stream *stream, void *pdata);
+ void streamStateCallback(pa_stream *stream);
+
+ static void streamWriteCallbackC(pa_stream *stream, size_t nbytes, void *pdata);
+ void streamWriteCallback(pa_stream *stream, size_t nbytes);
+
+ static void sinkInfoCallbackC(pa_context *context, const pa_sink_info *info, int eol, void *pdata);
+ void sinkInfoCallback(pa_context *context, const pa_sink_info *info, int eol);
+
+ static void sinkNameCallbackC(pa_context *context, const pa_sink_info *info, int eol, void *pdata);
+ void sinkNameCallback(pa_context *context, const pa_sink_info *info, int eol);
+
+ static void streamMovedCallbackC(pa_stream *stream, void *pdata);
+ void streamMovedCallback(pa_stream *stream);
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+ ClockLatency getClockLatency() override;
+ void lock() override;
+ void unlock() override;
+
+ std::string mDeviceName;
+
+ pa_buffer_attr mAttr;
+ pa_sample_spec mSpec;
+
+ pa_stream *mStream{nullptr};
+ pa_context *mContext{nullptr};
+
+ ALuint mFrameSize{0u};
+
+ DEF_NEWDEL(PulsePlayback)
+};
+
+PulsePlayback::~PulsePlayback()
+{
+ if(!mContext)
+ return;
+
+ pulse_close(mContext, mStream);
+ mContext = nullptr;
+ mStream = nullptr;
+}
+
+
+void PulsePlayback::bufferAttrCallbackC(pa_stream *stream, void *pdata)
+{ static_cast<PulsePlayback*>(pdata)->bufferAttrCallback(stream); }
+
+void PulsePlayback::bufferAttrCallback(pa_stream *stream)
+{
+ /* FIXME: Update the device's UpdateSize (and/or BufferSize) using the new
+ * buffer attributes? Changing UpdateSize will change the ALC_REFRESH
+ * property, which probably shouldn't change between device resets. But
+ * leaving it alone means ALC_REFRESH will be off.
+ */
+ mAttr = *(pa_stream_get_buffer_attr(stream));
+ TRACE("minreq=%d, tlength=%d, prebuf=%d\n", mAttr.minreq, mAttr.tlength, mAttr.prebuf);
+}
+
+void PulsePlayback::contextStateCallbackC(pa_context *context, void *pdata)
+{ static_cast<PulsePlayback*>(pdata)->contextStateCallback(context); }
+
+void PulsePlayback::contextStateCallback(pa_context *context)
+{
+ if(pa_context_get_state(context) == PA_CONTEXT_FAILED)
+ {
+ ERR("Received context failure!\n");
+ aluHandleDisconnect(mDevice, "Playback state failure");
+ }
+ pulse_condvar.notify_all();
+}
+
+void PulsePlayback::streamStateCallbackC(pa_stream *stream, void *pdata)
+{ static_cast<PulsePlayback*>(pdata)->streamStateCallback(stream); }
+
+void PulsePlayback::streamStateCallback(pa_stream *stream)
+{
+ if(pa_stream_get_state(stream) == PA_STREAM_FAILED)
+ {
+ ERR("Received stream failure!\n");
+ aluHandleDisconnect(mDevice, "Playback stream failure");
+ }
+ pulse_condvar.notify_all();
+}
+
+void PulsePlayback::streamWriteCallbackC(pa_stream *stream, size_t nbytes, void *pdata)
+{ static_cast<PulsePlayback*>(pdata)->streamWriteCallback(stream, nbytes); }
+
+void PulsePlayback::streamWriteCallback(pa_stream *stream, size_t nbytes)
+{
+ void *buf{pa_xmalloc(nbytes)};
+ aluMixData(mDevice, buf, nbytes/mFrameSize);
+
+ int ret{pa_stream_write(stream, buf, nbytes, pa_xfree, 0, PA_SEEK_RELATIVE)};
+ if(UNLIKELY(ret != PA_OK))
+ ERR("Failed to write to stream: %d, %s\n", ret, pa_strerror(ret));
+}
+
+void PulsePlayback::sinkInfoCallbackC(pa_context *context, const pa_sink_info *info, int eol, void *pdata)
+{ static_cast<PulsePlayback*>(pdata)->sinkInfoCallback(context, info, eol); }
+
+void PulsePlayback::sinkInfoCallback(pa_context*, const pa_sink_info *info, int eol)
+{
+ struct ChannelMap {
+ DevFmtChannels chans;
+ pa_channel_map map;
+ };
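+ /* Ordered from most to fewest channels, so the first map the sink's channel
+ * map covers selects the richest matching device format. */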
+ static constexpr std::array<ChannelMap,7> chanmaps{{
+ { DevFmtX71, X71ChanMap },
+ { DevFmtX61, X61ChanMap },
+ { DevFmtX51, X51ChanMap },
+ { DevFmtX51Rear, X51RearChanMap },
+ { DevFmtQuad, QuadChanMap },
+ { DevFmtStereo, StereoChanMap },
+ { DevFmtMono, MonoChanMap }
+ }};
+
+ if(eol)
+ {
+ pulse_condvar.notify_all();
+ return;
+ }
+
+ auto chanmap = std::find_if(chanmaps.cbegin(), chanmaps.cend(),
+ [info](const ChannelMap &chanmap) -> bool
+ { return pa_channel_map_superset(&info->channel_map, &chanmap.map); }
+ );
+ if(chanmap != chanmaps.cend())
+ {
+ if(!mDevice->Flags.get<ChannelsRequest>())
+ mDevice->FmtChans = chanmap->chans;
+ }
+ else
+ {
+ char chanmap_str[PA_CHANNEL_MAP_SNPRINT_MAX]{};
+ pa_channel_map_snprint(chanmap_str, sizeof(chanmap_str), &info->channel_map);
+ WARN("Failed to find format for channel map:\n %s\n", chanmap_str);
+ }
+
+ if(info->active_port)
+ TRACE("Active port: %s (%s)\n", info->active_port->name, info->active_port->description);
+ mDevice->IsHeadphones = (mDevice->FmtChans == DevFmtStereo &&
+ info->active_port && strcmp(info->active_port->name, "analog-output-headphones") == 0);
+}
+
+void PulsePlayback::sinkNameCallbackC(pa_context *context, const pa_sink_info *info, int eol, void *pdata)
+{ static_cast<PulsePlayback*>(pdata)->sinkNameCallback(context, info, eol); }
+
+void PulsePlayback::sinkNameCallback(pa_context*, const pa_sink_info *info, int eol)
+{
+ if(eol)
+ {
+ pulse_condvar.notify_all();
+ return;
+ }
+ mDevice->DeviceName = info->description;
+}
+
+void PulsePlayback::streamMovedCallbackC(pa_stream *stream, void *pdata)
+{ static_cast<PulsePlayback*>(pdata)->streamMovedCallback(stream); }
+
+void PulsePlayback::streamMovedCallback(pa_stream *stream)
+{
+ mDeviceName = pa_stream_get_device_name(stream);
+ TRACE("Stream moved to %s\n", mDeviceName.c_str());
+}
+
+
+ALCenum PulsePlayback::open(const ALCchar *name)
+{
+ const char *pulse_name{nullptr};
+ const char *dev_name{nullptr};
+
+ if(name)
+ {
+ if(PlaybackDevices.empty())
+ probePlaybackDevices();
+
+ auto iter = std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
+ [name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ );
+ if(iter == PlaybackDevices.cend())
+ throw al::backend_exception{ALC_INVALID_VALUE, "Device name \"%s\" not found", name};
+ pulse_name = iter->device_name.c_str();
+ dev_name = iter->name.c_str();
+ }
+
+ std::unique_lock<std::mutex> plock{pulse_lock};
+
+ mContext = connect_context(plock);
+ pa_context_set_state_callback(mContext, &PulsePlayback::contextStateCallbackC, this);
+
+ pa_stream_flags_t flags{PA_STREAM_FIX_FORMAT | PA_STREAM_FIX_RATE | PA_STREAM_FIX_CHANNELS};
+ if(!GetConfigValueBool(nullptr, "pulse", "allow-moves", 1))
+ flags |= PA_STREAM_DONT_MOVE;
+
+ pa_sample_spec spec{};
+ spec.format = PA_SAMPLE_S16NE;
+ spec.rate = 44100;
+ spec.channels = 2;
+
+ if(!pulse_name)
+ {
+ pulse_name = getenv("ALSOFT_PULSE_DEFAULT");
+ if(pulse_name && !pulse_name[0]) pulse_name = nullptr;
+ }
+ TRACE("Connecting to \"%s\"\n", pulse_name ? pulse_name : "(default)");
+ mStream = pulse_connect_stream(pulse_name, plock, mContext, flags, nullptr, &spec, nullptr,
+ BackendType::Playback);
+
+ pa_stream_set_moved_callback(mStream, &PulsePlayback::streamMovedCallbackC, this);
+ mFrameSize = pa_frame_size(pa_stream_get_sample_spec(mStream));
+
+ mDeviceName = pa_stream_get_device_name(mStream);
+ if(!dev_name)
+ {
+ pa_operation *op{pa_context_get_sink_info_by_name(mContext, mDeviceName.c_str(),
+ &PulsePlayback::sinkNameCallbackC, this)};
+ wait_for_operation(op, plock);
+ }
+ else
+ mDevice->DeviceName = dev_name;
+
+ return ALC_NO_ERROR;
+}
+
+ALCboolean PulsePlayback::reset()
+{
+ std::unique_lock<std::mutex> plock{pulse_lock};
+
+ if(mStream)
+ {
+ pa_stream_set_state_callback(mStream, nullptr, nullptr);
+ pa_stream_set_moved_callback(mStream, nullptr, nullptr);
+ pa_stream_set_write_callback(mStream, nullptr, nullptr);
+ pa_stream_set_buffer_attr_callback(mStream, nullptr, nullptr);
+ pa_stream_disconnect(mStream);
+ pa_stream_unref(mStream);
+ mStream = nullptr;
+ }
+
+ pa_operation *op{pa_context_get_sink_info_by_name(mContext, mDeviceName.c_str(),
+ &PulsePlayback::sinkInfoCallbackC, this)};
+ wait_for_operation(op, plock);
+
+ pa_stream_flags_t flags{PA_STREAM_START_CORKED | PA_STREAM_INTERPOLATE_TIMING |
+ PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_EARLY_REQUESTS};
+ if(!GetConfigValueBool(nullptr, "pulse", "allow-moves", 1))
+ flags |= PA_STREAM_DONT_MOVE;
+ if(GetConfigValueBool(mDevice->DeviceName.c_str(), "pulse", "adjust-latency", 0))
+ {
+ /* ADJUST_LATENCY can't be specified with EARLY_REQUESTS, for some
+ * reason. So if the user wants to adjust the overall device latency,
+ * we can't ask to get write signals as soon as minreq is reached.
+ */
+ flags &= ~PA_STREAM_EARLY_REQUESTS;
+ flags |= PA_STREAM_ADJUST_LATENCY;
+ }
+ if(GetConfigValueBool(mDevice->DeviceName.c_str(), "pulse", "fix-rate", 0) ||
+ !mDevice->Flags.get<FrequencyRequest>())
+ flags |= PA_STREAM_FIX_RATE;
+
+ pa_channel_map chanmap{};
+ switch(mDevice->FmtChans)
+ {
+ case DevFmtMono:
+ chanmap = MonoChanMap;
+ break;
+ case DevFmtAmbi3D:
+ mDevice->FmtChans = DevFmtStereo;
+ /*fall-through*/
+ case DevFmtStereo:
+ chanmap = StereoChanMap;
+ break;
+ case DevFmtQuad:
+ chanmap = QuadChanMap;
+ break;
+ case DevFmtX51:
+ chanmap = X51ChanMap;
+ break;
+ case DevFmtX51Rear:
+ chanmap = X51RearChanMap;
+ break;
+ case DevFmtX61:
+ chanmap = X61ChanMap;
+ break;
+ case DevFmtX71:
+ chanmap = X71ChanMap;
+ break;
+ }
+ SetChannelOrderFromMap(mDevice, chanmap);
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ mDevice->FmtType = DevFmtUByte;
+ /* fall-through */
+ case DevFmtUByte:
+ mSpec.format = PA_SAMPLE_U8;
+ break;
+ case DevFmtUShort:
+ mDevice->FmtType = DevFmtShort;
+ /* fall-through */
+ case DevFmtShort:
+ mSpec.format = PA_SAMPLE_S16NE;
+ break;
+ case DevFmtUInt:
+ mDevice->FmtType = DevFmtInt;
+ /* fall-through */
+ case DevFmtInt:
+ mSpec.format = PA_SAMPLE_S32NE;
+ break;
+ case DevFmtFloat:
+ mSpec.format = PA_SAMPLE_FLOAT32NE;
+ break;
+ }
+ mSpec.rate = mDevice->Frequency;
+ mSpec.channels = mDevice->channelsFromFmt();
+ if(pa_sample_spec_valid(&mSpec) == 0)
+ throw al::backend_exception{ALC_INVALID_VALUE, "Invalid sample spec"};
+
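+ /* Map the ALC sizes onto the stream's buffer attributes: tlength is the
+ * total buffer length in bytes, minreq the per-update write size. */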
+ mAttr.maxlength = -1;
+ mAttr.tlength = mDevice->BufferSize * pa_frame_size(&mSpec);
+ mAttr.prebuf = 0;
+ mAttr.minreq = mDevice->UpdateSize * pa_frame_size(&mSpec);
+ mAttr.fragsize = -1;
+
+ mStream = pulse_connect_stream(mDeviceName.c_str(), plock, mContext, flags, &mAttr, &mSpec,
+ &chanmap, BackendType::Playback);
+
+ pa_stream_set_state_callback(mStream, &PulsePlayback::streamStateCallbackC, this);
+ pa_stream_set_moved_callback(mStream, &PulsePlayback::streamMovedCallbackC, this);
+
+ mSpec = *(pa_stream_get_sample_spec(mStream));
+ mFrameSize = pa_frame_size(&mSpec);
+
+ if(mDevice->Frequency != mSpec.rate)
+ {
+ /* Server updated our playback rate, so modify the buffer attribs
+ * accordingly.
+ */
+ const auto scale = static_cast<double>(mSpec.rate) / mDevice->Frequency;
+ const ALuint perlen{static_cast<ALuint>(clampd(scale*mDevice->UpdateSize + 0.5, 64.0,
+ 8192.0))};
+ const ALuint buflen{static_cast<ALuint>(clampd(scale*mDevice->BufferSize + 0.5, perlen*2,
+ std::numeric_limits<int>::max()/mFrameSize))};
+
+ mAttr.maxlength = -1;
+ mAttr.tlength = buflen * mFrameSize;
+ mAttr.prebuf = 0;
+ mAttr.minreq = perlen * mFrameSize;
+
+ op = pa_stream_set_buffer_attr(mStream, &mAttr, stream_success_callback, nullptr);
+ wait_for_operation(op, plock);
+
+ mDevice->Frequency = mSpec.rate;
+ }
+
+ pa_stream_set_buffer_attr_callback(mStream, &PulsePlayback::bufferAttrCallbackC, this);
+ bufferAttrCallback(mStream);
+
+ mDevice->BufferSize = mAttr.tlength / mFrameSize;
+ mDevice->UpdateSize = mAttr.minreq / mFrameSize;
+
+ /* HACK: prebuf should be 0 as that's what we set it to. However on some
+ * systems it comes back as non-0, so we have to make sure the device will
+ * write enough audio to start playback. The lack of manual start control
+ * may have unintended consequences, but it's better than not starting at
+ * all.
+ */
+ if(mAttr.prebuf != 0)
+ {
+ ALuint len{mAttr.prebuf / mFrameSize};
+ if(len <= mDevice->BufferSize)
+ ERR("Non-0 prebuf, %u samples (%u bytes), device has %u samples\n",
+ len, mAttr.prebuf, mDevice->BufferSize);
+ }
+
+ return ALC_TRUE;
+}
+
+ALCboolean PulsePlayback::start()
+{
+ std::unique_lock<std::mutex> plock{pulse_lock};
+
+ pa_stream_set_write_callback(mStream, &PulsePlayback::streamWriteCallbackC, this);
+ pa_operation *op{pa_stream_cork(mStream, 0, stream_success_callback, nullptr)};
+ wait_for_operation(op, plock);
+
+ return ALC_TRUE;
+}
+
+void PulsePlayback::stop()
+{
+ std::unique_lock<std::mutex> plock{pulse_lock};
+
+ pa_stream_set_write_callback(mStream, nullptr, nullptr);
+ pa_operation *op{pa_stream_cork(mStream, 1, stream_success_callback, nullptr)};
+ wait_for_operation(op, plock);
+}
+
+
+ClockLatency PulsePlayback::getClockLatency()
+{
+ ClockLatency ret;
+ pa_usec_t latency;
+ int neg, err;
+
+ { std::lock_guard<std::mutex> _{pulse_lock};
+ ret.ClockTime = GetDeviceClockTime(mDevice);
+ err = pa_stream_get_latency(mStream, &latency, &neg);
+ }
+
+ if(UNLIKELY(err != 0))
+ {
+ /* FIXME: if err = -PA_ERR_NODATA, it means we were called too soon
+ * after starting the stream and no timing info has been received from
+ * the server yet. Should we wait, possibly stalling the app, or give a
+ * dummy value? Either way, it shouldn't be 0. */
+ if(err != -PA_ERR_NODATA)
+ ERR("Failed to get stream latency: 0x%x\n", err);
+ latency = 0;
+ neg = 0;
+ }
+ else if(UNLIKELY(neg))
+ latency = 0;
+ ret.Latency = std::chrono::microseconds{latency};
+
+ return ret;
+}
+
+
+void PulsePlayback::lock()
+{ pulse_lock.lock(); }
+
+void PulsePlayback::unlock()
+{ pulse_lock.unlock(); }
+
+
+struct PulseCapture final : public BackendBase {
+ PulseCapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~PulseCapture() override;
+
+ static void contextStateCallbackC(pa_context *context, void *pdata);
+ void contextStateCallback(pa_context *context);
+
+ static void streamStateCallbackC(pa_stream *stream, void *pdata);
+ void streamStateCallback(pa_stream *stream);
+
+ static void sourceNameCallbackC(pa_context *context, const pa_source_info *info, int eol, void *pdata);
+ void sourceNameCallback(pa_context *context, const pa_source_info *info, int eol);
+
+ static void streamMovedCallbackC(pa_stream *stream, void *pdata);
+ void streamMovedCallback(pa_stream *stream);
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(ALCvoid *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+ ClockLatency getClockLatency() override;
+ void lock() override;
+ void unlock() override;
+
+ std::string mDeviceName;
+
+ ALCuint mLastReadable{0u};
+ al::byte mSilentVal{};
+
+ al::span<const al::byte> mCapBuffer;
+ ssize_t mCapLen{0};
+
+ pa_buffer_attr mAttr{};
+ pa_sample_spec mSpec{};
+
+ pa_stream *mStream{nullptr};
+ pa_context *mContext{nullptr};
+
+ DEF_NEWDEL(PulseCapture)
+};
+
+PulseCapture::~PulseCapture()
+{
+ if(!mContext)
+ return;
+
+ pulse_close(mContext, mStream);
+ mContext = nullptr;
+ mStream = nullptr;
+}
+
+void PulseCapture::contextStateCallbackC(pa_context *context, void *pdata)
+{ static_cast<PulseCapture*>(pdata)->contextStateCallback(context); }
+
+void PulseCapture::contextStateCallback(pa_context *context)
+{
+ if(pa_context_get_state(context) == PA_CONTEXT_FAILED)
+ {
+ ERR("Received context failure!\n");
+ aluHandleDisconnect(mDevice, "Capture state failure");
+ }
+ pulse_condvar.notify_all();
+}
+
+void PulseCapture::streamStateCallbackC(pa_stream *stream, void *pdata)
+{ static_cast<PulseCapture*>(pdata)->streamStateCallback(stream); }
+
+void PulseCapture::streamStateCallback(pa_stream *stream)
+{
+ if(pa_stream_get_state(stream) == PA_STREAM_FAILED)
+ {
+ ERR("Received stream failure!\n");
+ aluHandleDisconnect(mDevice, "Capture stream failure");
+ }
+ pulse_condvar.notify_all();
+}
+
+void PulseCapture::sourceNameCallbackC(pa_context *context, const pa_source_info *info, int eol, void *pdata)
+{ static_cast<PulseCapture*>(pdata)->sourceNameCallback(context, info, eol); }
+
+void PulseCapture::sourceNameCallback(pa_context*, const pa_source_info *info, int eol)
+{
+ if(eol)
+ {
+ pulse_condvar.notify_all();
+ return;
+ }
+ mDevice->DeviceName = info->description;
+}
+
+void PulseCapture::streamMovedCallbackC(pa_stream *stream, void *pdata)
+{ static_cast<PulseCapture*>(pdata)->streamMovedCallback(stream); }
+
+void PulseCapture::streamMovedCallback(pa_stream *stream)
+{
+ mDeviceName = pa_stream_get_device_name(stream);
+ TRACE("Stream moved to %s\n", mDeviceName.c_str());
+}
+
+
+ALCenum PulseCapture::open(const ALCchar *name)
+{
+ const char *pulse_name{nullptr};
+ if(name)
+ {
+ if(CaptureDevices.empty())
+ probeCaptureDevices();
+
+ auto iter = std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
+ [name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ );
+ if(iter == CaptureDevices.cend())
+ throw al::backend_exception{ALC_INVALID_VALUE, "Device name \"%s\" not found", name};
+ pulse_name = iter->device_name.c_str();
+ mDevice->DeviceName = iter->name;
+ }
+
+ std::unique_lock<std::mutex> plock{pulse_lock};
+
+ mContext = connect_context(plock);
+ pa_context_set_state_callback(mContext, &PulseCapture::contextStateCallbackC, this);
+
+ pa_channel_map chanmap{};
+ switch(mDevice->FmtChans)
+ {
+ case DevFmtMono:
+ chanmap = MonoChanMap;
+ break;
+ case DevFmtStereo:
+ chanmap = StereoChanMap;
+ break;
+ case DevFmtQuad:
+ chanmap = QuadChanMap;
+ break;
+ case DevFmtX51:
+ chanmap = X51ChanMap;
+ break;
+ case DevFmtX51Rear:
+ chanmap = X51RearChanMap;
+ break;
+ case DevFmtX61:
+ chanmap = X61ChanMap;
+ break;
+ case DevFmtX71:
+ chanmap = X71ChanMap;
+ break;
+ case DevFmtAmbi3D:
+ throw al::backend_exception{ALC_INVALID_VALUE, "%s capture samples not supported",
+ DevFmtChannelsString(mDevice->FmtChans)};
+ }
+ SetChannelOrderFromMap(mDevice, chanmap);
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtUByte:
+ mSilentVal = al::byte(0x80);
+ mSpec.format = PA_SAMPLE_U8;
+ break;
+ case DevFmtShort:
+ mSpec.format = PA_SAMPLE_S16NE;
+ break;
+ case DevFmtInt:
+ mSpec.format = PA_SAMPLE_S32NE;
+ break;
+ case DevFmtFloat:
+ mSpec.format = PA_SAMPLE_FLOAT32NE;
+ break;
+ case DevFmtByte:
+ case DevFmtUShort:
+ case DevFmtUInt:
+ throw al::backend_exception{ALC_INVALID_VALUE, "%s capture samples not supported",
+ DevFmtTypeString(mDevice->FmtType)};
+ }
+ mSpec.rate = mDevice->Frequency;
+ mSpec.channels = mDevice->channelsFromFmt();
+ if(pa_sample_spec_valid(&mSpec) == 0)
+ throw al::backend_exception{ALC_INVALID_VALUE, "Invalid sample format"};
+
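+ /* Buffer at least 100ms of capture data overall, delivered in fragments of
+ * at most 50ms. */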
+ ALuint samples{mDevice->BufferSize};
+ samples = maxu(samples, 100 * mDevice->Frequency / 1000);
+
+ mAttr.minreq = -1;
+ mAttr.prebuf = -1;
+ mAttr.maxlength = samples * pa_frame_size(&mSpec);
+ mAttr.tlength = -1;
+ mAttr.fragsize = minu(samples, 50*mDevice->Frequency/1000) * pa_frame_size(&mSpec);
+
+ pa_stream_flags_t flags{PA_STREAM_START_CORKED | PA_STREAM_ADJUST_LATENCY};
+ if(!GetConfigValueBool(nullptr, "pulse", "allow-moves", 1))
+ flags |= PA_STREAM_DONT_MOVE;
+
+ TRACE("Connecting to \"%s\"\n", pulse_name ? pulse_name : "(default)");
+ mStream = pulse_connect_stream(pulse_name, plock, mContext, flags, &mAttr, &mSpec, &chanmap,
+ BackendType::Capture);
+
+ pa_stream_set_moved_callback(mStream, &PulseCapture::streamMovedCallbackC, this);
+ pa_stream_set_state_callback(mStream, &PulseCapture::streamStateCallbackC, this);
+
+ mDeviceName = pa_stream_get_device_name(mStream);
+ if(mDevice->DeviceName.empty())
+ {
+ pa_operation *op{pa_context_get_source_info_by_name(mContext, mDeviceName.c_str(),
+ &PulseCapture::sourceNameCallbackC, this)};
+ wait_for_operation(op, plock);
+ }
+
+ return ALC_NO_ERROR;
+}
+
+ALCboolean PulseCapture::start()
+{
+ std::unique_lock<std::mutex> plock{pulse_lock};
+ pa_operation *op{pa_stream_cork(mStream, 0, stream_success_callback, nullptr)};
+ wait_for_operation(op, plock);
+ return ALC_TRUE;
+}
+
+void PulseCapture::stop()
+{
+ std::unique_lock<std::mutex> plock{pulse_lock};
+ pa_operation *op{pa_stream_cork(mStream, 1, stream_success_callback, nullptr)};
+ wait_for_operation(op, plock);
+}
+
+ALCenum PulseCapture::captureSamples(ALCvoid *buffer, ALCuint samples)
+{
+ al::span<al::byte> dstbuf{static_cast<al::byte*>(buffer), samples * pa_frame_size(&mSpec)};
+
+ /* Capture is done in fragment-sized chunks, so we loop until we get all
+ * that's available */
+ mLastReadable -= dstbuf.size();
+ std::lock_guard<std::mutex> _{pulse_lock};
+ while(!dstbuf.empty())
+ {
+ if(mCapBuffer.empty())
+ {
+ if(UNLIKELY(!mDevice->Connected.load(std::memory_order_acquire)))
+ break;
+ const pa_stream_state_t state{pa_stream_get_state(mStream)};
+ if(UNLIKELY(!PA_STREAM_IS_GOOD(state)))
+ {
+ aluHandleDisconnect(mDevice, "Bad capture state: %u", state);
+ break;
+ }
+ const void *capbuf;
+ size_t caplen;
+ if(UNLIKELY(pa_stream_peek(mStream, &capbuf, &caplen) < 0))
+ {
+ aluHandleDisconnect(mDevice, "Failed retrieving capture samples: %s",
+ pa_strerror(pa_context_errno(mContext)));
+ break;
+ }
+ if(caplen == 0) break;
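+ /* A null peek buffer with a non-zero length marks a hole in the stream;
+ * store it as a negative length so the copy below writes silence. */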
+ if(UNLIKELY(!capbuf))
+ mCapLen = -static_cast<ssize_t>(caplen);
+ else
+ mCapLen = static_cast<ssize_t>(caplen);
+ mCapBuffer = {static_cast<const al::byte*>(capbuf), caplen};
+ }
+
+ const size_t rem{minz(dstbuf.size(), mCapBuffer.size())};
+ if(UNLIKELY(mCapLen < 0))
+ std::fill_n(dstbuf.begin(), rem, mSilentVal);
+ else
+ std::copy_n(mCapBuffer.begin(), rem, dstbuf.begin());
+ dstbuf = dstbuf.subspan(rem);
+ mCapBuffer = mCapBuffer.subspan(rem);
+
+ if(mCapBuffer.empty())
+ {
+ pa_stream_drop(mStream);
+ mCapLen = 0;
+ }
+ }
+ if(!dstbuf.empty())
+ std::fill(dstbuf.begin(), dstbuf.end(), mSilentVal);
+
+ return ALC_NO_ERROR;
+}
+
+ALCuint PulseCapture::availableSamples()
+{
+ size_t readable{mCapBuffer.size()};
+
+ if(mDevice->Connected.load(std::memory_order_acquire))
+ {
+ std::lock_guard<std::mutex> _{pulse_lock};
+ size_t got{pa_stream_readable_size(mStream)};
+ if(static_cast<ssize_t>(got) < 0)
+ {
+ ERR("pa_stream_readable_size() failed: %s\n", pa_strerror(got));
+ aluHandleDisconnect(mDevice, "Failed getting readable size: %s", pa_strerror(got));
+ }
+ else
+ {
+ const auto caplen = static_cast<size_t>(std::abs(mCapLen));
+ if(got > caplen) readable += got - caplen;
+ }
+ }
+
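+    /* Only let the reported amount grow here; captureSamples() subtracts what
+     * it consumes, keeping the count consistent between calls.
+     */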
+ readable = std::min<size_t>(readable, std::numeric_limits<ALCuint>::max());
+ mLastReadable = std::max(mLastReadable, static_cast<ALCuint>(readable));
+ return mLastReadable / pa_frame_size(&mSpec);
+}
+
+
+ClockLatency PulseCapture::getClockLatency()
+{
+ ClockLatency ret;
+ pa_usec_t latency;
+ int neg, err;
+
+ { std::lock_guard<std::mutex> _{pulse_lock};
+ ret.ClockTime = GetDeviceClockTime(mDevice);
+ err = pa_stream_get_latency(mStream, &latency, &neg);
+ }
+
+ if(UNLIKELY(err != 0))
+ {
+ ERR("Failed to get stream latency: 0x%x\n", err);
+ latency = 0;
+ neg = 0;
+ }
+ else if(UNLIKELY(neg))
+ latency = 0;
+ ret.Latency = std::chrono::microseconds{latency};
+
+ return ret;
+}
+
+
+void PulseCapture::lock()
+{ pulse_lock.lock(); }
+
+void PulseCapture::unlock()
+{ pulse_lock.unlock(); }
+
+} // namespace
+
+
+bool PulseBackendFactory::init()
+{
+#ifdef HAVE_DYNLOAD
+ if(!pulse_handle)
+ {
+ bool ret{true};
+ std::string missing_funcs;
+
+#ifdef _WIN32
+#define PALIB "libpulse-0.dll"
+#elif defined(__APPLE__) && defined(__MACH__)
+#define PALIB "libpulse.0.dylib"
+#else
+#define PALIB "libpulse.so.0"
+#endif
+ pulse_handle = LoadLib(PALIB);
+ if(!pulse_handle)
+ {
+ WARN("Failed to load %s\n", PALIB);
+ return false;
+ }
+
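+/* Resolve each required libpulse entry point; any missing symbol disables the
+ * backend.
+ */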
+#define LOAD_FUNC(x) do { \
+ p##x = reinterpret_cast<decltype(p##x)>(GetSymbol(pulse_handle, #x)); \
+ if(!(p##x)) { \
+ ret = false; \
+ missing_funcs += "\n" #x; \
+ } \
+} while(0)
+ PULSE_FUNCS(LOAD_FUNC)
+#undef LOAD_FUNC
+
+ if(!ret)
+ {
+ WARN("Missing expected functions:%s\n", missing_funcs.c_str());
+ CloseLib(pulse_handle);
+ pulse_handle = nullptr;
+ return false;
+ }
+ }
+#endif /* HAVE_DYNLOAD */
+
+ pulse_ctx_flags = PA_CONTEXT_NOFLAGS;
+ if(!GetConfigValueBool(nullptr, "pulse", "spawn-server", 1))
+ pulse_ctx_flags |= PA_CONTEXT_NOAUTOSPAWN;
+
+ try {
+ std::unique_lock<std::mutex> plock{pulse_lock};
+ pa_context *context{connect_context(plock)};
+ pa_context_disconnect(context);
+ pa_context_unref(context);
+ return true;
+ }
+ catch(...) {
+ return false;
+ }
+}
+
+bool PulseBackendFactory::querySupport(BackendType type)
+{ return type == BackendType::Playback || type == BackendType::Capture; }
+
+void PulseBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ auto add_device = [outnames](const DevMap &entry) -> void
+ {
+        /* +1 to also append the null char (ensuring the list is
+         * null-separated and double-null terminated).
+ */
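+        /* The appended result looks like "dev1\0dev2\0", and the string's own
+         * terminating null completes the double-null ending.
+         */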
+ outnames->append(entry.name.c_str(), entry.name.length()+1);
+ };
+ switch(type)
+ {
+ case DevProbe::Playback:
+ probePlaybackDevices();
+ std::for_each(PlaybackDevices.cbegin(), PlaybackDevices.cend(), add_device);
+ break;
+
+ case DevProbe::Capture:
+ probeCaptureDevices();
+ std::for_each(CaptureDevices.cbegin(), CaptureDevices.cend(), add_device);
+ break;
+ }
+}
+
+BackendPtr PulseBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new PulsePlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new PulseCapture{device}};
+ return nullptr;
+}
+
+BackendFactory &PulseBackendFactory::getFactory()
+{
+ static PulseBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/pulseaudio.h b/alc/backends/pulseaudio.h
new file mode 100644
index 00000000..40f3e305
--- /dev/null
+++ b/alc/backends/pulseaudio.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_PULSEAUDIO_H
+#define BACKENDS_PULSEAUDIO_H
+
+#include "backends/base.h"
+
+class PulseBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_PULSEAUDIO_H */
diff --git a/alc/backends/qsa.cpp b/alc/backends/qsa.cpp
new file mode 100644
index 00000000..64ed53aa
--- /dev/null
+++ b/alc/backends/qsa.cpp
@@ -0,0 +1,953 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2011-2013 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/qsa.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <sched.h>
+#include <errno.h>
+#include <memory.h>
+#include <poll.h>
+
+#include <thread>
+#include <memory>
+#include <algorithm>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "threads.h"
+
+#include <sys/asoundlib.h>
+#include <sys/neutrino.h>
+
+
+namespace {
+
+struct qsa_data {
+ snd_pcm_t* pcmHandle{nullptr};
+ int audio_fd{-1};
+
+ snd_pcm_channel_setup_t csetup{};
+ snd_pcm_channel_params_t cparams{};
+
+ ALvoid* buffer{nullptr};
+ ALsizei size{0};
+
+ std::atomic<ALenum> mKillNow{AL_TRUE};
+ std::thread mThread;
+};
+
+struct DevMap {
+ ALCchar* name;
+ int card;
+ int dev;
+};
+
+al::vector<DevMap> DeviceNameMap;
+al::vector<DevMap> CaptureNameMap;
+
+constexpr ALCchar qsaDevice[] = "QSA Default";
+
+constexpr struct {
+ int32_t format;
+} formatlist[] = {
+ {SND_PCM_SFMT_FLOAT_LE},
+ {SND_PCM_SFMT_S32_LE},
+ {SND_PCM_SFMT_U32_LE},
+ {SND_PCM_SFMT_S16_LE},
+ {SND_PCM_SFMT_U16_LE},
+ {SND_PCM_SFMT_S8},
+ {SND_PCM_SFMT_U8},
+ {0},
+};
+
+constexpr struct {
+ int32_t rate;
+} ratelist[] = {
+ {192000},
+ {176400},
+ {96000},
+ {88200},
+ {48000},
+ {44100},
+ {32000},
+ {24000},
+ {22050},
+ {16000},
+ {12000},
+ {11025},
+ {8000},
+ {0},
+};
+
+constexpr struct {
+ int32_t channels;
+} channellist[] = {
+ {8},
+ {7},
+ {6},
+ {4},
+ {2},
+ {1},
+ {0},
+};
+
+void deviceList(int type, al::vector<DevMap> *devmap)
+{
+ snd_ctl_t* handle;
+ snd_pcm_info_t pcminfo;
+ int max_cards, card, err, dev;
+ DevMap entry;
+ char name[1024];
+ snd_ctl_hw_info info;
+
+ max_cards = snd_cards();
+ if(max_cards < 0)
+ return;
+
+ std::for_each(devmap->begin(), devmap->end(),
+ [](const DevMap &entry) -> void
+ { free(entry.name); }
+ );
+ devmap->clear();
+
+ entry.name = strdup(qsaDevice);
+ entry.card = 0;
+ entry.dev = 0;
+ devmap->push_back(entry);
+
+ for(card = 0;card < max_cards;card++)
+ {
+ if((err=snd_ctl_open(&handle, card)) < 0)
+ continue;
+
+ if((err=snd_ctl_hw_info(handle, &info)) < 0)
+ {
+ snd_ctl_close(handle);
+ continue;
+ }
+
+ for(dev = 0;dev < (int)info.pcmdevs;dev++)
+ {
+ if((err=snd_ctl_pcm_info(handle, dev, &pcminfo)) < 0)
+ continue;
+
+ if((type==SND_PCM_CHANNEL_PLAYBACK && (pcminfo.flags&SND_PCM_INFO_PLAYBACK)) ||
+ (type==SND_PCM_CHANNEL_CAPTURE && (pcminfo.flags&SND_PCM_INFO_CAPTURE)))
+ {
+ snprintf(name, sizeof(name), "%s [%s] (hw:%d,%d)", info.name, pcminfo.name, card, dev);
+ entry.name = strdup(name);
+ entry.card = card;
+ entry.dev = dev;
+
+ devmap->push_back(entry);
+ TRACE("Got device \"%s\", card %d, dev %d\n", name, card, dev);
+ }
+ }
+ snd_ctl_close(handle);
+ }
+}
+
+
+/* Wrappers to use an old-style backend with the new interface. */
+struct PlaybackWrapper final : public BackendBase {
+ PlaybackWrapper(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~PlaybackWrapper() override;
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ std::unique_ptr<qsa_data> mExtraData;
+
+ DEF_NEWDEL(PlaybackWrapper)
+};
+
+
+FORCE_ALIGN static int qsa_proc_playback(void *ptr)
+{
+ PlaybackWrapper *self = static_cast<PlaybackWrapper*>(ptr);
+ ALCdevice *device = self->mDevice;
+ qsa_data *data = self->mExtraData.get();
+ snd_pcm_channel_status_t status;
+ sched_param param;
+ char* write_ptr;
+ ALint len;
+ int sret;
+
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+    /* Raise the scheduling priority by one (default 10 -> 11) to avoid jerky sound. */
+ SchedGet(0, 0, &param);
+ param.sched_priority=param.sched_curpriority+1;
+ SchedSet(0, 0, SCHED_NOCHANGE, &param);
+
+ const ALint frame_size = device->frameSizeFromFmt();
+
+ self->lock();
+ while(!data->mKillNow.load(std::memory_order_acquire))
+ {
+ pollfd pollitem{};
+ pollitem.fd = data->audio_fd;
+ pollitem.events = POLLOUT;
+
+        /* Polling also acts as a time-slice yield to the OS. */
+ self->unlock();
+ sret = poll(&pollitem, 1, 2000);
+ self->lock();
+ if(sret == -1)
+ {
+ if(errno == EINTR || errno == EAGAIN)
+ continue;
+ ERR("poll error: %s\n", strerror(errno));
+ aluHandleDisconnect(device, "Failed waiting for playback buffer: %s", strerror(errno));
+ break;
+ }
+ if(sret == 0)
+ {
+ ERR("poll timeout\n");
+ continue;
+ }
+
+ len = data->size;
+ write_ptr = static_cast<char*>(data->buffer);
+ aluMixData(device, write_ptr, len/frame_size);
+ while(len>0 && !data->mKillNow.load(std::memory_order_acquire))
+ {
+ int wrote = snd_pcm_plugin_write(data->pcmHandle, write_ptr, len);
+ if(wrote <= 0)
+ {
+ if(errno==EAGAIN || errno==EWOULDBLOCK)
+ continue;
+
+ memset(&status, 0, sizeof(status));
+ status.channel = SND_PCM_CHANNEL_PLAYBACK;
+
+ snd_pcm_plugin_status(data->pcmHandle, &status);
+
+ /* we need to reinitialize the sound channel if we've underrun the buffer */
+ if(status.status == SND_PCM_STATUS_UNDERRUN ||
+ status.status == SND_PCM_STATUS_READY)
+ {
+ if(snd_pcm_plugin_prepare(data->pcmHandle, SND_PCM_CHANNEL_PLAYBACK) < 0)
+ {
+ aluHandleDisconnect(device, "Playback recovery failed");
+ break;
+ }
+ }
+ }
+ else
+ {
+ write_ptr += wrote;
+ len -= wrote;
+ }
+ }
+ }
+ self->unlock();
+
+ return 0;
+}
+
+/************/
+/* Playback */
+/************/
+
+static ALCenum qsa_open_playback(PlaybackWrapper *self, const ALCchar* deviceName)
+{
+ ALCdevice *device = self->mDevice;
+ int card, dev;
+ int status;
+
+ std::unique_ptr<qsa_data> data{new qsa_data{}};
+ data->mKillNow.store(AL_TRUE, std::memory_order_relaxed);
+
+ if(!deviceName)
+ deviceName = qsaDevice;
+
+ if(strcmp(deviceName, qsaDevice) == 0)
+ status = snd_pcm_open_preferred(&data->pcmHandle, &card, &dev, SND_PCM_OPEN_PLAYBACK);
+ else
+ {
+ if(DeviceNameMap.empty())
+ deviceList(SND_PCM_CHANNEL_PLAYBACK, &DeviceNameMap);
+
+ auto iter = std::find_if(DeviceNameMap.begin(), DeviceNameMap.end(),
+ [deviceName](const DevMap &entry) -> bool
+ { return entry.name && strcmp(deviceName, entry.name) == 0; }
+ );
+ if(iter == DeviceNameMap.cend())
+ return ALC_INVALID_DEVICE;
+
+ status = snd_pcm_open(&data->pcmHandle, iter->card, iter->dev, SND_PCM_OPEN_PLAYBACK);
+ }
+
+ if(status < 0)
+ return ALC_INVALID_DEVICE;
+
+ data->audio_fd = snd_pcm_file_descriptor(data->pcmHandle, SND_PCM_CHANNEL_PLAYBACK);
+ if(data->audio_fd < 0)
+ {
+ snd_pcm_close(data->pcmHandle);
+ return ALC_INVALID_DEVICE;
+ }
+
+ device->DeviceName = deviceName;
+ self->mExtraData = std::move(data);
+
+ return ALC_NO_ERROR;
+}
+
+static void qsa_close_playback(PlaybackWrapper *self)
+{
+ qsa_data *data = self->mExtraData.get();
+
+ if (data->buffer!=NULL)
+ {
+ free(data->buffer);
+ data->buffer=NULL;
+ }
+
+ snd_pcm_close(data->pcmHandle);
+
+ self->mExtraData = nullptr;
+}
+
+static ALCboolean qsa_reset_playback(PlaybackWrapper *self)
+{
+ ALCdevice *device = self->mDevice;
+ qsa_data *data = self->mExtraData.get();
+ int32_t format=-1;
+
+ switch(device->FmtType)
+ {
+ case DevFmtByte:
+ format=SND_PCM_SFMT_S8;
+ break;
+ case DevFmtUByte:
+ format=SND_PCM_SFMT_U8;
+ break;
+ case DevFmtShort:
+ format=SND_PCM_SFMT_S16_LE;
+ break;
+ case DevFmtUShort:
+ format=SND_PCM_SFMT_U16_LE;
+ break;
+ case DevFmtInt:
+ format=SND_PCM_SFMT_S32_LE;
+ break;
+ case DevFmtUInt:
+ format=SND_PCM_SFMT_U32_LE;
+ break;
+ case DevFmtFloat:
+ format=SND_PCM_SFMT_FLOAT_LE;
+ break;
+ }
+
+ /* we actually don't want to block on writes */
+ snd_pcm_nonblock_mode(data->pcmHandle, 1);
+ /* Disable mmap to control data transfer to the audio device */
+ snd_pcm_plugin_set_disable(data->pcmHandle, PLUGIN_DISABLE_MMAP);
+ snd_pcm_plugin_set_disable(data->pcmHandle, PLUGIN_DISABLE_BUFFER_PARTIAL_BLOCKS);
+
+ // configure a sound channel
+ memset(&data->cparams, 0, sizeof(data->cparams));
+ data->cparams.channel=SND_PCM_CHANNEL_PLAYBACK;
+ data->cparams.mode=SND_PCM_MODE_BLOCK;
+ data->cparams.start_mode=SND_PCM_START_FULL;
+ data->cparams.stop_mode=SND_PCM_STOP_STOP;
+
+ data->cparams.buf.block.frag_size=device->UpdateSize * device->frameSizeFromFmt();
+ data->cparams.buf.block.frags_max=device->BufferSize / device->UpdateSize;
+ data->cparams.buf.block.frags_min=data->cparams.buf.block.frags_max;
+
+ data->cparams.format.interleave=1;
+ data->cparams.format.rate=device->Frequency;
+ data->cparams.format.voices=device->channelsFromFmt();
+ data->cparams.format.format=format;
+
+ if ((snd_pcm_plugin_params(data->pcmHandle, &data->cparams))<0)
+ {
+ int original_rate=data->cparams.format.rate;
+ int original_voices=data->cparams.format.voices;
+ int original_format=data->cparams.format.format;
+ int it;
+ int jt;
+
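+        /* The requested parameters were rejected. Progressively fall back by
+         * downgrading the sample format, then the sample rate, then the
+         * channel count, until the device accepts a configuration.
+         */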
+ for (it=0; it<1; it++)
+ {
+ /* Check for second pass */
+ if (it==1)
+ {
+ original_rate=ratelist[0].rate;
+ original_voices=channellist[0].channels;
+ original_format=formatlist[0].format;
+ }
+
+ do {
+                /* First, try downgrading the sample format. */
+ jt=0;
+ do {
+ if (formatlist[jt].format==data->cparams.format.format)
+ {
+ data->cparams.format.format=formatlist[jt+1].format;
+ break;
+ }
+ if (formatlist[jt].format==0)
+ {
+ data->cparams.format.format=0;
+ break;
+ }
+ jt++;
+ } while(1);
+
+ if (data->cparams.format.format==0)
+ {
+ data->cparams.format.format=original_format;
+
+                    /* Next, try downgrading the sample rate. */
+ jt=0;
+ do {
+ if (ratelist[jt].rate==data->cparams.format.rate)
+ {
+ data->cparams.format.rate=ratelist[jt+1].rate;
+ break;
+ }
+ if (ratelist[jt].rate==0)
+ {
+ data->cparams.format.rate=0;
+ break;
+ }
+ jt++;
+ } while(1);
+
+ if (data->cparams.format.rate==0)
+ {
+ data->cparams.format.rate=original_rate;
+ data->cparams.format.format=original_format;
+
+                        /* Finally, try reducing the channel count. */
+ jt=0;
+ do {
+ if(channellist[jt].channels==data->cparams.format.voices)
+ {
+ data->cparams.format.voices=channellist[jt+1].channels;
+ break;
+ }
+ if (channellist[jt].channels==0)
+ {
+ data->cparams.format.voices=0;
+ break;
+ }
+ jt++;
+ } while(1);
+ }
+
+ if (data->cparams.format.voices==0)
+ {
+ break;
+ }
+ }
+
+ data->cparams.buf.block.frag_size=device->UpdateSize*
+ data->cparams.format.voices*
+ snd_pcm_format_width(data->cparams.format.format)/8;
+ data->cparams.buf.block.frags_max=device->NumUpdates;
+ data->cparams.buf.block.frags_min=device->NumUpdates;
+ if ((snd_pcm_plugin_params(data->pcmHandle, &data->cparams))<0)
+ {
+ continue;
+ }
+ else
+ {
+ break;
+ }
+ } while(1);
+
+ if (data->cparams.format.voices!=0)
+ {
+ break;
+ }
+ }
+
+ if (data->cparams.format.voices==0)
+ {
+ return ALC_FALSE;
+ }
+ }
+
+ if ((snd_pcm_plugin_prepare(data->pcmHandle, SND_PCM_CHANNEL_PLAYBACK))<0)
+ {
+ return ALC_FALSE;
+ }
+
+ memset(&data->csetup, 0, sizeof(data->csetup));
+ data->csetup.channel=SND_PCM_CHANNEL_PLAYBACK;
+ if (snd_pcm_plugin_setup(data->pcmHandle, &data->csetup)<0)
+ {
+ return ALC_FALSE;
+ }
+
+    /* Now write the accepted settings back to our AL device. */
+ device->Frequency=data->cparams.format.rate;
+
+ switch (data->cparams.format.voices)
+ {
+ case 1:
+ device->FmtChans=DevFmtMono;
+ break;
+ case 2:
+ device->FmtChans=DevFmtStereo;
+ break;
+ case 4:
+ device->FmtChans=DevFmtQuad;
+ break;
+ case 6:
+ device->FmtChans=DevFmtX51;
+ break;
+ case 7:
+ device->FmtChans=DevFmtX61;
+ break;
+ case 8:
+ device->FmtChans=DevFmtX71;
+ break;
+ default:
+ device->FmtChans=DevFmtMono;
+ break;
+ }
+
+ switch (data->cparams.format.format)
+ {
+ case SND_PCM_SFMT_S8:
+ device->FmtType=DevFmtByte;
+ break;
+ case SND_PCM_SFMT_U8:
+ device->FmtType=DevFmtUByte;
+ break;
+ case SND_PCM_SFMT_S16_LE:
+ device->FmtType=DevFmtShort;
+ break;
+ case SND_PCM_SFMT_U16_LE:
+ device->FmtType=DevFmtUShort;
+ break;
+ case SND_PCM_SFMT_S32_LE:
+ device->FmtType=DevFmtInt;
+ break;
+ case SND_PCM_SFMT_U32_LE:
+ device->FmtType=DevFmtUInt;
+ break;
+ case SND_PCM_SFMT_FLOAT_LE:
+ device->FmtType=DevFmtFloat;
+ break;
+ default:
+ device->FmtType=DevFmtShort;
+ break;
+ }
+
+ SetDefaultChannelOrder(device);
+
+ device->UpdateSize=data->csetup.buf.block.frag_size / device->frameSizeFromFmt();
+ device->NumUpdates=data->csetup.buf.block.frags;
+
+ data->size=data->csetup.buf.block.frag_size;
+ data->buffer=malloc(data->size);
+ if (!data->buffer)
+ {
+ return ALC_FALSE;
+ }
+
+ return ALC_TRUE;
+}
+
+static ALCboolean qsa_start_playback(PlaybackWrapper *self)
+{
+ qsa_data *data = self->mExtraData.get();
+
+ try {
+ data->mKillNow.store(AL_FALSE, std::memory_order_release);
+ data->mThread = std::thread(qsa_proc_playback, self);
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Could not create playback thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+static void qsa_stop_playback(PlaybackWrapper *self)
+{
+ qsa_data *data = self->mExtraData.get();
+
+ if(data->mKillNow.exchange(AL_TRUE, std::memory_order_acq_rel) || !data->mThread.joinable())
+ return;
+ data->mThread.join();
+}
+
+
+PlaybackWrapper::~PlaybackWrapper()
+{
+ if(mExtraData)
+ qsa_close_playback(this);
+}
+
+ALCenum PlaybackWrapper::open(const ALCchar *name)
+{ return qsa_open_playback(this, name); }
+
+ALCboolean PlaybackWrapper::reset()
+{ return qsa_reset_playback(this); }
+
+ALCboolean PlaybackWrapper::start()
+{ return qsa_start_playback(this); }
+
+void PlaybackWrapper::stop()
+{ qsa_stop_playback(this); }
+
+
+/***********/
+/* Capture */
+/***********/
+
+struct CaptureWrapper final : public BackendBase {
+ CaptureWrapper(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~CaptureWrapper() override;
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(void *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+
+ std::unique_ptr<qsa_data> mExtraData;
+
+ DEF_NEWDEL(CaptureWrapper)
+};
+
+static ALCenum qsa_open_capture(CaptureWrapper *self, const ALCchar *deviceName)
+{
+ ALCdevice *device = self->mDevice;
+ int card, dev;
+ int format=-1;
+ int status;
+
+ std::unique_ptr<qsa_data> data{new qsa_data{}};
+
+ if(!deviceName)
+ deviceName = qsaDevice;
+
+ if(strcmp(deviceName, qsaDevice) == 0)
+ status = snd_pcm_open_preferred(&data->pcmHandle, &card, &dev, SND_PCM_OPEN_CAPTURE);
+ else
+ {
+ if(CaptureNameMap.empty())
+ deviceList(SND_PCM_CHANNEL_CAPTURE, &CaptureNameMap);
+
+ auto iter = std::find_if(CaptureNameMap.cbegin(), CaptureNameMap.cend(),
+ [deviceName](const DevMap &entry) -> bool
+ { return entry.name && strcmp(deviceName, entry.name) == 0; }
+ );
+ if(iter == CaptureNameMap.cend())
+ return ALC_INVALID_DEVICE;
+
+ status = snd_pcm_open(&data->pcmHandle, iter->card, iter->dev, SND_PCM_OPEN_CAPTURE);
+ }
+
+ if(status < 0)
+ return ALC_INVALID_DEVICE;
+
+ data->audio_fd = snd_pcm_file_descriptor(data->pcmHandle, SND_PCM_CHANNEL_CAPTURE);
+ if(data->audio_fd < 0)
+ {
+ snd_pcm_close(data->pcmHandle);
+ return ALC_INVALID_DEVICE;
+ }
+
+ device->DeviceName = deviceName;
+
+ switch (device->FmtType)
+ {
+ case DevFmtByte:
+ format=SND_PCM_SFMT_S8;
+ break;
+ case DevFmtUByte:
+ format=SND_PCM_SFMT_U8;
+ break;
+ case DevFmtShort:
+ format=SND_PCM_SFMT_S16_LE;
+ break;
+ case DevFmtUShort:
+ format=SND_PCM_SFMT_U16_LE;
+ break;
+ case DevFmtInt:
+ format=SND_PCM_SFMT_S32_LE;
+ break;
+ case DevFmtUInt:
+ format=SND_PCM_SFMT_U32_LE;
+ break;
+ case DevFmtFloat:
+ format=SND_PCM_SFMT_FLOAT_LE;
+ break;
+ }
+
+ /* we actually don't want to block on reads */
+ snd_pcm_nonblock_mode(data->pcmHandle, 1);
+ /* Disable mmap to control data transfer to the audio device */
+ snd_pcm_plugin_set_disable(data->pcmHandle, PLUGIN_DISABLE_MMAP);
+
+ /* configure a sound channel */
+ memset(&data->cparams, 0, sizeof(data->cparams));
+ data->cparams.mode=SND_PCM_MODE_BLOCK;
+ data->cparams.channel=SND_PCM_CHANNEL_CAPTURE;
+ data->cparams.start_mode=SND_PCM_START_GO;
+ data->cparams.stop_mode=SND_PCM_STOP_STOP;
+
+ data->cparams.buf.block.frag_size=device->UpdateSize * device->frameSizeFromFmt();
+ data->cparams.buf.block.frags_max=device->NumUpdates;
+ data->cparams.buf.block.frags_min=device->NumUpdates;
+
+ data->cparams.format.interleave=1;
+ data->cparams.format.rate=device->Frequency;
+ data->cparams.format.voices=device->channelsFromFmt();
+ data->cparams.format.format=format;
+
+ if(snd_pcm_plugin_params(data->pcmHandle, &data->cparams) < 0)
+ {
+ snd_pcm_close(data->pcmHandle);
+ return ALC_INVALID_VALUE;
+ }
+
+ self->mExtraData = std::move(data);
+
+ return ALC_NO_ERROR;
+}
+
+static void qsa_close_capture(CaptureWrapper *self)
+{
+ qsa_data *data = self->mExtraData.get();
+
+ if (data->pcmHandle!=nullptr)
+ snd_pcm_close(data->pcmHandle);
+ data->pcmHandle = nullptr;
+
+ self->mExtraData = nullptr;
+}
+
+static void qsa_start_capture(CaptureWrapper *self)
+{
+ qsa_data *data = self->mExtraData.get();
+ int rstatus;
+
+ if ((rstatus=snd_pcm_plugin_prepare(data->pcmHandle, SND_PCM_CHANNEL_CAPTURE))<0)
+ {
+ ERR("capture prepare failed: %s\n", snd_strerror(rstatus));
+ return;
+ }
+
+ memset(&data->csetup, 0, sizeof(data->csetup));
+ data->csetup.channel=SND_PCM_CHANNEL_CAPTURE;
+ if ((rstatus=snd_pcm_plugin_setup(data->pcmHandle, &data->csetup))<0)
+ {
+ ERR("capture setup failed: %s\n", snd_strerror(rstatus));
+ return;
+ }
+
+ snd_pcm_capture_go(data->pcmHandle);
+}
+
+static void qsa_stop_capture(CaptureWrapper *self)
+{
+ qsa_data *data = self->mExtraData.get();
+ snd_pcm_capture_flush(data->pcmHandle);
+}
+
+static ALCuint qsa_available_samples(CaptureWrapper *self)
+{
+ ALCdevice *device = self->mDevice;
+ qsa_data *data = self->mExtraData.get();
+ snd_pcm_channel_status_t status;
+ ALint frame_size = device->frameSizeFromFmt();
+ ALint free_size;
+ int rstatus;
+
+ memset(&status, 0, sizeof (status));
+ status.channel=SND_PCM_CHANNEL_CAPTURE;
+ snd_pcm_plugin_status(data->pcmHandle, &status);
+ if ((status.status==SND_PCM_STATUS_OVERRUN) ||
+ (status.status==SND_PCM_STATUS_READY))
+ {
+ if ((rstatus=snd_pcm_plugin_prepare(data->pcmHandle, SND_PCM_CHANNEL_CAPTURE))<0)
+ {
+ ERR("capture prepare failed: %s\n", snd_strerror(rstatus));
+ aluHandleDisconnect(device, "Failed capture recovery: %s", snd_strerror(rstatus));
+ return 0;
+ }
+
+ snd_pcm_capture_go(data->pcmHandle);
+ return 0;
+ }
+
+ free_size=data->csetup.buf.block.frag_size*data->csetup.buf.block.frags;
+ free_size-=status.free;
+
+ return free_size/frame_size;
+}
+
+static ALCenum qsa_capture_samples(CaptureWrapper *self, ALCvoid *buffer, ALCuint samples)
+{
+ ALCdevice *device = self->mDevice;
+ qsa_data *data = self->mExtraData.get();
+ char* read_ptr;
+ snd_pcm_channel_status_t status;
+ int selectret;
+ int bytes_read;
+ ALint frame_size=device->frameSizeFromFmt();
+ ALint len=samples*frame_size;
+ int rstatus;
+
+ read_ptr = static_cast<char*>(buffer);
+
+ while (len>0)
+ {
+ pollfd pollitem{};
+ pollitem.fd = data->audio_fd;
+        pollitem.events = POLLIN;
+
+        /* Polling also acts as a time-slice yield to the OS. */
+ bytes_read=0;
+ selectret = poll(&pollitem, 1, 2000);
+ switch (selectret)
+ {
+ case -1:
+ aluHandleDisconnect(device, "Failed to check capture samples");
+ return ALC_INVALID_DEVICE;
+ case 0:
+ break;
+ default:
+ bytes_read=snd_pcm_plugin_read(data->pcmHandle, read_ptr, len);
+ break;
+ }
+
+ if (bytes_read<=0)
+ {
+ if ((errno==EAGAIN) || (errno==EWOULDBLOCK))
+ {
+ continue;
+ }
+
+ memset(&status, 0, sizeof (status));
+ status.channel=SND_PCM_CHANNEL_CAPTURE;
+ snd_pcm_plugin_status(data->pcmHandle, &status);
+
+ /* we need to reinitialize the sound channel if we've overrun the buffer */
+ if ((status.status==SND_PCM_STATUS_OVERRUN) ||
+ (status.status==SND_PCM_STATUS_READY))
+ {
+ if ((rstatus=snd_pcm_plugin_prepare(data->pcmHandle, SND_PCM_CHANNEL_CAPTURE))<0)
+ {
+ ERR("capture prepare failed: %s\n", snd_strerror(rstatus));
+ aluHandleDisconnect(device, "Failed capture recovery: %s",
+ snd_strerror(rstatus));
+ return ALC_INVALID_DEVICE;
+ }
+ snd_pcm_capture_go(data->pcmHandle);
+ }
+ }
+ else
+ {
+ read_ptr+=bytes_read;
+ len-=bytes_read;
+ }
+ }
+
+ return ALC_NO_ERROR;
+}
+
+
+CaptureWrapper::~CaptureWrapper()
+{
+ if(mExtraData)
+ qsa_close_capture(this);
+}
+
+ALCenum CaptureWrapper::open(const ALCchar *name)
+{ return qsa_open_capture(this, name); }
+
+ALCboolean CaptureWrapper::start()
+{ qsa_start_capture(this); return ALC_TRUE; }
+
+void CaptureWrapper::stop()
+{ qsa_stop_capture(this); }
+
+ALCenum CaptureWrapper::captureSamples(void *buffer, ALCuint samples)
+{ return qsa_capture_samples(this, buffer, samples); }
+
+ALCuint CaptureWrapper::availableSamples()
+{ return qsa_available_samples(this); }
+
+} // namespace
+
+
+bool QSABackendFactory::init()
+{ return true; }
+
+bool QSABackendFactory::querySupport(BackendType type)
+{ return (type == BackendType::Playback || type == BackendType::Capture); }
+
+void QSABackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ auto add_device = [outnames](const DevMap &entry) -> void
+ {
+ const char *n = entry.name;
+ if(n && n[0])
+ outnames->append(n, strlen(n)+1);
+ };
+
+ switch (type)
+ {
+ case DevProbe::Playback:
+ deviceList(SND_PCM_CHANNEL_PLAYBACK, &DeviceNameMap);
+ std::for_each(DeviceNameMap.cbegin(), DeviceNameMap.cend(), add_device);
+ break;
+ case DevProbe::Capture:
+ deviceList(SND_PCM_CHANNEL_CAPTURE, &CaptureNameMap);
+ std::for_each(CaptureNameMap.cbegin(), CaptureNameMap.cend(), add_device);
+ break;
+ }
+}
+
+BackendPtr QSABackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new PlaybackWrapper{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new CaptureWrapper{device}};
+ return nullptr;
+}
+
+BackendFactory &QSABackendFactory::getFactory()
+{
+ static QSABackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/qsa.h b/alc/backends/qsa.h
new file mode 100644
index 00000000..da548bba
--- /dev/null
+++ b/alc/backends/qsa.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_QSA_H
+#define BACKENDS_QSA_H
+
+#include "backends/base.h"
+
+struct QSABackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_QSA_H */
diff --git a/alc/backends/sdl2.cpp b/alc/backends/sdl2.cpp
new file mode 100644
index 00000000..29d27c05
--- /dev/null
+++ b/alc/backends/sdl2.cpp
@@ -0,0 +1,227 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2018 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/sdl2.h"
+
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+
+#include "AL/al.h"
+
+#include "alcmain.h"
+#include "almalloc.h"
+#include "alu.h"
+#include "logging.h"
+
+#include <SDL2/SDL.h>
+
+
+namespace {
+
+#ifdef _WIN32
+#define DEVNAME_PREFIX "OpenAL Soft on "
+#else
+#define DEVNAME_PREFIX ""
+#endif
+
+constexpr ALCchar defaultDeviceName[] = DEVNAME_PREFIX "Default Device";
+
+struct Sdl2Backend final : public BackendBase {
+ Sdl2Backend(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~Sdl2Backend() override;
+
+ static void audioCallbackC(void *ptr, Uint8 *stream, int len);
+ void audioCallback(Uint8 *stream, int len);
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+ void lock() override;
+ void unlock() override;
+
+ SDL_AudioDeviceID mDeviceID{0u};
+ ALsizei mFrameSize{0};
+
+ ALuint mFrequency{0u};
+ DevFmtChannels mFmtChans{};
+ DevFmtType mFmtType{};
+ ALuint mUpdateSize{0u};
+
+ DEF_NEWDEL(Sdl2Backend)
+};
+
+Sdl2Backend::~Sdl2Backend()
+{
+ if(mDeviceID)
+ SDL_CloseAudioDevice(mDeviceID);
+ mDeviceID = 0;
+}
+
+void Sdl2Backend::audioCallbackC(void *ptr, Uint8 *stream, int len)
+{ static_cast<Sdl2Backend*>(ptr)->audioCallback(stream, len); }
+
+void Sdl2Backend::audioCallback(Uint8 *stream, int len)
+{
+ assert((len % mFrameSize) == 0);
+ aluMixData(mDevice, stream, len / mFrameSize);
+}
+
+ALCenum Sdl2Backend::open(const ALCchar *name)
+{
+ SDL_AudioSpec want{}, have{};
+ want.freq = mDevice->Frequency;
+ switch(mDevice->FmtType)
+ {
+ case DevFmtUByte: want.format = AUDIO_U8; break;
+ case DevFmtByte: want.format = AUDIO_S8; break;
+ case DevFmtUShort: want.format = AUDIO_U16SYS; break;
+ case DevFmtShort: want.format = AUDIO_S16SYS; break;
+ case DevFmtUInt: /* fall-through */
+ case DevFmtInt: want.format = AUDIO_S32SYS; break;
+ case DevFmtFloat: want.format = AUDIO_F32; break;
+ }
+ want.channels = (mDevice->FmtChans == DevFmtMono) ? 1 : 2;
+ want.samples = mDevice->UpdateSize;
+ want.callback = &Sdl2Backend::audioCallbackC;
+ want.userdata = this;
+
+ /* Passing nullptr to SDL_OpenAudioDevice opens a default, which isn't
+ * necessarily the first in the list.
+ */
+ if(!name || strcmp(name, defaultDeviceName) == 0)
+ mDeviceID = SDL_OpenAudioDevice(nullptr, SDL_FALSE, &want, &have,
+ SDL_AUDIO_ALLOW_ANY_CHANGE);
+ else
+ {
+ const size_t prefix_len = strlen(DEVNAME_PREFIX);
+ if(strncmp(name, DEVNAME_PREFIX, prefix_len) == 0)
+ mDeviceID = SDL_OpenAudioDevice(name+prefix_len, SDL_FALSE, &want, &have,
+ SDL_AUDIO_ALLOW_ANY_CHANGE);
+ else
+ mDeviceID = SDL_OpenAudioDevice(name, SDL_FALSE, &want, &have,
+ SDL_AUDIO_ALLOW_ANY_CHANGE);
+ }
+ if(mDeviceID == 0)
+ return ALC_INVALID_VALUE;
+
+ mDevice->Frequency = have.freq;
+ if(have.channels == 1)
+ mDevice->FmtChans = DevFmtMono;
+ else if(have.channels == 2)
+ mDevice->FmtChans = DevFmtStereo;
+ else
+ {
+ ERR("Got unhandled SDL channel count: %d\n", (int)have.channels);
+ return ALC_INVALID_VALUE;
+ }
+ switch(have.format)
+ {
+ case AUDIO_U8: mDevice->FmtType = DevFmtUByte; break;
+ case AUDIO_S8: mDevice->FmtType = DevFmtByte; break;
+ case AUDIO_U16SYS: mDevice->FmtType = DevFmtUShort; break;
+ case AUDIO_S16SYS: mDevice->FmtType = DevFmtShort; break;
+ case AUDIO_S32SYS: mDevice->FmtType = DevFmtInt; break;
+ case AUDIO_F32SYS: mDevice->FmtType = DevFmtFloat; break;
+ default:
+ ERR("Got unsupported SDL format: 0x%04x\n", have.format);
+ return ALC_INVALID_VALUE;
+ }
+ mDevice->UpdateSize = have.samples;
+ mDevice->BufferSize = have.samples * 2; /* SDL always (tries to) use two periods. */
+
+ mFrameSize = mDevice->frameSizeFromFmt();
+ mFrequency = mDevice->Frequency;
+ mFmtChans = mDevice->FmtChans;
+ mFmtType = mDevice->FmtType;
+ mUpdateSize = mDevice->UpdateSize;
+
+ mDevice->DeviceName = name ? name : defaultDeviceName;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean Sdl2Backend::reset()
+{
+ mDevice->Frequency = mFrequency;
+ mDevice->FmtChans = mFmtChans;
+ mDevice->FmtType = mFmtType;
+ mDevice->UpdateSize = mUpdateSize;
+ mDevice->BufferSize = mUpdateSize * 2;
+ SetDefaultWFXChannelOrder(mDevice);
+ return ALC_TRUE;
+}
+
+ALCboolean Sdl2Backend::start()
+{
+ SDL_PauseAudioDevice(mDeviceID, 0);
+ return ALC_TRUE;
+}
+
+void Sdl2Backend::stop()
+{ SDL_PauseAudioDevice(mDeviceID, 1); }
+
+void Sdl2Backend::lock()
+{ SDL_LockAudioDevice(mDeviceID); }
+
+void Sdl2Backend::unlock()
+{ SDL_UnlockAudioDevice(mDeviceID); }
+
+} // namespace
+
+BackendFactory &SDL2BackendFactory::getFactory()
+{
+ static SDL2BackendFactory factory{};
+ return factory;
+}
+
+bool SDL2BackendFactory::init()
+{ return (SDL_InitSubSystem(SDL_INIT_AUDIO) == 0); }
+
+bool SDL2BackendFactory::querySupport(BackendType type)
+{ return type == BackendType::Playback; }
+
+void SDL2BackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ if(type != DevProbe::Playback)
+ return;
+
+ int num_devices{SDL_GetNumAudioDevices(SDL_FALSE)};
+
+ /* Includes null char. */
+ outnames->append(defaultDeviceName, sizeof(defaultDeviceName));
+ for(int i{0};i < num_devices;++i)
+ {
+ std::string name{DEVNAME_PREFIX};
+ name += SDL_GetAudioDeviceName(i, SDL_FALSE);
+ if(!name.empty())
+ outnames->append(name.c_str(), name.length()+1);
+ }
+}
+
+BackendPtr SDL2BackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new Sdl2Backend{device}};
+ return nullptr;
+}
diff --git a/alc/backends/sdl2.h b/alc/backends/sdl2.h
new file mode 100644
index 00000000..041d47ee
--- /dev/null
+++ b/alc/backends/sdl2.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_SDL2_H
+#define BACKENDS_SDL2_H
+
+#include "backends/base.h"
+
+struct SDL2BackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_SDL2_H */
diff --git a/alc/backends/sndio.cpp b/alc/backends/sndio.cpp
new file mode 100644
index 00000000..587f67bb
--- /dev/null
+++ b/alc/backends/sndio.cpp
@@ -0,0 +1,495 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/sndio.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <thread>
+#include <functional>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "threads.h"
+#include "vector.h"
+#include "ringbuffer.h"
+
+#include <sndio.h>
+
+
+namespace {
+
+static const ALCchar sndio_device[] = "SndIO Default";
+
+
+struct SndioPlayback final : public BackendBase {
+ SndioPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~SndioPlayback() override;
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ sio_hdl *mSndHandle{nullptr};
+
+ al::vector<ALubyte> mBuffer;
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(SndioPlayback)
+};
+
+SndioPlayback::~SndioPlayback()
+{
+ if(mSndHandle)
+ sio_close(mSndHandle);
+ mSndHandle = nullptr;
+}
+
+int SndioPlayback::mixerProc()
+{
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ const ALsizei frameSize{mDevice->frameSizeFromFmt()};
+
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ auto WritePtr = static_cast<ALubyte*>(mBuffer.data());
+ size_t len{mBuffer.size()};
+
+ lock();
+ aluMixData(mDevice, WritePtr, len/frameSize);
+ unlock();
+ while(len > 0 && !mKillNow.load(std::memory_order_acquire))
+ {
+ size_t wrote{sio_write(mSndHandle, WritePtr, len)};
+ if(wrote == 0)
+ {
+ ERR("sio_write failed\n");
+ aluHandleDisconnect(mDevice, "Failed to write playback samples");
+ break;
+ }
+
+ len -= wrote;
+ WritePtr += wrote;
+ }
+ }
+
+ return 0;
+}
+
+
+ALCenum SndioPlayback::open(const ALCchar *name)
+{
+ if(!name)
+ name = sndio_device;
+ else if(strcmp(name, sndio_device) != 0)
+ return ALC_INVALID_VALUE;
+
+ mSndHandle = sio_open(nullptr, SIO_PLAY, 0);
+ if(mSndHandle == nullptr)
+ {
+ ERR("Could not open device\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean SndioPlayback::reset()
+{
+ sio_par par;
+ sio_initpar(&par);
+
+ par.rate = mDevice->Frequency;
+ par.pchan = ((mDevice->FmtChans != DevFmtMono) ? 2 : 1);
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ par.bits = 8;
+ par.sig = 1;
+ break;
+ case DevFmtUByte:
+ par.bits = 8;
+ par.sig = 0;
+ break;
+ case DevFmtFloat:
+ case DevFmtShort:
+ par.bits = 16;
+ par.sig = 1;
+ break;
+ case DevFmtUShort:
+ par.bits = 16;
+ par.sig = 0;
+ break;
+ case DevFmtInt:
+ par.bits = 32;
+ par.sig = 1;
+ break;
+ case DevFmtUInt:
+ par.bits = 32;
+ par.sig = 0;
+ break;
+ }
+ par.le = SIO_LE_NATIVE;
+
+ par.round = mDevice->UpdateSize;
+ par.appbufsz = mDevice->BufferSize - mDevice->UpdateSize;
+ if(!par.appbufsz) par.appbufsz = mDevice->UpdateSize;
+
+ if(!sio_setpar(mSndHandle, &par) || !sio_getpar(mSndHandle, &par))
+ {
+ ERR("Failed to set device parameters\n");
+ return ALC_FALSE;
+ }
+
+ if(par.bits != par.bps*8)
+ {
+ ERR("Padded samples not supported (%u of %u bits)\n", par.bits, par.bps*8);
+ return ALC_FALSE;
+ }
+
+ mDevice->Frequency = par.rate;
+ mDevice->FmtChans = ((par.pchan==1) ? DevFmtMono : DevFmtStereo);
+
+ if(par.bits == 8 && par.sig == 1)
+ mDevice->FmtType = DevFmtByte;
+ else if(par.bits == 8 && par.sig == 0)
+ mDevice->FmtType = DevFmtUByte;
+ else if(par.bits == 16 && par.sig == 1)
+ mDevice->FmtType = DevFmtShort;
+ else if(par.bits == 16 && par.sig == 0)
+ mDevice->FmtType = DevFmtUShort;
+ else if(par.bits == 32 && par.sig == 1)
+ mDevice->FmtType = DevFmtInt;
+ else if(par.bits == 32 && par.sig == 0)
+ mDevice->FmtType = DevFmtUInt;
+ else
+ {
+ ERR("Unhandled sample format: %s %u-bit\n", (par.sig?"signed":"unsigned"), par.bits);
+ return ALC_FALSE;
+ }
+
+ SetDefaultChannelOrder(mDevice);
+
+ mDevice->UpdateSize = par.round;
+ mDevice->BufferSize = par.bufsz + par.round;
+
+ mBuffer.resize(mDevice->UpdateSize * mDevice->frameSizeFromFmt());
+ std::fill(mBuffer.begin(), mBuffer.end(), 0);
+
+ return ALC_TRUE;
+}
+
+ALCboolean SndioPlayback::start()
+{
+ if(!sio_start(mSndHandle))
+ {
+ ERR("Error starting playback\n");
+ return ALC_FALSE;
+ }
+
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&SndioPlayback::mixerProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Could not create playback thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ sio_stop(mSndHandle);
+ return ALC_FALSE;
+}
+
+void SndioPlayback::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+
+ if(!sio_stop(mSndHandle))
+ ERR("Error stopping device\n");
+}
+
+
+struct SndioCapture final : public BackendBase {
+ SndioCapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~SndioCapture() override;
+
+ int recordProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(void *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+
+ sio_hdl *mSndHandle{nullptr};
+
+ RingBufferPtr mRing;
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(SndioCapture)
+};
+
+SndioCapture::~SndioCapture()
+{
+ if(mSndHandle)
+ sio_close(mSndHandle);
+ mSndHandle = nullptr;
+}
+
+int SndioCapture::recordProc()
+{
+ SetRTPriority();
+ althrd_setname(RECORD_THREAD_NAME);
+
+ const ALsizei frameSize{mDevice->frameSizeFromFmt()};
+
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ auto data = mRing->getWriteVector();
+ size_t todo{data.first.len + data.second.len};
+ if(todo == 0)
+ {
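+            /* The ring buffer is full; read and discard input so the device
+             * doesn't overrun while waiting for the app to catch up.
+             */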
+ static char junk[4096];
+ sio_read(mSndHandle, junk,
+ minz(sizeof(junk)/frameSize, mDevice->UpdateSize)*frameSize);
+ continue;
+ }
+
+ size_t total{0u};
+ data.first.len *= frameSize;
+ data.second.len *= frameSize;
+ todo = minz(todo, mDevice->UpdateSize) * frameSize;
+ while(total < todo)
+ {
+ if(!data.first.len)
+ data.first = data.second;
+
+ size_t got{sio_read(mSndHandle, data.first.buf, minz(todo-total, data.first.len))};
+ if(!got)
+ {
+ aluHandleDisconnect(mDevice, "Failed to read capture samples");
+ break;
+ }
+
+ data.first.buf += got;
+ data.first.len -= got;
+ total += got;
+ }
+ mRing->writeAdvance(total / frameSize);
+ }
+
+ return 0;
+}
+
+
+ALCenum SndioCapture::open(const ALCchar *name)
+{
+ if(!name)
+ name = sndio_device;
+ else if(strcmp(name, sndio_device) != 0)
+ return ALC_INVALID_VALUE;
+
+ mSndHandle = sio_open(nullptr, SIO_REC, 0);
+ if(mSndHandle == nullptr)
+ {
+ ERR("Could not open device\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ sio_par par;
+ sio_initpar(&par);
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ par.bps = 1;
+ par.sig = 1;
+ break;
+ case DevFmtUByte:
+ par.bps = 1;
+ par.sig = 0;
+ break;
+ case DevFmtShort:
+ par.bps = 2;
+ par.sig = 1;
+ break;
+ case DevFmtUShort:
+ par.bps = 2;
+ par.sig = 0;
+ break;
+ case DevFmtInt:
+ par.bps = 4;
+ par.sig = 1;
+ break;
+ case DevFmtUInt:
+ par.bps = 4;
+ par.sig = 0;
+ break;
+ case DevFmtFloat:
+ ERR("%s capture samples not supported\n", DevFmtTypeString(mDevice->FmtType));
+ return ALC_INVALID_VALUE;
+ }
+ par.bits = par.bps * 8;
+ par.le = SIO_LE_NATIVE;
+ par.msb = SIO_LE_NATIVE ? 0 : 1;
+ par.rchan = mDevice->channelsFromFmt();
+ par.rate = mDevice->Frequency;
+
+ par.appbufsz = maxu(mDevice->BufferSize, mDevice->Frequency/10);
+ par.round = minu(par.appbufsz, mDevice->Frequency/40);
+
+ mDevice->UpdateSize = par.round;
+ mDevice->BufferSize = par.appbufsz;
+
+ if(!sio_setpar(mSndHandle, &par) || !sio_getpar(mSndHandle, &par))
+ {
+ ERR("Failed to set device parameters\n");
+ return ALC_INVALID_VALUE;
+ }
+
+ if(par.bits != par.bps*8)
+ {
+ ERR("Padded samples not supported (%u of %u bits)\n", par.bits, par.bps*8);
+ return ALC_INVALID_VALUE;
+ }
+
+ if(!((mDevice->FmtType == DevFmtByte && par.bits == 8 && par.sig != 0) ||
+ (mDevice->FmtType == DevFmtUByte && par.bits == 8 && par.sig == 0) ||
+ (mDevice->FmtType == DevFmtShort && par.bits == 16 && par.sig != 0) ||
+ (mDevice->FmtType == DevFmtUShort && par.bits == 16 && par.sig == 0) ||
+ (mDevice->FmtType == DevFmtInt && par.bits == 32 && par.sig != 0) ||
+ (mDevice->FmtType == DevFmtUInt && par.bits == 32 && par.sig == 0)) ||
+ mDevice->channelsFromFmt() != (ALsizei)par.rchan ||
+ mDevice->Frequency != par.rate)
+ {
+ ERR("Failed to set format %s %s %uhz, got %c%u %u-channel %uhz instead\n",
+ DevFmtTypeString(mDevice->FmtType), DevFmtChannelsString(mDevice->FmtChans),
+ mDevice->Frequency, par.sig?'s':'u', par.bits, par.rchan, par.rate);
+ return ALC_INVALID_VALUE;
+ }
+
+ mRing = CreateRingBuffer(mDevice->BufferSize, par.bps*par.rchan, false);
+ if(!mRing)
+ {
+ ERR("Failed to allocate %u-byte ringbuffer\n", mDevice->BufferSize*par.bps*par.rchan);
+ return ALC_OUT_OF_MEMORY;
+ }
+
+ SetDefaultChannelOrder(mDevice);
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean SndioCapture::start()
+{
+ if(!sio_start(mSndHandle))
+ {
+        ERR("Error starting recording\n");
+ return ALC_FALSE;
+ }
+
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&SndioCapture::recordProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Could not create record thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ sio_stop(mSndHandle);
+ return ALC_FALSE;
+}
+
+void SndioCapture::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+
+ if(!sio_stop(mSndHandle))
+ ERR("Error stopping device\n");
+}
+
+ALCenum SndioCapture::captureSamples(void *buffer, ALCuint samples)
+{
+ mRing->read(buffer, samples);
+ return ALC_NO_ERROR;
+}
+
+ALCuint SndioCapture::availableSamples()
+{ return mRing->readSpace(); }
+
+} // namespace
+
+BackendFactory &SndIOBackendFactory::getFactory()
+{
+ static SndIOBackendFactory factory{};
+ return factory;
+}
+
+bool SndIOBackendFactory::init()
+{ return true; }
+
+bool SndIOBackendFactory::querySupport(BackendType type)
+{ return (type == BackendType::Playback || type == BackendType::Capture); }
+
+void SndIOBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ switch(type)
+ {
+ case DevProbe::Playback:
+ case DevProbe::Capture:
+ /* Includes null char. */
+ outnames->append(sndio_device, sizeof(sndio_device));
+ break;
+ }
+}
+
+BackendPtr SndIOBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new SndioPlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new SndioCapture{device}};
+ return nullptr;
+}
diff --git a/alc/backends/sndio.h b/alc/backends/sndio.h
new file mode 100644
index 00000000..1ed63d5e
--- /dev/null
+++ b/alc/backends/sndio.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_SNDIO_H
+#define BACKENDS_SNDIO_H
+
+#include "backends/base.h"
+
+struct SndIOBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_SNDIO_H */
diff --git a/alc/backends/solaris.cpp b/alc/backends/solaris.cpp
new file mode 100644
index 00000000..584f6e66
--- /dev/null
+++ b/alc/backends/solaris.cpp
@@ -0,0 +1,302 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/solaris.h"
+
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <memory.h>
+#include <unistd.h>
+#include <errno.h>
+#include <poll.h>
+#include <math.h>
+
+#include <thread>
+#include <functional>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "alconfig.h"
+#include "threads.h"
+#include "vector.h"
+#include "compat.h"
+
+#include <sys/audioio.h>
+
+
+namespace {
+
+constexpr ALCchar solaris_device[] = "Solaris Default";
+
+std::string solaris_driver{"/dev/audio"};
+
+
+struct SolarisBackend final : public BackendBase {
+ SolarisBackend(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~SolarisBackend() override;
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ int mFd{-1};
+
+ al::vector<ALubyte> mBuffer;
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(SolarisBackend)
+};
+
+SolarisBackend::~SolarisBackend()
+{
+ if(mFd != -1)
+ close(mFd);
+ mFd = -1;
+}
+
+int SolarisBackend::mixerProc()
+{
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ const int frame_size{mDevice->frameSizeFromFmt()};
+
+ lock();
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ pollfd pollitem{};
+ pollitem.fd = mFd;
+ pollitem.events = POLLOUT;
+
+ unlock();
+ int pret{poll(&pollitem, 1, 1000)};
+ lock();
+ if(pret < 0)
+ {
+ if(errno == EINTR || errno == EAGAIN)
+ continue;
+ ERR("poll failed: %s\n", strerror(errno));
+ aluHandleDisconnect(mDevice, "Failed to wait for playback buffer: %s",
+ strerror(errno));
+ break;
+ }
+ else if(pret == 0)
+ {
+ WARN("poll timeout\n");
+ continue;
+ }
+
+ ALubyte *write_ptr{mBuffer.data()};
+ size_t to_write{mBuffer.size()};
+ aluMixData(mDevice, write_ptr, to_write/frame_size);
+ while(to_write > 0 && !mKillNow.load(std::memory_order_acquire))
+ {
+ ssize_t wrote{write(mFd, write_ptr, to_write)};
+ if(wrote < 0)
+ {
+ if(errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
+ continue;
+ ERR("write failed: %s\n", strerror(errno));
+ aluHandleDisconnect(mDevice, "Failed to write playback samples: %s",
+ strerror(errno));
+ break;
+ }
+
+ to_write -= wrote;
+ write_ptr += wrote;
+ }
+ }
+ unlock();
+
+ return 0;
+}
+
+
+ALCenum SolarisBackend::open(const ALCchar *name)
+{
+ if(!name)
+ name = solaris_device;
+ else if(strcmp(name, solaris_device) != 0)
+ return ALC_INVALID_VALUE;
+
+ mFd = ::open(solaris_driver.c_str(), O_WRONLY);
+ if(mFd == -1)
+ {
+ ERR("Could not open %s: %s\n", solaris_driver.c_str(), strerror(errno));
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+ return ALC_NO_ERROR;
+}
+
+ALCboolean SolarisBackend::reset()
+{
+ audio_info_t info;
+ AUDIO_INITINFO(&info);
+
+ info.play.sample_rate = mDevice->Frequency;
+
+ if(mDevice->FmtChans != DevFmtMono)
+ mDevice->FmtChans = DevFmtStereo;
+ ALsizei numChannels{mDevice->channelsFromFmt()};
+ info.play.channels = numChannels;
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ info.play.precision = 8;
+ info.play.encoding = AUDIO_ENCODING_LINEAR;
+ break;
+ case DevFmtUByte:
+ info.play.precision = 8;
+ info.play.encoding = AUDIO_ENCODING_LINEAR8;
+ break;
+ case DevFmtUShort:
+ case DevFmtInt:
+ case DevFmtUInt:
+ case DevFmtFloat:
+ mDevice->FmtType = DevFmtShort;
+ /* fall-through */
+ case DevFmtShort:
+ info.play.precision = 16;
+ info.play.encoding = AUDIO_ENCODING_LINEAR;
+ break;
+ }
+
+ ALsizei frameSize{numChannels * mDevice->bytesFromFmt()};
+ info.play.buffer_size = mDevice->BufferSize * frameSize;
+
+ if(ioctl(mFd, AUDIO_SETINFO, &info) < 0)
+ {
+ ERR("ioctl failed: %s\n", strerror(errno));
+ return ALC_FALSE;
+ }
+
+ if(mDevice->channelsFromFmt() != (ALsizei)info.play.channels)
+ {
+ ERR("Failed to set %s, got %u channels instead\n", DevFmtChannelsString(mDevice->FmtChans),
+ info.play.channels);
+ return ALC_FALSE;
+ }
+
+ if(!((info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR8 && mDevice->FmtType == DevFmtUByte) ||
+ (info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR && mDevice->FmtType == DevFmtByte) ||
+ (info.play.precision == 16 && info.play.encoding == AUDIO_ENCODING_LINEAR && mDevice->FmtType == DevFmtShort) ||
+ (info.play.precision == 32 && info.play.encoding == AUDIO_ENCODING_LINEAR && mDevice->FmtType == DevFmtInt)))
+ {
+ ERR("Could not set %s samples, got %d (0x%x)\n", DevFmtTypeString(mDevice->FmtType),
+ info.play.precision, info.play.encoding);
+ return ALC_FALSE;
+ }
+
+ mDevice->Frequency = info.play.sample_rate;
+ mDevice->BufferSize = info.play.buffer_size / frameSize;
+ mDevice->UpdateSize = mDevice->BufferSize / 2;
+
+ SetDefaultChannelOrder(mDevice);
+
+ mBuffer.resize(mDevice->UpdateSize * mDevice->frameSizeFromFmt());
+ std::fill(mBuffer.begin(), mBuffer.end(), 0);
+
+ return ALC_TRUE;
+}
+
+ALCboolean SolarisBackend::start()
+{
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&SolarisBackend::mixerProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Could not create playback thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+void SolarisBackend::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+
+ if(ioctl(mFd, AUDIO_DRAIN) < 0)
+ ERR("Error draining device: %s\n", strerror(errno));
+}
+
+} // namespace
+
+BackendFactory &SolarisBackendFactory::getFactory()
+{
+ static SolarisBackendFactory factory{};
+ return factory;
+}
+
+bool SolarisBackendFactory::init()
+{
+ if(auto devopt = ConfigValueStr(nullptr, "solaris", "device"))
+ solaris_driver = std::move(*devopt);
+ return true;
+}
+
+bool SolarisBackendFactory::querySupport(BackendType type)
+{ return type == BackendType::Playback; }
+
+void SolarisBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ switch(type)
+ {
+ case DevProbe::Playback:
+ {
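+            /* Only advertise the default device if the configured device node
+             * actually exists (when stat() is available).
+             */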
+#ifdef HAVE_STAT
+ struct stat buf;
+ if(stat(solaris_driver.c_str(), &buf) == 0)
+#endif
+ outnames->append(solaris_device, sizeof(solaris_device));
+ }
+ break;
+
+ case DevProbe::Capture:
+ break;
+ }
+}
+
+BackendPtr SolarisBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new SolarisBackend{device}};
+ return nullptr;
+}
diff --git a/alc/backends/solaris.h b/alc/backends/solaris.h
new file mode 100644
index 00000000..98b10593
--- /dev/null
+++ b/alc/backends/solaris.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_SOLARIS_H
+#define BACKENDS_SOLARIS_H
+
+#include "backends/base.h"
+
+struct SolarisBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_SOLARIS_H */
diff --git a/alc/backends/wasapi.cpp b/alc/backends/wasapi.cpp
new file mode 100644
index 00000000..bd009463
--- /dev/null
+++ b/alc/backends/wasapi.cpp
@@ -0,0 +1,1763 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2011 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/wasapi.h"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <memory.h>
+
+#include <wtypes.h>
+#include <mmdeviceapi.h>
+#include <audioclient.h>
+#include <cguid.h>
+#include <devpropdef.h>
+#include <mmreg.h>
+#include <propsys.h>
+#include <propkey.h>
+#include <devpkey.h>
+#ifndef _WAVEFORMATEXTENSIBLE_
+#include <ks.h>
+#include <ksmedia.h>
+#endif
+
+#include <deque>
+#include <mutex>
+#include <atomic>
+#include <thread>
+#include <vector>
+#include <string>
+#include <future>
+#include <algorithm>
+#include <functional>
+#include <condition_variable>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "ringbuffer.h"
+#include "compat.h"
+#include "converter.h"
+#include "threads.h"
+
+
+/* Some headers define these as macros for __uuidof, while others don't
+ * declare them at all; the ifdef should be enough to tell whether they need
+ * to be declared here.
+ */
+#ifndef KSDATAFORMAT_SUBTYPE_PCM
+DEFINE_GUID(KSDATAFORMAT_SUBTYPE_PCM, 0x00000001, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71);
+#endif
+#ifndef KSDATAFORMAT_SUBTYPE_IEEE_FLOAT
+DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, 0x00000003, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71);
+#endif
+
+DEFINE_DEVPROPKEY(DEVPKEY_Device_FriendlyName, 0xa45c254e, 0xdf1c, 0x4efd, 0x80,0x20, 0x67,0xd1,0x46,0xa8,0x50,0xe0, 14);
+DEFINE_PROPERTYKEY(PKEY_AudioEndpoint_FormFactor, 0x1da5d803, 0xd492, 0x4edd, 0x8c,0x23, 0xe0,0xc0,0xff,0xee,0x7f,0x0e, 0);
+DEFINE_PROPERTYKEY(PKEY_AudioEndpoint_GUID, 0x1da5d803, 0xd492, 0x4edd, 0x8c, 0x23,0xe0, 0xc0,0xff,0xee,0x7f,0x0e, 4 );
+
+
+namespace {
+
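+/* Speaker mask (dwChannelMask) values for each supported channel layout. */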
+#define MONO SPEAKER_FRONT_CENTER
+#define STEREO (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT)
+#define QUAD (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_BACK_LEFT|SPEAKER_BACK_RIGHT)
+#define X5DOT1 (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_SIDE_LEFT|SPEAKER_SIDE_RIGHT)
+#define X5DOT1REAR (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_BACK_LEFT|SPEAKER_BACK_RIGHT)
+#define X6DOT1 (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_BACK_CENTER|SPEAKER_SIDE_LEFT|SPEAKER_SIDE_RIGHT)
+#define X7DOT1 (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_BACK_LEFT|SPEAKER_BACK_RIGHT|SPEAKER_SIDE_LEFT|SPEAKER_SIDE_RIGHT)
+#define X7DOT1_WIDE (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_BACK_LEFT|SPEAKER_BACK_RIGHT|SPEAKER_FRONT_LEFT_OF_CENTER|SPEAKER_FRONT_RIGHT_OF_CENTER)
+
+#define REFTIME_PER_SEC ((REFERENCE_TIME)10000000)
+
+#define DEVNAME_HEAD "OpenAL Soft on "
+
+
+/* Scales the given value using 64-bit integer math, ceiling the result. */
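+/* e.g. ScaleCeil(period, Frequency, REFTIME_PER_SEC) converts a
+ * REFERENCE_TIME period (100ns units) into a sample count, rounded up.
+ */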
+inline int64_t ScaleCeil(int64_t val, int64_t new_scale, int64_t old_scale)
+{
+ return (val*new_scale + old_scale-1) / old_scale;
+}
+
+
+struct PropVariant {
+ PROPVARIANT mProp;
+
+public:
+ PropVariant() { PropVariantInit(&mProp); }
+ ~PropVariant() { clear(); }
+
+ void clear() { PropVariantClear(&mProp); }
+
+ PROPVARIANT* get() noexcept { return &mProp; }
+
+ PROPVARIANT& operator*() noexcept { return mProp; }
+ const PROPVARIANT& operator*() const noexcept { return mProp; }
+
+ PROPVARIANT* operator->() noexcept { return &mProp; }
+ const PROPVARIANT* operator->() const noexcept { return &mProp; }
+};
+
+struct DevMap {
+ std::string name;
+ std::string endpoint_guid; // Obtained from PKEY_AudioEndpoint_GUID; set to "Unknown Device GUID" if absent.
+ std::wstring devid;
+
+ template<typename T0, typename T1, typename T2>
+ DevMap(T0&& name_, T1&& guid_, T2&& devid_)
+ : name{std::forward<T0>(name_)}
+ , endpoint_guid{std::forward<T1>(guid_)}
+ , devid{std::forward<T2>(devid_)}
+ { }
+};
+
+bool checkName(const al::vector<DevMap> &list, const std::string &name)
+{
+ return std::find_if(list.cbegin(), list.cend(),
+ [&name](const DevMap &entry) -> bool
+ { return entry.name == name; }
+ ) != list.cend();
+}
+
+al::vector<DevMap> PlaybackDevices;
+al::vector<DevMap> CaptureDevices;
+
+
+using NameGUIDPair = std::pair<std::string,std::string>;
+NameGUIDPair get_device_name_and_guid(IMMDevice *device)
+{
+ std::string name{DEVNAME_HEAD};
+ std::string guid;
+
+ IPropertyStore *ps;
+ HRESULT hr = device->OpenPropertyStore(STGM_READ, &ps);
+ if(FAILED(hr))
+ {
+ WARN("OpenPropertyStore failed: 0x%08lx\n", hr);
+ return { name+"Unknown Device Name", "Unknown Device GUID" };
+ }
+
+ PropVariant pvprop;
+ hr = ps->GetValue(reinterpret_cast<const PROPERTYKEY&>(DEVPKEY_Device_FriendlyName), pvprop.get());
+ if(FAILED(hr))
+ {
+ WARN("GetValue Device_FriendlyName failed: 0x%08lx\n", hr);
+ name += "Unknown Device Name";
+ }
+ else if(pvprop->vt == VT_LPWSTR)
+ name += wstr_to_utf8(pvprop->pwszVal);
+ else
+ {
+ WARN("Unexpected PROPVARIANT type: 0x%04x\n", pvprop->vt);
+ name += "Unknown Device Name";
+ }
+
+ pvprop.clear();
+ hr = ps->GetValue(reinterpret_cast<const PROPERTYKEY&>(PKEY_AudioEndpoint_GUID), pvprop.get());
+ if(FAILED(hr))
+ {
+ WARN("GetValue AudioEndpoint_GUID failed: 0x%08lx\n", hr);
+ guid = "Unknown Device GUID";
+ }
+ else if(pvprop->vt == VT_LPWSTR)
+ guid = wstr_to_utf8(pvprop->pwszVal);
+ else
+ {
+ WARN("Unexpected PROPVARIANT type: 0x%04x\n", pvprop->vt);
+ guid = "Unknown Device GUID";
+ }
+
+ ps->Release();
+
+ return {name, guid};
+}
+
+void get_device_formfactor(IMMDevice *device, EndpointFormFactor *formfactor)
+{
+ IPropertyStore *ps;
+ HRESULT hr = device->OpenPropertyStore(STGM_READ, &ps);
+ if(FAILED(hr))
+ {
+ WARN("OpenPropertyStore failed: 0x%08lx\n", hr);
+ return;
+ }
+
+ PropVariant pvform;
+ hr = ps->GetValue(reinterpret_cast<const PROPERTYKEY&>(PKEY_AudioEndpoint_FormFactor), pvform.get());
+ if(FAILED(hr))
+ WARN("GetValue AudioEndpoint_FormFactor failed: 0x%08lx\n", hr);
+ else if(pvform->vt == VT_UI4)
+ *formfactor = static_cast<EndpointFormFactor>(pvform->ulVal);
+ else if(pvform->vt == VT_EMPTY)
+ *formfactor = UnknownFormFactor;
+ else
+ WARN("Unexpected PROPVARIANT type: 0x%04x\n", pvform->vt);
+
+ ps->Release();
+}
+
+
+void add_device(IMMDevice *device, const WCHAR *devid, al::vector<DevMap> &list)
+{
+ std::string basename, guidstr;
+ std::tie(basename, guidstr) = get_device_name_and_guid(device);
+
+ int count{1};
+ std::string newname{basename};
+ while(checkName(list, newname))
+ {
+ newname = basename;
+ newname += " #";
+ newname += std::to_string(++count);
+ }
+ list.emplace_back(std::move(newname), std::move(guidstr), devid);
+ const DevMap &newentry = list.back();
+
+ TRACE("Got device \"%s\", \"%s\", \"%ls\"\n", newentry.name.c_str(),
+ newentry.endpoint_guid.c_str(), newentry.devid.c_str());
+}
+
+WCHAR *get_device_id(IMMDevice *device)
+{
+ WCHAR *devid;
+
+ HRESULT hr = device->GetId(&devid);
+ if(FAILED(hr))
+ {
+ ERR("Failed to get device id: %lx\n", hr);
+ return nullptr;
+ }
+
+ return devid;
+}
+
+HRESULT probe_devices(IMMDeviceEnumerator *devenum, EDataFlow flowdir, al::vector<DevMap> &list)
+{
+ IMMDeviceCollection *coll;
+ HRESULT hr{devenum->EnumAudioEndpoints(flowdir, DEVICE_STATE_ACTIVE, &coll)};
+ if(FAILED(hr))
+ {
+ ERR("Failed to enumerate audio endpoints: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ IMMDevice *defdev{nullptr};
+ WCHAR *defdevid{nullptr};
+ UINT count{0};
+ hr = coll->GetCount(&count);
+ if(SUCCEEDED(hr) && count > 0)
+ {
+ list.clear();
+ list.reserve(count);
+
+ hr = devenum->GetDefaultAudioEndpoint(flowdir, eMultimedia, &defdev);
+ }
+ if(SUCCEEDED(hr) && defdev != nullptr)
+ {
+ defdevid = get_device_id(defdev);
+ if(defdevid)
+ add_device(defdev, defdevid, list);
+ }
+
+ for(UINT i{0};i < count;++i)
+ {
+ IMMDevice *device;
+ hr = coll->Item(i, &device);
+ if(FAILED(hr)) continue;
+
+ WCHAR *devid{get_device_id(device)};
+ if(devid)
+ {
+ if(!defdevid || wcscmp(devid, defdevid) != 0)
+ add_device(device, devid, list);
+ CoTaskMemFree(devid);
+ }
+ device->Release();
+ }
+
+ if(defdev) defdev->Release();
+ if(defdevid) CoTaskMemFree(defdevid);
+ coll->Release();
+
+ return S_OK;
+}
+
+
+bool MakeExtensible(WAVEFORMATEXTENSIBLE *out, const WAVEFORMATEX *in)
+{
+ *out = WAVEFORMATEXTENSIBLE{};
+ if(in->wFormatTag == WAVE_FORMAT_EXTENSIBLE)
+ {
+ *out = *CONTAINING_RECORD(in, const WAVEFORMATEXTENSIBLE, Format);
+ out->Format.cbSize = sizeof(*out) - sizeof(out->Format);
+ }
+ else if(in->wFormatTag == WAVE_FORMAT_PCM)
+ {
+ out->Format = *in;
+ out->Format.cbSize = 0;
+ out->Samples.wValidBitsPerSample = out->Format.wBitsPerSample;
+ if(out->Format.nChannels == 1)
+ out->dwChannelMask = MONO;
+ else if(out->Format.nChannels == 2)
+ out->dwChannelMask = STEREO;
+ else
+ ERR("Unhandled PCM channel count: %d\n", out->Format.nChannels);
+ out->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ }
+ else if(in->wFormatTag == WAVE_FORMAT_IEEE_FLOAT)
+ {
+ out->Format = *in;
+ out->Format.cbSize = 0;
+ out->Samples.wValidBitsPerSample = out->Format.wBitsPerSample;
+ if(out->Format.nChannels == 1)
+ out->dwChannelMask = MONO;
+ else if(out->Format.nChannels == 2)
+ out->dwChannelMask = STEREO;
+ else
+ ERR("Unhandled IEEE float channel count: %d\n", out->Format.nChannels);
+ out->SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ }
+ else
+ {
+ ERR("Unhandled format tag: 0x%04x\n", in->wFormatTag);
+ return false;
+ }
+ return true;
+}
+
+void TraceFormat(const char *msg, const WAVEFORMATEX *format)
+{
+ constexpr size_t fmtex_extra_size{sizeof(WAVEFORMATEXTENSIBLE)-sizeof(WAVEFORMATEX)};
+ if(format->wFormatTag == WAVE_FORMAT_EXTENSIBLE && format->cbSize >= fmtex_extra_size)
+ {
+ class GuidPrinter {
+ char mMsg[64];
+
+ public:
+ GuidPrinter(const GUID &guid)
+ {
+ std::snprintf(mMsg, al::size(mMsg),
+ "{%08lx-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x}",
+ DWORD{guid.Data1}, guid.Data2, guid.Data3,
+ guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3],
+ guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]);
+ }
+ const char *c_str() const { return mMsg; }
+ };
+
+ const WAVEFORMATEXTENSIBLE *fmtex{
+ CONTAINING_RECORD(format, const WAVEFORMATEXTENSIBLE, Format)};
+ TRACE("%s:\n"
+ " FormatTag = 0x%04x\n"
+ " Channels = %d\n"
+ " SamplesPerSec = %lu\n"
+ " AvgBytesPerSec = %lu\n"
+ " BlockAlign = %d\n"
+ " BitsPerSample = %d\n"
+ " Size = %d\n"
+ " Samples = %d\n"
+ " ChannelMask = 0x%lx\n"
+ " SubFormat = %s\n",
+ msg, fmtex->Format.wFormatTag, fmtex->Format.nChannels, fmtex->Format.nSamplesPerSec,
+ fmtex->Format.nAvgBytesPerSec, fmtex->Format.nBlockAlign, fmtex->Format.wBitsPerSample,
+ fmtex->Format.cbSize, fmtex->Samples.wReserved, fmtex->dwChannelMask,
+ GuidPrinter{fmtex->SubFormat}.c_str());
+ }
+ else
+ TRACE("%s:\n"
+ " FormatTag = 0x%04x\n"
+ " Channels = %d\n"
+ " SamplesPerSec = %lu\n"
+ " AvgBytesPerSec = %lu\n"
+ " BlockAlign = %d\n"
+ " BitsPerSample = %d\n"
+ " Size = %d\n",
+ msg, format->wFormatTag, format->nChannels, format->nSamplesPerSec,
+ format->nAvgBytesPerSec, format->nBlockAlign, format->wBitsPerSample, format->cbSize);
+}
+
+
+enum class MsgType : unsigned int {
+ OpenDevice,
+ ResetDevice,
+ StartDevice,
+ StopDevice,
+ CloseDevice,
+ EnumeratePlayback,
+ EnumerateCapture,
+ QuitThread,
+
+ Count
+};
+
+constexpr char MessageStr[static_cast<unsigned int>(MsgType::Count)][20]{
+ "Open Device",
+ "Reset Device",
+ "Start Device",
+ "Stop Device",
+ "Close Device",
+ "Enumerate Playback",
+ "Enumerate Capture",
+ "Quit"
+};
+
+
+/* Proxy interface used by the message handler. */
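+/* Open/reset/start/stop calls are pushed as messages to a single handler
+ * thread (messageHandler below), which performs the actual WASAPI work and
+ * manages COM initialization for the open devices.
+ */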
+struct WasapiProxy {
+ virtual HRESULT openProxy() = 0;
+ virtual void closeProxy() = 0;
+
+ virtual HRESULT resetProxy() = 0;
+ virtual HRESULT startProxy() = 0;
+ virtual void stopProxy() = 0;
+
+ struct Msg {
+ MsgType mType;
+ WasapiProxy *mProxy;
+ std::promise<HRESULT> mPromise;
+ };
+ static std::deque<Msg> mMsgQueue;
+ static std::mutex mMsgQueueLock;
+ static std::condition_variable mMsgQueueCond;
+
+ std::future<HRESULT> pushMessage(MsgType type)
+ {
+ std::promise<HRESULT> promise;
+ std::future<HRESULT> future{promise.get_future()};
+ { std::lock_guard<std::mutex> _{mMsgQueueLock};
+ mMsgQueue.emplace_back(Msg{type, this, std::move(promise)});
+ }
+ mMsgQueueCond.notify_one();
+ return future;
+ }
+
+ static std::future<HRESULT> pushMessageStatic(MsgType type)
+ {
+ std::promise<HRESULT> promise;
+ std::future<HRESULT> future{promise.get_future()};
+ { std::lock_guard<std::mutex> _{mMsgQueueLock};
+ mMsgQueue.emplace_back(Msg{type, nullptr, std::move(promise)});
+ }
+ mMsgQueueCond.notify_one();
+ return future;
+ }
+
+ static bool popMessage(Msg &msg)
+ {
+ std::unique_lock<std::mutex> lock{mMsgQueueLock};
+ while(mMsgQueue.empty())
+ mMsgQueueCond.wait(lock);
+ msg = std::move(mMsgQueue.front());
+ mMsgQueue.pop_front();
+ return msg.mType != MsgType::QuitThread;
+ }
+
+ static int messageHandler(std::promise<HRESULT> *promise);
+};
+std::deque<WasapiProxy::Msg> WasapiProxy::mMsgQueue;
+std::mutex WasapiProxy::mMsgQueueLock;
+std::condition_variable WasapiProxy::mMsgQueueCond;
+
+int WasapiProxy::messageHandler(std::promise<HRESULT> *promise)
+{
+ TRACE("Starting message thread\n");
+
+ HRESULT cohr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+ if(FAILED(cohr))
+ {
+ WARN("Failed to initialize COM: 0x%08lx\n", cohr);
+ promise->set_value(cohr);
+ return 0;
+ }
+
+ void *ptr{};
+ HRESULT hr{CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER,
+ IID_IMMDeviceEnumerator, &ptr)};
+ if(FAILED(hr))
+ {
+ WARN("Failed to create IMMDeviceEnumerator instance: 0x%08lx\n", hr);
+ promise->set_value(hr);
+ CoUninitialize();
+ return 0;
+ }
+ auto Enumerator = static_cast<IMMDeviceEnumerator*>(ptr);
+ Enumerator->Release();
+ Enumerator = nullptr;
+ CoUninitialize();
+
+ TRACE("Message thread initialization complete\n");
+ promise->set_value(S_OK);
+ promise = nullptr;
+
+ TRACE("Starting message loop\n");
+ ALuint deviceCount{0};
+ Msg msg;
+ while(popMessage(msg))
+ {
+ TRACE("Got message \"%s\" (0x%04x, this=%p)\n",
+ MessageStr[static_cast<unsigned int>(msg.mType)], static_cast<unsigned int>(msg.mType),
+ msg.mProxy);
+
+ switch(msg.mType)
+ {
+ case MsgType::OpenDevice:
+ hr = cohr = S_OK;
+ if(++deviceCount == 1)
+ hr = cohr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+ if(SUCCEEDED(hr))
+ hr = msg.mProxy->openProxy();
+ msg.mPromise.set_value(hr);
+
+ if(FAILED(hr))
+ {
+ if(--deviceCount == 0 && SUCCEEDED(cohr))
+ CoUninitialize();
+ }
+ continue;
+
+ case MsgType::ResetDevice:
+ hr = msg.mProxy->resetProxy();
+ msg.mPromise.set_value(hr);
+ continue;
+
+ case MsgType::StartDevice:
+ hr = msg.mProxy->startProxy();
+ msg.mPromise.set_value(hr);
+ continue;
+
+ case MsgType::StopDevice:
+ msg.mProxy->stopProxy();
+ msg.mPromise.set_value(S_OK);
+ continue;
+
+ case MsgType::CloseDevice:
+ msg.mProxy->closeProxy();
+ msg.mPromise.set_value(S_OK);
+
+ if(--deviceCount == 0)
+ CoUninitialize();
+ continue;
+
+ case MsgType::EnumeratePlayback:
+ case MsgType::EnumerateCapture:
+ hr = cohr = S_OK;
+ if(++deviceCount == 1)
+ hr = cohr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+ if(SUCCEEDED(hr))
+ hr = CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER, IID_IMMDeviceEnumerator, &ptr);
+ if(FAILED(hr))
+ msg.mPromise.set_value(hr);
+ else
+ {
+ Enumerator = static_cast<IMMDeviceEnumerator*>(ptr);
+
+ if(msg.mType == MsgType::EnumeratePlayback)
+ hr = probe_devices(Enumerator, eRender, PlaybackDevices);
+ else if(msg.mType == MsgType::EnumerateCapture)
+ hr = probe_devices(Enumerator, eCapture, CaptureDevices);
+ msg.mPromise.set_value(hr);
+
+ Enumerator->Release();
+ Enumerator = nullptr;
+ }
+
+ if(--deviceCount == 0 && SUCCEEDED(cohr))
+ CoUninitialize();
+ continue;
+
+ default:
+ ERR("Unexpected message: %u\n", static_cast<unsigned int>(msg.mType));
+ msg.mPromise.set_value(E_FAIL);
+ continue;
+ }
+ }
+ TRACE("Message loop finished\n");
+
+ return 0;
+}
+
+
+struct WasapiPlayback final : public BackendBase, WasapiProxy {
+ WasapiPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~WasapiPlayback() override;
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ HRESULT openProxy() override;
+ void closeProxy() override;
+
+ ALCboolean reset() override;
+ HRESULT resetProxy() override;
+ ALCboolean start() override;
+ HRESULT startProxy() override;
+ void stop() override;
+ void stopProxy() override;
+
+ ClockLatency getClockLatency() override;
+
+ std::wstring mDevId;
+
+ IMMDevice *mMMDev{nullptr};
+ IAudioClient *mClient{nullptr};
+ IAudioRenderClient *mRender{nullptr};
+ HANDLE mNotifyEvent{nullptr};
+
+ std::atomic<UINT32> mPadding{0u};
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(WasapiPlayback)
+};
+
+WasapiPlayback::~WasapiPlayback()
+{
+ pushMessage(MsgType::CloseDevice).wait();
+
+ if(mNotifyEvent != nullptr)
+ CloseHandle(mNotifyEvent);
+ mNotifyEvent = nullptr;
+}
+
+
+FORCE_ALIGN int WasapiPlayback::mixerProc()
+{
+ HRESULT hr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+ if(FAILED(hr))
+ {
+ ERR("CoInitializeEx(nullptr, COINIT_MULTITHREADED) failed: 0x%08lx\n", hr);
+ aluHandleDisconnect(mDevice, "COM init failed: 0x%08lx", hr);
+ return 1;
+ }
+
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ const ALuint update_size{mDevice->UpdateSize};
+ const UINT32 buffer_len{mDevice->BufferSize};
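+ /* Mix whenever at least one update's worth of space is free in the
+ * device buffer; otherwise wait on the event WASAPI signals when buffer
+ * space becomes available.
+ */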
+ while(!mKillNow.load(std::memory_order_relaxed))
+ {
+ UINT32 written;
+ hr = mClient->GetCurrentPadding(&written);
+ if(FAILED(hr))
+ {
+ ERR("Failed to get padding: 0x%08lx\n", hr);
+ aluHandleDisconnect(mDevice, "Failed to retrieve buffer padding: 0x%08lx", hr);
+ break;
+ }
+ mPadding.store(written, std::memory_order_relaxed);
+
+ ALuint len{buffer_len - written};
+ if(len < update_size)
+ {
+ DWORD res{WaitForSingleObjectEx(mNotifyEvent, 2000, FALSE)};
+ if(res != WAIT_OBJECT_0)
+ ERR("WaitForSingleObjectEx error: 0x%lx\n", res);
+ continue;
+ }
+
+ BYTE *buffer;
+ hr = mRender->GetBuffer(len, &buffer);
+ if(SUCCEEDED(hr))
+ {
+ lock();
+ aluMixData(mDevice, buffer, len);
+ mPadding.store(written + len, std::memory_order_relaxed);
+ unlock();
+ hr = mRender->ReleaseBuffer(len, 0);
+ }
+ if(FAILED(hr))
+ {
+ ERR("Failed to buffer data: 0x%08lx\n", hr);
+ aluHandleDisconnect(mDevice, "Failed to send playback samples: 0x%08lx", hr);
+ break;
+ }
+ }
+ mPadding.store(0u, std::memory_order_release);
+
+ CoUninitialize();
+ return 0;
+}
+
+
+ALCenum WasapiPlayback::open(const ALCchar *name)
+{
+ HRESULT hr{S_OK};
+
+ mNotifyEvent = CreateEventW(nullptr, FALSE, FALSE, nullptr);
+ if(mNotifyEvent == nullptr)
+ {
+ ERR("Failed to create notify events: %lu\n", GetLastError());
+ hr = E_FAIL;
+ }
+
+ if(SUCCEEDED(hr))
+ {
+ if(name)
+ {
+ if(PlaybackDevices.empty())
+ pushMessage(MsgType::EnumeratePlayback).wait();
+
+ hr = E_FAIL;
+ auto iter = std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
+ [name](const DevMap &entry) -> bool
+ { return entry.name == name || entry.endpoint_guid == name; }
+ );
+ if(iter == PlaybackDevices.cend())
+ {
+ std::wstring wname{utf8_to_wstr(name)};
+ iter = std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
+ [&wname](const DevMap &entry) -> bool
+ { return entry.devid == wname; }
+ );
+ }
+ if(iter == PlaybackDevices.cend())
+ WARN("Failed to find device name matching \"%s\"\n", name);
+ else
+ {
+ mDevId = iter->devid;
+ mDevice->DeviceName = iter->name;
+ hr = S_OK;
+ }
+ }
+ }
+
+ if(SUCCEEDED(hr))
+ hr = pushMessage(MsgType::OpenDevice).get();
+
+ if(FAILED(hr))
+ {
+ if(mNotifyEvent != nullptr)
+ CloseHandle(mNotifyEvent);
+ mNotifyEvent = nullptr;
+
+ mDevId.clear();
+
+ ERR("Device init failed: 0x%08lx\n", hr);
+ return ALC_INVALID_VALUE;
+ }
+
+ return ALC_NO_ERROR;
+}
+
+HRESULT WasapiPlayback::openProxy()
+{
+ void *ptr;
+ HRESULT hr{CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER, IID_IMMDeviceEnumerator, &ptr)};
+ if(SUCCEEDED(hr))
+ {
+ auto Enumerator = static_cast<IMMDeviceEnumerator*>(ptr);
+ if(mDevId.empty())
+ hr = Enumerator->GetDefaultAudioEndpoint(eRender, eMultimedia, &mMMDev);
+ else
+ hr = Enumerator->GetDevice(mDevId.c_str(), &mMMDev);
+ Enumerator->Release();
+ }
+ if(SUCCEEDED(hr))
+ hr = mMMDev->Activate(IID_IAudioClient, CLSCTX_INPROC_SERVER, nullptr, &ptr);
+ if(SUCCEEDED(hr))
+ {
+ mClient = static_cast<IAudioClient*>(ptr);
+ if(mDevice->DeviceName.empty())
+ mDevice->DeviceName = get_device_name_and_guid(mMMDev).first;
+ }
+
+ if(FAILED(hr))
+ {
+ if(mMMDev)
+ mMMDev->Release();
+ mMMDev = nullptr;
+ }
+
+ return hr;
+}
+
+void WasapiPlayback::closeProxy()
+{
+ if(mClient)
+ mClient->Release();
+ mClient = nullptr;
+
+ if(mMMDev)
+ mMMDev->Release();
+ mMMDev = nullptr;
+}
+
+
+ALCboolean WasapiPlayback::reset()
+{
+ HRESULT hr{pushMessage(MsgType::ResetDevice).get()};
+ return SUCCEEDED(hr) ? ALC_TRUE : ALC_FALSE;
+}
+
+HRESULT WasapiPlayback::resetProxy()
+{
+ if(mClient)
+ mClient->Release();
+ mClient = nullptr;
+
+ void *ptr;
+ HRESULT hr = mMMDev->Activate(IID_IAudioClient, CLSCTX_INPROC_SERVER, nullptr, &ptr);
+ if(FAILED(hr))
+ {
+ ERR("Failed to reactivate audio client: 0x%08lx\n", hr);
+ return hr;
+ }
+ mClient = static_cast<IAudioClient*>(ptr);
+
+ WAVEFORMATEX *wfx;
+ hr = mClient->GetMixFormat(&wfx);
+ if(FAILED(hr))
+ {
+ ERR("Failed to get mix format: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ WAVEFORMATEXTENSIBLE OutputType;
+ if(!MakeExtensible(&OutputType, wfx))
+ {
+ CoTaskMemFree(wfx);
+ return E_FAIL;
+ }
+ CoTaskMemFree(wfx);
+ wfx = nullptr;
+
+ const REFERENCE_TIME per_time{mDevice->UpdateSize * REFTIME_PER_SEC / mDevice->Frequency};
+ const REFERENCE_TIME buf_time{mDevice->BufferSize * REFTIME_PER_SEC / mDevice->Frequency};
+
+ if(!mDevice->Flags.get<FrequencyRequest>())
+ mDevice->Frequency = OutputType.Format.nSamplesPerSec;
+ if(!mDevice->Flags.get<ChannelsRequest>())
+ {
+ if(OutputType.Format.nChannels == 1 && OutputType.dwChannelMask == MONO)
+ mDevice->FmtChans = DevFmtMono;
+ else if(OutputType.Format.nChannels == 2 && OutputType.dwChannelMask == STEREO)
+ mDevice->FmtChans = DevFmtStereo;
+ else if(OutputType.Format.nChannels == 4 && OutputType.dwChannelMask == QUAD)
+ mDevice->FmtChans = DevFmtQuad;
+ else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1)
+ mDevice->FmtChans = DevFmtX51;
+ else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1REAR)
+ mDevice->FmtChans = DevFmtX51Rear;
+ else if(OutputType.Format.nChannels == 7 && OutputType.dwChannelMask == X6DOT1)
+ mDevice->FmtChans = DevFmtX61;
+ else if(OutputType.Format.nChannels == 8 && (OutputType.dwChannelMask == X7DOT1 || OutputType.dwChannelMask == X7DOT1_WIDE))
+ mDevice->FmtChans = DevFmtX71;
+ else
+ ERR("Unhandled channel config: %d -- 0x%08lx\n", OutputType.Format.nChannels, OutputType.dwChannelMask);
+ }
+
+ OutputType.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ switch(mDevice->FmtChans)
+ {
+ case DevFmtMono:
+ OutputType.Format.nChannels = 1;
+ OutputType.dwChannelMask = MONO;
+ break;
+ case DevFmtAmbi3D:
+ mDevice->FmtChans = DevFmtStereo;
+ /*fall-through*/
+ case DevFmtStereo:
+ OutputType.Format.nChannels = 2;
+ OutputType.dwChannelMask = STEREO;
+ break;
+ case DevFmtQuad:
+ OutputType.Format.nChannels = 4;
+ OutputType.dwChannelMask = QUAD;
+ break;
+ case DevFmtX51:
+ OutputType.Format.nChannels = 6;
+ OutputType.dwChannelMask = X5DOT1;
+ break;
+ case DevFmtX51Rear:
+ OutputType.Format.nChannels = 6;
+ OutputType.dwChannelMask = X5DOT1REAR;
+ break;
+ case DevFmtX61:
+ OutputType.Format.nChannels = 7;
+ OutputType.dwChannelMask = X6DOT1;
+ break;
+ case DevFmtX71:
+ OutputType.Format.nChannels = 8;
+ OutputType.dwChannelMask = X7DOT1;
+ break;
+ }
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ mDevice->FmtType = DevFmtUByte;
+ /* fall-through */
+ case DevFmtUByte:
+ OutputType.Format.wBitsPerSample = 8;
+ OutputType.Samples.wValidBitsPerSample = 8;
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ break;
+ case DevFmtUShort:
+ mDevice->FmtType = DevFmtShort;
+ /* fall-through */
+ case DevFmtShort:
+ OutputType.Format.wBitsPerSample = 16;
+ OutputType.Samples.wValidBitsPerSample = 16;
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ break;
+ case DevFmtUInt:
+ mDevice->FmtType = DevFmtInt;
+ /* fall-through */
+ case DevFmtInt:
+ OutputType.Format.wBitsPerSample = 32;
+ OutputType.Samples.wValidBitsPerSample = 32;
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ break;
+ case DevFmtFloat:
+ OutputType.Format.wBitsPerSample = 32;
+ OutputType.Samples.wValidBitsPerSample = 32;
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ break;
+ }
+ OutputType.Format.nSamplesPerSec = mDevice->Frequency;
+
+ OutputType.Format.nBlockAlign = OutputType.Format.nChannels *
+ OutputType.Format.wBitsPerSample / 8;
+ OutputType.Format.nAvgBytesPerSec = OutputType.Format.nSamplesPerSec *
+ OutputType.Format.nBlockAlign;
+
+ TraceFormat("Requesting playback format", &OutputType.Format);
+ hr = mClient->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &OutputType.Format, &wfx);
+ if(FAILED(hr))
+ {
+ ERR("Failed to check format support: 0x%08lx\n", hr);
+ hr = mClient->GetMixFormat(&wfx);
+ }
+ if(FAILED(hr))
+ {
+ ERR("Failed to find a supported format: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ if(wfx != nullptr)
+ {
+ TraceFormat("Got playback format", wfx);
+ if(!MakeExtensible(&OutputType, wfx))
+ {
+ CoTaskMemFree(wfx);
+ return E_FAIL;
+ }
+ CoTaskMemFree(wfx);
+ wfx = nullptr;
+
+ mDevice->Frequency = OutputType.Format.nSamplesPerSec;
+ if(OutputType.Format.nChannels == 1 && OutputType.dwChannelMask == MONO)
+ mDevice->FmtChans = DevFmtMono;
+ else if(OutputType.Format.nChannels == 2 && OutputType.dwChannelMask == STEREO)
+ mDevice->FmtChans = DevFmtStereo;
+ else if(OutputType.Format.nChannels == 4 && OutputType.dwChannelMask == QUAD)
+ mDevice->FmtChans = DevFmtQuad;
+ else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1)
+ mDevice->FmtChans = DevFmtX51;
+ else if(OutputType.Format.nChannels == 6 && OutputType.dwChannelMask == X5DOT1REAR)
+ mDevice->FmtChans = DevFmtX51Rear;
+ else if(OutputType.Format.nChannels == 7 && OutputType.dwChannelMask == X6DOT1)
+ mDevice->FmtChans = DevFmtX61;
+ else if(OutputType.Format.nChannels == 8 && (OutputType.dwChannelMask == X7DOT1 || OutputType.dwChannelMask == X7DOT1_WIDE))
+ mDevice->FmtChans = DevFmtX71;
+ else
+ {
+ ERR("Unhandled extensible channels: %d -- 0x%08lx\n", OutputType.Format.nChannels, OutputType.dwChannelMask);
+ mDevice->FmtChans = DevFmtStereo;
+ OutputType.Format.nChannels = 2;
+ OutputType.dwChannelMask = STEREO;
+ }
+
+ if(IsEqualGUID(OutputType.SubFormat, KSDATAFORMAT_SUBTYPE_PCM))
+ {
+ if(OutputType.Format.wBitsPerSample == 8)
+ mDevice->FmtType = DevFmtUByte;
+ else if(OutputType.Format.wBitsPerSample == 16)
+ mDevice->FmtType = DevFmtShort;
+ else if(OutputType.Format.wBitsPerSample == 32)
+ mDevice->FmtType = DevFmtInt;
+ else
+ {
+ mDevice->FmtType = DevFmtShort;
+ OutputType.Format.wBitsPerSample = 16;
+ }
+ }
+ else if(IsEqualGUID(OutputType.SubFormat, KSDATAFORMAT_SUBTYPE_IEEE_FLOAT))
+ {
+ mDevice->FmtType = DevFmtFloat;
+ OutputType.Format.wBitsPerSample = 32;
+ }
+ else
+ {
+ ERR("Unhandled format sub-type\n");
+ mDevice->FmtType = DevFmtShort;
+ if(OutputType.Format.wFormatTag != WAVE_FORMAT_EXTENSIBLE)
+ OutputType.Format.wFormatTag = WAVE_FORMAT_PCM;
+ OutputType.Format.wBitsPerSample = 16;
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ }
+ OutputType.Samples.wValidBitsPerSample = OutputType.Format.wBitsPerSample;
+ }
+
+ EndpointFormFactor formfactor = UnknownFormFactor;
+ get_device_formfactor(mMMDev, &formfactor);
+ mDevice->IsHeadphones = (mDevice->FmtChans == DevFmtStereo &&
+ (formfactor == Headphones || formfactor == Headset));
+
+ SetDefaultWFXChannelOrder(mDevice);
+
+ hr = mClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, buf_time,
+ 0, &OutputType.Format, nullptr);
+ if(FAILED(hr))
+ {
+ ERR("Failed to initialize audio client: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ UINT32 buffer_len, min_len;
+ REFERENCE_TIME min_per;
+ hr = mClient->GetDevicePeriod(&min_per, nullptr);
+ if(SUCCEEDED(hr))
+ hr = mClient->GetBufferSize(&buffer_len);
+ if(FAILED(hr))
+ {
+ ERR("Failed to get audio buffer info: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ /* Find the nearest multiple of the period size to the update size */
+ if(min_per < per_time)
+ min_per *= maxi64((per_time + min_per/2) / min_per, 1);
+ min_len = static_cast<UINT32>(ScaleCeil(min_per, mDevice->Frequency, REFTIME_PER_SEC));
+ min_len = minu(min_len, buffer_len/2);
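+ /* e.g. a 3ms device period with a 10ms update request becomes a 9ms
+ * period (3ms * 3), which is then converted to samples and clamped to
+ * half the buffer length.
+ */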
+
+ mDevice->UpdateSize = min_len;
+ mDevice->BufferSize = buffer_len;
+
+ hr = mClient->SetEventHandle(mNotifyEvent);
+ if(FAILED(hr))
+ {
+ ERR("Failed to set event handle: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ return hr;
+}
+
+
+ALCboolean WasapiPlayback::start()
+{
+ HRESULT hr{pushMessage(MsgType::StartDevice).get()};
+ return SUCCEEDED(hr) ? ALC_TRUE : ALC_FALSE;
+}
+
+HRESULT WasapiPlayback::startProxy()
+{
+ ResetEvent(mNotifyEvent);
+
+ HRESULT hr = mClient->Start();
+ if(FAILED(hr))
+ {
+ ERR("Failed to start audio client: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ void *ptr;
+ hr = mClient->GetService(IID_IAudioRenderClient, &ptr);
+ if(SUCCEEDED(hr))
+ {
+ mRender = static_cast<IAudioRenderClient*>(ptr);
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&WasapiPlayback::mixerProc), this};
+ }
+ catch(...) {
+ mRender->Release();
+ mRender = nullptr;
+ ERR("Failed to start thread\n");
+ hr = E_FAIL;
+ }
+ }
+
+ if(FAILED(hr))
+ mClient->Stop();
+
+ return hr;
+}
+
+
+void WasapiPlayback::stop()
+{ pushMessage(MsgType::StopDevice).wait(); }
+
+void WasapiPlayback::stopProxy()
+{
+ if(!mRender || !mThread.joinable())
+ return;
+
+ mKillNow.store(true, std::memory_order_release);
+ mThread.join();
+
+ mRender->Release();
+ mRender = nullptr;
+ mClient->Stop();
+}
+
+
+ClockLatency WasapiPlayback::getClockLatency()
+{
+ ClockLatency ret;
+
+ lock();
+ ret.ClockTime = GetDeviceClockTime(mDevice);
+ ret.Latency = std::chrono::seconds{mPadding.load(std::memory_order_relaxed)};
+ ret.Latency /= mDevice->Frequency;
+ unlock();
+
+ return ret;
+}
+
+
+struct WasapiCapture final : public BackendBase, WasapiProxy {
+ WasapiCapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~WasapiCapture() override;
+
+ int recordProc();
+
+ ALCenum open(const ALCchar *name) override;
+ HRESULT openProxy() override;
+ void closeProxy() override;
+
+ HRESULT resetProxy() override;
+ ALCboolean start() override;
+ HRESULT startProxy() override;
+ void stop() override;
+ void stopProxy() override;
+
+ ALCenum captureSamples(void *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+
+ std::wstring mDevId;
+
+ IMMDevice *mMMDev{nullptr};
+ IAudioClient *mClient{nullptr};
+ IAudioCaptureClient *mCapture{nullptr};
+ HANDLE mNotifyEvent{nullptr};
+
+ ChannelConverterPtr mChannelConv;
+ SampleConverterPtr mSampleConv;
+ RingBufferPtr mRing;
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(WasapiCapture)
+};
+
+WasapiCapture::~WasapiCapture()
+{
+ pushMessage(MsgType::CloseDevice).wait();
+
+ if(mNotifyEvent != nullptr)
+ CloseHandle(mNotifyEvent);
+ mNotifyEvent = nullptr;
+}
+
+
+FORCE_ALIGN int WasapiCapture::recordProc()
+{
+ HRESULT hr = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
+ if(FAILED(hr))
+ {
+ ERR("CoInitializeEx(nullptr, COINIT_MULTITHREADED) failed: 0x%08lx\n", hr);
+ aluHandleDisconnect(mDevice, "COM init failed: 0x%08lx", hr);
+ return 1;
+ }
+
+ althrd_setname(RECORD_THREAD_NAME);
+
+ al::vector<float> samples;
+ while(!mKillNow.load(std::memory_order_relaxed))
+ {
+ UINT32 avail;
+ hr = mCapture->GetNextPacketSize(&avail);
+ if(FAILED(hr))
+ ERR("Failed to get next packet size: 0x%08lx\n", hr);
+ else if(avail > 0)
+ {
+ UINT32 numsamples;
+ DWORD flags;
+ BYTE *rdata;
+
+ hr = mCapture->GetBuffer(&rdata, &numsamples, &flags, nullptr, nullptr);
+ if(FAILED(hr))
+ ERR("Failed to get capture buffer: 0x%08lx\n", hr);
+ else
+ {
+ if(mChannelConv)
+ {
+ samples.resize(numsamples*2);
+ mChannelConv->convert(rdata, samples.data(), numsamples);
+ rdata = reinterpret_cast<BYTE*>(samples.data());
+ }
+
+ auto data = mRing->getWriteVector();
+
+ size_t dstframes;
+ if(mSampleConv)
+ {
+ const ALvoid *srcdata{rdata};
+ auto srcframes = static_cast<ALsizei>(numsamples);
+
+ dstframes = mSampleConv->convert(&srcdata, &srcframes, data.first.buf,
+ static_cast<ALsizei>(minz(data.first.len, INT_MAX)));
+ if(srcframes > 0 && dstframes == data.first.len && data.second.len > 0)
+ {
+ /* If some source samples remain, all of the first dest
+ * block was filled, and there's space in the second
+ * dest block, do another run for the second block.
+ */
+ dstframes += mSampleConv->convert(&srcdata, &srcframes, data.second.buf,
+ static_cast<ALsizei>(minz(data.second.len, INT_MAX)));
+ }
+ }
+ else
+ {
+ const auto framesize = static_cast<ALuint>(mDevice->frameSizeFromFmt());
+ size_t len1 = minz(data.first.len, numsamples);
+ size_t len2 = minz(data.second.len, numsamples-len1);
+
+ memcpy(data.first.buf, rdata, len1*framesize);
+ if(len2 > 0)
+ memcpy(data.second.buf, rdata+len1*framesize, len2*framesize);
+ dstframes = len1 + len2;
+ }
+
+ mRing->writeAdvance(dstframes);
+
+ hr = mCapture->ReleaseBuffer(numsamples);
+ if(FAILED(hr)) ERR("Failed to release capture buffer: 0x%08lx\n", hr);
+ }
+ }
+
+ if(FAILED(hr))
+ {
+ aluHandleDisconnect(mDevice, "Failed to capture samples: 0x%08lx", hr);
+ break;
+ }
+
+ DWORD res{WaitForSingleObjectEx(mNotifyEvent, 2000, FALSE)};
+ if(res != WAIT_OBJECT_0)
+ ERR("WaitForSingleObjectEx error: 0x%lx\n", res);
+ }
+
+ CoUninitialize();
+ return 0;
+}
+
+
+ALCenum WasapiCapture::open(const ALCchar *name)
+{
+ HRESULT hr{S_OK};
+
+ mNotifyEvent = CreateEventW(nullptr, FALSE, FALSE, nullptr);
+ if(mNotifyEvent == nullptr)
+ {
+ ERR("Failed to create notify event: %lu\n", GetLastError());
+ hr = E_FAIL;
+ }
+
+ if(SUCCEEDED(hr))
+ {
+ if(name)
+ {
+ if(CaptureDevices.empty())
+ pushMessage(MsgType::EnumerateCapture).wait();
+
+ hr = E_FAIL;
+ auto iter = std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
+ [name](const DevMap &entry) -> bool
+ { return entry.name == name || entry.endpoint_guid == name; }
+ );
+ if(iter == CaptureDevices.cend())
+ {
+ std::wstring wname{utf8_to_wstr(name)};
+ iter = std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
+ [&wname](const DevMap &entry) -> bool
+ { return entry.devid == wname; }
+ );
+ }
+ if(iter == CaptureDevices.cend())
+ WARN("Failed to find device name matching \"%s\"\n", name);
+ else
+ {
+ mDevId = iter->devid;
+ mDevice->DeviceName = iter->name;
+ hr = S_OK;
+ }
+ }
+ }
+
+ if(SUCCEEDED(hr))
+ hr = pushMessage(MsgType::OpenDevice).get();
+
+ if(FAILED(hr))
+ {
+ if(mNotifyEvent != nullptr)
+ CloseHandle(mNotifyEvent);
+ mNotifyEvent = nullptr;
+
+ mDevId.clear();
+
+ ERR("Device init failed: 0x%08lx\n", hr);
+ return ALC_INVALID_VALUE;
+ }
+
+ hr = pushMessage(MsgType::ResetDevice).get();
+ if(FAILED(hr))
+ {
+ if(hr == E_OUTOFMEMORY)
+ return ALC_OUT_OF_MEMORY;
+ return ALC_INVALID_VALUE;
+ }
+
+ return ALC_NO_ERROR;
+}
+
+HRESULT WasapiCapture::openProxy()
+{
+ void *ptr;
+ HRESULT hr{CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER,
+ IID_IMMDeviceEnumerator, &ptr)};
+ if(SUCCEEDED(hr))
+ {
+ auto Enumerator = static_cast<IMMDeviceEnumerator*>(ptr);
+ if(mDevId.empty())
+ hr = Enumerator->GetDefaultAudioEndpoint(eCapture, eMultimedia, &mMMDev);
+ else
+ hr = Enumerator->GetDevice(mDevId.c_str(), &mMMDev);
+ Enumerator->Release();
+ }
+ if(SUCCEEDED(hr))
+ hr = mMMDev->Activate(IID_IAudioClient, CLSCTX_INPROC_SERVER, nullptr, &ptr);
+ if(SUCCEEDED(hr))
+ {
+ mClient = static_cast<IAudioClient*>(ptr);
+ if(mDevice->DeviceName.empty())
+ mDevice->DeviceName = get_device_name_and_guid(mMMDev).first;
+ }
+
+ if(FAILED(hr))
+ {
+ if(mMMDev)
+ mMMDev->Release();
+ mMMDev = nullptr;
+ }
+
+ return hr;
+}
+
+void WasapiCapture::closeProxy()
+{
+ if(mClient)
+ mClient->Release();
+ mClient = nullptr;
+
+ if(mMMDev)
+ mMMDev->Release();
+ mMMDev = nullptr;
+}
+
+HRESULT WasapiCapture::resetProxy()
+{
+ if(mClient)
+ mClient->Release();
+ mClient = nullptr;
+
+ void *ptr;
+ HRESULT hr{mMMDev->Activate(IID_IAudioClient, CLSCTX_INPROC_SERVER, nullptr, &ptr)};
+ if(FAILED(hr))
+ {
+ ERR("Failed to reactivate audio client: 0x%08lx\n", hr);
+ return hr;
+ }
+ mClient = static_cast<IAudioClient*>(ptr);
+
+ // Make sure buffer is at least 100ms in size
+ REFERENCE_TIME buf_time{mDevice->BufferSize * REFTIME_PER_SEC / mDevice->Frequency};
+ buf_time = maxu64(buf_time, REFTIME_PER_SEC/10);
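+ /* REFTIME_PER_SEC/10 is 100ms in 100-nanosecond units. */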
+
+ WAVEFORMATEXTENSIBLE OutputType;
+ OutputType.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ switch(mDevice->FmtChans)
+ {
+ case DevFmtMono:
+ OutputType.Format.nChannels = 1;
+ OutputType.dwChannelMask = MONO;
+ break;
+ case DevFmtStereo:
+ OutputType.Format.nChannels = 2;
+ OutputType.dwChannelMask = STEREO;
+ break;
+ case DevFmtQuad:
+ OutputType.Format.nChannels = 4;
+ OutputType.dwChannelMask = QUAD;
+ break;
+ case DevFmtX51:
+ OutputType.Format.nChannels = 6;
+ OutputType.dwChannelMask = X5DOT1;
+ break;
+ case DevFmtX51Rear:
+ OutputType.Format.nChannels = 6;
+ OutputType.dwChannelMask = X5DOT1REAR;
+ break;
+ case DevFmtX61:
+ OutputType.Format.nChannels = 7;
+ OutputType.dwChannelMask = X6DOT1;
+ break;
+ case DevFmtX71:
+ OutputType.Format.nChannels = 8;
+ OutputType.dwChannelMask = X7DOT1;
+ break;
+
+ case DevFmtAmbi3D:
+ return E_FAIL;
+ }
+ switch(mDevice->FmtType)
+ {
+ /* NOTE: Signedness doesn't matter; the converter will handle it. */
+ case DevFmtByte:
+ case DevFmtUByte:
+ OutputType.Format.wBitsPerSample = 8;
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ break;
+ case DevFmtShort:
+ case DevFmtUShort:
+ OutputType.Format.wBitsPerSample = 16;
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ break;
+ case DevFmtInt:
+ case DevFmtUInt:
+ OutputType.Format.wBitsPerSample = 32;
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+ break;
+ case DevFmtFloat:
+ OutputType.Format.wBitsPerSample = 32;
+ OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ break;
+ }
+ OutputType.Samples.wValidBitsPerSample = OutputType.Format.wBitsPerSample;
+ OutputType.Format.nSamplesPerSec = mDevice->Frequency;
+
+ OutputType.Format.nBlockAlign = OutputType.Format.nChannels *
+ OutputType.Format.wBitsPerSample / 8;
+ OutputType.Format.nAvgBytesPerSec = OutputType.Format.nSamplesPerSec *
+ OutputType.Format.nBlockAlign;
+ OutputType.Format.cbSize = sizeof(OutputType) - sizeof(OutputType.Format);
+
+ TraceFormat("Requesting capture format", &OutputType.Format);
+ WAVEFORMATEX *wfx;
+ hr = mClient->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, &OutputType.Format, &wfx);
+ if(FAILED(hr))
+ {
+ ERR("Failed to check format support: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ mSampleConv = nullptr;
+ mChannelConv = nullptr;
+
+ if(wfx != nullptr)
+ {
+ TraceFormat("Got capture format", wfx);
+ if(!(wfx->nChannels == OutputType.Format.nChannels ||
+ (wfx->nChannels == 1 && OutputType.Format.nChannels == 2) ||
+ (wfx->nChannels == 2 && OutputType.Format.nChannels == 1)))
+ {
+ ERR("Failed to get matching format, wanted: %s %s %uhz, got: %d channel%s %d-bit %luhz\n",
+ DevFmtChannelsString(mDevice->FmtChans), DevFmtTypeString(mDevice->FmtType),
+ mDevice->Frequency, wfx->nChannels, (wfx->nChannels==1)?"":"s", wfx->wBitsPerSample,
+ wfx->nSamplesPerSec);
+ CoTaskMemFree(wfx);
+ return E_FAIL;
+ }
+
+ if(!MakeExtensible(&OutputType, wfx))
+ {
+ CoTaskMemFree(wfx);
+ return E_FAIL;
+ }
+ CoTaskMemFree(wfx);
+ wfx = nullptr;
+ }
+
+ DevFmtType srcType;
+ if(IsEqualGUID(OutputType.SubFormat, KSDATAFORMAT_SUBTYPE_PCM))
+ {
+ if(OutputType.Format.wBitsPerSample == 8)
+ srcType = DevFmtUByte;
+ else if(OutputType.Format.wBitsPerSample == 16)
+ srcType = DevFmtShort;
+ else if(OutputType.Format.wBitsPerSample == 32)
+ srcType = DevFmtInt;
+ else
+ {
+ ERR("Unhandled integer bit depth: %d\n", OutputType.Format.wBitsPerSample);
+ return E_FAIL;
+ }
+ }
+ else if(IsEqualGUID(OutputType.SubFormat, KSDATAFORMAT_SUBTYPE_IEEE_FLOAT))
+ {
+ if(OutputType.Format.wBitsPerSample == 32)
+ srcType = DevFmtFloat;
+ else
+ {
+ ERR("Unhandled float bit depth: %d\n", OutputType.Format.wBitsPerSample);
+ return E_FAIL;
+ }
+ }
+ else
+ {
+ ERR("Unhandled format sub-type\n");
+ return E_FAIL;
+ }
+
+ if(mDevice->FmtChans == DevFmtMono && OutputType.Format.nChannels == 2)
+ {
+ mChannelConv = CreateChannelConverter(srcType, DevFmtStereo, mDevice->FmtChans);
+ if(!mChannelConv)
+ {
+ ERR("Failed to create %s stereo-to-mono converter\n", DevFmtTypeString(srcType));
+ return E_FAIL;
+ }
+ TRACE("Created %s stereo-to-mono converter\n", DevFmtTypeString(srcType));
+ /* The channel converter always outputs float, so change the input type
+ * for the resampler/type-converter.
+ */
+ srcType = DevFmtFloat;
+ }
+ else if(mDevice->FmtChans == DevFmtStereo && OutputType.Format.nChannels == 1)
+ {
+ mChannelConv = CreateChannelConverter(srcType, DevFmtMono, mDevice->FmtChans);
+ if(!mChannelConv)
+ {
+ ERR("Failed to create %s mono-to-stereo converter\n", DevFmtTypeString(srcType));
+ return E_FAIL;
+ }
+ TRACE("Created %s mono-to-stereo converter\n", DevFmtTypeString(srcType));
+ srcType = DevFmtFloat;
+ }
+
+ if(mDevice->Frequency != OutputType.Format.nSamplesPerSec || mDevice->FmtType != srcType)
+ {
+ mSampleConv = CreateSampleConverter(srcType, mDevice->FmtType, mDevice->channelsFromFmt(),
+ OutputType.Format.nSamplesPerSec, mDevice->Frequency, BSinc24Resampler);
+ if(!mSampleConv)
+ {
+ ERR("Failed to create converter for %s format, dst: %s %uhz, src: %s %luhz\n",
+ DevFmtChannelsString(mDevice->FmtChans), DevFmtTypeString(mDevice->FmtType),
+ mDevice->Frequency, DevFmtTypeString(srcType), OutputType.Format.nSamplesPerSec);
+ return E_FAIL;
+ }
+ TRACE("Created converter for %s format, dst: %s %uhz, src: %s %luhz\n",
+ DevFmtChannelsString(mDevice->FmtChans), DevFmtTypeString(mDevice->FmtType),
+ mDevice->Frequency, DevFmtTypeString(srcType), OutputType.Format.nSamplesPerSec);
+ }
+
+ hr = mClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, buf_time,
+ 0, &OutputType.Format, nullptr);
+ if(FAILED(hr))
+ {
+ ERR("Failed to initialize audio client: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ UINT32 buffer_len;
+ REFERENCE_TIME min_per;
+ hr = mClient->GetDevicePeriod(&min_per, nullptr);
+ if(SUCCEEDED(hr))
+ hr = mClient->GetBufferSize(&buffer_len);
+ if(FAILED(hr))
+ {
+ ERR("Failed to get buffer size: 0x%08lx\n", hr);
+ return hr;
+ }
+ mDevice->UpdateSize = static_cast<ALuint>(ScaleCeil(min_per, mDevice->Frequency,
+ REFTIME_PER_SEC));
+ mDevice->BufferSize = buffer_len;
+
+ buffer_len = maxu(mDevice->BufferSize, buffer_len);
+ mRing = CreateRingBuffer(buffer_len, mDevice->frameSizeFromFmt(), false);
+ if(!mRing)
+ {
+ ERR("Failed to allocate capture ring buffer\n");
+ return E_OUTOFMEMORY;
+ }
+
+ hr = mClient->SetEventHandle(mNotifyEvent);
+ if(FAILED(hr))
+ {
+ ERR("Failed to set event handle: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ return hr;
+}
+
+
+ALCboolean WasapiCapture::start()
+{
+ HRESULT hr{pushMessage(MsgType::StartDevice).get()};
+ return SUCCEEDED(hr) ? ALC_TRUE : ALC_FALSE;
+}
+
+HRESULT WasapiCapture::startProxy()
+{
+ ResetEvent(mNotifyEvent);
+
+ HRESULT hr{mClient->Start()};
+ if(FAILED(hr))
+ {
+ ERR("Failed to start audio client: 0x%08lx\n", hr);
+ return hr;
+ }
+
+ void *ptr;
+ hr = mClient->GetService(IID_IAudioCaptureClient, &ptr);
+ if(SUCCEEDED(hr))
+ {
+ mCapture = static_cast<IAudioCaptureClient*>(ptr);
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&WasapiCapture::recordProc), this};
+ }
+ catch(...) {
+ mCapture->Release();
+ mCapture = nullptr;
+ ERR("Failed to start thread\n");
+ hr = E_FAIL;
+ }
+ }
+
+ if(FAILED(hr))
+ {
+ mClient->Stop();
+ mClient->Reset();
+ }
+
+ return hr;
+}
+
+
+void WasapiCapture::stop()
+{ pushMessage(MsgType::StopDevice).wait(); }
+
+void WasapiCapture::stopProxy()
+{
+ if(!mCapture || !mThread.joinable())
+ return;
+
+ mKillNow.store(true, std::memory_order_release);
+ mThread.join();
+
+ mCapture->Release();
+ mCapture = nullptr;
+ mClient->Stop();
+ mClient->Reset();
+}
+
+
+ALCuint WasapiCapture::availableSamples()
+{ return static_cast<ALCuint>(mRing->readSpace()); }
+
+ALCenum WasapiCapture::captureSamples(void *buffer, ALCuint samples)
+{
+ mRing->read(buffer, samples);
+ return ALC_NO_ERROR;
+}
+
+} // namespace
+
+
+bool WasapiBackendFactory::init()
+{
+ static HRESULT InitResult{E_FAIL};
+
+ if(FAILED(InitResult)) try
+ {
+ std::promise<HRESULT> promise;
+ auto future = promise.get_future();
+
+ std::thread{&WasapiProxy::messageHandler, &promise}.detach();
+ InitResult = future.get();
+ }
+ catch(...) {
+ }
+
+ return SUCCEEDED(InitResult);
+}
+
+bool WasapiBackendFactory::querySupport(BackendType type)
+{ return type == BackendType::Playback || type == BackendType::Capture; }
+
+void WasapiBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ auto add_device = [outnames](const DevMap &entry) -> void
+ {
+ /* +1 to also append the null char, ensuring a null-separated and
+ * double-null terminated list.
+ */
+ outnames->append(entry.name.c_str(), entry.name.length()+1);
+ };
+ HRESULT hr{};
+ switch(type)
+ {
+ case DevProbe::Playback:
+ hr = WasapiProxy::pushMessageStatic(MsgType::EnumeratePlayback).get();
+ if(SUCCEEDED(hr))
+ std::for_each(PlaybackDevices.cbegin(), PlaybackDevices.cend(), add_device);
+ break;
+
+ case DevProbe::Capture:
+ hr = WasapiProxy::pushMessageStatic(MsgType::EnumerateCapture).get();
+ if(SUCCEEDED(hr))
+ std::for_each(CaptureDevices.cbegin(), CaptureDevices.cend(), add_device);
+ break;
+ }
+}
+
+BackendPtr WasapiBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new WasapiPlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new WasapiCapture{device}};
+ return nullptr;
+}
+
+BackendFactory &WasapiBackendFactory::getFactory()
+{
+ static WasapiBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/wasapi.h b/alc/backends/wasapi.h
new file mode 100644
index 00000000..067dd259
--- /dev/null
+++ b/alc/backends/wasapi.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_WASAPI_H
+#define BACKENDS_WASAPI_H
+
+#include "backends/base.h"
+
+struct WasapiBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_WASAPI_H */
diff --git a/alc/backends/wave.cpp b/alc/backends/wave.cpp
new file mode 100644
index 00000000..67ed7e79
--- /dev/null
+++ b/alc/backends/wave.cpp
@@ -0,0 +1,402 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/wave.h"
+
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <chrono>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <exception>
+#include <functional>
+#include <thread>
+
+#include "AL/al.h"
+
+#include "alcmain.h"
+#include "alconfig.h"
+#include "almalloc.h"
+#include "alnumeric.h"
+#include "alu.h"
+#include "compat.h"
+#include "logging.h"
+#include "threads.h"
+#include "vector.h"
+
+
+namespace {
+
+using std::chrono::seconds;
+using std::chrono::milliseconds;
+using std::chrono::nanoseconds;
+
+constexpr ALCchar waveDevice[] = "Wave File Writer";
+
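+/* WAVE_FORMAT_EXTENSIBLE sub-format GUIDs, stored as the raw little-endian
+ * byte layout written to the file: PCM, IEEE float, and their Ambisonic
+ * B-Format counterparts.
+ */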
+constexpr ALubyte SUBTYPE_PCM[]{
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa,
+ 0x00, 0x38, 0x9b, 0x71
+};
+constexpr ALubyte SUBTYPE_FLOAT[]{
+ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa,
+ 0x00, 0x38, 0x9b, 0x71
+};
+
+constexpr ALubyte SUBTYPE_BFORMAT_PCM[]{
+ 0x01, 0x00, 0x00, 0x00, 0x21, 0x07, 0xd3, 0x11, 0x86, 0x44, 0xc8, 0xc1,
+ 0xca, 0x00, 0x00, 0x00
+};
+
+constexpr ALubyte SUBTYPE_BFORMAT_FLOAT[]{
+ 0x03, 0x00, 0x00, 0x00, 0x21, 0x07, 0xd3, 0x11, 0x86, 0x44, 0xc8, 0xc1,
+ 0xca, 0x00, 0x00, 0x00
+};
+
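+/* These helpers always write little-endian, as the RIFF/WAVE format
+ * requires, regardless of host byte order.
+ */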
+void fwrite16le(ALushort val, FILE *f)
+{
+ ALubyte data[2]{ static_cast<ALubyte>(val&0xff), static_cast<ALubyte>((val>>8)&0xff) };
+ fwrite(data, 1, 2, f);
+}
+
+void fwrite32le(ALuint val, FILE *f)
+{
+ ALubyte data[4]{ static_cast<ALubyte>(val&0xff), static_cast<ALubyte>((val>>8)&0xff),
+ static_cast<ALubyte>((val>>16)&0xff), static_cast<ALubyte>((val>>24)&0xff) };
+ fwrite(data, 1, 4, f);
+}
+
+
+struct WaveBackend final : public BackendBase {
+ WaveBackend(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~WaveBackend() override;
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ FILE *mFile{nullptr};
+ long mDataStart{-1};
+
+ al::vector<ALbyte> mBuffer;
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(WaveBackend)
+};
+
+WaveBackend::~WaveBackend()
+{
+ if(mFile)
+ fclose(mFile);
+ mFile = nullptr;
+}
+
+int WaveBackend::mixerProc()
+{
+ const milliseconds restTime{mDevice->UpdateSize*1000/mDevice->Frequency / 2};
+
+ althrd_setname(MIXER_THREAD_NAME);
+
+ const ALsizei frameSize{mDevice->frameSizeFromFmt()};
+
+ int64_t done{0};
+ auto start = std::chrono::steady_clock::now();
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ auto now = std::chrono::steady_clock::now();
+
+ /* This converts from nanoseconds to nanosamples, then to samples. */
+ int64_t avail{std::chrono::duration_cast<seconds>((now-start) *
+ mDevice->Frequency).count()};
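+ /* e.g. 1ms elapsed at 48kHz: 1,000,000ns * 48000 = 48e9 nano-samples,
+ * which the cast to seconds truncates to 48 samples.
+ */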
+ if(avail-done < mDevice->UpdateSize)
+ {
+ std::this_thread::sleep_for(restTime);
+ continue;
+ }
+ while(avail-done >= mDevice->UpdateSize)
+ {
+ lock();
+ aluMixData(mDevice, mBuffer.data(), mDevice->UpdateSize);
+ unlock();
+ done += mDevice->UpdateSize;
+
+ if(!IS_LITTLE_ENDIAN)
+ {
+ const ALsizei bytesize{mDevice->bytesFromFmt()};
+ ALsizei i;
+
+ if(bytesize == 2)
+ {
+ ALushort *samples = reinterpret_cast<ALushort*>(mBuffer.data());
+ const auto len = static_cast<ALsizei>(mBuffer.size() / 2);
+ for(i = 0;i < len;i++)
+ {
+ ALushort samp = samples[i];
+ samples[i] = (samp>>8) | (samp<<8);
+ }
+ }
+ else if(bytesize == 4)
+ {
+ ALuint *samples = reinterpret_cast<ALuint*>(mBuffer.data());
+ const auto len = static_cast<ALsizei>(mBuffer.size() / 4);
+ for(i = 0;i < len;i++)
+ {
+ ALuint samp = samples[i];
+ samples[i] = (samp>>24) | ((samp>>8)&0x0000ff00) |
+ ((samp<<8)&0x00ff0000) | (samp<<24);
+ }
+ }
+ }
+
+ size_t fs{fwrite(mBuffer.data(), frameSize, mDevice->UpdateSize, mFile)};
+ (void)fs;
+ if(ferror(mFile))
+ {
+ ERR("Error writing to file\n");
+ aluHandleDisconnect(mDevice, "Failed to write playback samples");
+ break;
+ }
+ }
+
+ /* For every completed second, increment the start time and reduce the
+ * samples done. This prevents the difference between the start time
+ * and current time from growing too large, while maintaining the
+ * correct number of samples to render.
+ */
+ if(done >= mDevice->Frequency)
+ {
+ seconds s{done/mDevice->Frequency};
+ start += s;
+ done -= mDevice->Frequency*s.count();
+ }
+ }
+
+ return 0;
+}
+
+ALCenum WaveBackend::open(const ALCchar *name)
+{
+ const char *fname{GetConfigValue(nullptr, "wave", "file", "")};
+ if(!fname[0]) return ALC_INVALID_VALUE;
+
+ if(!name)
+ name = waveDevice;
+ else if(strcmp(name, waveDevice) != 0)
+ return ALC_INVALID_VALUE;
+
+#ifdef _WIN32
+ {
+ std::wstring wname = utf8_to_wstr(fname);
+ mFile = _wfopen(wname.c_str(), L"wb");
+ }
+#else
+ mFile = fopen(fname, "wb");
+#endif
+ if(!mFile)
+ {
+ ERR("Could not open file '%s': %s\n", fname, strerror(errno));
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = name;
+
+ return ALC_NO_ERROR;
+}
+
+ALCboolean WaveBackend::reset()
+{
+ ALuint channels=0, bytes=0, chanmask=0;
+ int isbformat = 0;
+ size_t val;
+
+ fseek(mFile, 0, SEEK_SET);
+ clearerr(mFile);
+
+ if(GetConfigValueBool(nullptr, "wave", "bformat", 0))
+ {
+ mDevice->FmtChans = DevFmtAmbi3D;
+ mDevice->mAmbiOrder = 1;
+ }
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtByte:
+ mDevice->FmtType = DevFmtUByte;
+ break;
+ case DevFmtUShort:
+ mDevice->FmtType = DevFmtShort;
+ break;
+ case DevFmtUInt:
+ mDevice->FmtType = DevFmtInt;
+ break;
+ case DevFmtUByte:
+ case DevFmtShort:
+ case DevFmtInt:
+ case DevFmtFloat:
+ break;
+ }
+ switch(mDevice->FmtChans)
+ {
+ case DevFmtMono: chanmask = 0x04; break;
+ case DevFmtStereo: chanmask = 0x01 | 0x02; break;
+ case DevFmtQuad: chanmask = 0x01 | 0x02 | 0x10 | 0x20; break;
+ case DevFmtX51: chanmask = 0x01 | 0x02 | 0x04 | 0x08 | 0x200 | 0x400; break;
+ case DevFmtX51Rear: chanmask = 0x01 | 0x02 | 0x04 | 0x08 | 0x010 | 0x020; break;
+ case DevFmtX61: chanmask = 0x01 | 0x02 | 0x04 | 0x08 | 0x100 | 0x200 | 0x400; break;
+ case DevFmtX71: chanmask = 0x01 | 0x02 | 0x04 | 0x08 | 0x010 | 0x020 | 0x200 | 0x400; break;
+ case DevFmtAmbi3D:
+ /* .amb output requires FuMa */
+ mDevice->mAmbiOrder = mini(mDevice->mAmbiOrder, 3);
+ mDevice->mAmbiLayout = AmbiLayout::FuMa;
+ mDevice->mAmbiScale = AmbiNorm::FuMa;
+ isbformat = 1;
+ chanmask = 0;
+ break;
+ }
+ bytes = mDevice->bytesFromFmt();
+ channels = mDevice->channelsFromFmt();
+
+ rewind(mFile);
+
+ fputs("RIFF", mFile);
+ fwrite32le(0xFFFFFFFF, mFile); // 'RIFF' header len; filled in at close
+
+ fputs("WAVE", mFile);
+
+ fputs("fmt ", mFile);
+ fwrite32le(40, mFile); // 'fmt ' header len; 40 bytes for EXTENSIBLE
+
+ // 16-bit val, format type id (extensible: 0xFFFE)
+ fwrite16le(0xFFFE, mFile);
+ // 16-bit val, channel count
+ fwrite16le(channels, mFile);
+ // 32-bit val, frequency
+ fwrite32le(mDevice->Frequency, mFile);
+ // 32-bit val, bytes per second
+ fwrite32le(mDevice->Frequency * channels * bytes, mFile);
+ // 16-bit val, frame size
+ fwrite16le(channels * bytes, mFile);
+ // 16-bit val, bits per sample
+ fwrite16le(bytes * 8, mFile);
+ // 16-bit val, extra byte count
+ fwrite16le(22, mFile);
+ // 16-bit val, valid bits per sample
+ fwrite16le(bytes * 8, mFile);
+ // 32-bit val, channel mask
+ fwrite32le(chanmask, mFile);
+ // 16 byte GUID, sub-type format
+ val = fwrite((mDevice->FmtType == DevFmtFloat) ?
+ (isbformat ? SUBTYPE_BFORMAT_FLOAT : SUBTYPE_FLOAT) :
+ (isbformat ? SUBTYPE_BFORMAT_PCM : SUBTYPE_PCM), 1, 16, mFile);
+ (void)val;
+
+ fputs("data", mFile);
+ fwrite32le(0xFFFFFFFF, mFile); // 'data' header len; filled in at close
+
+ if(ferror(mFile))
+ {
+ ERR("Error writing header: %s\n", strerror(errno));
+ return ALC_FALSE;
+ }
+ mDataStart = ftell(mFile);
+
+ SetDefaultWFXChannelOrder(mDevice);
+
+ const ALuint bufsize{mDevice->frameSizeFromFmt() * mDevice->UpdateSize};
+ mBuffer.resize(bufsize);
+
+ return ALC_TRUE;
+}
+
+ALCboolean WaveBackend::start()
+{
+ try {
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&WaveBackend::mixerProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Failed to start mixing thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+void WaveBackend::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+
+ long size{ftell(mFile)};
+ if(size > 0)
+ {
+ long dataLen{size - mDataStart};
+ if(fseek(mFile, mDataStart-4, SEEK_SET) == 0)
+ fwrite32le(dataLen, mFile); // 'data' header len
+ if(fseek(mFile, 4, SEEK_SET) == 0)
+ fwrite32le(size-8, mFile); // 'RIFF' header len
+ }
+}
+
+} // namespace
+
+
+bool WaveBackendFactory::init()
+{ return true; }
+
+bool WaveBackendFactory::querySupport(BackendType type)
+{ return type == BackendType::Playback; }
+
+void WaveBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ switch(type)
+ {
+ case DevProbe::Playback:
+ /* Includes null char. */
+ outnames->append(waveDevice, sizeof(waveDevice));
+ break;
+ case DevProbe::Capture:
+ break;
+ }
+}
+
+BackendPtr WaveBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new WaveBackend{device}};
+ return nullptr;
+}
+
+BackendFactory &WaveBackendFactory::getFactory()
+{
+ static WaveBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/wave.h b/alc/backends/wave.h
new file mode 100644
index 00000000..b9b62d7f
--- /dev/null
+++ b/alc/backends/wave.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_WAVE_H
+#define BACKENDS_WAVE_H
+
+#include "backends/base.h"
+
+struct WaveBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_WAVE_H */
diff --git a/alc/backends/winmm.cpp b/alc/backends/winmm.cpp
new file mode 100644
index 00000000..cd32e95b
--- /dev/null
+++ b/alc/backends/winmm.cpp
@@ -0,0 +1,640 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "backends/winmm.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <memory.h>
+
+#include <windows.h>
+#include <mmsystem.h>
+
+#include <array>
+#include <atomic>
+#include <thread>
+#include <vector>
+#include <string>
+#include <algorithm>
+#include <functional>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "ringbuffer.h"
+#include "threads.h"
+#include "compat.h"
+
+#ifndef WAVE_FORMAT_IEEE_FLOAT
+#define WAVE_FORMAT_IEEE_FLOAT 0x0003
+#endif
+
+namespace {
+
+#define DEVNAME_HEAD "OpenAL Soft on "
+
+
+al::vector<std::string> PlaybackDevices;
+al::vector<std::string> CaptureDevices;
+
+bool checkName(const al::vector<std::string> &list, const std::string &name)
+{ return std::find(list.cbegin(), list.cend(), name) != list.cend(); }
+
+void ProbePlaybackDevices(void)
+{
+ PlaybackDevices.clear();
+
+ ALuint numdevs{waveOutGetNumDevs()};
+ PlaybackDevices.reserve(numdevs);
+ for(ALuint i{0};i < numdevs;i++)
+ {
+ std::string dname;
+
+ WAVEOUTCAPSW WaveCaps{};
+ if(waveOutGetDevCapsW(i, &WaveCaps, sizeof(WaveCaps)) == MMSYSERR_NOERROR)
+ {
+ const std::string basename{DEVNAME_HEAD + wstr_to_utf8(WaveCaps.szPname)};
+
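+            /* Disambiguate duplicate names with a numeric suffix, e.g. a
+             * second "Speakers" device becomes "OpenAL Soft on Speakers #2".
+             */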
+ int count{1};
+ std::string newname{basename};
+ while(checkName(PlaybackDevices, newname))
+ {
+ newname = basename;
+ newname += " #";
+ newname += std::to_string(++count);
+ }
+ dname = std::move(newname);
+
+ TRACE("Got device \"%s\", ID %u\n", dname.c_str(), i);
+ }
+ PlaybackDevices.emplace_back(std::move(dname));
+ }
+}
+
+void ProbeCaptureDevices(void)
+{
+ CaptureDevices.clear();
+
+ ALuint numdevs{waveInGetNumDevs()};
+ CaptureDevices.reserve(numdevs);
+ for(ALuint i{0};i < numdevs;i++)
+ {
+ std::string dname;
+
+ WAVEINCAPSW WaveCaps{};
+ if(waveInGetDevCapsW(i, &WaveCaps, sizeof(WaveCaps)) == MMSYSERR_NOERROR)
+ {
+ const std::string basename{DEVNAME_HEAD + wstr_to_utf8(WaveCaps.szPname)};
+
+ int count{1};
+ std::string newname{basename};
+ while(checkName(CaptureDevices, newname))
+ {
+ newname = basename;
+ newname += " #";
+ newname += std::to_string(++count);
+ }
+ dname = std::move(newname);
+
+ TRACE("Got device \"%s\", ID %u\n", dname.c_str(), i);
+ }
+ CaptureDevices.emplace_back(std::move(dname));
+ }
+}
+
+
+struct WinMMPlayback final : public BackendBase {
+ WinMMPlayback(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~WinMMPlayback() override;
+
+ static void CALLBACK waveOutProcC(HWAVEOUT device, UINT msg, DWORD_PTR instance, DWORD_PTR param1, DWORD_PTR param2);
+ void CALLBACK waveOutProc(HWAVEOUT device, UINT msg, DWORD_PTR param1, DWORD_PTR param2);
+
+ int mixerProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean reset() override;
+ ALCboolean start() override;
+ void stop() override;
+
+ std::atomic<ALuint> mWritable{0u};
+ al::semaphore mSem;
+ int mIdx{0};
+ std::array<WAVEHDR,4> mWaveBuffer{};
+
+ HWAVEOUT mOutHdl{nullptr};
+
+ WAVEFORMATEX mFormat{};
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(WinMMPlayback)
+};
+
+WinMMPlayback::~WinMMPlayback()
+{
+ if(mOutHdl)
+ waveOutClose(mOutHdl);
+ mOutHdl = nullptr;
+
+ al_free(mWaveBuffer[0].lpData);
+ std::fill(mWaveBuffer.begin(), mWaveBuffer.end(), WAVEHDR{});
+}
+
+
+void CALLBACK WinMMPlayback::waveOutProcC(HWAVEOUT device, UINT msg, DWORD_PTR instance, DWORD_PTR param1, DWORD_PTR param2)
+{ reinterpret_cast<WinMMPlayback*>(instance)->waveOutProc(device, msg, param1, param2); }
+
+/* WinMMPlayback::waveOutProc
+ *
+ * Posts a message to 'WinMMPlayback::mixerProc' every time a WaveOut buffer
+ * completes and is returned to the application (for more data).
+ */
+void CALLBACK WinMMPlayback::waveOutProc(HWAVEOUT, UINT msg, DWORD_PTR, DWORD_PTR)
+{
+ if(msg != WOM_DONE) return;
+ mWritable.fetch_add(1, std::memory_order_acq_rel);
+ mSem.post();
+}
+
+FORCE_ALIGN int WinMMPlayback::mixerProc()
+{
+ SetRTPriority();
+ althrd_setname(MIXER_THREAD_NAME);
+
+ lock();
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ ALsizei todo = mWritable.load(std::memory_order_acquire);
+ if(todo < 1)
+ {
+ unlock();
+ mSem.wait();
+ lock();
+ continue;
+ }
+
+ int widx{mIdx};
+ do {
+ WAVEHDR &waveHdr = mWaveBuffer[widx];
+ widx = (widx+1) % mWaveBuffer.size();
+
+ aluMixData(mDevice, waveHdr.lpData, mDevice->UpdateSize);
+ mWritable.fetch_sub(1, std::memory_order_acq_rel);
+ waveOutWrite(mOutHdl, &waveHdr, sizeof(WAVEHDR));
+ } while(--todo);
+ mIdx = widx;
+ }
+ unlock();
+
+ return 0;
+}
+
+
+ALCenum WinMMPlayback::open(const ALCchar *name)
+{
+ if(PlaybackDevices.empty())
+ ProbePlaybackDevices();
+
+    // Find the Device ID matching the device name, if one was specified
+ auto iter = name ?
+ std::find(PlaybackDevices.cbegin(), PlaybackDevices.cend(), name) :
+ PlaybackDevices.cbegin();
+ if(iter == PlaybackDevices.cend()) return ALC_INVALID_VALUE;
+ auto DeviceID = static_cast<UINT>(std::distance(PlaybackDevices.cbegin(), iter));
+
+retry_open:
+ mFormat = WAVEFORMATEX{};
+ if(mDevice->FmtType == DevFmtFloat)
+ {
+ mFormat.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
+ mFormat.wBitsPerSample = 32;
+ }
+ else
+ {
+ mFormat.wFormatTag = WAVE_FORMAT_PCM;
+ if(mDevice->FmtType == DevFmtUByte || mDevice->FmtType == DevFmtByte)
+ mFormat.wBitsPerSample = 8;
+ else
+ mFormat.wBitsPerSample = 16;
+ }
+ mFormat.nChannels = ((mDevice->FmtChans == DevFmtMono) ? 1 : 2);
+ mFormat.nBlockAlign = mFormat.wBitsPerSample * mFormat.nChannels / 8;
+ mFormat.nSamplesPerSec = mDevice->Frequency;
+ mFormat.nAvgBytesPerSec = mFormat.nSamplesPerSec * mFormat.nBlockAlign;
+ mFormat.cbSize = 0;
+
+ MMRESULT res{waveOutOpen(&mOutHdl, DeviceID, &mFormat, (DWORD_PTR)&WinMMPlayback::waveOutProcC,
+ reinterpret_cast<DWORD_PTR>(this), CALLBACK_FUNCTION)};
+ if(res != MMSYSERR_NOERROR)
+ {
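+        /* If the device rejects 32-bit float output, fall back to 16-bit PCM
+         * and retry once.
+         */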
+ if(mDevice->FmtType == DevFmtFloat)
+ {
+ mDevice->FmtType = DevFmtShort;
+ goto retry_open;
+ }
+ ERR("waveOutOpen failed: %u\n", res);
+ return ALC_INVALID_VALUE;
+ }
+
+ mDevice->DeviceName = PlaybackDevices[DeviceID];
+ return ALC_NO_ERROR;
+}
+
+ALCboolean WinMMPlayback::reset()
+{
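+    /* Rescale the buffer size to the rate the device was opened with, and
+     * split it into four updates, one per WAVEHDR in mWaveBuffer.
+     */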
+ mDevice->BufferSize = static_cast<ALuint>(uint64_t{mDevice->BufferSize} *
+ mFormat.nSamplesPerSec / mDevice->Frequency);
+ mDevice->BufferSize = (mDevice->BufferSize+3) & ~0x3;
+ mDevice->UpdateSize = mDevice->BufferSize / 4;
+ mDevice->Frequency = mFormat.nSamplesPerSec;
+
+ if(mFormat.wFormatTag == WAVE_FORMAT_IEEE_FLOAT)
+ {
+ if(mFormat.wBitsPerSample == 32)
+ mDevice->FmtType = DevFmtFloat;
+ else
+ {
+ ERR("Unhandled IEEE float sample depth: %d\n", mFormat.wBitsPerSample);
+ return ALC_FALSE;
+ }
+ }
+ else if(mFormat.wFormatTag == WAVE_FORMAT_PCM)
+ {
+ if(mFormat.wBitsPerSample == 16)
+ mDevice->FmtType = DevFmtShort;
+ else if(mFormat.wBitsPerSample == 8)
+ mDevice->FmtType = DevFmtUByte;
+ else
+ {
+ ERR("Unhandled PCM sample depth: %d\n", mFormat.wBitsPerSample);
+ return ALC_FALSE;
+ }
+ }
+ else
+ {
+ ERR("Unhandled format tag: 0x%04x\n", mFormat.wFormatTag);
+ return ALC_FALSE;
+ }
+
+ if(mFormat.nChannels == 2)
+ mDevice->FmtChans = DevFmtStereo;
+ else if(mFormat.nChannels == 1)
+ mDevice->FmtChans = DevFmtMono;
+ else
+ {
+ ERR("Unhandled channel count: %d\n", mFormat.nChannels);
+ return ALC_FALSE;
+ }
+ SetDefaultWFXChannelOrder(mDevice);
+
+ ALuint BufferSize{mDevice->UpdateSize * mDevice->frameSizeFromFmt()};
+
+ al_free(mWaveBuffer[0].lpData);
+ mWaveBuffer[0] = WAVEHDR{};
+ mWaveBuffer[0].lpData = static_cast<char*>(al_calloc(16, BufferSize * mWaveBuffer.size()));
+ mWaveBuffer[0].dwBufferLength = BufferSize;
+ for(size_t i{1};i < mWaveBuffer.size();i++)
+ {
+ mWaveBuffer[i] = WAVEHDR{};
+ mWaveBuffer[i].lpData = mWaveBuffer[i-1].lpData + mWaveBuffer[i-1].dwBufferLength;
+ mWaveBuffer[i].dwBufferLength = BufferSize;
+ }
+ mIdx = 0;
+
+ return ALC_TRUE;
+}
+
+ALCboolean WinMMPlayback::start()
+{
+ try {
+ std::for_each(mWaveBuffer.begin(), mWaveBuffer.end(),
+ [this](WAVEHDR &waveHdr) -> void
+ { waveOutPrepareHeader(mOutHdl, &waveHdr, static_cast<UINT>(sizeof(WAVEHDR))); }
+ );
+ mWritable.store(static_cast<ALuint>(mWaveBuffer.size()), std::memory_order_release);
+
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&WinMMPlayback::mixerProc), this};
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Failed to start mixing thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+void WinMMPlayback::stop()
+{
+ if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
+ return;
+ mThread.join();
+
+ while(mWritable.load(std::memory_order_acquire) < mWaveBuffer.size())
+ mSem.wait();
+ std::for_each(mWaveBuffer.begin(), mWaveBuffer.end(),
+ [this](WAVEHDR &waveHdr) -> void
+ { waveOutUnprepareHeader(mOutHdl, &waveHdr, sizeof(WAVEHDR)); }
+ );
+ mWritable.store(0, std::memory_order_release);
+}
+
+
+struct WinMMCapture final : public BackendBase {
+ WinMMCapture(ALCdevice *device) noexcept : BackendBase{device} { }
+ ~WinMMCapture() override;
+
+ static void CALLBACK waveInProcC(HWAVEIN device, UINT msg, DWORD_PTR instance, DWORD_PTR param1, DWORD_PTR param2);
+ void CALLBACK waveInProc(HWAVEIN device, UINT msg, DWORD_PTR param1, DWORD_PTR param2);
+
+ int captureProc();
+
+ ALCenum open(const ALCchar *name) override;
+ ALCboolean start() override;
+ void stop() override;
+ ALCenum captureSamples(void *buffer, ALCuint samples) override;
+ ALCuint availableSamples() override;
+
+ std::atomic<ALuint> mReadable{0u};
+ al::semaphore mSem;
+ int mIdx{0};
+ std::array<WAVEHDR,4> mWaveBuffer{};
+
+ HWAVEIN mInHdl{nullptr};
+
+ RingBufferPtr mRing{nullptr};
+
+ WAVEFORMATEX mFormat{};
+
+ std::atomic<bool> mKillNow{true};
+ std::thread mThread;
+
+ DEF_NEWDEL(WinMMCapture)
+};
+
+WinMMCapture::~WinMMCapture()
+{
+ // Close the Wave device
+ if(mInHdl)
+ waveInClose(mInHdl);
+ mInHdl = nullptr;
+
+ al_free(mWaveBuffer[0].lpData);
+ std::fill(mWaveBuffer.begin(), mWaveBuffer.end(), WAVEHDR{});
+}
+
+void CALLBACK WinMMCapture::waveInProcC(HWAVEIN device, UINT msg, DWORD_PTR instance, DWORD_PTR param1, DWORD_PTR param2)
+{ reinterpret_cast<WinMMCapture*>(instance)->waveInProc(device, msg, param1, param2); }
+
+/* WinMMCapture::waveInProc
+ *
+ * Posts a message to 'WinMMCapture::captureProc' every time a WaveIn buffer
+ * completes and is returned to the application (with more data).
+ */
+void CALLBACK WinMMCapture::waveInProc(HWAVEIN, UINT msg, DWORD_PTR, DWORD_PTR)
+{
+ if(msg != WIM_DATA) return;
+ mReadable.fetch_add(1, std::memory_order_acq_rel);
+ mSem.post();
+}
+
+int WinMMCapture::captureProc()
+{
+ althrd_setname(RECORD_THREAD_NAME);
+
+ lock();
+ while(!mKillNow.load(std::memory_order_acquire) &&
+ mDevice->Connected.load(std::memory_order_acquire))
+ {
+ ALuint todo{mReadable.load(std::memory_order_acquire)};
+ if(todo < 1)
+ {
+ unlock();
+ mSem.wait();
+ lock();
+ continue;
+ }
+
+ int widx{mIdx};
+ do {
+ WAVEHDR &waveHdr = mWaveBuffer[widx];
+ widx = (widx+1) % mWaveBuffer.size();
+
+ mRing->write(waveHdr.lpData, waveHdr.dwBytesRecorded / mFormat.nBlockAlign);
+ mReadable.fetch_sub(1, std::memory_order_acq_rel);
+ waveInAddBuffer(mInHdl, &waveHdr, sizeof(WAVEHDR));
+ } while(--todo);
+ mIdx = widx;
+ }
+ unlock();
+
+ return 0;
+}
+
+
+ALCenum WinMMCapture::open(const ALCchar *name)
+{
+ if(CaptureDevices.empty())
+ ProbeCaptureDevices();
+
+    // Find the Device ID matching the device name, if one was specified
+ auto iter = name ?
+ std::find(CaptureDevices.cbegin(), CaptureDevices.cend(), name) :
+ CaptureDevices.cbegin();
+ if(iter == CaptureDevices.cend()) return ALC_INVALID_VALUE;
+ auto DeviceID = static_cast<UINT>(std::distance(CaptureDevices.cbegin(), iter));
+
+ switch(mDevice->FmtChans)
+ {
+ case DevFmtMono:
+ case DevFmtStereo:
+ break;
+
+ case DevFmtQuad:
+ case DevFmtX51:
+ case DevFmtX51Rear:
+ case DevFmtX61:
+ case DevFmtX71:
+ case DevFmtAmbi3D:
+ return ALC_INVALID_ENUM;
+ }
+
+ switch(mDevice->FmtType)
+ {
+ case DevFmtUByte:
+ case DevFmtShort:
+ case DevFmtInt:
+ case DevFmtFloat:
+ break;
+
+ case DevFmtByte:
+ case DevFmtUShort:
+ case DevFmtUInt:
+ return ALC_INVALID_ENUM;
+ }
+
+ mFormat = WAVEFORMATEX{};
+ mFormat.wFormatTag = (mDevice->FmtType == DevFmtFloat) ?
+ WAVE_FORMAT_IEEE_FLOAT : WAVE_FORMAT_PCM;
+ mFormat.nChannels = mDevice->channelsFromFmt();
+ mFormat.wBitsPerSample = mDevice->bytesFromFmt() * 8;
+ mFormat.nBlockAlign = mFormat.wBitsPerSample * mFormat.nChannels / 8;
+ mFormat.nSamplesPerSec = mDevice->Frequency;
+ mFormat.nAvgBytesPerSec = mFormat.nSamplesPerSec * mFormat.nBlockAlign;
+ mFormat.cbSize = 0;
+
+ MMRESULT res{waveInOpen(&mInHdl, DeviceID, &mFormat, (DWORD_PTR)&WinMMCapture::waveInProcC,
+ reinterpret_cast<DWORD_PTR>(this), CALLBACK_FUNCTION)};
+ if(res != MMSYSERR_NOERROR)
+ {
+ ERR("waveInOpen failed: %u\n", res);
+ return ALC_INVALID_VALUE;
+ }
+
+    // Ensure each buffer is 50ms long
+ DWORD BufferSize{mFormat.nAvgBytesPerSec / 20u};
+ BufferSize -= (BufferSize % mFormat.nBlockAlign);
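+    // e.g. 44100Hz 16-bit stereo: 176400 bytes/sec / 20 = 8820 bytes per 50ms buffer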
+
+ // Allocate circular memory buffer for the captured audio
+    // Make sure the circular buffer can hold at least the four period buffers (~200ms)
+ ALuint CapturedDataSize{mDevice->BufferSize};
+ CapturedDataSize = static_cast<ALuint>(maxz(CapturedDataSize, BufferSize*mWaveBuffer.size()));
+
+ mRing = CreateRingBuffer(CapturedDataSize, mFormat.nBlockAlign, false);
+ if(!mRing) return ALC_INVALID_VALUE;
+
+ al_free(mWaveBuffer[0].lpData);
+ mWaveBuffer[0] = WAVEHDR{};
+ mWaveBuffer[0].lpData = static_cast<char*>(al_calloc(16, BufferSize*4));
+ mWaveBuffer[0].dwBufferLength = BufferSize;
+ for(size_t i{1};i < mWaveBuffer.size();++i)
+ {
+ mWaveBuffer[i] = WAVEHDR{};
+ mWaveBuffer[i].lpData = mWaveBuffer[i-1].lpData + mWaveBuffer[i-1].dwBufferLength;
+ mWaveBuffer[i].dwBufferLength = mWaveBuffer[i-1].dwBufferLength;
+ }
+
+ mDevice->DeviceName = CaptureDevices[DeviceID];
+ return ALC_NO_ERROR;
+}
+
+ALCboolean WinMMCapture::start()
+{
+ try {
+ for(size_t i{0};i < mWaveBuffer.size();++i)
+ {
+ waveInPrepareHeader(mInHdl, &mWaveBuffer[i], sizeof(WAVEHDR));
+ waveInAddBuffer(mInHdl, &mWaveBuffer[i], sizeof(WAVEHDR));
+ }
+
+ mKillNow.store(false, std::memory_order_release);
+ mThread = std::thread{std::mem_fn(&WinMMCapture::captureProc), this};
+
+ waveInStart(mInHdl);
+ return ALC_TRUE;
+ }
+ catch(std::exception& e) {
+ ERR("Failed to start mixing thread: %s\n", e.what());
+ }
+ catch(...) {
+ }
+ return ALC_FALSE;
+}
+
+void WinMMCapture::stop()
+{
+ waveInStop(mInHdl);
+
+ mKillNow.store(true, std::memory_order_release);
+ if(mThread.joinable())
+ {
+ mSem.post();
+ mThread.join();
+ }
+
+ waveInReset(mInHdl);
+ for(size_t i{0};i < mWaveBuffer.size();++i)
+ waveInUnprepareHeader(mInHdl, &mWaveBuffer[i], sizeof(WAVEHDR));
+
+ mReadable.store(0, std::memory_order_release);
+ mIdx = 0;
+}
+
+ALCenum WinMMCapture::captureSamples(void *buffer, ALCuint samples)
+{
+ mRing->read(buffer, samples);
+ return ALC_NO_ERROR;
+}
+
+ALCuint WinMMCapture::availableSamples()
+{ return (ALCuint)mRing->readSpace(); }
+
+} // namespace
+
+
+bool WinMMBackendFactory::init()
+{ return true; }
+
+bool WinMMBackendFactory::querySupport(BackendType type)
+{ return type == BackendType::Playback || type == BackendType::Capture; }
+
+void WinMMBackendFactory::probe(DevProbe type, std::string *outnames)
+{
+ auto add_device = [outnames](const std::string &dname) -> void
+ {
+ /* +1 to also append the null char (to ensure a null-separated list and
+ * double-null terminated list).
+ */
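+        /* e.g. appending "Device A" then "Device B" yields
+         * "Device A\0Device B\0" in outnames.
+         */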
+ if(!dname.empty())
+ outnames->append(dname.c_str(), dname.length()+1);
+ };
+ switch(type)
+ {
+ case DevProbe::Playback:
+ ProbePlaybackDevices();
+ std::for_each(PlaybackDevices.cbegin(), PlaybackDevices.cend(), add_device);
+ break;
+
+ case DevProbe::Capture:
+ ProbeCaptureDevices();
+ std::for_each(CaptureDevices.cbegin(), CaptureDevices.cend(), add_device);
+ break;
+ }
+}
+
+BackendPtr WinMMBackendFactory::createBackend(ALCdevice *device, BackendType type)
+{
+ if(type == BackendType::Playback)
+ return BackendPtr{new WinMMPlayback{device}};
+ if(type == BackendType::Capture)
+ return BackendPtr{new WinMMCapture{device}};
+ return nullptr;
+}
+
+BackendFactory &WinMMBackendFactory::getFactory()
+{
+ static WinMMBackendFactory factory{};
+ return factory;
+}
diff --git a/alc/backends/winmm.h b/alc/backends/winmm.h
new file mode 100644
index 00000000..e357ec19
--- /dev/null
+++ b/alc/backends/winmm.h
@@ -0,0 +1,19 @@
+#ifndef BACKENDS_WINMM_H
+#define BACKENDS_WINMM_H
+
+#include "backends/base.h"
+
+struct WinMMBackendFactory final : public BackendFactory {
+public:
+ bool init() override;
+
+ bool querySupport(BackendType type) override;
+
+ void probe(DevProbe type, std::string *outnames) override;
+
+ BackendPtr createBackend(ALCdevice *device, BackendType type) override;
+
+ static BackendFactory &getFactory();
+};
+
+#endif /* BACKENDS_WINMM_H */
diff --git a/alc/bformatdec.cpp b/alc/bformatdec.cpp
new file mode 100644
index 00000000..889bbf3a
--- /dev/null
+++ b/alc/bformatdec.cpp
@@ -0,0 +1,200 @@
+
+#include "config.h"
+
+#include "bformatdec.h"
+
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cmath>
+#include <iterator>
+#include <numeric>
+
+#include "almalloc.h"
+#include "alu.h"
+#include "ambdec.h"
+#include "filters/splitter.h"
+#include "opthelpers.h"
+
+
+namespace {
+
+constexpr ALfloat Ambi3DDecoderHFScale[MAX_AMBI_ORDER+1] = {
+ 1.00000000e+00f, 1.00000000e+00f
+};
+constexpr ALfloat Ambi3DDecoderHFScale2O[MAX_AMBI_ORDER+1] = {
+ 7.45355990e-01f, 1.00000000e+00f
+};
+constexpr ALfloat Ambi3DDecoderHFScale3O[MAX_AMBI_ORDER+1] = {
+ 5.89792205e-01f, 8.79693856e-01f
+};
+
+inline auto GetDecoderHFScales(ALsizei order) noexcept -> const ALfloat(&)[MAX_AMBI_ORDER+1]
+{
+ if(order >= 3) return Ambi3DDecoderHFScale3O;
+ if(order == 2) return Ambi3DDecoderHFScale2O;
+ return Ambi3DDecoderHFScale;
+}
+
+inline auto GetAmbiScales(AmbDecScale scaletype) noexcept -> const std::array<float,MAX_AMBI_CHANNELS>&
+{
+ if(scaletype == AmbDecScale::FuMa) return AmbiScale::FromFuMa;
+ if(scaletype == AmbDecScale::SN3D) return AmbiScale::FromSN3D;
+ return AmbiScale::FromN3D;
+}
+
+} // namespace
+
+
+BFormatDec::BFormatDec(const AmbDecConf *conf, const bool allow_2band, const ALuint inchans,
+ const ALuint srate, const ALsizei (&chanmap)[MAX_OUTPUT_CHANNELS])
+{
+ mDualBand = allow_2band && (conf->FreqBands == 2);
+ if(!mDualBand)
+ mSamples.resize(2);
+ else
+ {
+ ASSUME(inchans > 0);
+ mSamples.resize(inchans * 2);
+ mSamplesHF = mSamples.data();
+ mSamplesLF = mSamplesHF + inchans;
+ }
+ mNumChannels = inchans;
+
+ mEnabled = std::accumulate(std::begin(chanmap), std::begin(chanmap)+conf->Speakers.size(), 0u,
+ [](ALuint mask, const ALsizei &chan) noexcept -> ALuint
+ { return mask | (1 << chan); }
+ );
+
+ const ALfloat xover_norm{conf->XOverFreq / static_cast<float>(srate)};
+
+ const bool periphonic{(conf->ChanMask&AMBI_PERIPHONIC_MASK) != 0};
+ const std::array<float,MAX_AMBI_CHANNELS> &coeff_scale = GetAmbiScales(conf->CoeffScale);
+ const size_t coeff_count{periphonic ? MAX_AMBI_CHANNELS : MAX_AMBI2D_CHANNELS};
+
+ if(!mDualBand)
+ {
+ for(size_t i{0u};i < conf->Speakers.size();i++)
+ {
+ ALfloat (&mtx)[MAX_AMBI_CHANNELS] = mMatrix.Single[chanmap[i]];
+ for(size_t j{0},k{0};j < coeff_count;j++)
+ {
+ const size_t l{periphonic ? j : AmbiIndex::From2D[j]};
+ if(!(conf->ChanMask&(1u<<l))) continue;
+ mtx[j] = conf->HFMatrix[i][k] / coeff_scale[l] *
+ ((l>=9) ? conf->HFOrderGain[3] :
+ (l>=4) ? conf->HFOrderGain[2] :
+ (l>=1) ? conf->HFOrderGain[1] : conf->HFOrderGain[0]);
+ ++k;
+ }
+ }
+ }
+ else
+ {
+ mXOver[0].init(xover_norm);
+ std::fill(std::begin(mXOver)+1, std::end(mXOver), mXOver[0]);
+
+ const float ratio{std::pow(10.0f, conf->XOverRatio / 40.0f)};
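+        /* Split the crossover gain ratio between the bands: high-frequency
+         * coefficients are scaled by ratio, low-frequency ones by 1/ratio.
+         */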
+ for(size_t i{0u};i < conf->Speakers.size();i++)
+ {
+ ALfloat (&mtx)[sNumBands][MAX_AMBI_CHANNELS] = mMatrix.Dual[chanmap[i]];
+ for(size_t j{0},k{0};j < coeff_count;j++)
+ {
+ const size_t l{periphonic ? j : AmbiIndex::From2D[j]};
+ if(!(conf->ChanMask&(1u<<l))) continue;
+ mtx[sHFBand][j] = conf->HFMatrix[i][k] / coeff_scale[l] *
+ ((l>=9) ? conf->HFOrderGain[3] :
+ (l>=4) ? conf->HFOrderGain[2] :
+ (l>=1) ? conf->HFOrderGain[1] : conf->HFOrderGain[0]) * ratio;
+ mtx[sLFBand][j] = conf->LFMatrix[i][k] / coeff_scale[l] *
+ ((l>=9) ? conf->LFOrderGain[3] :
+ (l>=4) ? conf->LFOrderGain[2] :
+ (l>=1) ? conf->LFOrderGain[1] : conf->LFOrderGain[0]) / ratio;
+ ++k;
+ }
+ }
+ }
+}
+
+BFormatDec::BFormatDec(const ALuint inchans, const ALsizei chancount,
+ const ChannelDec (&chancoeffs)[MAX_OUTPUT_CHANNELS],
+ const ALsizei (&chanmap)[MAX_OUTPUT_CHANNELS])
+{
+ mSamples.resize(2);
+ mNumChannels = inchans;
+
+ ASSUME(chancount > 0);
+ mEnabled = std::accumulate(std::begin(chanmap), std::begin(chanmap)+chancount, 0u,
+ [](ALuint mask, const ALsizei &chan) noexcept -> ALuint
+ { return mask | (1 << chan); }
+ );
+
+ const ChannelDec *incoeffs{chancoeffs};
+ auto set_coeffs = [this,inchans,&incoeffs](const ALsizei chanidx) noexcept -> void
+ {
+ ASSUME(chanidx >= 0);
+ ALfloat (&mtx)[MAX_AMBI_CHANNELS] = mMatrix.Single[chanidx];
+ const ALfloat (&coeffs)[MAX_AMBI_CHANNELS] = *(incoeffs++);
+
+ ASSUME(inchans > 0);
+ std::copy_n(std::begin(coeffs), inchans, std::begin(mtx));
+ };
+ std::for_each(chanmap, chanmap+chancount, set_coeffs);
+}
+
+
+void BFormatDec::process(const al::span<FloatBufferLine> OutBuffer,
+ const FloatBufferLine *InSamples, const ALsizei SamplesToDo)
+{
+ if(mDualBand)
+ {
+ for(ALuint i{0};i < mNumChannels;i++)
+ mXOver[i].process(mSamplesHF[i].data(), mSamplesLF[i].data(), InSamples[i].data(),
+ SamplesToDo);
+
+ const al::span<const FloatBufferLine> hfsamples{mSamplesHF, mNumChannels};
+ const al::span<const FloatBufferLine> lfsamples{mSamplesLF, mNumChannels};
+ ALfloat (*mixmtx)[sNumBands][MAX_AMBI_CHANNELS]{mMatrix.Dual};
+ ALuint enabled{mEnabled};
+ for(FloatBufferLine &outbuf : OutBuffer)
+ {
+ if(LIKELY(enabled&1))
+ {
+ MixRowSamples(outbuf, (*mixmtx)[sHFBand], hfsamples, 0, SamplesToDo);
+ MixRowSamples(outbuf, (*mixmtx)[sLFBand], lfsamples, 0, SamplesToDo);
+ }
+ ++mixmtx;
+ enabled >>= 1;
+ }
+ }
+ else
+ {
+ const al::span<const FloatBufferLine> insamples{InSamples, mNumChannels};
+ ALfloat (*mixmtx)[MAX_AMBI_CHANNELS]{mMatrix.Single};
+ ALuint enabled{mEnabled};
+ for(FloatBufferLine &outbuf : OutBuffer)
+ {
+ if(LIKELY(enabled&1))
+ MixRowSamples(outbuf, *mixmtx, insamples, 0, SamplesToDo);
+ ++mixmtx;
+ enabled >>= 1;
+ }
+ }
+}
+
+
+std::array<ALfloat,MAX_AMBI_ORDER+1> BFormatDec::GetHFOrderScales(const ALsizei in_order, const ALsizei out_order) noexcept
+{
+ std::array<ALfloat,MAX_AMBI_ORDER+1> ret{};
+
+ assert(out_order >= in_order);
+ ASSUME(out_order >= in_order);
+
+ const ALfloat (&target)[MAX_AMBI_ORDER+1] = GetDecoderHFScales(out_order);
+ const ALfloat (&input)[MAX_AMBI_ORDER+1] = GetDecoderHFScales(in_order);
+
+ for(ALsizei i{0};i < in_order+1;++i)
+ ret[i] = input[i] / target[i];
+
+ return ret;
+}
diff --git a/alc/bformatdec.h b/alc/bformatdec.h
new file mode 100644
index 00000000..06974651
--- /dev/null
+++ b/alc/bformatdec.h
@@ -0,0 +1,62 @@
+#ifndef BFORMATDEC_H
+#define BFORMATDEC_H
+
+#include <array>
+#include <cstddef>
+
+#include "AL/al.h"
+
+#include "alcmain.h"
+#include "almalloc.h"
+#include "alspan.h"
+#include "ambidefs.h"
+#include "filters/splitter.h"
+#include "vector.h"
+
+struct AmbDecConf;
+
+
+using ChannelDec = ALfloat[MAX_AMBI_CHANNELS];
+
+class BFormatDec {
+ static constexpr size_t sHFBand{0};
+ static constexpr size_t sLFBand{1};
+ static constexpr size_t sNumBands{2};
+
+ ALuint mEnabled{0u}; /* Bitfield of enabled channels. */
+
+ union MatrixU {
+ ALfloat Dual[MAX_OUTPUT_CHANNELS][sNumBands][MAX_AMBI_CHANNELS];
+ ALfloat Single[MAX_OUTPUT_CHANNELS][MAX_AMBI_CHANNELS];
+ } mMatrix{};
+
+ /* NOTE: BandSplitter filters are unused with single-band decoding */
+ BandSplitter mXOver[MAX_AMBI_CHANNELS];
+
+ al::vector<FloatBufferLine, 16> mSamples;
+ /* These two alias into Samples */
+ FloatBufferLine *mSamplesHF{nullptr};
+ FloatBufferLine *mSamplesLF{nullptr};
+
+ ALuint mNumChannels{0u};
+ bool mDualBand{false};
+
+public:
+ BFormatDec(const AmbDecConf *conf, const bool allow_2band, const ALuint inchans,
+ const ALuint srate, const ALsizei (&chanmap)[MAX_OUTPUT_CHANNELS]);
+ BFormatDec(const ALuint inchans, const ALsizei chancount,
+ const ChannelDec (&chancoeffs)[MAX_OUTPUT_CHANNELS],
+ const ALsizei (&chanmap)[MAX_OUTPUT_CHANNELS]);
+
+ /* Decodes the ambisonic input to the given output channels. */
+ void process(const al::span<FloatBufferLine> OutBuffer, const FloatBufferLine *InSamples,
+ const ALsizei SamplesToDo);
+
+ /* Retrieves per-order HF scaling factors for "upsampling" ambisonic data. */
+ static std::array<ALfloat,MAX_AMBI_ORDER+1> GetHFOrderScales(const ALsizei in_order,
+ const ALsizei out_order) noexcept;
+
+ DEF_NEWDEL(BFormatDec)
+};
+
+#endif /* BFORMATDEC_H */
diff --git a/alc/bs2b.cpp b/alc/bs2b.cpp
new file mode 100644
index 00000000..2d1b96aa
--- /dev/null
+++ b/alc/bs2b.cpp
@@ -0,0 +1,188 @@
+/*-
+ * Copyright (c) 2005 Boris Mikhaylov
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "config.h"
+
+#include <cmath>
+#include <cstring>
+#include <algorithm>
+
+#include "bs2b.h"
+#include "math_defs.h"
+
+
+/* Set up all data. */
+static void init(struct bs2b *bs2b)
+{
+ float Fc_lo, Fc_hi;
+ float G_lo, G_hi;
+ float x, g;
+
+ switch(bs2b->level)
+ {
+ case BS2B_LOW_CLEVEL: /* Low crossfeed level */
+ Fc_lo = 360.0f;
+ Fc_hi = 501.0f;
+ G_lo = 0.398107170553497f;
+ G_hi = 0.205671765275719f;
+ break;
+
+ case BS2B_MIDDLE_CLEVEL: /* Middle crossfeed level */
+ Fc_lo = 500.0f;
+ Fc_hi = 711.0f;
+ G_lo = 0.459726988530872f;
+ G_hi = 0.228208484414988f;
+ break;
+
+        case BS2B_HIGH_CLEVEL: /* High crossfeed level (virtual speakers are closer together) */
+ Fc_lo = 700.0f;
+ Fc_hi = 1021.0f;
+ G_lo = 0.530884444230988f;
+ G_hi = 0.250105790667544f;
+ break;
+
+ case BS2B_LOW_ECLEVEL: /* Low easy crossfeed level */
+ Fc_lo = 360.0f;
+ Fc_hi = 494.0f;
+ G_lo = 0.316227766016838f;
+ G_hi = 0.168236228897329f;
+ break;
+
+ case BS2B_MIDDLE_ECLEVEL: /* Middle easy crossfeed level */
+ Fc_lo = 500.0f;
+ Fc_hi = 689.0f;
+ G_lo = 0.354813389233575f;
+ G_hi = 0.187169483835901f;
+ break;
+
+ default: /* High easy crossfeed level */
+ bs2b->level = BS2B_HIGH_ECLEVEL;
+
+ Fc_lo = 700.0f;
+ Fc_hi = 975.0f;
+ G_lo = 0.398107170553497f;
+ G_hi = 0.205671765275719f;
+ break;
+ } /* switch */
+
+ g = 1.0f / (1.0f - G_hi + G_lo);
+
+ /* $fc = $Fc / $s;
+ * $d = 1 / 2 / pi / $fc;
+ * $x = exp(-1 / $d);
+ */
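+    /* i.e. x = exp(-2*pi*Fc / srate), the feedback coefficient of a one-pole
+     * lowpass filter with cutoff Fc at sample rate srate.
+     */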
+ x = std::exp(-al::MathDefs<float>::Tau() * Fc_lo / bs2b->srate);
+ bs2b->b1_lo = x;
+ bs2b->a0_lo = G_lo * (1.0f - x) * g;
+
+ x = std::exp(-al::MathDefs<float>::Tau() * Fc_hi / bs2b->srate);
+ bs2b->b1_hi = x;
+ bs2b->a0_hi = (1.0f - G_hi * (1.0f - x)) * g;
+ bs2b->a1_hi = -x * g;
+} /* init */
+
+
+/* Exported functions.
+ * See descriptions in "bs2b.h"
+ */
+
+void bs2b_set_params(struct bs2b *bs2b, int level, int srate)
+{
+ if(srate <= 0) srate = 1;
+
+ bs2b->level = level;
+ bs2b->srate = srate;
+ init(bs2b);
+} /* bs2b_set_params */
+
+int bs2b_get_level(struct bs2b *bs2b)
+{
+ return bs2b->level;
+} /* bs2b_get_level */
+
+int bs2b_get_srate(struct bs2b *bs2b)
+{
+ return bs2b->srate;
+} /* bs2b_get_srate */
+
+void bs2b_clear(struct bs2b *bs2b)
+{
+ std::fill(std::begin(bs2b->last_sample), std::end(bs2b->last_sample), bs2b::t_last_sample{});
+} /* bs2b_clear */
+
+void bs2b_cross_feed(struct bs2b *bs2b, float *RESTRICT Left, float *RESTRICT Right, int SamplesToDo)
+{
+ float lsamples[128][2];
+ float rsamples[128][2];
+ int base;
+
+ for(base = 0;base < SamplesToDo;)
+ {
+ int todo = std::min(128, SamplesToDo-base);
+ int i;
+
+ /* Process left input */
+ lsamples[0][0] = bs2b->a0_lo*Left[0] +
+ bs2b->b1_lo*bs2b->last_sample[0].lo;
+ lsamples[0][1] = bs2b->a0_hi*Left[0] +
+ bs2b->a1_hi*bs2b->last_sample[0].asis +
+ bs2b->b1_hi*bs2b->last_sample[0].hi;
+ for(i = 1;i < todo;i++)
+ {
+ lsamples[i][0] = bs2b->a0_lo*Left[i] +
+ bs2b->b1_lo*lsamples[i-1][0];
+ lsamples[i][1] = bs2b->a0_hi*Left[i] +
+ bs2b->a1_hi*Left[i-1] +
+ bs2b->b1_hi*lsamples[i-1][1];
+ }
+ bs2b->last_sample[0].asis = Left[i-1];
+ bs2b->last_sample[0].lo = lsamples[i-1][0];
+ bs2b->last_sample[0].hi = lsamples[i-1][1];
+
+ /* Process right input */
+ rsamples[0][0] = bs2b->a0_lo*Right[0] +
+ bs2b->b1_lo*bs2b->last_sample[1].lo;
+ rsamples[0][1] = bs2b->a0_hi*Right[0] +
+ bs2b->a1_hi*bs2b->last_sample[1].asis +
+ bs2b->b1_hi*bs2b->last_sample[1].hi;
+ for(i = 1;i < todo;i++)
+ {
+ rsamples[i][0] = bs2b->a0_lo*Right[i] +
+ bs2b->b1_lo*rsamples[i-1][0];
+ rsamples[i][1] = bs2b->a0_hi*Right[i] +
+ bs2b->a1_hi*Right[i-1] +
+ bs2b->b1_hi*rsamples[i-1][1];
+ }
+ bs2b->last_sample[1].asis = Right[i-1];
+ bs2b->last_sample[1].lo = rsamples[i-1][0];
+ bs2b->last_sample[1].hi = rsamples[i-1][1];
+
+ /* Crossfeed */
+ for(i = 0;i < todo;i++)
+ *(Left++) = lsamples[i][1] + rsamples[i][0];
+ for(i = 0;i < todo;i++)
+ *(Right++) = rsamples[i][1] + lsamples[i][0];
+
+ base += todo;
+ }
+} /* bs2b_cross_feed */
diff --git a/alc/bs2b.h b/alc/bs2b.h
new file mode 100644
index 00000000..e235e765
--- /dev/null
+++ b/alc/bs2b.h
@@ -0,0 +1,90 @@
+/*-
+ * Copyright (c) 2005 Boris Mikhaylov
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef BS2B_H
+#define BS2B_H
+
+#include "almalloc.h"
+
+/* Number of crossfeed levels */
+#define BS2B_CLEVELS 3
+
+/* Normal crossfeed levels */
+#define BS2B_HIGH_CLEVEL 3
+#define BS2B_MIDDLE_CLEVEL 2
+#define BS2B_LOW_CLEVEL 1
+
+/* Easy crossfeed levels */
+#define BS2B_HIGH_ECLEVEL BS2B_HIGH_CLEVEL + BS2B_CLEVELS
+#define BS2B_MIDDLE_ECLEVEL BS2B_MIDDLE_CLEVEL + BS2B_CLEVELS
+#define BS2B_LOW_ECLEVEL BS2B_LOW_CLEVEL + BS2B_CLEVELS
+
+/* Default crossfeed levels */
+#define BS2B_DEFAULT_CLEVEL BS2B_HIGH_ECLEVEL
+/* Default sample rate (Hz) */
+#define BS2B_DEFAULT_SRATE 44100
+
+struct bs2b {
+ int level; /* Crossfeed level */
+ int srate; /* Sample rate (Hz) */
+
+ /* Lowpass IIR filter coefficients */
+ float a0_lo;
+ float b1_lo;
+
+ /* Highboost IIR filter coefficients */
+ float a0_hi;
+ float a1_hi;
+ float b1_hi;
+
+ /* Buffer of last filtered sample.
+ * [0] - first channel, [1] - second channel
+ */
+ struct t_last_sample {
+ float asis;
+ float lo;
+ float hi;
+ } last_sample[2];
+
+ DEF_NEWDEL(bs2b)
+};
+
+/* Clear buffers and set new coefficients with new crossfeed level and sample
+ * rate values.
+ * level - crossfeed level of *LEVEL values.
+ * srate - sample rate by Hz.
+ */
+void bs2b_set_params(bs2b *bs2b, int level, int srate);
+
+/* Return current crossfeed level value */
+int bs2b_get_level(bs2b *bs2b);
+
+/* Return current sample rate value */
+int bs2b_get_srate(bs2b *bs2b);
+
+/* Clear buffer */
+void bs2b_clear(bs2b *bs2b);
+
+void bs2b_cross_feed(bs2b *bs2b, float *RESTRICT Left, float *RESTRICT Right, int SamplesToDo);
+
+#endif /* BS2B_H */
diff --git a/alc/compat.h b/alc/compat.h
new file mode 100644
index 00000000..4ffc40bf
--- /dev/null
+++ b/alc/compat.h
@@ -0,0 +1,121 @@
+#ifndef AL_COMPAT_H
+#define AL_COMPAT_H
+
+#ifdef __cplusplus
+
+#ifdef _WIN32
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#include <array>
+#include <string>
+#include <fstream>
+
+inline std::string wstr_to_utf8(const WCHAR *wstr)
+{
+ std::string ret;
+
+ int len = WideCharToMultiByte(CP_UTF8, 0, wstr, -1, nullptr, 0, nullptr, nullptr);
+ if(len > 0)
+ {
+ ret.resize(len);
+ WideCharToMultiByte(CP_UTF8, 0, wstr, -1, &ret[0], len, nullptr, nullptr);
+ ret.pop_back();
+ }
+
+ return ret;
+}
+
+inline std::wstring utf8_to_wstr(const char *str)
+{
+ std::wstring ret;
+
+ int len = MultiByteToWideChar(CP_UTF8, 0, str, -1, NULL, 0);
+ if(len > 0)
+ {
+ ret.resize(len);
+ MultiByteToWideChar(CP_UTF8, 0, str, -1, &ret[0], len);
+ ret.pop_back();
+ }
+
+ return ret;
+}
+
+
+namespace al {
+
+// Windows' std::ifstream fails with non-ANSI paths since the standard only
+// specifies names using const char* (or std::string). MSVC has a non-standard
+// extension using const wchar_t* (or std::wstring) to handle Unicode paths,
+// but not all Windows compilers support it. So we have to make our own istream
+// that accepts UTF-8 paths and forwards to Unicode-aware I/O functions.
+class filebuf final : public std::streambuf {
+ std::array<char_type,4096> mBuffer;
+ HANDLE mFile{INVALID_HANDLE_VALUE};
+
+ int_type underflow() override;
+ pos_type seekoff(off_type offset, std::ios_base::seekdir whence, std::ios_base::openmode mode) override;
+ pos_type seekpos(pos_type pos, std::ios_base::openmode mode) override;
+
+public:
+ filebuf() = default;
+ ~filebuf() override;
+
+ bool open(const wchar_t *filename, std::ios_base::openmode mode);
+ bool open(const char *filename, std::ios_base::openmode mode);
+
+ bool is_open() const noexcept { return mFile != INVALID_HANDLE_VALUE; }
+};
+
+// Inherit from std::istream to use our custom streambuf
+class ifstream final : public std::istream {
+ filebuf mStreamBuf;
+
+public:
+ ifstream(const wchar_t *filename, std::ios_base::openmode mode = std::ios_base::in);
+ ifstream(const std::wstring &filename, std::ios_base::openmode mode = std::ios_base::in)
+ : ifstream(filename.c_str(), mode) { }
+ ifstream(const char *filename, std::ios_base::openmode mode = std::ios_base::in);
+ ifstream(const std::string &filename, std::ios_base::openmode mode = std::ios_base::in)
+ : ifstream(filename.c_str(), mode) { }
+ ~ifstream() override;
+
+ bool is_open() const noexcept { return mStreamBuf.is_open(); }
+};
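+// Usage sketch (hypothetical filename), used the same as any std::istream:
+//   al::ifstream f{"C:/Música/preset.ambdec"};
+//   if(f.is_open()) { /* read from f */ }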
+
+} // namespace al
+
+#define HAVE_DYNLOAD 1
+
+#else /* _WIN32 */
+
+#include <fstream>
+
+namespace al {
+
+using filebuf = std::filebuf;
+using ifstream = std::ifstream;
+
+} // namespace al
+
+#if defined(HAVE_DLFCN_H)
+#define HAVE_DYNLOAD 1
+#endif
+
+#endif /* _WIN32 */
+
+#include <string>
+
+struct PathNamePair { std::string path, fname; };
+const PathNamePair &GetProcBinary(void);
+
+#ifdef HAVE_DYNLOAD
+void *LoadLib(const char *name);
+void CloseLib(void *handle);
+void *GetSymbol(void *handle, const char *name);
+#endif
+
+#endif /* __cplusplus */
+
+#endif /* AL_COMPAT_H */
diff --git a/alc/converter.cpp b/alc/converter.cpp
new file mode 100644
index 00000000..0f8e8941
--- /dev/null
+++ b/alc/converter.cpp
@@ -0,0 +1,367 @@
+
+#include "config.h"
+
+#include "converter.h"
+
+#include <algorithm>
+
+#include "fpu_modes.h"
+#include "mixer/defs.h"
+
+
+namespace {
+
+/* Base template left undefined. Should be marked =delete, but Clang 3.8.1
+ * chokes on that given the inline specializations.
+ */
+template<DevFmtType T>
+inline ALfloat LoadSample(typename DevFmtTypeTraits<T>::Type val) noexcept;
+
+template<> inline ALfloat LoadSample<DevFmtByte>(DevFmtTypeTraits<DevFmtByte>::Type val) noexcept
+{ return val * (1.0f/128.0f); }
+template<> inline ALfloat LoadSample<DevFmtShort>(DevFmtTypeTraits<DevFmtShort>::Type val) noexcept
+{ return val * (1.0f/32768.0f); }
+template<> inline ALfloat LoadSample<DevFmtInt>(DevFmtTypeTraits<DevFmtInt>::Type val) noexcept
+{ return val * (1.0f/2147483648.0f); }
+template<> inline ALfloat LoadSample<DevFmtFloat>(DevFmtTypeTraits<DevFmtFloat>::Type val) noexcept
+{ return val; }
+
+template<> inline ALfloat LoadSample<DevFmtUByte>(DevFmtTypeTraits<DevFmtUByte>::Type val) noexcept
+{ return LoadSample<DevFmtByte>(val - 128); }
+template<> inline ALfloat LoadSample<DevFmtUShort>(DevFmtTypeTraits<DevFmtUShort>::Type val) noexcept
+{ return LoadSample<DevFmtShort>(val - 32768); }
+template<> inline ALfloat LoadSample<DevFmtUInt>(DevFmtTypeTraits<DevFmtUInt>::Type val) noexcept
+{ return LoadSample<DevFmtInt>(val - 2147483648u); }
+
+
+template<DevFmtType T>
+inline void LoadSampleArray(ALfloat *RESTRICT dst, const void *src, const size_t srcstep,
+ const ALsizei samples) noexcept
+{
+ using SampleType = typename DevFmtTypeTraits<T>::Type;
+
+ const SampleType *ssrc = static_cast<const SampleType*>(src);
+ for(ALsizei i{0};i < samples;i++)
+ dst[i] = LoadSample<T>(ssrc[i*srcstep]);
+}
+
+void LoadSamples(ALfloat *dst, const ALvoid *src, const size_t srcstep, const DevFmtType srctype,
+ const ALsizei samples) noexcept
+{
+#define HANDLE_FMT(T) \
+ case T: LoadSampleArray<T>(dst, src, srcstep, samples); break
+ switch(srctype)
+ {
+ HANDLE_FMT(DevFmtByte);
+ HANDLE_FMT(DevFmtUByte);
+ HANDLE_FMT(DevFmtShort);
+ HANDLE_FMT(DevFmtUShort);
+ HANDLE_FMT(DevFmtInt);
+ HANDLE_FMT(DevFmtUInt);
+ HANDLE_FMT(DevFmtFloat);
+ }
+#undef HANDLE_FMT
+}
+
+
+template<DevFmtType T>
+inline typename DevFmtTypeTraits<T>::Type StoreSample(ALfloat) noexcept;
+
+template<> inline ALfloat StoreSample<DevFmtFloat>(ALfloat val) noexcept
+{ return val; }
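+/* Note: INT32_MAX (2147483647) isn't representable as a float; the largest
+ * float below 2^31 is 2147483520, hence the asymmetric positive clamp below.
+ */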
+template<> inline ALint StoreSample<DevFmtInt>(ALfloat val) noexcept
+{ return fastf2i(clampf(val*2147483648.0f, -2147483648.0f, 2147483520.0f)); }
+template<> inline ALshort StoreSample<DevFmtShort>(ALfloat val) noexcept
+{ return fastf2i(clampf(val*32768.0f, -32768.0f, 32767.0f)); }
+template<> inline ALbyte StoreSample<DevFmtByte>(ALfloat val) noexcept
+{ return fastf2i(clampf(val*128.0f, -128.0f, 127.0f)); }
+
+/* Define unsigned output variations. */
+template<> inline ALuint StoreSample<DevFmtUInt>(ALfloat val) noexcept
+{ return StoreSample<DevFmtInt>(val) + 2147483648u; }
+template<> inline ALushort StoreSample<DevFmtUShort>(ALfloat val) noexcept
+{ return StoreSample<DevFmtShort>(val) + 32768; }
+template<> inline ALubyte StoreSample<DevFmtUByte>(ALfloat val) noexcept
+{ return StoreSample<DevFmtByte>(val) + 128; }
+
+template<DevFmtType T>
+inline void StoreSampleArray(void *dst, const ALfloat *RESTRICT src, const size_t dststep,
+ const ALsizei samples) noexcept
+{
+ using SampleType = typename DevFmtTypeTraits<T>::Type;
+
+ SampleType *sdst = static_cast<SampleType*>(dst);
+ for(ALsizei i{0};i < samples;i++)
+ sdst[i*dststep] = StoreSample<T>(src[i]);
+}
+
+
+void StoreSamples(ALvoid *dst, const ALfloat *src, const size_t dststep, const DevFmtType dsttype,
+ const ALsizei samples) noexcept
+{
+#define HANDLE_FMT(T) \
+ case T: StoreSampleArray<T>(dst, src, dststep, samples); break
+ switch(dsttype)
+ {
+ HANDLE_FMT(DevFmtByte);
+ HANDLE_FMT(DevFmtUByte);
+ HANDLE_FMT(DevFmtShort);
+ HANDLE_FMT(DevFmtUShort);
+ HANDLE_FMT(DevFmtInt);
+ HANDLE_FMT(DevFmtUInt);
+ HANDLE_FMT(DevFmtFloat);
+ }
+#undef HANDLE_FMT
+}
+
+
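+/* The 0.707106781187 factor below is 1/sqrt(2) (about -3dB), roughly
+ * preserving the overall level when duplicating mono to stereo or summing
+ * stereo down to mono.
+ */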
+template<DevFmtType T>
+void Mono2Stereo(ALfloat *RESTRICT dst, const void *src, const ALsizei frames) noexcept
+{
+ using SampleType = typename DevFmtTypeTraits<T>::Type;
+
+ const SampleType *ssrc = static_cast<const SampleType*>(src);
+ for(ALsizei i{0};i < frames;i++)
+ dst[i*2 + 1] = dst[i*2 + 0] = LoadSample<T>(ssrc[i]) * 0.707106781187f;
+}
+
+template<DevFmtType T>
+void Stereo2Mono(ALfloat *RESTRICT dst, const void *src, const ALsizei frames) noexcept
+{
+ using SampleType = typename DevFmtTypeTraits<T>::Type;
+
+ const SampleType *ssrc = static_cast<const SampleType*>(src);
+ for(ALsizei i{0};i < frames;i++)
+ dst[i] = (LoadSample<T>(ssrc[i*2 + 0])+LoadSample<T>(ssrc[i*2 + 1])) *
+ 0.707106781187f;
+}
+
+} // namespace
+
+SampleConverterPtr CreateSampleConverter(DevFmtType srcType, DevFmtType dstType, ALsizei numchans,
+ ALsizei srcRate, ALsizei dstRate, Resampler resampler)
+{
+ if(numchans <= 0 || srcRate <= 0 || dstRate <= 0)
+ return nullptr;
+
+ void *ptr{al_calloc(16, SampleConverter::Sizeof(numchans))};
+ SampleConverterPtr converter{new (ptr) SampleConverter{static_cast<size_t>(numchans)}};
+ converter->mSrcType = srcType;
+ converter->mDstType = dstType;
+ converter->mSrcTypeSize = BytesFromDevFmt(srcType);
+ converter->mDstTypeSize = BytesFromDevFmt(dstType);
+
+ converter->mSrcPrepCount = 0;
+ converter->mFracOffset = 0;
+
+ /* Have to set the mixer FPU mode since that's what the resampler code expects. */
+ FPUCtl mixer_mode{};
+ auto step = static_cast<ALsizei>(
+ mind(static_cast<ALdouble>(srcRate)/dstRate*FRACTIONONE + 0.5, MAX_PITCH*FRACTIONONE));
+ converter->mIncrement = maxi(step, 1);
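+    /* mIncrement is a fixed-point (FRACTIONBITS) resampling step; a value of
+     * FRACTIONONE means the source and destination rates match, so samples
+     * can simply be copied.
+     */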
+ if(converter->mIncrement == FRACTIONONE)
+ converter->mResample = Resample_<CopyTag,CTag>;
+ else
+ {
+ if(resampler == BSinc24Resampler)
+ BsincPrepare(converter->mIncrement, &converter->mState.bsinc, &bsinc24);
+ else if(resampler == BSinc12Resampler)
+ BsincPrepare(converter->mIncrement, &converter->mState.bsinc, &bsinc12);
+ converter->mResample = SelectResampler(resampler);
+ }
+
+ return converter;
+}
+
+ALsizei SampleConverter::availableOut(ALsizei srcframes) const
+{
+ ALint prepcount{mSrcPrepCount};
+ if(prepcount < 0)
+ {
+ /* Negative prepcount means we need to skip that many input samples. */
+ if(-prepcount >= srcframes)
+ return 0;
+ srcframes += prepcount;
+ prepcount = 0;
+ }
+
+ if(srcframes < 1)
+ {
+ /* No output samples if there's no input samples. */
+ return 0;
+ }
+
+ if(prepcount < MAX_RESAMPLE_PADDING*2 &&
+ MAX_RESAMPLE_PADDING*2 - prepcount >= srcframes)
+ {
+ /* Not enough input samples to generate an output sample. */
+ return 0;
+ }
+
+ auto DataSize64 = static_cast<uint64_t>(prepcount);
+ DataSize64 += srcframes;
+ DataSize64 -= MAX_RESAMPLE_PADDING*2;
+ DataSize64 <<= FRACTIONBITS;
+ DataSize64 -= mFracOffset;
+
+ /* If we have a full prep, we can generate at least one sample. */
+ return static_cast<ALsizei>(clampu64((DataSize64 + mIncrement-1)/mIncrement, 1, BUFFERSIZE));
+}
+
+ALsizei SampleConverter::convert(const ALvoid **src, ALsizei *srcframes, ALvoid *dst, ALsizei dstframes)
+{
+ const ALsizei SrcFrameSize{static_cast<ALsizei>(mChan.size()) * mSrcTypeSize};
+ const ALsizei DstFrameSize{static_cast<ALsizei>(mChan.size()) * mDstTypeSize};
+ const ALsizei increment{mIncrement};
+ auto SamplesIn = static_cast<const al::byte*>(*src);
+ ALsizei NumSrcSamples{*srcframes};
+
+ FPUCtl mixer_mode{};
+ ALsizei pos{0};
+ while(pos < dstframes && NumSrcSamples > 0)
+ {
+ ALint prepcount{mSrcPrepCount};
+ if(prepcount < 0)
+ {
+ /* Negative prepcount means we need to skip that many input samples. */
+ if(-prepcount >= NumSrcSamples)
+ {
+ mSrcPrepCount = prepcount + NumSrcSamples;
+ NumSrcSamples = 0;
+ break;
+ }
+ SamplesIn += SrcFrameSize*-prepcount;
+ NumSrcSamples += prepcount;
+ mSrcPrepCount = 0;
+ continue;
+ }
+ ALint toread{mini(NumSrcSamples, BUFFERSIZE - MAX_RESAMPLE_PADDING*2)};
+
+ if(prepcount < MAX_RESAMPLE_PADDING*2 &&
+ MAX_RESAMPLE_PADDING*2 - prepcount >= toread)
+ {
+ /* Not enough input samples to generate an output sample. Store
+ * what we're given for later.
+ */
+ for(size_t chan{0u};chan < mChan.size();chan++)
+ LoadSamples(&mChan[chan].PrevSamples[prepcount], SamplesIn + mSrcTypeSize*chan,
+ mChan.size(), mSrcType, toread);
+
+ mSrcPrepCount = prepcount + toread;
+ NumSrcSamples = 0;
+ break;
+ }
+
+ ALfloat *RESTRICT SrcData{mSrcSamples};
+ ALfloat *RESTRICT DstData{mDstSamples};
+ ALsizei DataPosFrac{mFracOffset};
+ auto DataSize64 = static_cast<uint64_t>(prepcount);
+ DataSize64 += toread;
+ DataSize64 -= MAX_RESAMPLE_PADDING*2;
+ DataSize64 <<= FRACTIONBITS;
+ DataSize64 -= DataPosFrac;
+
+ /* If we have a full prep, we can generate at least one sample. */
+ auto DstSize = static_cast<ALsizei>(
+ clampu64((DataSize64 + increment-1)/increment, 1, BUFFERSIZE));
+ DstSize = mini(DstSize, dstframes-pos);
+
+ for(size_t chan{0u};chan < mChan.size();chan++)
+ {
+ const al::byte *SrcSamples{SamplesIn + mSrcTypeSize*chan};
+ al::byte *DstSamples = static_cast<al::byte*>(dst) + mDstTypeSize*chan;
+
+ /* Load the previous samples into the source data first, then the
+ * new samples from the input buffer.
+ */
+ std::copy_n(mChan[chan].PrevSamples, prepcount, SrcData);
+ LoadSamples(SrcData + prepcount, SrcSamples, mChan.size(), mSrcType, toread);
+
+ /* Store as many prep samples for next time as possible, given the
+ * number of output samples being generated.
+ */
+ ALsizei SrcDataEnd{(DstSize*increment + DataPosFrac)>>FRACTIONBITS};
+ if(SrcDataEnd >= prepcount+toread)
+ std::fill(std::begin(mChan[chan].PrevSamples),
+ std::end(mChan[chan].PrevSamples), 0.0f);
+ else
+ {
+ size_t len = mini(MAX_RESAMPLE_PADDING*2, prepcount+toread-SrcDataEnd);
+ std::copy_n(SrcData+SrcDataEnd, len, mChan[chan].PrevSamples);
+ std::fill(std::begin(mChan[chan].PrevSamples)+len,
+ std::end(mChan[chan].PrevSamples), 0.0f);
+ }
+
+ /* Now resample, and store the result in the output buffer. */
+ const ALfloat *ResampledData{mResample(&mState, SrcData+MAX_RESAMPLE_PADDING,
+ DataPosFrac, increment, DstData, DstSize)};
+
+ StoreSamples(DstSamples, ResampledData, mChan.size(), mDstType, DstSize);
+ }
+
+ /* Update the number of prep samples still available, as well as the
+ * fractional offset.
+ */
+ DataPosFrac += increment*DstSize;
+ mSrcPrepCount = mini(prepcount + toread - (DataPosFrac>>FRACTIONBITS),
+ MAX_RESAMPLE_PADDING*2);
+ mFracOffset = DataPosFrac & FRACTIONMASK;
+
+ /* Update the src and dst pointers in case there's still more to do. */
+ SamplesIn += SrcFrameSize*(DataPosFrac>>FRACTIONBITS);
+ NumSrcSamples -= mini(NumSrcSamples, (DataPosFrac>>FRACTIONBITS));
+
+ dst = static_cast<al::byte*>(dst) + DstFrameSize*DstSize;
+ pos += DstSize;
+ }
+
+ *src = SamplesIn;
+ *srcframes = NumSrcSamples;
+
+ return pos;
+}
+
+
+ChannelConverterPtr CreateChannelConverter(DevFmtType srcType, DevFmtChannels srcChans, DevFmtChannels dstChans)
+{
+ if(srcChans != dstChans && !((srcChans == DevFmtMono && dstChans == DevFmtStereo) ||
+ (srcChans == DevFmtStereo && dstChans == DevFmtMono)))
+ return nullptr;
+ return al::make_unique<ChannelConverter>(srcType, srcChans, dstChans);
+}
+
+void ChannelConverter::convert(const ALvoid *src, ALfloat *dst, ALsizei frames) const
+{
+ if(mSrcChans == DevFmtStereo && mDstChans == DevFmtMono)
+ {
+ switch(mSrcType)
+ {
+#define HANDLE_FMT(T) case T: Stereo2Mono<T>(dst, src, frames); break
+ HANDLE_FMT(DevFmtByte);
+ HANDLE_FMT(DevFmtUByte);
+ HANDLE_FMT(DevFmtShort);
+ HANDLE_FMT(DevFmtUShort);
+ HANDLE_FMT(DevFmtInt);
+ HANDLE_FMT(DevFmtUInt);
+ HANDLE_FMT(DevFmtFloat);
+#undef HANDLE_FMT
+ }
+ }
+ else if(mSrcChans == DevFmtMono && mDstChans == DevFmtStereo)
+ {
+ switch(mSrcType)
+ {
+#define HANDLE_FMT(T) case T: Mono2Stereo<T>(dst, src, frames); break
+ HANDLE_FMT(DevFmtByte);
+ HANDLE_FMT(DevFmtUByte);
+ HANDLE_FMT(DevFmtShort);
+ HANDLE_FMT(DevFmtUShort);
+ HANDLE_FMT(DevFmtInt);
+ HANDLE_FMT(DevFmtUInt);
+ HANDLE_FMT(DevFmtFloat);
+#undef HANDLE_FMT
+ }
+ }
+ else
+ LoadSamples(dst, src, 1u, mSrcType, frames*ChannelsFromDevFmt(mSrcChans, 0));
+}
diff --git a/alc/converter.h b/alc/converter.h
new file mode 100644
index 00000000..033e4d3f
--- /dev/null
+++ b/alc/converter.h
@@ -0,0 +1,70 @@
+#ifndef CONVERTER_H
+#define CONVERTER_H
+
+#include <memory>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "almalloc.h"
+
+struct SampleConverter {
+ DevFmtType mSrcType{};
+ DevFmtType mDstType{};
+ ALsizei mSrcTypeSize{};
+ ALsizei mDstTypeSize{};
+
+ ALint mSrcPrepCount{};
+
+ ALsizei mFracOffset{};
+ ALsizei mIncrement{};
+ InterpState mState{};
+ ResamplerFunc mResample{};
+
+ alignas(16) ALfloat mSrcSamples[BUFFERSIZE]{};
+ alignas(16) ALfloat mDstSamples[BUFFERSIZE]{};
+
+ struct ChanSamples {
+ alignas(16) ALfloat PrevSamples[MAX_RESAMPLE_PADDING*2];
+ };
+ al::FlexArray<ChanSamples> mChan;
+
+ SampleConverter(size_t numchans) : mChan{numchans} { }
+ SampleConverter(const SampleConverter&) = delete;
+ SampleConverter& operator=(const SampleConverter&) = delete;
+
+ ALsizei convert(const ALvoid **src, ALsizei *srcframes, ALvoid *dst, ALsizei dstframes);
+ ALsizei availableOut(ALsizei srcframes) const;
+
+ static constexpr size_t Sizeof(size_t length) noexcept
+ {
+ return maxz(sizeof(SampleConverter),
+ al::FlexArray<ChanSamples>::Sizeof(length, offsetof(SampleConverter, mChan)));
+ }
+
+ DEF_PLACE_NEWDEL()
+};
+using SampleConverterPtr = std::unique_ptr<SampleConverter>;
+
+SampleConverterPtr CreateSampleConverter(DevFmtType srcType, DevFmtType dstType, ALsizei numchans,
+ ALsizei srcRate, ALsizei dstRate, Resampler resampler);
+
+
+struct ChannelConverter {
+ DevFmtType mSrcType;
+ DevFmtChannels mSrcChans;
+ DevFmtChannels mDstChans;
+
+ ChannelConverter(DevFmtType srctype, DevFmtChannels srcchans, DevFmtChannels dstchans)
+ : mSrcType(srctype), mSrcChans(srcchans), mDstChans(dstchans)
+ { }
+
+ void convert(const ALvoid *src, ALfloat *dst, ALsizei frames) const;
+
+ DEF_NEWDEL(ChannelConverter)
+};
+using ChannelConverterPtr = std::unique_ptr<ChannelConverter>;
+
+ChannelConverterPtr CreateChannelConverter(DevFmtType srcType, DevFmtChannels srcChans,
+ DevFmtChannels dstChans);
+
+#endif /* CONVERTER_H */
diff --git a/alc/cpu_caps.h b/alc/cpu_caps.h
new file mode 100644
index 00000000..64a4ee45
--- /dev/null
+++ b/alc/cpu_caps.h
@@ -0,0 +1,16 @@
+#ifndef CPU_CAPS_H
+#define CPU_CAPS_H
+
+
+extern int CPUCapFlags;
+enum {
+ CPU_CAP_SSE = 1<<0,
+ CPU_CAP_SSE2 = 1<<1,
+ CPU_CAP_SSE3 = 1<<2,
+ CPU_CAP_SSE4_1 = 1<<3,
+ CPU_CAP_NEON = 1<<4,
+};
+
+void FillCPUCaps(int capfilter);
+
+#endif /* CPU_CAPS_H */
diff --git a/alc/effects/autowah.cpp b/alc/effects/autowah.cpp
new file mode 100644
index 00000000..96292636
--- /dev/null
+++ b/alc/effects/autowah.cpp
@@ -0,0 +1,298 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2018 by Raul Herraiz.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cmath>
+#include <cstdlib>
+
+#include <algorithm>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "alu.h"
+#include "filters/biquad.h"
+#include "vecmat.h"
+
+namespace {
+
+#define MIN_FREQ 20.0f
+#define MAX_FREQ 2500.0f
+#define Q_FACTOR 5.0f
+
+struct ALautowahState final : public EffectState {
+ /* Effect parameters */
+ ALfloat mAttackRate;
+ ALfloat mReleaseRate;
+ ALfloat mResonanceGain;
+ ALfloat mPeakGain;
+ ALfloat mFreqMinNorm;
+ ALfloat mBandwidthNorm;
+ ALfloat mEnvDelay;
+
+ /* Filter components derived from the envelope. */
+ struct {
+ ALfloat cos_w0;
+ ALfloat alpha;
+ } mEnv[BUFFERSIZE];
+
+ struct {
+ /* Effect filters' history. */
+ struct {
+ ALfloat z1, z2;
+ } Filter;
+
+ /* Effect gains for each output channel */
+ ALfloat CurrentGains[MAX_OUTPUT_CHANNELS];
+ ALfloat TargetGains[MAX_OUTPUT_CHANNELS];
+ } mChans[MAX_AMBI_CHANNELS];
+
+ /* Effects buffers */
+ alignas(16) ALfloat mBufferOut[BUFFERSIZE];
+
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(ALautowahState)
+};
+
+ALboolean ALautowahState::deviceUpdate(const ALCdevice*)
+{
+    /* (Re-)initialize the parameters and clear the buffers. */
+
+ mAttackRate = 1.0f;
+ mReleaseRate = 1.0f;
+ mResonanceGain = 10.0f;
+ mPeakGain = 4.5f;
+ mFreqMinNorm = 4.5e-4f;
+ mBandwidthNorm = 0.05f;
+ mEnvDelay = 0.0f;
+
+ for(auto &e : mEnv)
+ {
+ e.cos_w0 = 0.0f;
+ e.alpha = 0.0f;
+ }
+
+ for(auto &chan : mChans)
+ {
+ std::fill(std::begin(chan.CurrentGains), std::end(chan.CurrentGains), 0.0f);
+ chan.Filter.z1 = 0.0f;
+ chan.Filter.z2 = 0.0f;
+ }
+
+ return AL_TRUE;
+}
+
+void ALautowahState::update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ const ALCdevice *device{context->Device};
+
+ const ALfloat ReleaseTime{clampf(props->Autowah.ReleaseTime, 0.001f, 1.0f)};
+
+ mAttackRate = expf(-1.0f / (props->Autowah.AttackTime*device->Frequency));
+ mReleaseRate = expf(-1.0f / (ReleaseTime*device->Frequency));
+ /* 0-20dB Resonance Peak gain */
+ mResonanceGain = std::sqrt(std::log10(props->Autowah.Resonance)*10.0f / 3.0f);
+ mPeakGain = 1.0f - std::log10(props->Autowah.PeakGain/AL_AUTOWAH_MAX_PEAK_GAIN);
+ mFreqMinNorm = MIN_FREQ / device->Frequency;
+ mBandwidthNorm = (MAX_FREQ-MIN_FREQ) / device->Frequency;
+
+ mOutTarget = target.Main->Buffer;
+ for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
+ {
+ auto coeffs = GetAmbiIdentityRow(i);
+ ComputePanGains(target.Main, coeffs.data(), slot->Params.Gain, mChans[i].TargetGains);
+ }
+}
+
+void ALautowahState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut)
+{
+ const ALfloat attack_rate = mAttackRate;
+ const ALfloat release_rate = mReleaseRate;
+ const ALfloat res_gain = mResonanceGain;
+ const ALfloat peak_gain = mPeakGain;
+ const ALfloat freq_min = mFreqMinNorm;
+ const ALfloat bandwidth = mBandwidthNorm;
+
+ ALfloat env_delay{mEnvDelay};
+ for(ALsizei i{0};i < samplesToDo;i++)
+ {
+ ALfloat w0, sample, a;
+
+        /* Envelope follower described in the book: Audio Effects, Theory,
+ * Implementation and Application.
+ */
+ sample = peak_gain * std::fabs(samplesIn[0][i]);
+ a = (sample > env_delay) ? attack_rate : release_rate;
+ env_delay = lerp(sample, env_delay, a);
+
+ /* Calculate the cos and alpha components for this sample's filter. */
+ w0 = minf((bandwidth*env_delay + freq_min), 0.46f) * al::MathDefs<float>::Tau();
+ mEnv[i].cos_w0 = cosf(w0);
+ mEnv[i].alpha = sinf(w0)/(2.0f * Q_FACTOR);
+ }
+ mEnvDelay = env_delay;
+
+ ASSUME(numInput > 0);
+ for(ALsizei c{0};c < numInput;++c)
+ {
+ /* This effectively inlines BiquadFilter_setParams for a peaking
+ * filter and BiquadFilter_processC. The alpha and cosine components
+ * for the filter coefficients were previously calculated with the
+ * envelope. Because the filter changes for each sample, the
+ * coefficients are transient and don't need to be held.
+ */
+ ALfloat z1{mChans[c].Filter.z1};
+ ALfloat z2{mChans[c].Filter.z2};
+
+ for(ALsizei i{0};i < samplesToDo;i++)
+ {
+ const ALfloat alpha = mEnv[i].alpha;
+ const ALfloat cos_w0 = mEnv[i].cos_w0;
+ ALfloat input, output;
+ ALfloat a[3], b[3];
+
+ b[0] = 1.0f + alpha*res_gain;
+ b[1] = -2.0f * cos_w0;
+ b[2] = 1.0f - alpha*res_gain;
+ a[0] = 1.0f + alpha/res_gain;
+ a[1] = -2.0f * cos_w0;
+ a[2] = 1.0f - alpha/res_gain;
+
+ input = samplesIn[c][i];
+ output = input*(b[0]/a[0]) + z1;
+ z1 = input*(b[1]/a[0]) - output*(a[1]/a[0]) + z2;
+ z2 = input*(b[2]/a[0]) - output*(a[2]/a[0]);
+ mBufferOut[i] = output;
+ }
+ mChans[c].Filter.z1 = z1;
+ mChans[c].Filter.z2 = z2;
+
+ /* Now, mix the processed sound data to the output. */
+ MixSamples(mBufferOut, samplesOut, mChans[c].CurrentGains, mChans[c].TargetGains,
+ samplesToDo, 0, samplesToDo);
+ }
+}
+
+
+void ALautowah_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_AUTOWAH_ATTACK_TIME:
+ if(!(val >= AL_AUTOWAH_MIN_ATTACK_TIME && val <= AL_AUTOWAH_MAX_ATTACK_TIME))
+ SETERR_RETURN(context, AL_INVALID_VALUE,,"Autowah attack time out of range");
+ props->Autowah.AttackTime = val;
+ break;
+
+ case AL_AUTOWAH_RELEASE_TIME:
+ if(!(val >= AL_AUTOWAH_MIN_RELEASE_TIME && val <= AL_AUTOWAH_MAX_RELEASE_TIME))
+ SETERR_RETURN(context, AL_INVALID_VALUE,,"Autowah release time out of range");
+ props->Autowah.ReleaseTime = val;
+ break;
+
+ case AL_AUTOWAH_RESONANCE:
+ if(!(val >= AL_AUTOWAH_MIN_RESONANCE && val <= AL_AUTOWAH_MAX_RESONANCE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,,"Autowah resonance out of range");
+ props->Autowah.Resonance = val;
+ break;
+
+ case AL_AUTOWAH_PEAK_GAIN:
+ if(!(val >= AL_AUTOWAH_MIN_PEAK_GAIN && val <= AL_AUTOWAH_MAX_PEAK_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,,"Autowah peak gain out of range");
+ props->Autowah.PeakGain = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid autowah float property 0x%04x", param);
+ }
+}
+void ALautowah_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ ALautowah_setParamf(props, context, param, vals[0]); }
+
+void ALautowah_setParami(EffectProps*, ALCcontext *context, ALenum param, ALint)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid autowah integer property 0x%04x", param); }
+void ALautowah_setParamiv(EffectProps*, ALCcontext *context, ALenum param, const ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid autowah integer vector property 0x%04x", param); }
+
+void ALautowah_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_AUTOWAH_ATTACK_TIME:
+ *val = props->Autowah.AttackTime;
+ break;
+
+ case AL_AUTOWAH_RELEASE_TIME:
+ *val = props->Autowah.ReleaseTime;
+ break;
+
+ case AL_AUTOWAH_RESONANCE:
+ *val = props->Autowah.Resonance;
+ break;
+
+ case AL_AUTOWAH_PEAK_GAIN:
+ *val = props->Autowah.PeakGain;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid autowah float property 0x%04x", param);
+ }
+
+}
+void ALautowah_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ ALautowah_getParamf(props, context, param, vals); }
+
+void ALautowah_getParami(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid autowah integer property 0x%04x", param); }
+void ALautowah_getParamiv(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid autowah integer vector property 0x%04x", param); }
+
+DEFINE_ALEFFECT_VTABLE(ALautowah);
+
+
+struct AutowahStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new ALautowahState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &ALautowah_vtable; }
+};
+
+EffectProps AutowahStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Autowah.AttackTime = AL_AUTOWAH_DEFAULT_ATTACK_TIME;
+ props.Autowah.ReleaseTime = AL_AUTOWAH_DEFAULT_RELEASE_TIME;
+ props.Autowah.Resonance = AL_AUTOWAH_DEFAULT_RESONANCE;
+ props.Autowah.PeakGain = AL_AUTOWAH_DEFAULT_PEAK_GAIN;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *AutowahStateFactory_getFactory()
+{
+ static AutowahStateFactory AutowahFactory{};
+ return &AutowahFactory;
+}
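As an aside for readers of this diff: the per-sample math that ALautowahState::process inlines (a one-pole envelope follower feeding the peaking biquad's cos/alpha terms) can be sketched standalone as below. The 60ms attack/release times and the test input are made-up illustration values, not the effect's defaults.

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    constexpr float kMinFreq = 20.0f, kMaxFreq = 2500.0f, kQ = 5.0f;
    constexpr float kTau = 6.283185307179586f;

    int main()
    {
        const float srate = 44100.0f;
        const float attack  = std::exp(-1.0f/(0.06f*srate));  /* made-up 60ms attack */
        const float release = std::exp(-1.0f/(0.06f*srate));  /* made-up 60ms release */
        const float freq_min  = kMinFreq/srate;
        const float bandwidth = (kMaxFreq - kMinFreq)/srate;

        float env = 0.0f;
        const float input[]{0.0f, 0.5f, 1.0f, 0.25f};
        for(const float x : input)
        {
            /* One-pole envelope follower: use the attack or release coefficient
             * depending on whether the rectified input is above the envelope. */
            const float sample = std::fabs(x);
            const float a = (sample > env) ? attack : release;
            env = sample*(1.0f - a) + env*a;   /* lerp(sample, env, a) */

            /* The peaking filter's normalized center frequency sweeps upward
             * from freq_min as the envelope rises, capped at 0.46. */
            const float w0 = std::min(bandwidth*env + freq_min, 0.46f) * kTau;
            std::printf("env=%.3f  cos_w0=%.3f  alpha=%.4f\n",
                env, std::cos(w0), std::sin(w0)/(2.0f*kQ));
        }
        return 0;
    }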
diff --git a/alc/effects/base.h b/alc/effects/base.h
new file mode 100644
index 00000000..4f48de22
--- /dev/null
+++ b/alc/effects/base.h
@@ -0,0 +1,196 @@
+#ifndef EFFECTS_BASE_H
+#define EFFECTS_BASE_H
+
+#include "alcmain.h"
+#include "almalloc.h"
+#include "alspan.h"
+#include "atomic.h"
+
+
+struct ALeffectslot;
+
+
+union EffectProps {
+ struct {
+ // Shared Reverb Properties
+ ALfloat Density;
+ ALfloat Diffusion;
+ ALfloat Gain;
+ ALfloat GainHF;
+ ALfloat DecayTime;
+ ALfloat DecayHFRatio;
+ ALfloat ReflectionsGain;
+ ALfloat ReflectionsDelay;
+ ALfloat LateReverbGain;
+ ALfloat LateReverbDelay;
+ ALfloat AirAbsorptionGainHF;
+ ALfloat RoomRolloffFactor;
+ ALboolean DecayHFLimit;
+
+ // Additional EAX Reverb Properties
+ ALfloat GainLF;
+ ALfloat DecayLFRatio;
+ ALfloat ReflectionsPan[3];
+ ALfloat LateReverbPan[3];
+ ALfloat EchoTime;
+ ALfloat EchoDepth;
+ ALfloat ModulationTime;
+ ALfloat ModulationDepth;
+ ALfloat HFReference;
+ ALfloat LFReference;
+ } Reverb;
+
+ struct {
+ ALfloat AttackTime;
+ ALfloat ReleaseTime;
+ ALfloat Resonance;
+ ALfloat PeakGain;
+ } Autowah;
+
+ struct {
+ ALint Waveform;
+ ALint Phase;
+ ALfloat Rate;
+ ALfloat Depth;
+ ALfloat Feedback;
+ ALfloat Delay;
+ } Chorus; /* Also Flanger */
+
+ struct {
+ ALboolean OnOff;
+ } Compressor;
+
+ struct {
+ ALfloat Edge;
+ ALfloat Gain;
+ ALfloat LowpassCutoff;
+ ALfloat EQCenter;
+ ALfloat EQBandwidth;
+ } Distortion;
+
+ struct {
+ ALfloat Delay;
+ ALfloat LRDelay;
+
+ ALfloat Damping;
+ ALfloat Feedback;
+
+ ALfloat Spread;
+ } Echo;
+
+ struct {
+ ALfloat LowCutoff;
+ ALfloat LowGain;
+ ALfloat Mid1Center;
+ ALfloat Mid1Gain;
+ ALfloat Mid1Width;
+ ALfloat Mid2Center;
+ ALfloat Mid2Gain;
+ ALfloat Mid2Width;
+ ALfloat HighCutoff;
+ ALfloat HighGain;
+ } Equalizer;
+
+ struct {
+ ALfloat Frequency;
+ ALint LeftDirection;
+ ALint RightDirection;
+ } Fshifter;
+
+ struct {
+ ALfloat Frequency;
+ ALfloat HighPassCutoff;
+ ALint Waveform;
+ } Modulator;
+
+ struct {
+ ALint CoarseTune;
+ ALint FineTune;
+ } Pshifter;
+
+ struct {
+ ALfloat Rate;
+ ALint PhonemeA;
+ ALint PhonemeB;
+ ALint PhonemeACoarseTuning;
+ ALint PhonemeBCoarseTuning;
+ ALint Waveform;
+ } Vmorpher;
+
+ struct {
+ ALfloat Gain;
+ } Dedicated;
+};
+
+
+struct EffectVtable {
+ void (*const setParami)(EffectProps *props, ALCcontext *context, ALenum param, ALint val);
+ void (*const setParamiv)(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals);
+ void (*const setParamf)(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val);
+ void (*const setParamfv)(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals);
+
+ void (*const getParami)(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val);
+ void (*const getParamiv)(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals);
+ void (*const getParamf)(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val);
+ void (*const getParamfv)(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals);
+};
+
+#define DEFINE_ALEFFECT_VTABLE(T) \
+const EffectVtable T##_vtable = { \
+ T##_setParami, T##_setParamiv, \
+ T##_setParamf, T##_setParamfv, \
+ T##_getParami, T##_getParamiv, \
+ T##_getParamf, T##_getParamfv, \
+}
+
+
+struct EffectTarget {
+ MixParams *Main;
+ RealMixParams *RealOut;
+};
+
+struct EffectState {
+ RefCount mRef{1u};
+
+ al::span<FloatBufferLine> mOutTarget;
+
+
+ virtual ~EffectState() = default;
+
+ virtual ALboolean deviceUpdate(const ALCdevice *device) = 0;
+ virtual void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) = 0;
+ virtual void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) = 0;
+
+ void IncRef() noexcept;
+ void DecRef() noexcept;
+};
+
+
+struct EffectStateFactory {
+ virtual ~EffectStateFactory() { }
+
+ virtual EffectState *create() = 0;
+ virtual EffectProps getDefaultProps() const noexcept = 0;
+ virtual const EffectVtable *getEffectVtable() const noexcept = 0;
+};
+
+
+EffectStateFactory *NullStateFactory_getFactory(void);
+EffectStateFactory *ReverbStateFactory_getFactory(void);
+EffectStateFactory *StdReverbStateFactory_getFactory(void);
+EffectStateFactory *AutowahStateFactory_getFactory(void);
+EffectStateFactory *ChorusStateFactory_getFactory(void);
+EffectStateFactory *CompressorStateFactory_getFactory(void);
+EffectStateFactory *DistortionStateFactory_getFactory(void);
+EffectStateFactory *EchoStateFactory_getFactory(void);
+EffectStateFactory *EqualizerStateFactory_getFactory(void);
+EffectStateFactory *FlangerStateFactory_getFactory(void);
+EffectStateFactory *FshifterStateFactory_getFactory(void);
+EffectStateFactory *ModulatorStateFactory_getFactory(void);
+EffectStateFactory *PshifterStateFactory_getFactory(void);
+EffectStateFactory *VmorpherStateFactory_getFactory(void);
+
+EffectStateFactory *DedicatedStateFactory_getFactory(void);
+
+
+#endif /* EFFECTS_BASE_H */
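The eight free functions plus DEFINE_ALEFFECT_VTABLE form the parameter-dispatch pattern every effect in this directory follows. A minimal sketch of that pattern, using simplified stand-in types instead of the real AL headers and a hypothetical "Demo" effect, might look like this:

    /* Simplified stand-ins for the AL types, just so the sketch is self-contained. */
    using ALenum = int; using ALint = int; using ALfloat = float;
    struct ALCcontext;
    union EffectProps { struct { ALfloat Gain; } Demo; };

    struct EffectVtable {
        void (*const setParami)(EffectProps*, ALCcontext*, ALenum, ALint);
        void (*const setParamiv)(EffectProps*, ALCcontext*, ALenum, const ALint*);
        void (*const setParamf)(EffectProps*, ALCcontext*, ALenum, ALfloat);
        void (*const setParamfv)(EffectProps*, ALCcontext*, ALenum, const ALfloat*);
        void (*const getParami)(const EffectProps*, ALCcontext*, ALenum, ALint*);
        void (*const getParamiv)(const EffectProps*, ALCcontext*, ALenum, ALint*);
        void (*const getParamf)(const EffectProps*, ALCcontext*, ALenum, ALfloat*);
        void (*const getParamfv)(const EffectProps*, ALCcontext*, ALenum, ALfloat*);
    };

    #define DEFINE_ALEFFECT_VTABLE(T) \
    const EffectVtable T##_vtable = { \
        T##_setParami, T##_setParamiv, T##_setParamf, T##_setParamfv, \
        T##_getParami, T##_getParamiv, T##_getParamf, T##_getParamfv, \
    }

    /* A hypothetical "Demo" effect only needs the eight T##_<op> free functions... */
    void Demo_setParami(EffectProps*, ALCcontext*, ALenum, ALint) {}
    void Demo_setParamiv(EffectProps*, ALCcontext*, ALenum, const ALint*) {}
    void Demo_setParamf(EffectProps *p, ALCcontext*, ALenum, ALfloat v) { p->Demo.Gain = v; }
    void Demo_setParamfv(EffectProps *p, ALCcontext *c, ALenum e, const ALfloat *v)
    { Demo_setParamf(p, c, e, v[0]); }
    void Demo_getParami(const EffectProps*, ALCcontext*, ALenum, ALint*) {}
    void Demo_getParamiv(const EffectProps*, ALCcontext*, ALenum, ALint*) {}
    void Demo_getParamf(const EffectProps *p, ALCcontext*, ALenum, ALfloat *v) { *v = p->Demo.Gain; }
    void Demo_getParamfv(const EffectProps *p, ALCcontext *c, ALenum e, ALfloat *v)
    { Demo_getParamf(p, c, e, v); }

    /* ...and the macro stamps out the aggregated dispatch table Demo_vtable. */
    DEFINE_ALEFFECT_VTABLE(Demo);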
diff --git a/alc/effects/chorus.cpp b/alc/effects/chorus.cpp
new file mode 100644
index 00000000..d475b57a
--- /dev/null
+++ b/alc/effects/chorus.cpp
@@ -0,0 +1,538 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2013 by Mike Gorchak
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <algorithm>
+#include <climits>
+#include <cmath>
+#include <cstdlib>
+#include <iterator>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "AL/efx.h"
+
+#include "alAuxEffectSlot.h"
+#include "alcmain.h"
+#include "alError.h"
+#include "alcontext.h"
+#include "almalloc.h"
+#include "alnumeric.h"
+#include "alspan.h"
+#include "alu.h"
+#include "ambidefs.h"
+#include "effects/base.h"
+#include "math_defs.h"
+#include "opthelpers.h"
+#include "vector.h"
+
+
+namespace {
+
+static_assert(AL_CHORUS_WAVEFORM_SINUSOID == AL_FLANGER_WAVEFORM_SINUSOID, "Chorus/Flanger waveform value mismatch");
+static_assert(AL_CHORUS_WAVEFORM_TRIANGLE == AL_FLANGER_WAVEFORM_TRIANGLE, "Chorus/Flanger waveform value mismatch");
+
+enum class WaveForm {
+ Sinusoid,
+ Triangle
+};
+
+void GetTriangleDelays(ALint *delays, const ALsizei start_offset, const ALsizei lfo_range,
+ const ALfloat lfo_scale, const ALfloat depth, const ALsizei delay, const ALsizei todo)
+{
+ ASSUME(start_offset >= 0);
+ ASSUME(lfo_range > 0);
+ ASSUME(todo > 0);
+
+ ALsizei offset{start_offset};
+ auto gen_lfo = [&offset,lfo_range,lfo_scale,depth,delay]() -> ALint
+ {
+ offset = (offset+1)%lfo_range;
+ return fastf2i((1.0f - std::abs(2.0f - lfo_scale*offset)) * depth) + delay;
+ };
+ std::generate_n(delays, todo, gen_lfo);
+}
+
+void GetSinusoidDelays(ALint *delays, const ALsizei start_offset, const ALsizei lfo_range,
+ const ALfloat lfo_scale, const ALfloat depth, const ALsizei delay, const ALsizei todo)
+{
+ ASSUME(start_offset >= 0);
+ ASSUME(lfo_range > 0);
+ ASSUME(todo > 0);
+
+ ALsizei offset{start_offset};
+ auto gen_lfo = [&offset,lfo_range,lfo_scale,depth,delay]() -> ALint
+ {
+ ASSUME(delay >= 0);
+ offset = (offset+1)%lfo_range;
+ return fastf2i(std::sin(lfo_scale*offset) * depth) + delay;
+ };
+ std::generate_n(delays, todo, gen_lfo);
+}
+
+struct ChorusState final : public EffectState {
+ al::vector<ALfloat,16> mSampleBuffer;
+ ALsizei mOffset{0};
+
+ ALsizei mLfoOffset{0};
+ ALsizei mLfoRange{1};
+ ALfloat mLfoScale{0.0f};
+ ALint mLfoDisp{0};
+
+ /* Gains for left and right sides */
+ struct {
+ ALfloat Current[MAX_OUTPUT_CHANNELS]{};
+ ALfloat Target[MAX_OUTPUT_CHANNELS]{};
+ } mGains[2];
+
+ /* effect parameters */
+ WaveForm mWaveform{};
+ ALint mDelay{0};
+ ALfloat mDepth{0.0f};
+ ALfloat mFeedback{0.0f};
+
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(ChorusState)
+};
+
+ALboolean ChorusState::deviceUpdate(const ALCdevice *Device)
+{
+ const ALfloat max_delay = maxf(AL_CHORUS_MAX_DELAY, AL_FLANGER_MAX_DELAY);
+ size_t maxlen;
+
+ maxlen = NextPowerOf2(float2int(max_delay*2.0f*Device->Frequency) + 1u);
+ if(maxlen <= 0) return AL_FALSE;
+
+ if(maxlen != mSampleBuffer.size())
+ {
+ mSampleBuffer.resize(maxlen);
+ mSampleBuffer.shrink_to_fit();
+ }
+
+ std::fill(mSampleBuffer.begin(), mSampleBuffer.end(), 0.0f);
+ for(auto &e : mGains)
+ {
+ std::fill(std::begin(e.Current), std::end(e.Current), 0.0f);
+ std::fill(std::begin(e.Target), std::end(e.Target), 0.0f);
+ }
+
+ return AL_TRUE;
+}
+
+void ChorusState::update(const ALCcontext *Context, const ALeffectslot *Slot, const EffectProps *props, const EffectTarget target)
+{
+ static constexpr ALsizei mindelay = MAX_RESAMPLE_PADDING << FRACTIONBITS;
+
+ switch(props->Chorus.Waveform)
+ {
+ case AL_CHORUS_WAVEFORM_TRIANGLE:
+ mWaveform = WaveForm::Triangle;
+ break;
+ case AL_CHORUS_WAVEFORM_SINUSOID:
+ mWaveform = WaveForm::Sinusoid;
+ break;
+ }
+
+ /* The LFO depth is scaled to be relative to the sample delay. Clamp the
+ * delay and depth to allow enough padding for resampling.
+ */
+ const ALCdevice *device{Context->Device};
+ const auto frequency = static_cast<ALfloat>(device->Frequency);
+ mDelay = maxi(float2int(props->Chorus.Delay*frequency*FRACTIONONE + 0.5f), mindelay);
+ mDepth = minf(props->Chorus.Depth * mDelay, static_cast<ALfloat>(mDelay - mindelay));
+
+ mFeedback = props->Chorus.Feedback;
+
+ /* Gains for left and right sides */
+ ALfloat coeffs[2][MAX_AMBI_CHANNELS];
+ CalcDirectionCoeffs({-1.0f, 0.0f, 0.0f}, 0.0f, coeffs[0]);
+ CalcDirectionCoeffs({ 1.0f, 0.0f, 0.0f}, 0.0f, coeffs[1]);
+
+ mOutTarget = target.Main->Buffer;
+ ComputePanGains(target.Main, coeffs[0], Slot->Params.Gain, mGains[0].Target);
+ ComputePanGains(target.Main, coeffs[1], Slot->Params.Gain, mGains[1].Target);
+
+ ALfloat rate{props->Chorus.Rate};
+ if(!(rate > 0.0f))
+ {
+ mLfoOffset = 0;
+ mLfoRange = 1;
+ mLfoScale = 0.0f;
+ mLfoDisp = 0;
+ }
+ else
+ {
+ /* Calculate LFO coefficient (number of samples per cycle). Limit the
+ * max range to avoid overflow when calculating the displacement.
+ */
+ ALsizei lfo_range = float2int(minf(frequency/rate + 0.5f, static_cast<ALfloat>(INT_MAX/360 - 180)));
+
+ mLfoOffset = float2int(static_cast<ALfloat>(mLfoOffset)/mLfoRange*lfo_range + 0.5f) % lfo_range;
+ mLfoRange = lfo_range;
+ switch(mWaveform)
+ {
+ case WaveForm::Triangle:
+ mLfoScale = 4.0f / mLfoRange;
+ break;
+ case WaveForm::Sinusoid:
+ mLfoScale = al::MathDefs<float>::Tau() / mLfoRange;
+ break;
+ }
+
+ /* Calculate lfo phase displacement */
+ ALint phase{props->Chorus.Phase};
+ if(phase < 0) phase = 360 + phase;
+ mLfoDisp = (mLfoRange*phase + 180) / 360;
+ }
+}
+
+void ChorusState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei /*numInput*/, const al::span<FloatBufferLine> samplesOut)
+{
+ const auto bufmask = static_cast<ALsizei>(mSampleBuffer.size()-1);
+ const ALfloat feedback{mFeedback};
+ const ALsizei avgdelay{(mDelay + (FRACTIONONE>>1)) >> FRACTIONBITS};
+ ALfloat *RESTRICT delaybuf{mSampleBuffer.data()};
+ ALsizei offset{mOffset};
+
+ for(ALsizei base{0};base < samplesToDo;)
+ {
+ const ALsizei todo = mini(256, samplesToDo-base);
+ ALint moddelays[2][256];
+ alignas(16) ALfloat temps[2][256];
+
+ if(mWaveform == WaveForm::Sinusoid)
+ {
+ GetSinusoidDelays(moddelays[0], mLfoOffset, mLfoRange, mLfoScale, mDepth, mDelay,
+ todo);
+ GetSinusoidDelays(moddelays[1], (mLfoOffset+mLfoDisp)%mLfoRange, mLfoRange, mLfoScale,
+ mDepth, mDelay, todo);
+ }
+ else /*if(mWaveform == WaveForm::Triangle)*/
+ {
+ GetTriangleDelays(moddelays[0], mLfoOffset, mLfoRange, mLfoScale, mDepth, mDelay,
+ todo);
+ GetTriangleDelays(moddelays[1], (mLfoOffset+mLfoDisp)%mLfoRange, mLfoRange, mLfoScale,
+ mDepth, mDelay, todo);
+ }
+ mLfoOffset = (mLfoOffset+todo) % mLfoRange;
+
+ for(ALsizei i{0};i < todo;i++)
+ {
+ // Feed the buffer's input first (necessary for delays < 1).
+ delaybuf[offset&bufmask] = samplesIn[0][base+i];
+
+ // Tap for the left output.
+ ALint delay{offset - (moddelays[0][i]>>FRACTIONBITS)};
+ ALfloat mu{(moddelays[0][i]&FRACTIONMASK) * (1.0f/FRACTIONONE)};
+ temps[0][i] = cubic(delaybuf[(delay+1) & bufmask], delaybuf[(delay ) & bufmask],
+ delaybuf[(delay-1) & bufmask], delaybuf[(delay-2) & bufmask],
+ mu);
+
+ // Tap for the right output.
+ delay = offset - (moddelays[1][i]>>FRACTIONBITS);
+ mu = (moddelays[1][i]&FRACTIONMASK) * (1.0f/FRACTIONONE);
+ temps[1][i] = cubic(delaybuf[(delay+1) & bufmask], delaybuf[(delay ) & bufmask],
+ delaybuf[(delay-1) & bufmask], delaybuf[(delay-2) & bufmask],
+ mu);
+
+ // Accumulate feedback from the average delay of the taps.
+ delaybuf[offset&bufmask] += delaybuf[(offset-avgdelay) & bufmask] * feedback;
+ offset++;
+ }
+
+ for(ALsizei c{0};c < 2;c++)
+ MixSamples(temps[c], samplesOut, mGains[c].Current, mGains[c].Target, samplesToDo-base,
+ base, todo);
+
+ base += todo;
+ }
+
+ mOffset = offset;
+}
+
+
+void Chorus_setParami(EffectProps *props, ALCcontext *context, ALenum param, ALint val)
+{
+ switch(param)
+ {
+ case AL_CHORUS_WAVEFORM:
+ if(!(val >= AL_CHORUS_MIN_WAVEFORM && val <= AL_CHORUS_MAX_WAVEFORM))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Invalid chorus waveform");
+ props->Chorus.Waveform = val;
+ break;
+
+ case AL_CHORUS_PHASE:
+ if(!(val >= AL_CHORUS_MIN_PHASE && val <= AL_CHORUS_MAX_PHASE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Chorus phase out of range");
+ props->Chorus.Phase = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid chorus integer property 0x%04x", param);
+ }
+}
+void Chorus_setParamiv(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals)
+{ Chorus_setParami(props, context, param, vals[0]); }
+void Chorus_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_CHORUS_RATE:
+ if(!(val >= AL_CHORUS_MIN_RATE && val <= AL_CHORUS_MAX_RATE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Chorus rate out of range");
+ props->Chorus.Rate = val;
+ break;
+
+ case AL_CHORUS_DEPTH:
+ if(!(val >= AL_CHORUS_MIN_DEPTH && val <= AL_CHORUS_MAX_DEPTH))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Chorus depth out of range");
+ props->Chorus.Depth = val;
+ break;
+
+ case AL_CHORUS_FEEDBACK:
+ if(!(val >= AL_CHORUS_MIN_FEEDBACK && val <= AL_CHORUS_MAX_FEEDBACK))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Chorus feedback out of range");
+ props->Chorus.Feedback = val;
+ break;
+
+ case AL_CHORUS_DELAY:
+ if(!(val >= AL_CHORUS_MIN_DELAY && val <= AL_CHORUS_MAX_DELAY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Chorus delay out of range");
+ props->Chorus.Delay = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid chorus float property 0x%04x", param);
+ }
+}
+void Chorus_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ Chorus_setParamf(props, context, param, vals[0]); }
+
+void Chorus_getParami(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val)
+{
+ switch(param)
+ {
+ case AL_CHORUS_WAVEFORM:
+ *val = props->Chorus.Waveform;
+ break;
+
+ case AL_CHORUS_PHASE:
+ *val = props->Chorus.Phase;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid chorus integer property 0x%04x", param);
+ }
+}
+void Chorus_getParamiv(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals)
+{ Chorus_getParami(props, context, param, vals); }
+void Chorus_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_CHORUS_RATE:
+ *val = props->Chorus.Rate;
+ break;
+
+ case AL_CHORUS_DEPTH:
+ *val = props->Chorus.Depth;
+ break;
+
+ case AL_CHORUS_FEEDBACK:
+ *val = props->Chorus.Feedback;
+ break;
+
+ case AL_CHORUS_DELAY:
+ *val = props->Chorus.Delay;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid chorus float property 0x%04x", param);
+ }
+}
+void Chorus_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ Chorus_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(Chorus);
+
+
+struct ChorusStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new ChorusState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Chorus_vtable; }
+};
+
+EffectProps ChorusStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Chorus.Waveform = AL_CHORUS_DEFAULT_WAVEFORM;
+ props.Chorus.Phase = AL_CHORUS_DEFAULT_PHASE;
+ props.Chorus.Rate = AL_CHORUS_DEFAULT_RATE;
+ props.Chorus.Depth = AL_CHORUS_DEFAULT_DEPTH;
+ props.Chorus.Feedback = AL_CHORUS_DEFAULT_FEEDBACK;
+ props.Chorus.Delay = AL_CHORUS_DEFAULT_DELAY;
+ return props;
+}
+
+
+void Flanger_setParami(EffectProps *props, ALCcontext *context, ALenum param, ALint val)
+{
+ switch(param)
+ {
+ case AL_FLANGER_WAVEFORM:
+ if(!(val >= AL_FLANGER_MIN_WAVEFORM && val <= AL_FLANGER_MAX_WAVEFORM))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Invalid flanger waveform");
+ props->Chorus.Waveform = val;
+ break;
+
+ case AL_FLANGER_PHASE:
+ if(!(val >= AL_FLANGER_MIN_PHASE && val <= AL_FLANGER_MAX_PHASE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Flanger phase out of range");
+ props->Chorus.Phase = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid flanger integer property 0x%04x", param);
+ }
+}
+void Flanger_setParamiv(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals)
+{ Flanger_setParami(props, context, param, vals[0]); }
+void Flanger_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_FLANGER_RATE:
+ if(!(val >= AL_FLANGER_MIN_RATE && val <= AL_FLANGER_MAX_RATE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Flanger rate out of range");
+ props->Chorus.Rate = val;
+ break;
+
+ case AL_FLANGER_DEPTH:
+ if(!(val >= AL_FLANGER_MIN_DEPTH && val <= AL_FLANGER_MAX_DEPTH))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Flanger depth out of range");
+ props->Chorus.Depth = val;
+ break;
+
+ case AL_FLANGER_FEEDBACK:
+ if(!(val >= AL_FLANGER_MIN_FEEDBACK && val <= AL_FLANGER_MAX_FEEDBACK))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Flanger feedback out of range");
+ props->Chorus.Feedback = val;
+ break;
+
+ case AL_FLANGER_DELAY:
+ if(!(val >= AL_FLANGER_MIN_DELAY && val <= AL_FLANGER_MAX_DELAY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Flanger delay out of range");
+ props->Chorus.Delay = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid flanger float property 0x%04x", param);
+ }
+}
+void Flanger_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ Flanger_setParamf(props, context, param, vals[0]); }
+
+void Flanger_getParami(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val)
+{
+ switch(param)
+ {
+ case AL_FLANGER_WAVEFORM:
+ *val = props->Chorus.Waveform;
+ break;
+
+ case AL_FLANGER_PHASE:
+ *val = props->Chorus.Phase;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid flanger integer property 0x%04x", param);
+ }
+}
+void Flanger_getParamiv(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals)
+{ Flanger_getParami(props, context, param, vals); }
+void Flanger_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_FLANGER_RATE:
+ *val = props->Chorus.Rate;
+ break;
+
+ case AL_FLANGER_DEPTH:
+ *val = props->Chorus.Depth;
+ break;
+
+ case AL_FLANGER_FEEDBACK:
+ *val = props->Chorus.Feedback;
+ break;
+
+ case AL_FLANGER_DELAY:
+ *val = props->Chorus.Delay;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid flanger float property 0x%04x", param);
+ }
+}
+void Flanger_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ Flanger_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(Flanger);
+
+
+/* Flanger is basically a chorus with a really short delay. They can both use
+ * the same processing functions, so piggyback flanger on the chorus functions.
+ */
+struct FlangerStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new ChorusState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Flanger_vtable; }
+};
+
+EffectProps FlangerStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Chorus.Waveform = AL_FLANGER_DEFAULT_WAVEFORM;
+ props.Chorus.Phase = AL_FLANGER_DEFAULT_PHASE;
+ props.Chorus.Rate = AL_FLANGER_DEFAULT_RATE;
+ props.Chorus.Depth = AL_FLANGER_DEFAULT_DEPTH;
+ props.Chorus.Feedback = AL_FLANGER_DEFAULT_FEEDBACK;
+ props.Chorus.Delay = AL_FLANGER_DEFAULT_DELAY;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *ChorusStateFactory_getFactory()
+{
+ static ChorusStateFactory ChorusFactory{};
+ return &ChorusFactory;
+}
+
+EffectStateFactory *FlangerStateFactory_getFactory()
+{
+ static FlangerStateFactory FlangerFactory{};
+ return &FlangerFactory;
+}
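A rough standalone sketch of how the chorus turns its LFO into per-sample fixed-point delay taps. The rate, delay, depth, and fixed-point layout here are made-up stand-ins for the values ChorusState::update derives and for the library's FRACTIONBITS/FRACTIONONE:

    #include <cmath>
    #include <cstdio>

    constexpr int kFracBits = 12;
    constexpr int kFracOne = 1 << kFracBits;

    int main()
    {
        const float srate = 44100.0f;
        const float rate = 1.1f;        /* LFO rate in Hz (made-up) */
        const float delay_s = 0.016f;   /* base delay in seconds (made-up) */
        const float depth = 0.1f;       /* depth as a fraction of the delay (made-up) */

        /* As in ChorusState::update: base delay in fixed-point samples, with the
         * modulation depth expressed relative to it. */
        const int delay = int(delay_s*srate*kFracOne + 0.5f);
        const float mod_depth = depth*delay;

        const int lfo_range = int(srate/rate + 0.5f);
        const float tri_scale = 4.0f/lfo_range;
        const float sin_scale = 6.283185307f/lfo_range;

        /* First few modulated delays per waveform, as GetTriangleDelays and
         * GetSinusoidDelays would generate them. */
        for(int offset = 1; offset <= 4; ++offset)
        {
            const int tri = int((1.0f - std::fabs(2.0f - tri_scale*offset))*mod_depth) + delay;
            const int sin = int(std::sin(sin_scale*offset)*mod_depth) + delay;
            std::printf("offset %d: triangle=%.2f sinusoid=%.2f (samples)\n",
                offset, tri/float(kFracOne), sin/float(kFracOne));
        }
        return 0;
    }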
diff --git a/alc/effects/compressor.cpp b/alc/effects/compressor.cpp
new file mode 100644
index 00000000..4a487097
--- /dev/null
+++ b/alc/effects/compressor.cpp
@@ -0,0 +1,222 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2013 by Anis A. Hireche
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cstdlib>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alu.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "vecmat.h"
+
+
+namespace {
+
+#define AMP_ENVELOPE_MIN 0.5f
+#define AMP_ENVELOPE_MAX 2.0f
+
+#define ATTACK_TIME 0.1f /* 100ms to rise from min to max */
+#define RELEASE_TIME 0.2f /* 200ms to drop from max to min */
+
+
+struct CompressorState final : public EffectState {
+ /* Effect gains for each channel */
+ ALfloat mGain[MAX_AMBI_CHANNELS][MAX_OUTPUT_CHANNELS]{};
+
+ /* Effect parameters */
+ ALboolean mEnabled{AL_TRUE};
+ ALfloat mAttackMult{1.0f};
+ ALfloat mReleaseMult{1.0f};
+ ALfloat mEnvFollower{1.0f};
+
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(CompressorState)
+};
+
+ALboolean CompressorState::deviceUpdate(const ALCdevice *device)
+{
+ /* Number of samples to do a full attack and release (non-integer sample
+ * counts are okay).
+ */
+ const ALfloat attackCount = static_cast<ALfloat>(device->Frequency) * ATTACK_TIME;
+ const ALfloat releaseCount = static_cast<ALfloat>(device->Frequency) * RELEASE_TIME;
+
+ /* Calculate per-sample multipliers to attack and release at the desired
+ * rates.
+ */
+ mAttackMult = std::pow(AMP_ENVELOPE_MAX/AMP_ENVELOPE_MIN, 1.0f/attackCount);
+ mReleaseMult = std::pow(AMP_ENVELOPE_MIN/AMP_ENVELOPE_MAX, 1.0f/releaseCount);
+
+ return AL_TRUE;
+}
+
+void CompressorState::update(const ALCcontext*, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ mEnabled = props->Compressor.OnOff;
+
+ mOutTarget = target.Main->Buffer;
+ for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
+ {
+ auto coeffs = GetAmbiIdentityRow(i);
+ ComputePanGains(target.Main, coeffs.data(), slot->Params.Gain, mGain[i]);
+ }
+}
+
+void CompressorState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut)
+{
+ for(ALsizei base{0};base < samplesToDo;)
+ {
+ ALfloat gains[256];
+ const ALsizei td{mini(256, samplesToDo-base)};
+
+ /* Generate the per-sample gains from the signal envelope. */
+ ALfloat env{mEnvFollower};
+ if(mEnabled)
+ {
+ for(ALsizei i{0};i < td;++i)
+ {
+ /* Clamp the absolute amplitude to the defined envelope limits,
+ * then attack or release the envelope to reach it.
+ */
+ const ALfloat amplitude{clampf(std::fabs(samplesIn[0][base+i]), AMP_ENVELOPE_MIN,
+ AMP_ENVELOPE_MAX)};
+ if(amplitude > env)
+ env = minf(env*mAttackMult, amplitude);
+ else if(amplitude < env)
+ env = maxf(env*mReleaseMult, amplitude);
+
+ /* Apply the reciprocal of the envelope to normalize the volume
+ * (compress the dynamic range).
+ */
+ gains[i] = 1.0f / env;
+ }
+ }
+ else
+ {
+ /* Same as above, except the amplitude is forced to 1. This helps
+ * ensure smooth gain changes when the compressor is turned on and
+ * off.
+ */
+ for(ALsizei i{0};i < td;++i)
+ {
+ const ALfloat amplitude{1.0f};
+ if(amplitude > env)
+ env = minf(env*mAttackMult, amplitude);
+ else if(amplitude < env)
+ env = maxf(env*mReleaseMult, amplitude);
+
+ gains[i] = 1.0f / env;
+ }
+ }
+ mEnvFollower = env;
+
+ /* Now compress the signal amplitude to output. */
+ ASSUME(numInput > 0);
+ for(ALsizei j{0};j < numInput;j++)
+ {
+ const ALfloat *outgains{mGain[j]};
+ for(FloatBufferLine &output : samplesOut)
+ {
+ const ALfloat gain{*(outgains++)};
+ if(!(std::fabs(gain) > GAIN_SILENCE_THRESHOLD))
+ continue;
+
+ for(ALsizei i{0};i < td;i++)
+ output[base+i] += samplesIn[j][base+i] * gains[i] * gain;
+ }
+ }
+
+ base += td;
+ }
+}
+
+
+void Compressor_setParami(EffectProps *props, ALCcontext *context, ALenum param, ALint val)
+{
+ switch(param)
+ {
+ case AL_COMPRESSOR_ONOFF:
+ if(!(val >= AL_COMPRESSOR_MIN_ONOFF && val <= AL_COMPRESSOR_MAX_ONOFF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Compressor state out of range");
+ props->Compressor.OnOff = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid compressor integer property 0x%04x",
+ param);
+ }
+}
+void Compressor_setParamiv(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals)
+{ Compressor_setParami(props, context, param, vals[0]); }
+void Compressor_setParamf(EffectProps*, ALCcontext *context, ALenum param, ALfloat)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid compressor float property 0x%04x", param); }
+void Compressor_setParamfv(EffectProps*, ALCcontext *context, ALenum param, const ALfloat*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid compressor float-vector property 0x%04x", param); }
+
+void Compressor_getParami(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val)
+{
+ switch(param)
+ {
+ case AL_COMPRESSOR_ONOFF:
+ *val = props->Compressor.OnOff;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid compressor integer property 0x%04x",
+ param);
+ }
+}
+void Compressor_getParamiv(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals)
+{ Compressor_getParami(props, context, param, vals); }
+void Compressor_getParamf(const EffectProps*, ALCcontext *context, ALenum param, ALfloat*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid compressor float property 0x%04x", param); }
+void Compressor_getParamfv(const EffectProps*, ALCcontext *context, ALenum param, ALfloat*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid compressor float-vector property 0x%04x", param); }
+
+DEFINE_ALEFFECT_VTABLE(Compressor);
+
+
+struct CompressorStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new CompressorState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Compressor_vtable; }
+};
+
+EffectProps CompressorStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Compressor.OnOff = AL_COMPRESSOR_DEFAULT_ONOFF;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *CompressorStateFactory_getFactory()
+{
+ static CompressorStateFactory CompressorFactory{};
+ return &CompressorFactory;
+}
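A small self-contained check of why CompressorState::deviceUpdate computes the attack/release multipliers as Nth roots of the envelope range: repeated per-sample multiplication then spans the full range in exactly the configured time. The sample rate is an assumption; the other constants match the defines above.

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float srate = 44100.0f;   /* assumed device rate */
        const float env_min = 0.5f, env_max = 2.0f;
        const float attack_time = 0.1f, release_time = 0.2f;

        const float attack_mult = std::pow(env_max/env_min, 1.0f/(attack_time*srate));
        const float release_mult = std::pow(env_min/env_max, 1.0f/(release_time*srate));

        /* Multiplying env_min by attack_mult once per sample for 0.1s of samples
         * lands (up to rounding) on env_max. */
        float env = env_min;
        for(int i = 0; i < int(attack_time*srate); ++i)
            env *= attack_mult;
        std::printf("attack_mult=%.8f release_mult=%.8f env after attack=%.4f\n",
            attack_mult, release_mult, env);
        return 0;
    }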
diff --git a/alc/effects/dedicated.cpp b/alc/effects/dedicated.cpp
new file mode 100644
index 00000000..b31b3750
--- /dev/null
+++ b/alc/effects/dedicated.cpp
@@ -0,0 +1,159 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2011 by Chris Robinson.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cstdlib>
+#include <cmath>
+#include <algorithm>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "alu.h"
+
+
+namespace {
+
+struct DedicatedState final : public EffectState {
+ ALfloat mCurrentGains[MAX_OUTPUT_CHANNELS];
+ ALfloat mTargetGains[MAX_OUTPUT_CHANNELS];
+
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(DedicatedState)
+};
+
+ALboolean DedicatedState::deviceUpdate(const ALCdevice*)
+{
+ std::fill(std::begin(mCurrentGains), std::end(mCurrentGains), 0.0f);
+ return AL_TRUE;
+}
+
+void DedicatedState::update(const ALCcontext*, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ std::fill(std::begin(mTargetGains), std::end(mTargetGains), 0.0f);
+
+ const ALfloat Gain{slot->Params.Gain * props->Dedicated.Gain};
+
+ if(slot->Params.EffectType == AL_EFFECT_DEDICATED_LOW_FREQUENCY_EFFECT)
+ {
+ const int idx{!target.RealOut ? -1 : GetChannelIdxByName(*target.RealOut, LFE)};
+ if(idx != -1)
+ {
+ mOutTarget = target.RealOut->Buffer;
+ mTargetGains[idx] = Gain;
+ }
+ }
+ else if(slot->Params.EffectType == AL_EFFECT_DEDICATED_DIALOGUE)
+ {
+ /* Dialog goes to the front-center speaker if it exists, otherwise it
+ * plays from the front-center location. */
+ const int idx{!target.RealOut ? -1 : GetChannelIdxByName(*target.RealOut, FrontCenter)};
+ if(idx != -1)
+ {
+ mOutTarget = target.RealOut->Buffer;
+ mTargetGains[idx] = Gain;
+ }
+ else
+ {
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ CalcDirectionCoeffs({0.0f, 0.0f, -1.0f}, 0.0f, coeffs);
+
+ mOutTarget = target.Main->Buffer;
+ ComputePanGains(target.Main, coeffs, Gain, mTargetGains);
+ }
+ }
+}
+
+void DedicatedState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei /*numInput*/, const al::span<FloatBufferLine> samplesOut)
+{
+ MixSamples(samplesIn[0].data(), samplesOut, mCurrentGains, mTargetGains, samplesToDo, 0,
+ samplesToDo);
+}
+
+
+void Dedicated_setParami(EffectProps*, ALCcontext *context, ALenum param, ALint)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid dedicated integer property 0x%04x", param); }
+void Dedicated_setParamiv(EffectProps*, ALCcontext *context, ALenum param, const ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid dedicated integer-vector property 0x%04x", param); }
+void Dedicated_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_DEDICATED_GAIN:
+ if(!(val >= 0.0f && std::isfinite(val)))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Dedicated gain out of range");
+ props->Dedicated.Gain = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid dedicated float property 0x%04x", param);
+ }
+}
+void Dedicated_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ Dedicated_setParamf(props, context, param, vals[0]); }
+
+void Dedicated_getParami(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid dedicated integer property 0x%04x", param); }
+void Dedicated_getParamiv(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid dedicated integer-vector property 0x%04x", param); }
+void Dedicated_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_DEDICATED_GAIN:
+ *val = props->Dedicated.Gain;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid dedicated float property 0x%04x", param);
+ }
+}
+void Dedicated_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ Dedicated_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(Dedicated);
+
+
+struct DedicatedStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new DedicatedState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Dedicated_vtable; }
+};
+
+EffectProps DedicatedStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Dedicated.Gain = 1.0f;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *DedicatedStateFactory_getFactory()
+{
+ static DedicatedStateFactory DedicatedFactory{};
+ return &DedicatedFactory;
+}
diff --git a/alc/effects/distortion.cpp b/alc/effects/distortion.cpp
new file mode 100644
index 00000000..59557395
--- /dev/null
+++ b/alc/effects/distortion.cpp
@@ -0,0 +1,269 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2013 by Mike Gorchak
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cmath>
+#include <cstdlib>
+
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "alu.h"
+#include "filters/biquad.h"
+
+
+namespace {
+
+struct DistortionState final : public EffectState {
+ /* Effect gains for each channel */
+ ALfloat mGain[MAX_OUTPUT_CHANNELS]{};
+
+ /* Effect parameters */
+ BiquadFilter mLowpass;
+ BiquadFilter mBandpass;
+ ALfloat mAttenuation{};
+ ALfloat mEdgeCoeff{};
+
+ ALfloat mBuffer[2][BUFFERSIZE]{};
+
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(DistortionState)
+};
+
+ALboolean DistortionState::deviceUpdate(const ALCdevice*)
+{
+ mLowpass.clear();
+ mBandpass.clear();
+ return AL_TRUE;
+}
+
+void DistortionState::update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ const ALCdevice *device{context->Device};
+
+ /* Store waveshaper edge settings. */
+ const ALfloat edge{
+ minf(std::sin(al::MathDefs<float>::Pi()*0.5f * props->Distortion.Edge), 0.99f)};
+ mEdgeCoeff = 2.0f * edge / (1.0f-edge);
+
+ ALfloat cutoff{props->Distortion.LowpassCutoff};
+ /* Bandwidth value is constant in octaves. */
+ ALfloat bandwidth{(cutoff / 2.0f) / (cutoff * 0.67f)};
+ /* Multiply sampling frequency by the amount of oversampling done during
+ * processing.
+ */
+ auto frequency = static_cast<ALfloat>(device->Frequency);
+ mLowpass.setParams(BiquadType::LowPass, 1.0f, cutoff / (frequency*4.0f),
+ mLowpass.rcpQFromBandwidth(cutoff / (frequency*4.0f), bandwidth));
+
+ cutoff = props->Distortion.EQCenter;
+ /* Convert bandwidth in Hz to octaves. */
+ bandwidth = props->Distortion.EQBandwidth / (cutoff * 0.67f);
+ mBandpass.setParams(BiquadType::BandPass, 1.0f, cutoff / (frequency*4.0f),
+ mBandpass.rcpQFromBandwidth(cutoff / (frequency*4.0f), bandwidth));
+
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ CalcDirectionCoeffs({0.0f, 0.0f, -1.0f}, 0.0f, coeffs);
+
+ mOutTarget = target.Main->Buffer;
+ ComputePanGains(target.Main, coeffs, slot->Params.Gain*props->Distortion.Gain, mGain);
+}
+
+void DistortionState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei /*numInput*/, const al::span<FloatBufferLine> samplesOut)
+{
+ const ALfloat fc{mEdgeCoeff};
+ for(ALsizei base{0};base < samplesToDo;)
+ {
+ /* Perform 4x oversampling to avoid aliasing. Oversampling greatly
+ * improves distortion quality and makes it possible to implement the
+ * lowpass and bandpass filters at high frequencies, where classic IIR
+ * filters become unstable.
+ */
+ ALsizei todo{mini(BUFFERSIZE, (samplesToDo-base) * 4)};
+
+ /* Fill oversample buffer using zero stuffing. Multiply the sample by
+ * the amount of oversampling to maintain the signal's power.
+ */
+ for(ALsizei i{0};i < todo;i++)
+ mBuffer[0][i] = !(i&3) ? samplesIn[0][(i>>2)+base] * 4.0f : 0.0f;
+
+ /* First step: low-pass filter the original signal. This also performs
+ * the buffer interpolation and anti-alias low-pass for the
+ * oversampling (conveniently the first step of the distortion),
+ * combining three operations into one.
+ */
+ mLowpass.process(mBuffer[1], mBuffer[0], todo);
+
+ /* Second step: run the signal through a waveshaper function to
+ * emulate the behavior of an overdriven tube. Three waveshaping
+ * passes modify the waveform without a separate boost/clipping/
+ * attenuation stage.
+ */
+ for(ALsizei i{0};i < todo;i++)
+ {
+ ALfloat smp{mBuffer[1][i]};
+
+ smp = (1.0f + fc) * smp/(1.0f + fc*fabsf(smp));
+ smp = (1.0f + fc) * smp/(1.0f + fc*fabsf(smp)) * -1.0f;
+ smp = (1.0f + fc) * smp/(1.0f + fc*fabsf(smp));
+
+ mBuffer[0][i] = smp;
+ }
+
+ /* Third step: band-pass filter the distorted signal. */
+ mBandpass.process(mBuffer[1], mBuffer[0], todo);
+
+ todo >>= 2;
+ const ALfloat *outgains{mGain};
+ for(FloatBufferLine &output : samplesOut)
+ {
+ /* Fourth and final step: apply attenuation and decimate, storing
+ * only one sample out of four.
+ */
+ const ALfloat gain{*(outgains++)};
+ if(!(std::fabs(gain) > GAIN_SILENCE_THRESHOLD))
+ continue;
+
+ for(ALsizei i{0};i < todo;i++)
+ output[base+i] += gain * mBuffer[1][i*4];
+ }
+
+ base += todo;
+ }
+}
+
+
+void Distortion_setParami(EffectProps*, ALCcontext *context, ALenum param, ALint)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid distortion integer property 0x%04x", param); }
+void Distortion_setParamiv(EffectProps*, ALCcontext *context, ALenum param, const ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid distortion integer-vector property 0x%04x", param); }
+void Distortion_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_DISTORTION_EDGE:
+ if(!(val >= AL_DISTORTION_MIN_EDGE && val <= AL_DISTORTION_MAX_EDGE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Distortion edge out of range");
+ props->Distortion.Edge = val;
+ break;
+
+ case AL_DISTORTION_GAIN:
+ if(!(val >= AL_DISTORTION_MIN_GAIN && val <= AL_DISTORTION_MAX_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Distortion gain out of range");
+ props->Distortion.Gain = val;
+ break;
+
+ case AL_DISTORTION_LOWPASS_CUTOFF:
+ if(!(val >= AL_DISTORTION_MIN_LOWPASS_CUTOFF && val <= AL_DISTORTION_MAX_LOWPASS_CUTOFF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Distortion low-pass cutoff out of range");
+ props->Distortion.LowpassCutoff = val;
+ break;
+
+ case AL_DISTORTION_EQCENTER:
+ if(!(val >= AL_DISTORTION_MIN_EQCENTER && val <= AL_DISTORTION_MAX_EQCENTER))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Distortion EQ center out of range");
+ props->Distortion.EQCenter = val;
+ break;
+
+ case AL_DISTORTION_EQBANDWIDTH:
+ if(!(val >= AL_DISTORTION_MIN_EQBANDWIDTH && val <= AL_DISTORTION_MAX_EQBANDWIDTH))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Distortion EQ bandwidth out of range");
+ props->Distortion.EQBandwidth = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid distortion float property 0x%04x",
+ param);
+ }
+}
+void Distortion_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ Distortion_setParamf(props, context, param, vals[0]); }
+
+void Distortion_getParami(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid distortion integer property 0x%04x", param); }
+void Distortion_getParamiv(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid distortion integer-vector property 0x%04x", param); }
+void Distortion_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_DISTORTION_EDGE:
+ *val = props->Distortion.Edge;
+ break;
+
+ case AL_DISTORTION_GAIN:
+ *val = props->Distortion.Gain;
+ break;
+
+ case AL_DISTORTION_LOWPASS_CUTOFF:
+ *val = props->Distortion.LowpassCutoff;
+ break;
+
+ case AL_DISTORTION_EQCENTER:
+ *val = props->Distortion.EQCenter;
+ break;
+
+ case AL_DISTORTION_EQBANDWIDTH:
+ *val = props->Distortion.EQBandwidth;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid distortion float property 0x%04x",
+ param);
+ }
+}
+void Distortion_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ Distortion_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(Distortion);
+
+
+struct DistortionStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new DistortionState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Distortion_vtable; }
+};
+
+EffectProps DistortionStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Distortion.Edge = AL_DISTORTION_DEFAULT_EDGE;
+ props.Distortion.Gain = AL_DISTORTION_DEFAULT_GAIN;
+ props.Distortion.LowpassCutoff = AL_DISTORTION_DEFAULT_LOWPASS_CUTOFF;
+ props.Distortion.EQCenter = AL_DISTORTION_DEFAULT_EQCENTER;
+ props.Distortion.EQBandwidth = AL_DISTORTION_DEFAULT_EQBANDWIDTH;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *DistortionStateFactory_getFactory()
+{
+ static DistortionStateFactory DistortionFactory{};
+ return &DistortionFactory;
+}
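For reference, the waveshaper applied above is f(x) = (1+k)*x / (1+k*|x|), with k derived from the edge parameter exactly as in DistortionState::update. A quick sketch (the edge value is made up) shows the soft-clipping behavior:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float edge_param = 0.2f;  /* made-up AL_DISTORTION_EDGE setting */
        const float edge = std::min(std::sin(1.57079632f*edge_param), 0.99f);
        const float k = 2.0f*edge/(1.0f - edge);  /* same form as mEdgeCoeff */

        const float input[]{0.1f, 0.5f, 1.0f, 2.0f};
        for(const float x : input)
            std::printf("f(%.1f) = %.4f\n", x, (1.0f + k)*x/(1.0f + k*std::fabs(x)));
        return 0;
    }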
diff --git a/alc/effects/echo.cpp b/alc/effects/echo.cpp
new file mode 100644
index 00000000..c10f2eb2
--- /dev/null
+++ b/alc/effects/echo.cpp
@@ -0,0 +1,271 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2009 by Chris Robinson.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cmath>
+#include <cstdlib>
+
+#include <algorithm>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alFilter.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "alu.h"
+#include "filters/biquad.h"
+#include "vector.h"
+
+
+namespace {
+
+struct EchoState final : public EffectState {
+ al::vector<ALfloat,16> mSampleBuffer;
+
+ // The echo is two-tap. The delay is the number of samples before the
+ // current offset.
+ struct {
+ ALsizei delay{0};
+ } mTap[2];
+ ALsizei mOffset{0};
+
+ /* The panning gains for the two taps */
+ struct {
+ ALfloat Current[MAX_OUTPUT_CHANNELS]{};
+ ALfloat Target[MAX_OUTPUT_CHANNELS]{};
+ } mGains[2];
+
+ BiquadFilter mFilter;
+ ALfloat mFeedGain{0.0f};
+
+ alignas(16) ALfloat mTempBuffer[2][BUFFERSIZE];
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(EchoState)
+};
+
+ALboolean EchoState::deviceUpdate(const ALCdevice *Device)
+{
+ ALuint maxlen;
+
+ // Use the next power of 2 for the buffer length, so the tap offsets can be
+ // wrapped using a mask instead of a modulo
+ maxlen = float2int(AL_ECHO_MAX_DELAY*Device->Frequency + 0.5f) +
+ float2int(AL_ECHO_MAX_LRDELAY*Device->Frequency + 0.5f);
+ maxlen = NextPowerOf2(maxlen);
+ if(maxlen <= 0) return AL_FALSE;
+
+ if(maxlen != mSampleBuffer.size())
+ {
+ mSampleBuffer.resize(maxlen);
+ mSampleBuffer.shrink_to_fit();
+ }
+
+ std::fill(mSampleBuffer.begin(), mSampleBuffer.end(), 0.0f);
+ for(auto &e : mGains)
+ {
+ std::fill(std::begin(e.Current), std::end(e.Current), 0.0f);
+ std::fill(std::begin(e.Target), std::end(e.Target), 0.0f);
+ }
+
+ return AL_TRUE;
+}
+
+void EchoState::update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ const ALCdevice *device = context->Device;
+ const auto frequency = static_cast<ALfloat>(device->Frequency);
+
+ mTap[0].delay = maxi(float2int(props->Echo.Delay*frequency + 0.5f), 1);
+ mTap[1].delay = float2int(props->Echo.LRDelay*frequency + 0.5f) + mTap[0].delay;
+
+ const ALfloat gainhf{maxf(1.0f - props->Echo.Damping, 0.0625f)}; /* Limit -24dB */
+ mFilter.setParams(BiquadType::HighShelf, gainhf, LOWPASSFREQREF/frequency,
+ mFilter.rcpQFromSlope(gainhf, 1.0f));
+
+ mFeedGain = props->Echo.Feedback;
+
+ /* Convert echo spread (where 0 = center, +/-1 = sides) to angle. */
+ const ALfloat angle{std::asin(props->Echo.Spread)};
+
+ ALfloat coeffs[2][MAX_AMBI_CHANNELS];
+ CalcAngleCoeffs(-angle, 0.0f, 0.0f, coeffs[0]);
+ CalcAngleCoeffs( angle, 0.0f, 0.0f, coeffs[1]);
+
+ mOutTarget = target.Main->Buffer;
+ ComputePanGains(target.Main, coeffs[0], slot->Params.Gain, mGains[0].Target);
+ ComputePanGains(target.Main, coeffs[1], slot->Params.Gain, mGains[1].Target);
+}
+
+void EchoState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei /*numInput*/, const al::span<FloatBufferLine> samplesOut)
+{
+ const auto mask = static_cast<ALsizei>(mSampleBuffer.size()-1);
+ ALfloat *RESTRICT delaybuf{mSampleBuffer.data()};
+ ALsizei offset{mOffset};
+ ALsizei tap1{offset - mTap[0].delay};
+ ALsizei tap2{offset - mTap[1].delay};
+ ALfloat z1, z2;
+
+ ASSUME(samplesToDo > 0);
+ ASSUME(mask > 0);
+
+ std::tie(z1, z2) = mFilter.getComponents();
+ for(ALsizei i{0};i < samplesToDo;)
+ {
+ offset &= mask;
+ tap1 &= mask;
+ tap2 &= mask;
+
+ ALsizei td{mini(mask+1 - maxi(offset, maxi(tap1, tap2)), samplesToDo-i)};
+ do {
+ /* Feed the delay buffer's input first. */
+ delaybuf[offset] = samplesIn[0][i];
+
+ /* Get delayed output from the first and second taps. Use the
+ * second tap for feedback.
+ */
+ mTempBuffer[0][i] = delaybuf[tap1++];
+ mTempBuffer[1][i] = delaybuf[tap2++];
+ const float feedb{mTempBuffer[1][i++]};
+
+ /* Add feedback to the delay buffer with damping and attenuation. */
+ delaybuf[offset++] += mFilter.processOne(feedb, z1, z2) * mFeedGain;
+ } while(--td);
+ }
+ mFilter.setComponents(z1, z2);
+ mOffset = offset;
+
+ for(ALsizei c{0};c < 2;c++)
+ MixSamples(mTempBuffer[c], samplesOut, mGains[c].Current, mGains[c].Target, samplesToDo, 0,
+ samplesToDo);
+}
+
+
+void Echo_setParami(EffectProps*, ALCcontext *context, ALenum param, ALint)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid echo integer property 0x%04x", param); }
+void Echo_setParamiv(EffectProps*, ALCcontext *context, ALenum param, const ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid echo integer-vector property 0x%04x", param); }
+void Echo_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_ECHO_DELAY:
+ if(!(val >= AL_ECHO_MIN_DELAY && val <= AL_ECHO_MAX_DELAY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Echo delay out of range");
+ props->Echo.Delay = val;
+ break;
+
+ case AL_ECHO_LRDELAY:
+ if(!(val >= AL_ECHO_MIN_LRDELAY && val <= AL_ECHO_MAX_LRDELAY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Echo LR delay out of range");
+ props->Echo.LRDelay = val;
+ break;
+
+ case AL_ECHO_DAMPING:
+ if(!(val >= AL_ECHO_MIN_DAMPING && val <= AL_ECHO_MAX_DAMPING))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Echo damping out of range");
+ props->Echo.Damping = val;
+ break;
+
+ case AL_ECHO_FEEDBACK:
+ if(!(val >= AL_ECHO_MIN_FEEDBACK && val <= AL_ECHO_MAX_FEEDBACK))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Echo feedback out of range");
+ props->Echo.Feedback = val;
+ break;
+
+ case AL_ECHO_SPREAD:
+ if(!(val >= AL_ECHO_MIN_SPREAD && val <= AL_ECHO_MAX_SPREAD))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Echo spread out of range");
+ props->Echo.Spread = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid echo float property 0x%04x", param);
+ }
+}
+void Echo_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ Echo_setParamf(props, context, param, vals[0]); }
+
+void Echo_getParami(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid echo integer property 0x%04x", param); }
+void Echo_getParamiv(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid echo integer-vector property 0x%04x", param); }
+void Echo_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_ECHO_DELAY:
+ *val = props->Echo.Delay;
+ break;
+
+ case AL_ECHO_LRDELAY:
+ *val = props->Echo.LRDelay;
+ break;
+
+ case AL_ECHO_DAMPING:
+ *val = props->Echo.Damping;
+ break;
+
+ case AL_ECHO_FEEDBACK:
+ *val = props->Echo.Feedback;
+ break;
+
+ case AL_ECHO_SPREAD:
+ *val = props->Echo.Spread;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid echo float property 0x%04x", param);
+ }
+}
+void Echo_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ Echo_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(Echo);
+
+
+struct EchoStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new EchoState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Echo_vtable; }
+};
+
+EffectProps EchoStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Echo.Delay = AL_ECHO_DEFAULT_DELAY;
+ props.Echo.LRDelay = AL_ECHO_DEFAULT_LRDELAY;
+ props.Echo.Damping = AL_ECHO_DEFAULT_DAMPING;
+ props.Echo.Feedback = AL_ECHO_DEFAULT_FEEDBACK;
+ props.Echo.Spread = AL_ECHO_DEFAULT_SPREAD;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *EchoStateFactory_getFactory()
+{
+ static EchoStateFactory EchoFactory{};
+ return &EchoFactory;
+}
diff --git a/alc/effects/equalizer.cpp b/alc/effects/equalizer.cpp
new file mode 100644
index 00000000..69ab5021
--- /dev/null
+++ b/alc/effects/equalizer.cpp
@@ -0,0 +1,337 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2013 by Mike Gorchak
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cmath>
+#include <cstdlib>
+
+#include <algorithm>
+#include <functional>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "alu.h"
+#include "filters/biquad.h"
+#include "vecmat.h"
+
+
+namespace {
+
+/* The "Effects Extension Guide.pdf" document describes the low and high      *
+ * frequencies as cutoff frequencies. That is not quite correct; they are     *
+ * the corner frequencies of low and high shelf filters. If they were plain   *
+ * cutoff frequencies, the separate cutoff-frequency gains (which are         *
+ * present) would serve no purpose. The documentation for the "Creative       *
+ * Proteus X2" software describes 4-band equalizer functionality much more    *
+ * clearly, and that equalizer appears to be a predecessor of the OpenAL      *
+ * 4-band equalizer. With low and high shelf filters we can attenuate         *
+ * frequencies below and/or above the corner frequencies using gains below    *
+ * 1.0, and amplify all low and/or high frequencies using gains above 1.0.    *
+ * *
+ * Low-shelf Low Mid Band High Mid Band High-shelf *
+ * corner center center corner *
+ * frequency frequency frequency frequency *
+ * 50Hz..800Hz 200Hz..3000Hz 1000Hz..8000Hz 4000Hz..16000Hz *
+ * *
+ * | | | | *
+ * | | | | *
+ * B -----+ /--+--\ /--+--\ +----- *
+ * O |\ | | | | | | /| *
+ * O | \ - | - - | - / | *
+ * S + | \ | | | | | | / | *
+ * T | | | | | | | | | | *
+ * ---------+---------------+------------------+---------------+-------- *
+ * C | | | | | | | | | | *
+ * U - | / | | | | | | \ | *
+ * T | / - | - - | - \ | *
+ * O |/ | | | | | | \| *
+ * F -----+ \--+--/ \--+--/ +----- *
+ * F | | | | *
+ * | | | | *
+ * *
+ * Gains vary from 0.126 up to 7.943, i.e. from -18dB attenuation up to       *
+ * +18dB amplification. The bandwidth of the two mid bands varies from 0.01   *
+ * up to 1.0 octaves.                                                         *
+ *                                                                            *
+ * The implementation is based on the "Cookbook formulae for audio EQ         *
+ * biquad filter coefficients" by Robert Bristow-Johnson:                     *
+ * http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt */
+
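+/* A quick sanity check of the gain range quoted above (a sketch with a
+ * hypothetical helper, not used by the effect itself): converting a linear
+ * gain to decibels with 20*log10(g) gives 20*log10(0.126) ~= -18dB and
+ * 20*log10(7.943) ~= +18dB.
+ */
+inline ALfloat GainToDecibels(ALfloat gain)
+{ return 20.0f * std::log10(gain); }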
+
+struct EqualizerState final : public EffectState {
+ struct {
+ /* Effect parameters */
+ BiquadFilter filter[4];
+
+ /* Effect gains for each channel */
+ ALfloat CurrentGains[MAX_OUTPUT_CHANNELS]{};
+ ALfloat TargetGains[MAX_OUTPUT_CHANNELS]{};
+ } mChans[MAX_AMBI_CHANNELS];
+
+ ALfloat mSampleBuffer[BUFFERSIZE]{};
+
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(EqualizerState)
+};
+
+ALboolean EqualizerState::deviceUpdate(const ALCdevice*)
+{
+ for(auto &e : mChans)
+ {
+ std::for_each(std::begin(e.filter), std::end(e.filter),
+ std::mem_fn(&BiquadFilter::clear));
+ std::fill(std::begin(e.CurrentGains), std::end(e.CurrentGains), 0.0f);
+ }
+ return AL_TRUE;
+}
+
+void EqualizerState::update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ const ALCdevice *device = context->Device;
+ auto frequency = static_cast<ALfloat>(device->Frequency);
+ ALfloat gain, f0norm;
+
+ /* Calculate coefficients for each type of filter. Note that the shelf
+ * filters' gain is for the reference frequency, which is the centerpoint
+ * of the transition band.
+ */
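+ /* For example (illustrative numbers): a LowGain of 4.0 (+12dB) is passed in
+ * as sqrt(4.0) = 2.0 (+6dB), which is the response at the corner frequency;
+ * the shelf then settles at the full +12dB within its passband.
+ */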
+ gain = maxf(sqrtf(props->Equalizer.LowGain), 0.0625f); /* Limit -24dB */
+ f0norm = props->Equalizer.LowCutoff/frequency;
+ mChans[0].filter[0].setParams(BiquadType::LowShelf, gain, f0norm,
+ BiquadFilter::rcpQFromSlope(gain, 0.75f));
+
+ gain = maxf(props->Equalizer.Mid1Gain, 0.0625f);
+ f0norm = props->Equalizer.Mid1Center/frequency;
+ mChans[0].filter[1].setParams(BiquadType::Peaking, gain, f0norm,
+ BiquadFilter::rcpQFromBandwidth(f0norm, props->Equalizer.Mid1Width));
+
+ gain = maxf(props->Equalizer.Mid2Gain, 0.0625f);
+ f0norm = props->Equalizer.Mid2Center/frequency;
+ mChans[0].filter[2].setParams(BiquadType::Peaking, gain, f0norm,
+ BiquadFilter::rcpQFromBandwidth(f0norm, props->Equalizer.Mid2Width));
+
+ gain = maxf(sqrtf(props->Equalizer.HighGain), 0.0625f);
+ f0norm = props->Equalizer.HighCutoff/frequency;
+ mChans[0].filter[3].setParams(BiquadType::HighShelf, gain, f0norm,
+ BiquadFilter::rcpQFromSlope(gain, 0.75f));
+
+ /* Copy the filter coefficients for the other input channels. */
+ for(size_t i{1u};i < slot->Wet.Buffer.size();++i)
+ {
+ mChans[i].filter[0].copyParamsFrom(mChans[0].filter[0]);
+ mChans[i].filter[1].copyParamsFrom(mChans[0].filter[1]);
+ mChans[i].filter[2].copyParamsFrom(mChans[0].filter[2]);
+ mChans[i].filter[3].copyParamsFrom(mChans[0].filter[3]);
+ }
+
+ mOutTarget = target.Main->Buffer;
+ for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
+ {
+ auto coeffs = GetAmbiIdentityRow(i);
+ ComputePanGains(target.Main, coeffs.data(), slot->Params.Gain, mChans[i].TargetGains);
+ }
+}
+
+void EqualizerState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut)
+{
+ ASSUME(numInput > 0);
+ for(ALsizei c{0};c < numInput;c++)
+ {
+ mChans[c].filter[0].process(mSampleBuffer, samplesIn[c].data(), samplesToDo);
+ mChans[c].filter[1].process(mSampleBuffer, mSampleBuffer, samplesToDo);
+ mChans[c].filter[2].process(mSampleBuffer, mSampleBuffer, samplesToDo);
+ mChans[c].filter[3].process(mSampleBuffer, mSampleBuffer, samplesToDo);
+
+ MixSamples(mSampleBuffer, samplesOut, mChans[c].CurrentGains, mChans[c].TargetGains,
+ samplesToDo, 0, samplesToDo);
+ }
+}
+
+
+void Equalizer_setParami(EffectProps*, ALCcontext *context, ALenum param, ALint)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid equalizer integer property 0x%04x", param); }
+void Equalizer_setParamiv(EffectProps*, ALCcontext *context, ALenum param, const ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid equalizer integer-vector property 0x%04x", param); }
+void Equalizer_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_EQUALIZER_LOW_GAIN:
+ if(!(val >= AL_EQUALIZER_MIN_LOW_GAIN && val <= AL_EQUALIZER_MAX_LOW_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer low-band gain out of range");
+ props->Equalizer.LowGain = val;
+ break;
+
+ case AL_EQUALIZER_LOW_CUTOFF:
+ if(!(val >= AL_EQUALIZER_MIN_LOW_CUTOFF && val <= AL_EQUALIZER_MAX_LOW_CUTOFF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer low-band cutoff out of range");
+ props->Equalizer.LowCutoff = val;
+ break;
+
+ case AL_EQUALIZER_MID1_GAIN:
+ if(!(val >= AL_EQUALIZER_MIN_MID1_GAIN && val <= AL_EQUALIZER_MAX_MID1_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer mid1-band gain out of range");
+ props->Equalizer.Mid1Gain = val;
+ break;
+
+ case AL_EQUALIZER_MID1_CENTER:
+ if(!(val >= AL_EQUALIZER_MIN_MID1_CENTER && val <= AL_EQUALIZER_MAX_MID1_CENTER))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer mid1-band center out of range");
+ props->Equalizer.Mid1Center = val;
+ break;
+
+ case AL_EQUALIZER_MID1_WIDTH:
+ if(!(val >= AL_EQUALIZER_MIN_MID1_WIDTH && val <= AL_EQUALIZER_MAX_MID1_WIDTH))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer mid1-band width out of range");
+ props->Equalizer.Mid1Width = val;
+ break;
+
+ case AL_EQUALIZER_MID2_GAIN:
+ if(!(val >= AL_EQUALIZER_MIN_MID2_GAIN && val <= AL_EQUALIZER_MAX_MID2_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer mid2-band gain out of range");
+ props->Equalizer.Mid2Gain = val;
+ break;
+
+ case AL_EQUALIZER_MID2_CENTER:
+ if(!(val >= AL_EQUALIZER_MIN_MID2_CENTER && val <= AL_EQUALIZER_MAX_MID2_CENTER))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer mid2-band center out of range");
+ props->Equalizer.Mid2Center = val;
+ break;
+
+ case AL_EQUALIZER_MID2_WIDTH:
+ if(!(val >= AL_EQUALIZER_MIN_MID2_WIDTH && val <= AL_EQUALIZER_MAX_MID2_WIDTH))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer mid2-band width out of range");
+ props->Equalizer.Mid2Width = val;
+ break;
+
+ case AL_EQUALIZER_HIGH_GAIN:
+ if(!(val >= AL_EQUALIZER_MIN_HIGH_GAIN && val <= AL_EQUALIZER_MAX_HIGH_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer high-band gain out of range");
+ props->Equalizer.HighGain = val;
+ break;
+
+ case AL_EQUALIZER_HIGH_CUTOFF:
+ if(!(val >= AL_EQUALIZER_MIN_HIGH_CUTOFF && val <= AL_EQUALIZER_MAX_HIGH_CUTOFF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Equalizer high-band cutoff out of range");
+ props->Equalizer.HighCutoff = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid equalizer float property 0x%04x", param);
+ }
+}
+void Equalizer_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ Equalizer_setParamf(props, context, param, vals[0]); }
+
+void Equalizer_getParami(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid equalizer integer property 0x%04x", param); }
+void Equalizer_getParamiv(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid equalizer integer-vector property 0x%04x", param); }
+void Equalizer_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_EQUALIZER_LOW_GAIN:
+ *val = props->Equalizer.LowGain;
+ break;
+
+ case AL_EQUALIZER_LOW_CUTOFF:
+ *val = props->Equalizer.LowCutoff;
+ break;
+
+ case AL_EQUALIZER_MID1_GAIN:
+ *val = props->Equalizer.Mid1Gain;
+ break;
+
+ case AL_EQUALIZER_MID1_CENTER:
+ *val = props->Equalizer.Mid1Center;
+ break;
+
+ case AL_EQUALIZER_MID1_WIDTH:
+ *val = props->Equalizer.Mid1Width;
+ break;
+
+ case AL_EQUALIZER_MID2_GAIN:
+ *val = props->Equalizer.Mid2Gain;
+ break;
+
+ case AL_EQUALIZER_MID2_CENTER:
+ *val = props->Equalizer.Mid2Center;
+ break;
+
+ case AL_EQUALIZER_MID2_WIDTH:
+ *val = props->Equalizer.Mid2Width;
+ break;
+
+ case AL_EQUALIZER_HIGH_GAIN:
+ *val = props->Equalizer.HighGain;
+ break;
+
+ case AL_EQUALIZER_HIGH_CUTOFF:
+ *val = props->Equalizer.HighCutoff;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid equalizer float property 0x%04x", param);
+ }
+}
+void Equalizer_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ Equalizer_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(Equalizer);
+
+
+struct EqualizerStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new EqualizerState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Equalizer_vtable; }
+};
+
+EffectProps EqualizerStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Equalizer.LowCutoff = AL_EQUALIZER_DEFAULT_LOW_CUTOFF;
+ props.Equalizer.LowGain = AL_EQUALIZER_DEFAULT_LOW_GAIN;
+ props.Equalizer.Mid1Center = AL_EQUALIZER_DEFAULT_MID1_CENTER;
+ props.Equalizer.Mid1Gain = AL_EQUALIZER_DEFAULT_MID1_GAIN;
+ props.Equalizer.Mid1Width = AL_EQUALIZER_DEFAULT_MID1_WIDTH;
+ props.Equalizer.Mid2Center = AL_EQUALIZER_DEFAULT_MID2_CENTER;
+ props.Equalizer.Mid2Gain = AL_EQUALIZER_DEFAULT_MID2_GAIN;
+ props.Equalizer.Mid2Width = AL_EQUALIZER_DEFAULT_MID2_WIDTH;
+ props.Equalizer.HighCutoff = AL_EQUALIZER_DEFAULT_HIGH_CUTOFF;
+ props.Equalizer.HighGain = AL_EQUALIZER_DEFAULT_HIGH_GAIN;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *EqualizerStateFactory_getFactory()
+{
+ static EqualizerStateFactory EqualizerFactory{};
+ return &EqualizerFactory;
+}
diff --git a/alc/effects/fshifter.cpp b/alc/effects/fshifter.cpp
new file mode 100644
index 00000000..b47aa00e
--- /dev/null
+++ b/alc/effects/fshifter.cpp
@@ -0,0 +1,301 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2018 by Raul Herraiz.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cmath>
+#include <cstdlib>
+#include <array>
+#include <complex>
+#include <algorithm>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "alu.h"
+
+#include "alcomplex.h"
+
+namespace {
+
+using complex_d = std::complex<double>;
+
+#define HIL_SIZE 1024
+#define OVERSAMP (1<<2)
+
+#define HIL_STEP (HIL_SIZE / OVERSAMP)
+#define FIFO_LATENCY (HIL_STEP * (OVERSAMP-1))
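+/* With HIL_SIZE=1024 and OVERSAMP=4, successive windows advance by
+ * HIL_STEP=256 samples and overlap by FIFO_LATENCY=768 samples, which is also
+ * the processing delay of the effect.
+ */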
+
+/* Define a Hann window, used to filter the HIL input and output. */
+/* Making this constexpr seems to require C++14. */
+std::array<ALdouble,HIL_SIZE> InitHannWindow()
+{
+ std::array<ALdouble,HIL_SIZE> ret;
+ /* Create lookup table of the Hann window for the desired size, i.e. HIL_SIZE */
+ for(ALsizei i{0};i < HIL_SIZE>>1;i++)
+ {
+ ALdouble val = std::sin(al::MathDefs<double>::Pi() * i / ALdouble{HIL_SIZE-1});
+ ret[i] = ret[HIL_SIZE-1-i] = val * val;
+ }
+ return ret;
+}
+alignas(16) const std::array<ALdouble,HIL_SIZE> HannWindow = InitHannWindow();
+
+
+struct FshifterState final : public EffectState {
+ /* Effect parameters */
+ ALsizei mCount{};
+ ALsizei mPhaseStep{};
+ ALsizei mPhase{};
+ ALdouble mLdSign{};
+
+ /* Effect buffers */
+ ALfloat mInFIFO[HIL_SIZE]{};
+ complex_d mOutFIFO[HIL_SIZE]{};
+ complex_d mOutputAccum[HIL_SIZE]{};
+ complex_d mAnalytic[HIL_SIZE]{};
+ complex_d mOutdata[BUFFERSIZE]{};
+
+ alignas(16) ALfloat mBufferOut[BUFFERSIZE]{};
+
+ /* Effect gains for each output channel */
+ ALfloat mCurrentGains[MAX_OUTPUT_CHANNELS]{};
+ ALfloat mTargetGains[MAX_OUTPUT_CHANNELS]{};
+
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(FshifterState)
+};
+
+ALboolean FshifterState::deviceUpdate(const ALCdevice*)
+{
+ /* (Re-)initialize the parameters and clear the buffers. */
+ mCount = FIFO_LATENCY;
+ mPhaseStep = 0;
+ mPhase = 0;
+ mLdSign = 1.0;
+
+ std::fill(std::begin(mInFIFO), std::end(mInFIFO), 0.0f);
+ std::fill(std::begin(mOutFIFO), std::end(mOutFIFO), complex_d{});
+ std::fill(std::begin(mOutputAccum), std::end(mOutputAccum), complex_d{});
+ std::fill(std::begin(mAnalytic), std::end(mAnalytic), complex_d{});
+
+ std::fill(std::begin(mCurrentGains), std::end(mCurrentGains), 0.0f);
+ std::fill(std::begin(mTargetGains), std::end(mTargetGains), 0.0f);
+
+ return AL_TRUE;
+}
+
+void FshifterState::update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ const ALCdevice *device{context->Device};
+
+ ALfloat step{props->Fshifter.Frequency / static_cast<ALfloat>(device->Frequency)};
+ mPhaseStep = fastf2i(minf(step, 0.5f) * FRACTIONONE);
+
+ switch(props->Fshifter.LeftDirection)
+ {
+ case AL_FREQUENCY_SHIFTER_DIRECTION_DOWN:
+ mLdSign = -1.0;
+ break;
+
+ case AL_FREQUENCY_SHIFTER_DIRECTION_UP:
+ mLdSign = 1.0;
+ break;
+
+ case AL_FREQUENCY_SHIFTER_DIRECTION_OFF:
+ mPhase = 0;
+ mPhaseStep = 0;
+ break;
+ }
+
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ CalcDirectionCoeffs({0.0f, 0.0f, -1.0f}, 0.0f, coeffs);
+
+ mOutTarget = target.Main->Buffer;
+ ComputePanGains(target.Main, coeffs, slot->Params.Gain, mTargetGains);
+}
+
+void FshifterState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei /*numInput*/, const al::span<FloatBufferLine> samplesOut)
+{
+ static constexpr complex_d complex_zero{0.0, 0.0};
+ ALfloat *RESTRICT BufferOut = mBufferOut;
+ ALsizei j, k, base;
+
+ for(base = 0;base < samplesToDo;)
+ {
+ const ALsizei todo{mini(HIL_SIZE-mCount, samplesToDo-base)};
+
+ ASSUME(todo > 0);
+
+ /* Fill the FIFO buffer with input samples. */
+ k = mCount;
+ for(j = 0;j < todo;j++,k++)
+ {
+ mInFIFO[k] = samplesIn[0][base+j];
+ mOutdata[base+j] = mOutFIFO[k-FIFO_LATENCY];
+ }
+ mCount += todo;
+ base += todo;
+
+ /* Check whether FIFO buffer is filled */
+ if(mCount < HIL_SIZE) continue;
+ mCount = FIFO_LATENCY;
+
+ /* Window the real input signal and store it in the analytic buffer. */
+ for(k = 0;k < HIL_SIZE;k++)
+ {
+ mAnalytic[k].real(mInFIFO[k] * HannWindow[k]);
+ mAnalytic[k].imag(0.0);
+ }
+
+ /* Process the signal with a discrete Hilbert transform to get the analytic signal. */
+ complex_hilbert(mAnalytic);
+
+ /* Window the result and add it to the output accumulator. */
+ for(k = 0;k < HIL_SIZE;k++)
+ mOutputAccum[k] += 2.0/OVERSAMP*HannWindow[k]*mAnalytic[k];
+
+ /* Shift accumulator, input & output FIFO */
+ for(k = 0;k < HIL_STEP;k++) mOutFIFO[k] = mOutputAccum[k];
+ for(j = 0;k < HIL_SIZE;k++,j++) mOutputAccum[j] = mOutputAccum[k];
+ for(;j < HIL_SIZE;j++) mOutputAccum[j] = complex_zero;
+ for(k = 0;k < FIFO_LATENCY;k++)
+ mInFIFO[k] = mInFIFO[k+HIL_STEP];
+ }
+
+ /* Process frequency shifter using the analytic signal obtained. */
+ for(k = 0;k < samplesToDo;k++)
+ {
+ double phase = mPhase * ((1.0/FRACTIONONE) * al::MathDefs<double>::Tau());
+ BufferOut[k] = static_cast<float>(mOutdata[k].real()*std::cos(phase) +
+ mOutdata[k].imag()*std::sin(phase)*mLdSign);
+
+ mPhase += mPhaseStep;
+ mPhase &= FRACTIONMASK;
+ }
+
+ /* Now, mix the processed sound data to the output. */
+ MixSamples(BufferOut, samplesOut, mCurrentGains, mTargetGains, maxi(samplesToDo, 512), 0,
+ samplesToDo);
+}
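+/* For reference, the output mix above is a single-sideband (frequency) shift:
+ * with the analytic signal a[n] = x[n] + j*H{x}[n], the per-sample value
+ * Re{a[n]}*cos(phase) + Im{a[n]}*sin(phase)*sign is the real part of a[n]
+ * multiplied by a complex exponential whose angle advances by the phase step
+ * each sample, which translates the whole spectrum by the shift frequency
+ * (in the direction selected by mLdSign) rather than scaling it.
+ */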
+
+
+void Fshifter_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_FREQUENCY_SHIFTER_FREQUENCY:
+ if(!(val >= AL_FREQUENCY_SHIFTER_MIN_FREQUENCY && val <= AL_FREQUENCY_SHIFTER_MAX_FREQUENCY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,,"Frequency shifter frequency out of range");
+ props->Fshifter.Frequency = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid frequency shifter float property 0x%04x", param);
+ }
+}
+void Fshifter_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ Fshifter_setParamf(props, context, param, vals[0]); }
+
+void Fshifter_setParami(EffectProps *props, ALCcontext *context, ALenum param, ALint val)
+{
+ switch(param)
+ {
+ case AL_FREQUENCY_SHIFTER_LEFT_DIRECTION:
+ if(!(val >= AL_FREQUENCY_SHIFTER_MIN_LEFT_DIRECTION && val <= AL_FREQUENCY_SHIFTER_MAX_LEFT_DIRECTION))
+ SETERR_RETURN(context, AL_INVALID_VALUE,,"Frequency shifter left direction out of range");
+ props->Fshifter.LeftDirection = val;
+ break;
+
+ case AL_FREQUENCY_SHIFTER_RIGHT_DIRECTION:
+ if(!(val >= AL_FREQUENCY_SHIFTER_MIN_RIGHT_DIRECTION && val <= AL_FREQUENCY_SHIFTER_MAX_RIGHT_DIRECTION))
+ SETERR_RETURN(context, AL_INVALID_VALUE,,"Frequency shifter right direction out of range");
+ props->Fshifter.RightDirection = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid frequency shifter integer property 0x%04x", param);
+ }
+}
+void Fshifter_setParamiv(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals)
+{ Fshifter_setParami(props, context, param, vals[0]); }
+
+void Fshifter_getParami(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val)
+{
+ switch(param)
+ {
+ case AL_FREQUENCY_SHIFTER_LEFT_DIRECTION:
+ *val = props->Fshifter.LeftDirection;
+ break;
+ case AL_FREQUENCY_SHIFTER_RIGHT_DIRECTION:
+ *val = props->Fshifter.RightDirection;
+ break;
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid frequency shifter integer property 0x%04x", param);
+ }
+}
+void Fshifter_getParamiv(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals)
+{ Fshifter_getParami(props, context, param, vals); }
+
+void Fshifter_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_FREQUENCY_SHIFTER_FREQUENCY:
+ *val = props->Fshifter.Frequency;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid frequency shifter float property 0x%04x", param);
+ }
+}
+void Fshifter_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ Fshifter_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(Fshifter);
+
+
+struct FshifterStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new FshifterState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Fshifter_vtable; }
+};
+
+EffectProps FshifterStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Fshifter.Frequency = AL_FREQUENCY_SHIFTER_DEFAULT_FREQUENCY;
+ props.Fshifter.LeftDirection = AL_FREQUENCY_SHIFTER_DEFAULT_LEFT_DIRECTION;
+ props.Fshifter.RightDirection = AL_FREQUENCY_SHIFTER_DEFAULT_RIGHT_DIRECTION;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *FshifterStateFactory_getFactory()
+{
+ static FshifterStateFactory FshifterFactory{};
+ return &FshifterFactory;
+}
diff --git a/alc/effects/modulator.cpp b/alc/effects/modulator.cpp
new file mode 100644
index 00000000..086482d7
--- /dev/null
+++ b/alc/effects/modulator.cpp
@@ -0,0 +1,279 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2009 by Chris Robinson.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cmath>
+#include <cstdlib>
+
+#include <algorithm>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "alu.h"
+#include "filters/biquad.h"
+#include "vecmat.h"
+
+
+namespace {
+
+#define MAX_UPDATE_SAMPLES 128
+
+#define WAVEFORM_FRACBITS 24
+#define WAVEFORM_FRACONE (1<<WAVEFORM_FRACBITS)
+#define WAVEFORM_FRACMASK (WAVEFORM_FRACONE-1)
+
+inline ALfloat Sin(ALsizei index)
+{
+ return std::sin(static_cast<ALfloat>(index) *
+ (al::MathDefs<float>::Tau() / ALfloat{WAVEFORM_FRACONE}));
+}
+
+inline ALfloat Saw(ALsizei index)
+{
+ return static_cast<ALfloat>(index)*(2.0f/WAVEFORM_FRACONE) - 1.0f;
+}
+
+inline ALfloat Square(ALsizei index)
+{
+ return static_cast<ALfloat>(((index>>(WAVEFORM_FRACBITS-2))&2) - 1);
+}
+
+inline ALfloat One(ALsizei)
+{
+ return 1.0f;
+}
+
+template<ALfloat func(ALsizei)>
+void Modulate(ALfloat *RESTRICT dst, ALsizei index, const ALsizei step, ALsizei todo)
+{
+ ALsizei i;
+ for(i = 0;i < todo;i++)
+ {
+ index += step;
+ index &= WAVEFORM_FRACMASK;
+ dst[i] = func(index);
+ }
+}
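+/* The phase is kept as a 24-bit fixed-point fraction of a full cycle, so the
+ * per-sample step for a modulation frequency f at sample rate sr is
+ * f/sr * WAVEFORM_FRACONE; e.g. (illustrative numbers) 440hz at 44100hz gives
+ * a step of roughly 167392.
+ */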
+
+
+struct ModulatorState final : public EffectState {
+ void (*mGetSamples)(ALfloat*RESTRICT, ALsizei, const ALsizei, ALsizei){};
+
+ ALsizei mIndex{0};
+ ALsizei mStep{1};
+
+ struct {
+ BiquadFilter Filter;
+
+ ALfloat CurrentGains[MAX_OUTPUT_CHANNELS]{};
+ ALfloat TargetGains[MAX_OUTPUT_CHANNELS]{};
+ } mChans[MAX_AMBI_CHANNELS];
+
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(ModulatorState)
+};
+
+ALboolean ModulatorState::deviceUpdate(const ALCdevice*)
+{
+ for(auto &e : mChans)
+ {
+ e.Filter.clear();
+ std::fill(std::begin(e.CurrentGains), std::end(e.CurrentGains), 0.0f);
+ }
+ return AL_TRUE;
+}
+
+void ModulatorState::update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ const ALCdevice *device{context->Device};
+
+ const float step{props->Modulator.Frequency / static_cast<ALfloat>(device->Frequency)};
+ mStep = fastf2i(clampf(step*WAVEFORM_FRACONE, 0.0f, ALfloat{WAVEFORM_FRACONE-1}));
+
+ if(mStep == 0)
+ mGetSamples = Modulate<One>;
+ else if(props->Modulator.Waveform == AL_RING_MODULATOR_SINUSOID)
+ mGetSamples = Modulate<Sin>;
+ else if(props->Modulator.Waveform == AL_RING_MODULATOR_SAWTOOTH)
+ mGetSamples = Modulate<Saw>;
+ else /*if(props->Modulator.Waveform == AL_RING_MODULATOR_SQUARE)*/
+ mGetSamples = Modulate<Square>;
+
+ ALfloat f0norm{props->Modulator.HighPassCutoff / static_cast<ALfloat>(device->Frequency)};
+ f0norm = clampf(f0norm, 1.0f/512.0f, 0.49f);
+ /* Bandwidth value is constant in octaves. */
+ mChans[0].Filter.setParams(BiquadType::HighPass, 1.0f, f0norm,
+ BiquadFilter::rcpQFromBandwidth(f0norm, 0.75f));
+ for(size_t i{1u};i < slot->Wet.Buffer.size();++i)
+ mChans[i].Filter.copyParamsFrom(mChans[0].Filter);
+
+ mOutTarget = target.Main->Buffer;
+ for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
+ {
+ auto coeffs = GetAmbiIdentityRow(i);
+ ComputePanGains(target.Main, coeffs.data(), slot->Params.Gain, mChans[i].TargetGains);
+ }
+}
+
+void ModulatorState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut)
+{
+ for(ALsizei base{0};base < samplesToDo;)
+ {
+ alignas(16) ALfloat modsamples[MAX_UPDATE_SAMPLES];
+ ALsizei td = mini(MAX_UPDATE_SAMPLES, samplesToDo-base);
+ ALsizei c, i;
+
+ mGetSamples(modsamples, mIndex, mStep, td);
+ mIndex += (mStep*td) & WAVEFORM_FRACMASK;
+ mIndex &= WAVEFORM_FRACMASK;
+
+ ASSUME(numInput > 0);
+ for(c = 0;c < numInput;c++)
+ {
+ alignas(16) ALfloat temps[MAX_UPDATE_SAMPLES];
+
+ mChans[c].Filter.process(temps, &samplesIn[c][base], td);
+ for(i = 0;i < td;i++)
+ temps[i] *= modsamples[i];
+
+ MixSamples(temps, samplesOut, mChans[c].CurrentGains, mChans[c].TargetGains,
+ samplesToDo-base, base, td);
+ }
+
+ base += td;
+ }
+}
+
+
+void Modulator_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_RING_MODULATOR_FREQUENCY:
+ if(!(val >= AL_RING_MODULATOR_MIN_FREQUENCY && val <= AL_RING_MODULATOR_MAX_FREQUENCY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Modulator frequency out of range");
+ props->Modulator.Frequency = val;
+ break;
+
+ case AL_RING_MODULATOR_HIGHPASS_CUTOFF:
+ if(!(val >= AL_RING_MODULATOR_MIN_HIGHPASS_CUTOFF && val <= AL_RING_MODULATOR_MAX_HIGHPASS_CUTOFF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Modulator high-pass cutoff out of range");
+ props->Modulator.HighPassCutoff = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid modulator float property 0x%04x", param);
+ }
+}
+void Modulator_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ Modulator_setParamf(props, context, param, vals[0]); }
+void Modulator_setParami(EffectProps *props, ALCcontext *context, ALenum param, ALint val)
+{
+ switch(param)
+ {
+ case AL_RING_MODULATOR_FREQUENCY:
+ case AL_RING_MODULATOR_HIGHPASS_CUTOFF:
+ Modulator_setParamf(props, context, param, static_cast<ALfloat>(val));
+ break;
+
+ case AL_RING_MODULATOR_WAVEFORM:
+ if(!(val >= AL_RING_MODULATOR_MIN_WAVEFORM && val <= AL_RING_MODULATOR_MAX_WAVEFORM))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Invalid modulator waveform");
+ props->Modulator.Waveform = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid modulator integer property 0x%04x", param);
+ }
+}
+void Modulator_setParamiv(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals)
+{ Modulator_setParami(props, context, param, vals[0]); }
+
+void Modulator_getParami(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val)
+{
+ switch(param)
+ {
+ case AL_RING_MODULATOR_FREQUENCY:
+ *val = static_cast<ALint>(props->Modulator.Frequency);
+ break;
+ case AL_RING_MODULATOR_HIGHPASS_CUTOFF:
+ *val = static_cast<ALint>(props->Modulator.HighPassCutoff);
+ break;
+ case AL_RING_MODULATOR_WAVEFORM:
+ *val = props->Modulator.Waveform;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid modulator integer property 0x%04x", param);
+ }
+}
+void Modulator_getParamiv(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals)
+{ Modulator_getParami(props, context, param, vals); }
+void Modulator_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_RING_MODULATOR_FREQUENCY:
+ *val = props->Modulator.Frequency;
+ break;
+ case AL_RING_MODULATOR_HIGHPASS_CUTOFF:
+ *val = props->Modulator.HighPassCutoff;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid modulator float property 0x%04x", param);
+ }
+}
+void Modulator_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ Modulator_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(Modulator);
+
+
+struct ModulatorStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new ModulatorState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Modulator_vtable; }
+};
+
+EffectProps ModulatorStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Modulator.Frequency = AL_RING_MODULATOR_DEFAULT_FREQUENCY;
+ props.Modulator.HighPassCutoff = AL_RING_MODULATOR_DEFAULT_HIGHPASS_CUTOFF;
+ props.Modulator.Waveform = AL_RING_MODULATOR_DEFAULT_WAVEFORM;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *ModulatorStateFactory_getFactory()
+{
+ static ModulatorStateFactory ModulatorFactory{};
+ return &ModulatorFactory;
+}
diff --git a/alc/effects/null.cpp b/alc/effects/null.cpp
new file mode 100644
index 00000000..e55c8699
--- /dev/null
+++ b/alc/effects/null.cpp
@@ -0,0 +1,164 @@
+#include "config.h"
+
+#include <cstdlib>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+
+
+namespace {
+
+struct NullState final : public EffectState {
+ NullState();
+ ~NullState() override;
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(NullState)
+};
+
+/* This constructs the effect state. It's called when the object is first
+ * created.
+ */
+NullState::NullState() = default;
+
+/* This destructs the effect state. It's called only when the effect instance
+ * is no longer used.
+ */
+NullState::~NullState() = default;
+
+/* This updates the device-dependent effect state. This is called on state
+ * initialization and any time the device parameters (e.g. playback frequency,
+ * format) have been changed. Will always be followed by a call to the update
+ * method, if successful.
+ */
+ALboolean NullState::deviceUpdate(const ALCdevice* /*device*/)
+{
+ return AL_TRUE;
+}
+
+/* This updates the effect state with new properties. This is called any time
+ * the effect is (re)loaded into a slot.
+ */
+void NullState::update(const ALCcontext* /*context*/, const ALeffectslot* /*slot*/,
+ const EffectProps* /*props*/, const EffectTarget /*target*/)
+{
+}
+
+/* This processes the effect state for the given number of samples, reading
+ * from the input buffer and adding the result to the output buffer rather
+ * than replacing its contents.
+ */
+void NullState::process(const ALsizei /*samplesToDo*/,
+ const FloatBufferLine *RESTRICT /*samplesIn*/, const ALsizei /*numInput*/,
+ const al::span<FloatBufferLine> /*samplesOut*/)
+{
+}
+
+
+void NullEffect_setParami(EffectProps* /*props*/, ALCcontext *context, ALenum param, ALint /*val*/)
+{
+ switch(param)
+ {
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid null effect integer property 0x%04x", param);
+ }
+}
+void NullEffect_setParamiv(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals)
+{
+ switch(param)
+ {
+ default:
+ NullEffect_setParami(props, context, param, vals[0]);
+ }
+}
+void NullEffect_setParamf(EffectProps* /*props*/, ALCcontext *context, ALenum param, ALfloat /*val*/)
+{
+ switch(param)
+ {
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid null effect float property 0x%04x", param);
+ }
+}
+void NullEffect_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{
+ switch(param)
+ {
+ default:
+ NullEffect_setParamf(props, context, param, vals[0]);
+ }
+}
+
+void NullEffect_getParami(const EffectProps* /*props*/, ALCcontext *context, ALenum param, ALint* /*val*/)
+{
+ switch(param)
+ {
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid null effect integer property 0x%04x", param);
+ }
+}
+void NullEffect_getParamiv(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals)
+{
+ switch(param)
+ {
+ default:
+ NullEffect_getParami(props, context, param, vals);
+ }
+}
+void NullEffect_getParamf(const EffectProps* /*props*/, ALCcontext *context, ALenum param, ALfloat* /*val*/)
+{
+ switch(param)
+ {
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid null effect float property 0x%04x", param);
+ }
+}
+void NullEffect_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{
+ switch(param)
+ {
+ default:
+ NullEffect_getParamf(props, context, param, vals);
+ }
+}
+
+DEFINE_ALEFFECT_VTABLE(NullEffect);
+
+
+struct NullStateFactory final : public EffectStateFactory {
+ EffectState *create() override;
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override;
+};
+
+/* Creates EffectState objects of the appropriate type. */
+EffectState *NullStateFactory::create()
+{ return new NullState{}; }
+
+/* Returns an EffectProps initialized with this effect type's default property
+ * values.
+ */
+EffectProps NullStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ return props;
+}
+
+/* Returns a pointer to this effect type's global set/get vtable. */
+const EffectVtable *NullStateFactory::getEffectVtable() const noexcept
+{ return &NullEffect_vtable; }
+
+} // namespace
+
+EffectStateFactory *NullStateFactory_getFactory()
+{
+ static NullStateFactory NullFactory{};
+ return &NullFactory;
+}
diff --git a/alc/effects/pshifter.cpp b/alc/effects/pshifter.cpp
new file mode 100644
index 00000000..39d3cf1a
--- /dev/null
+++ b/alc/effects/pshifter.cpp
@@ -0,0 +1,405 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2018 by Raul Herraiz.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#ifdef HAVE_SSE_INTRINSICS
+#include <emmintrin.h>
+#endif
+
+#include <cmath>
+#include <cstdlib>
+#include <array>
+#include <complex>
+#include <algorithm>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "alu.h"
+
+#include "alcomplex.h"
+
+
+namespace {
+
+using complex_d = std::complex<double>;
+
+#define STFT_SIZE 1024
+#define STFT_HALF_SIZE (STFT_SIZE>>1)
+#define OVERSAMP (1<<2)
+
+#define STFT_STEP (STFT_SIZE / OVERSAMP)
+#define FIFO_LATENCY (STFT_STEP * (OVERSAMP-1))
+
+inline int double2int(double d)
+{
+#if defined(HAVE_SSE_INTRINSICS)
+ return _mm_cvttsd_si32(_mm_set_sd(d));
+
+#elif ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
+ !defined(__SSE2_MATH__)) || (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP < 2)
+
+ int sign, shift;
+ int64_t mant;
+ union {
+ double d;
+ int64_t i64;
+ } conv;
+
+ conv.d = d;
+ sign = (conv.i64>>63) | 1;
+ shift = ((conv.i64>>52)&0x7ff) - (1023+52);
+
+ /* Over/underflow */
+ if(UNLIKELY(shift >= 63 || shift < -52))
+ return 0;
+
+ mant = (conv.i64&0xfffffffffffff_i64) | 0x10000000000000_i64;
+ if(LIKELY(shift < 0))
+ return (int)(mant >> -shift) * sign;
+ return (int)(mant << shift) * sign;
+
+#else
+
+ return static_cast<int>(d);
+#endif
+}
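+/* Note that all of the paths above truncate toward zero like a plain cast,
+ * e.g. double2int(2.9) == 2 and double2int(-2.9) == -2; the intrinsic and
+ * bit-twiddled versions just avoid the costly rounding-mode changes needed
+ * for the default conversion on x87-era float paths.
+ */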
+
+/* Define a Hann window, used to filter the STFT input and output. */
+/* Making this constexpr seems to require C++14. */
+std::array<ALdouble,STFT_SIZE> InitHannWindow()
+{
+ std::array<ALdouble,STFT_SIZE> ret;
+ /* Create lookup table of the Hann window for the desired size, i.e. STFT_SIZE */
+ for(ALsizei i{0};i < STFT_SIZE>>1;i++)
+ {
+ ALdouble val = std::sin(al::MathDefs<double>::Pi() * i / ALdouble{STFT_SIZE-1});
+ ret[i] = ret[STFT_SIZE-1-i] = val * val;
+ }
+ return ret;
+}
+alignas(16) const std::array<ALdouble,STFT_SIZE> HannWindow = InitHannWindow();
+
+
+struct ALphasor {
+ ALdouble Amplitude;
+ ALdouble Phase;
+};
+
+struct ALfrequencyDomain {
+ ALdouble Amplitude;
+ ALdouble Frequency;
+};
+
+
+/* Converts complex to ALphasor */
+inline ALphasor rect2polar(const complex_d &number)
+{
+ ALphasor polar;
+ polar.Amplitude = std::abs(number);
+ polar.Phase = std::arg(number);
+ return polar;
+}
+
+/* Converts ALphasor to complex */
+inline complex_d polar2rect(const ALphasor &number)
+{ return std::polar<double>(number.Amplitude, number.Phase); }
+
+
+struct PshifterState final : public EffectState {
+ /* Effect parameters */
+ ALsizei mCount;
+ ALsizei mPitchShiftI;
+ ALfloat mPitchShift;
+ ALfloat mFreqPerBin;
+
+ /* Effects buffers */
+ ALfloat mInFIFO[STFT_SIZE];
+ ALfloat mOutFIFO[STFT_STEP];
+ ALdouble mLastPhase[STFT_HALF_SIZE+1];
+ ALdouble mSumPhase[STFT_HALF_SIZE+1];
+ ALdouble mOutputAccum[STFT_SIZE];
+
+ complex_d mFFTbuffer[STFT_SIZE];
+
+ ALfrequencyDomain mAnalysis_buffer[STFT_HALF_SIZE+1];
+ ALfrequencyDomain mSynthesis_buffer[STFT_HALF_SIZE+1];
+
+ alignas(16) ALfloat mBufferOut[BUFFERSIZE];
+
+ /* Effect gains for each output channel */
+ ALfloat mCurrentGains[MAX_OUTPUT_CHANNELS];
+ ALfloat mTargetGains[MAX_OUTPUT_CHANNELS];
+
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(PshifterState)
+};
+
+ALboolean PshifterState::deviceUpdate(const ALCdevice *device)
+{
+ /* (Re-)initialize the parameters and clear the buffers. */
+ mCount = FIFO_LATENCY;
+ mPitchShiftI = FRACTIONONE;
+ mPitchShift = 1.0f;
+ mFreqPerBin = device->Frequency / static_cast<ALfloat>(STFT_SIZE);
+
+ std::fill(std::begin(mInFIFO), std::end(mInFIFO), 0.0f);
+ std::fill(std::begin(mOutFIFO), std::end(mOutFIFO), 0.0f);
+ std::fill(std::begin(mLastPhase), std::end(mLastPhase), 0.0);
+ std::fill(std::begin(mSumPhase), std::end(mSumPhase), 0.0);
+ std::fill(std::begin(mOutputAccum), std::end(mOutputAccum), 0.0);
+ std::fill(std::begin(mFFTbuffer), std::end(mFFTbuffer), complex_d{});
+ std::fill(std::begin(mAnalysis_buffer), std::end(mAnalysis_buffer), ALfrequencyDomain{});
+ std::fill(std::begin(mSynthesis_buffer), std::end(mSynthesis_buffer), ALfrequencyDomain{});
+
+ std::fill(std::begin(mCurrentGains), std::end(mCurrentGains), 0.0f);
+ std::fill(std::begin(mTargetGains), std::end(mTargetGains), 0.0f);
+
+ return AL_TRUE;
+}
+
+void PshifterState::update(const ALCcontext*, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ const float pitch{std::pow(2.0f,
+ static_cast<ALfloat>(props->Pshifter.CoarseTune*100 + props->Pshifter.FineTune) / 1200.0f
+ )};
+ mPitchShiftI = fastf2i(pitch*FRACTIONONE);
+ mPitchShift = mPitchShiftI * (1.0f/FRACTIONONE);
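+ /* For example (illustrative values): CoarseTune=+12 with FineTune=0 gives
+ * pitch = 2^(1200/1200) = 2.0 (one octave up), while CoarseTune=-12 gives
+ * 0.5 (one octave down).
+ */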
+
+ ALfloat coeffs[MAX_AMBI_CHANNELS];
+ CalcDirectionCoeffs({0.0f, 0.0f, -1.0f}, 0.0f, coeffs);
+
+ mOutTarget = target.Main->Buffer;
+ ComputePanGains(target.Main, coeffs, slot->Params.Gain, mTargetGains);
+}
+
+void PshifterState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei /*numInput*/, const al::span<FloatBufferLine> samplesOut)
+{
+ /* Pitch shifter engine based on the work of Stephan Bernsee.
+ * http://blogs.zynaptiq.com/bernsee/pitch-shifting-using-the-ft/
+ */
+
+ static constexpr ALdouble expected{al::MathDefs<double>::Tau() / OVERSAMP};
+ const ALdouble freq_per_bin{mFreqPerBin};
+ ALfloat *RESTRICT bufferOut{mBufferOut};
+ ALsizei count{mCount};
+
+ for(ALsizei i{0};i < samplesToDo;)
+ {
+ do {
+ /* Fill the FIFO buffer with input samples. */
+ mInFIFO[count] = samplesIn[0][i];
+ bufferOut[i] = mOutFIFO[count - FIFO_LATENCY];
+
+ count++;
+ } while(++i < samplesToDo && count < STFT_SIZE);
+
+ /* Check whether FIFO buffer is filled */
+ if(count < STFT_SIZE) break;
+ count = FIFO_LATENCY;
+
+ /* Window the real input signal and store it in FFTbuffer. */
+ for(ALsizei k{0};k < STFT_SIZE;k++)
+ {
+ mFFTbuffer[k].real(mInFIFO[k] * HannWindow[k]);
+ mFFTbuffer[k].imag(0.0);
+ }
+
+ /* ANALYSIS */
+ /* Apply FFT to FFTbuffer data */
+ complex_fft(mFFTbuffer, -1.0);
+
+ /* Analyze the obtained data. Since the real FFT is symmetric, only
+ * STFT_HALF_SIZE+1 samples are needed.
+ */
+ for(ALsizei k{0};k < STFT_HALF_SIZE+1;k++)
+ {
+ /* Compute amplitude and phase */
+ ALphasor component{rect2polar(mFFTbuffer[k])};
+
+ /* Compute phase difference and subtract expected phase difference */
+ double tmp{(component.Phase - mLastPhase[k]) - k*expected};
+
+ /* Map delta phase into +/- Pi interval */
+ int qpd{double2int(tmp / al::MathDefs<double>::Pi())};
+ tmp -= al::MathDefs<double>::Pi() * (qpd + (qpd%2));
+
+ /* Get deviation from bin frequency from the +/- Pi interval */
+ tmp /= expected;
+
+ /* Compute the k-th partial's true frequency, and double the amplitude
+ * to maintain the gain (since only half of the bins are used). Store
+ * the amplitude and true frequency in the analysis buffer.
+ */
+ mAnalysis_buffer[k].Amplitude = 2.0 * component.Amplitude;
+ mAnalysis_buffer[k].Frequency = (k + tmp) * freq_per_bin;
+
+ /* Store the actual phase[k] for the next frame's calculations. */
+ mLastPhase[k] = component.Phase;
+ }
+
+ /* PROCESSING */
+ /* pitch shifting */
+ for(ALsizei k{0};k < STFT_HALF_SIZE+1;k++)
+ {
+ mSynthesis_buffer[k].Amplitude = 0.0;
+ mSynthesis_buffer[k].Frequency = 0.0;
+ }
+
+ for(ALsizei k{0};k < STFT_HALF_SIZE+1;k++)
+ {
+ ALsizei j{(k*mPitchShiftI) >> FRACTIONBITS};
+ if(j >= STFT_HALF_SIZE+1) break;
+
+ mSynthesis_buffer[j].Amplitude += mAnalysis_buffer[k].Amplitude;
+ mSynthesis_buffer[j].Frequency = mAnalysis_buffer[k].Frequency * mPitchShift;
+ }
+
+ /* SYNTHESIS */
+ /* Synthesize the processed data */
+ for(ALsizei k{0};k < STFT_HALF_SIZE+1;k++)
+ {
+ ALphasor component;
+ ALdouble tmp;
+
+ /* Compute bin deviation from scaled freq */
+ tmp = mSynthesis_buffer[k].Frequency/freq_per_bin - k;
+
+ /* Calculate actual delta phase and accumulate it to get bin phase */
+ mSumPhase[k] += (k + tmp) * expected;
+
+ component.Amplitude = mSynthesis_buffer[k].Amplitude;
+ component.Phase = mSumPhase[k];
+
+ /* Convert the phasor component back to a cartesian complex number and store it in FFTbuffer. */
+ mFFTbuffer[k] = polar2rect(component);
+ }
+ /* Zero the negative frequencies to reconstruct a real signal. */
+ for(ALsizei k{STFT_HALF_SIZE+1};k < STFT_SIZE;k++)
+ mFFTbuffer[k] = complex_d{};
+
+ /* Apply iFFT to buffer data */
+ complex_fft(mFFTbuffer, 1.0);
+
+ /* Window the result and add it to the output accumulator. */
+ for(ALsizei k{0};k < STFT_SIZE;k++)
+ mOutputAccum[k] += HannWindow[k] * mFFTbuffer[k].real() /
+ (0.5 * STFT_HALF_SIZE * OVERSAMP);
+
+ /* Shift accumulator, input & output FIFO */
+ ALsizei j, k;
+ for(k = 0;k < STFT_STEP;k++) mOutFIFO[k] = static_cast<ALfloat>(mOutputAccum[k]);
+ for(j = 0;k < STFT_SIZE;k++,j++) mOutputAccum[j] = mOutputAccum[k];
+ for(;j < STFT_SIZE;j++) mOutputAccum[j] = 0.0;
+ for(k = 0;k < FIFO_LATENCY;k++)
+ mInFIFO[k] = mInFIFO[k+STFT_STEP];
+ }
+ mCount = count;
+
+ /* Now, mix the processed sound data to the output. */
+ MixSamples(bufferOut, samplesOut, mCurrentGains, mTargetGains, maxi(samplesToDo, 512), 0,
+ samplesToDo);
+}
+
+
+void Pshifter_setParamf(EffectProps*, ALCcontext *context, ALenum param, ALfloat)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid pitch shifter float property 0x%04x", param); }
+void Pshifter_setParamfv(EffectProps*, ALCcontext *context, ALenum param, const ALfloat*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid pitch shifter float-vector property 0x%04x", param); }
+
+void Pshifter_setParami(EffectProps *props, ALCcontext *context, ALenum param, ALint val)
+{
+ switch(param)
+ {
+ case AL_PITCH_SHIFTER_COARSE_TUNE:
+ if(!(val >= AL_PITCH_SHIFTER_MIN_COARSE_TUNE && val <= AL_PITCH_SHIFTER_MAX_COARSE_TUNE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,,"Pitch shifter coarse tune out of range");
+ props->Pshifter.CoarseTune = val;
+ break;
+
+ case AL_PITCH_SHIFTER_FINE_TUNE:
+ if(!(val >= AL_PITCH_SHIFTER_MIN_FINE_TUNE && val <= AL_PITCH_SHIFTER_MAX_FINE_TUNE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,,"Pitch shifter fine tune out of range");
+ props->Pshifter.FineTune = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid pitch shifter integer property 0x%04x", param);
+ }
+}
+void Pshifter_setParamiv(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals)
+{ Pshifter_setParami(props, context, param, vals[0]); }
+
+void Pshifter_getParami(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val)
+{
+ switch(param)
+ {
+ case AL_PITCH_SHIFTER_COARSE_TUNE:
+ *val = props->Pshifter.CoarseTune;
+ break;
+ case AL_PITCH_SHIFTER_FINE_TUNE:
+ *val = props->Pshifter.FineTune;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid pitch shifter integer property 0x%04x", param);
+ }
+}
+void Pshifter_getParamiv(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals)
+{ Pshifter_getParami(props, context, param, vals); }
+
+void Pshifter_getParamf(const EffectProps*, ALCcontext *context, ALenum param, ALfloat*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid pitch shifter float property 0x%04x", param); }
+void Pshifter_getParamfv(const EffectProps*, ALCcontext *context, ALenum param, ALfloat*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid pitch shifter float-vector property 0x%04x", param); }
+
+DEFINE_ALEFFECT_VTABLE(Pshifter);
+
+
+struct PshifterStateFactory final : public EffectStateFactory {
+ EffectState *create() override;
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Pshifter_vtable; }
+};
+
+EffectState *PshifterStateFactory::create()
+{ return new PshifterState{}; }
+
+EffectProps PshifterStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Pshifter.CoarseTune = AL_PITCH_SHIFTER_DEFAULT_COARSE_TUNE;
+ props.Pshifter.FineTune = AL_PITCH_SHIFTER_DEFAULT_FINE_TUNE;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *PshifterStateFactory_getFactory()
+{
+ static PshifterStateFactory PshifterFactory{};
+ return &PshifterFactory;
+}
diff --git a/alc/effects/reverb.cpp b/alc/effects/reverb.cpp
new file mode 100644
index 00000000..ac996b3f
--- /dev/null
+++ b/alc/effects/reverb.cpp
@@ -0,0 +1,2102 @@
+/**
+ * Ambisonic reverb engine for the OpenAL cross platform audio library
+ * Copyright (C) 2008-2017 by Chris Robinson and Christopher Fitzgerald.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cstdio>
+#include <cstdlib>
+#include <cmath>
+
+#include <array>
+#include <numeric>
+#include <algorithm>
+#include <functional>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alu.h"
+#include "alAuxEffectSlot.h"
+#include "alListener.h"
+#include "alError.h"
+#include "bformatdec.h"
+#include "filters/biquad.h"
+#include "vector.h"
+#include "vecmat.h"
+
+/* This is a user config option for modifying the overall output of the reverb
+ * effect.
+ */
+ALfloat ReverbBoost = 1.0f;
+
+namespace {
+
+using namespace std::placeholders;
+
+/* The number of samples used for cross-faded delay lines. This can be used
+ * to balance the compensation for abrupt line changes and attenuation due to
+ * minimally lengthed recursive lines. Try to keep this below the device
+ * update size.
+ */
+constexpr int FADE_SAMPLES{128};
+
+/* The number of spatialized lines or channels to process. Four channels allows
+ * for a 3D A-Format response. NOTE: This can't be changed without taking care
+ * of the conversion matrices, and a few places where the length arrays are
+ * assumed to have 4 elements.
+ */
+constexpr int NUM_LINES{4};
+
+
+/* The B-Format to A-Format conversion matrix. The arrangement of rows is
+ * deliberately chosen to align the resulting lines to their spatial opposites
+ * (0:above front left <-> 3:above back right, 1:below front right <-> 2:below
+ * back left). It's not quite opposite, since the A-Format results in a
+ * tetrahedron, but it's close enough. Should the model be extended to 8-lines
+ * in the future, true opposites can be used.
+ */
+alignas(16) constexpr ALfloat B2A[NUM_LINES][MAX_AMBI_CHANNELS]{
+ { 0.288675134595f, 0.288675134595f, 0.288675134595f, 0.288675134595f },
+ { 0.288675134595f, -0.288675134595f, -0.288675134595f, 0.288675134595f },
+ { 0.288675134595f, 0.288675134595f, -0.288675134595f, -0.288675134595f },
+ { 0.288675134595f, -0.288675134595f, 0.288675134595f, -0.288675134595f }
+};
+
+/* Converts A-Format to B-Format. */
+alignas(16) constexpr ALfloat A2B[NUM_LINES][NUM_LINES]{
+ { 0.866025403785f, 0.866025403785f, 0.866025403785f, 0.866025403785f },
+ { 0.866025403785f, -0.866025403785f, 0.866025403785f, -0.866025403785f },
+ { 0.866025403785f, -0.866025403785f, -0.866025403785f, 0.866025403785f },
+ { 0.866025403785f, 0.866025403785f, -0.866025403785f, -0.866025403785f }
+};
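+/* Note: 0.288675134595 is 1/(2*sqrt(3)) and 0.866025403785 is sqrt(3)/2, so
+ * the two matrices invert each other: multiplying A2B by the first-order
+ * (first four) columns of B2A yields the 4x4 identity.
+ */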
+
+
+constexpr ALfloat FadeStep{1.0f / FADE_SAMPLES};
+
+/* The all-pass and delay lines have a variable length dependent on the
+ * effect's density parameter, which helps alter the perceived environment
+ * size. The size-to-density conversion is a cubed scale:
+ *
+ * density = min(1.0, pow(size, 3.0) / DENSITY_SCALE);
+ *
+ * The line lengths scale linearly with room size, so the inverse density
+ * conversion is needed, taking the cube root of the re-scaled density to
+ * calculate the line length multiplier:
+ *
+ * length_mult = max(5.0, cbrt(density*DENSITY_SCALE));
+ *
+ * The density scale below will result in a max line multiplier of 50, for an
+ * effective size range of 5m to 50m.
+ */
+constexpr ALfloat DENSITY_SCALE{125000.0f};
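+
+/* A small sketch of the two conversions described above (illustrative helper
+ * names, not part of the original source; the effect performs these
+ * calculations inline where needed). For example, a 50m room gives
+ * min(1, 50^3/125000) = 1, and a density of 1 maps back to a length
+ * multiplier of cbrt(1*125000) = 50.
+ */
+inline ALfloat SizeToDensity(const ALfloat size)
+{ return minf(1.0f, size*size*size / DENSITY_SCALE); }
+inline ALfloat DensityToLengthMult(const ALfloat density)
+{ return maxf(5.0f, std::cbrt(density*DENSITY_SCALE)); }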
+
+/* All delay line lengths are specified in seconds.
+ *
+ * To approximate early reflections, we break them up into primary (those
+ * arriving from the same direction as the source) and secondary (those
+ * arriving from the opposite direction).
+ *
+ * The early taps decorrelate the 4-channel signal to approximate an average
+ * room response for the primary reflections after the initial early delay.
+ *
+ * Given an average room dimension (d_a) and the speed of sound (c) we can
+ * calculate the average reflection delay (r_a) regardless of listener and
+ * source positions as:
+ *
+ * r_a = d_a / c
+ * c = 343.3
+ *
+ * This can be extended to finding the average difference (r_d) between the
+ * maximum (r_1) and minimum (r_0) reflection delays:
+ *
+ * r_0 = 2 / 3 r_a
+ * = r_a - r_d / 2
+ * = r_d
+ * r_1 = 4 / 3 r_a
+ * = r_a + r_d / 2
+ * = 2 r_d
+ * r_d = 2 / 3 r_a
+ * = r_1 - r_0
+ *
+ * As can be determined by integrating the 1D model with a source (s) and
+ * listener (l) positioned across the dimension of length (d_a):
+ *
+ * r_d = int_(l=0)^d_a (int_(s=0)^d_a |2 d_a - 2 (l + s)| ds) dl / c
+ *
+ * The initial taps (T_(i=0)^N) are then specified by taking a power series
+ * that ranges between r_0 and half of r_1 less r_0:
+ *
+ * R_i = 2^(i / (2 N - 1)) r_d
+ * = r_0 + (2^(i / (2 N - 1)) - 1) r_d
+ * = r_0 + T_i
+ * T_i = R_i - r_0
+ * = (2^(i / (2 N - 1)) - 1) r_d
+ *
+ * Assuming an average of 1m, we get the following taps:
+ */
+constexpr std::array<ALfloat,NUM_LINES> EARLY_TAP_LENGTHS{{
+ 0.0000000e+0f, 2.0213520e-4f, 4.2531060e-4f, 6.7171600e-4f
+}};
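+
+/* As a check of the power series above with d_a = 1 m and c = 343.3 m/s:
+ * r_a ~= 2.9129e-3 s, r_d = (2/3) r_a ~= 1.9419e-3 s, and
+ * T_1 = (2^(1/7) - 1) r_d ~= 2.0214e-4 s, matching the second entry above to
+ * rounding.
+ */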
+
+/* The early all-pass filter lengths are based on the early tap lengths:
+ *
+ * A_i = R_i / a
+ *
+ * Where a is the approximate maximum all-pass cycle limit (20).
+ */
+constexpr std::array<ALfloat,NUM_LINES> EARLY_ALLPASS_LENGTHS{{
+ 9.7096800e-5f, 1.0720356e-4f, 1.1836234e-4f, 1.3068260e-4f
+}};
+
+/* The early delay lines are used to transform the primary reflections into
+ * the secondary reflections. The A-format is arranged in such a way that
+ * the channels/lines are spatially opposite:
+ *
+ * C_i is opposite C_(N-i-1)
+ *
+ * The delays of the two opposing reflections (R_i and O_i) from a source
+ * anywhere along a particular dimension always sum to twice its full delay:
+ *
+ * 2 r_a = R_i + O_i
+ *
+ * With that in mind we can determine the delay between the two reflections
+ * and thus specify our early line lengths (L_(i=0)^N) using:
+ *
+ * O_i = 2 r_a - R_(N-i-1)
+ * L_i = O_i - R_(N-i-1)
+ * = 2 (r_a - R_(N-i-1))
+ * = 2 (r_a - T_(N-i-1) - r_0)
+ * = 2 r_a (1 - (2 / 3) 2^((N - i - 1) / (2 N - 1)))
+ *
+ * Using an average dimension of 1m, we get:
+ */
+constexpr std::array<ALfloat,NUM_LINES> EARLY_LINE_LENGTHS{{
+ 5.9850400e-4f, 1.0913150e-3f, 1.5376658e-3f, 1.9419362e-3f
+}};
+
+/* The late all-pass filter lengths are based on the late line lengths:
+ *
+ * A_i = (5 / 3) L_i / r_1
+ */
+constexpr std::array<ALfloat,NUM_LINES> LATE_ALLPASS_LENGTHS{{
+ 1.6182800e-4f, 2.0389060e-4f, 2.8159360e-4f, 3.2365600e-4f
+}};
+constexpr auto LATE_ALLPASS_LENGTHS_size = LATE_ALLPASS_LENGTHS.size();
+
+/* The late lines are used to approximate the decaying cycle of recursive
+ * late reflections.
+ *
+ * Splitting the lines in half, we start with the shortest reflection paths
+ * (L_(i=0)^(N/2)):
+ *
+ * L_i = 2^(i / (N - 1)) r_d
+ *
+ * Then for the opposite (longest) reflection paths (L_(i=N/2)^N):
+ *
+ * L_i = 2 r_a - L_(i-N/2)
+ * = 2 r_a - 2^((i - N / 2) / (N - 1)) r_d
+ *
+ * For our 1m average room, we get:
+ */
+constexpr std::array<ALfloat,NUM_LINES> LATE_LINE_LENGTHS{{
+ 1.9419362e-3f, 2.4466860e-3f, 3.3791220e-3f, 3.8838720e-3f
+}};
+constexpr auto LATE_LINE_LENGTHS_size = LATE_LINE_LENGTHS.size();
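+
+/* The entries above are ordered so that spatially opposite lines pair up:
+ * L_0 + L_3 = L_1 + L_2 = 2 r_a ~= 5.8258e-3 s, and the second entry is
+ * 2^(1/3) r_d ~= 2.4467e-3 s as given by the short-path formula.
+ */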
+
+
+struct DelayLineI {
+ /* The delay lines use interleaved samples, with the lengths being powers
+ * of 2 to allow the use of bit-masking instead of a modulus for wrapping.
+ */
+ ALsizei Mask{0};
+ ALfloat (*Line)[NUM_LINES]{nullptr};
+
+
+ void write(ALsizei offset, const ALsizei c, const ALfloat *RESTRICT in, const ALsizei count) const noexcept
+ {
+ ASSUME(count > 0);
+ for(ALsizei i{0};i < count;)
+ {
+ offset &= Mask;
+ ALsizei td{mini(Mask+1 - offset, count - i)};
+ do {
+ Line[offset++][c] = in[i++];
+ } while(--td);
+ }
+ }
+};
+
+struct VecAllpass {
+ DelayLineI Delay;
+ ALfloat Coeff{0.0f};
+ ALsizei Offset[NUM_LINES][2]{};
+
+ void processFaded(const al::span<FloatBufferLine,NUM_LINES> samples, ALsizei offset,
+ const ALfloat xCoeff, const ALfloat yCoeff, ALfloat fade, const ALsizei todo);
+ void processUnfaded(const al::span<FloatBufferLine,NUM_LINES> samples, ALsizei offset,
+ const ALfloat xCoeff, const ALfloat yCoeff, const ALsizei todo);
+};
+
+struct T60Filter {
+ /* Two filters are used to adjust the signal. One to control the low
+ * frequencies, and one to control the high frequencies.
+ */
+ ALfloat MidGain[2]{0.0f, 0.0f};
+ BiquadFilter HFFilter, LFFilter;
+
+ void calcCoeffs(const ALfloat length, const ALfloat lfDecayTime, const ALfloat mfDecayTime,
+ const ALfloat hfDecayTime, const ALfloat lf0norm, const ALfloat hf0norm);
+
+ /* Applies the two T60 damping filter sections. */
+ void process(ALfloat *samples, const ALsizei todo)
+ {
+ HFFilter.process(samples, samples, todo);
+ LFFilter.process(samples, samples, todo);
+ }
+};
+
+struct EarlyReflections {
+ /* A Gerzon vector all-pass filter is used to simulate initial diffusion.
+ * The spread from this filter also helps smooth out the reverb tail.
+ */
+ VecAllpass VecAp;
+
+ /* An echo line is used to complete the second half of the early
+ * reflections.
+ */
+ DelayLineI Delay;
+ ALsizei Offset[NUM_LINES][2]{};
+ ALfloat Coeff[NUM_LINES][2]{};
+
+ /* The gain for each output channel based on 3D panning. */
+ ALfloat CurrentGain[NUM_LINES][MAX_OUTPUT_CHANNELS]{};
+ ALfloat PanGain[NUM_LINES][MAX_OUTPUT_CHANNELS]{};
+
+ void updateLines(const ALfloat density, const ALfloat diffusion, const ALfloat decayTime,
+ const ALfloat frequency);
+};
+
+struct LateReverb {
+    /* A recursive delay line is used to fill in the reverb tail. */
+ DelayLineI Delay;
+ ALsizei Offset[NUM_LINES][2]{};
+
+ /* Attenuation to compensate for the modal density and decay rate of the
+ * late lines.
+ */
+ ALfloat DensityGain[2]{0.0f, 0.0f};
+
+ /* T60 decay filters are used to simulate absorption. */
+ T60Filter T60[NUM_LINES];
+
+ /* A Gerzon vector all-pass filter is used to simulate diffusion. */
+ VecAllpass VecAp;
+
+ /* The gain for each output channel based on 3D panning. */
+ ALfloat CurrentGain[NUM_LINES][MAX_OUTPUT_CHANNELS]{};
+ ALfloat PanGain[NUM_LINES][MAX_OUTPUT_CHANNELS]{};
+
+ void updateLines(const ALfloat density, const ALfloat diffusion, const ALfloat lfDecayTime,
+ const ALfloat mfDecayTime, const ALfloat hfDecayTime, const ALfloat lf0norm,
+ const ALfloat hf0norm, const ALfloat frequency);
+};
+
+struct ReverbState final : public EffectState {
+ /* All delay lines are allocated as a single buffer to reduce memory
+ * fragmentation and management code.
+ */
+ al::vector<ALfloat,16> mSampleBuffer;
+
+ struct {
+ /* Calculated parameters which indicate if cross-fading is needed after
+ * an update.
+ */
+ ALfloat Density{AL_EAXREVERB_DEFAULT_DENSITY};
+ ALfloat Diffusion{AL_EAXREVERB_DEFAULT_DIFFUSION};
+ ALfloat DecayTime{AL_EAXREVERB_DEFAULT_DECAY_TIME};
+ ALfloat HFDecayTime{AL_EAXREVERB_DEFAULT_DECAY_HFRATIO * AL_EAXREVERB_DEFAULT_DECAY_TIME};
+ ALfloat LFDecayTime{AL_EAXREVERB_DEFAULT_DECAY_LFRATIO * AL_EAXREVERB_DEFAULT_DECAY_TIME};
+ ALfloat HFReference{AL_EAXREVERB_DEFAULT_HFREFERENCE};
+ ALfloat LFReference{AL_EAXREVERB_DEFAULT_LFREFERENCE};
+ } mParams;
+
+ /* Master effect filters */
+ struct {
+ BiquadFilter Lp;
+ BiquadFilter Hp;
+ } mFilter[NUM_LINES];
+
+ /* Core delay line (early reflections and late reverb tap from this). */
+ DelayLineI mDelay;
+
+ /* Tap points for early reflection delay. */
+ ALsizei mEarlyDelayTap[NUM_LINES][2]{};
+ ALfloat mEarlyDelayCoeff[NUM_LINES][2]{};
+
+ /* Tap points for late reverb feed and delay. */
+ ALsizei mLateFeedTap{};
+ ALsizei mLateDelayTap[NUM_LINES][2]{};
+
+ /* Coefficients for the all-pass and line scattering matrices. */
+ ALfloat mMixX{0.0f};
+ ALfloat mMixY{0.0f};
+
+ EarlyReflections mEarly;
+
+ LateReverb mLate;
+
+ /* Indicates the cross-fade point for delay line reads [0,FADE_SAMPLES]. */
+ ALsizei mFadeCount{0};
+
+ /* Maximum number of samples to process at once. */
+ ALsizei mMaxUpdate[2]{BUFFERSIZE, BUFFERSIZE};
+
+ /* The current write offset for all delay lines. */
+ ALsizei mOffset{0};
+
+ /* Temporary storage used when processing. */
+ alignas(16) std::array<FloatBufferLine,NUM_LINES> mTempSamples{};
+ alignas(16) std::array<FloatBufferLine,NUM_LINES> mEarlyBuffer{};
+ alignas(16) std::array<FloatBufferLine,NUM_LINES> mLateBuffer{};
+
+ using MixOutT = void (ReverbState::*)(const al::span<FloatBufferLine> samplesOut,
+ const ALsizei todo);
+
+ MixOutT mMixOut{&ReverbState::MixOutPlain};
+ std::array<ALfloat,MAX_AMBI_ORDER+1> mOrderScales{};
+ std::array<std::array<BandSplitter,NUM_LINES>,2> mAmbiSplitter;
+
+
+ void MixOutPlain(const al::span<FloatBufferLine> samplesOut, const ALsizei todo)
+ {
+ ASSUME(todo > 0);
+
+ /* Convert back to B-Format, and mix the results to output. */
+ for(ALsizei c{0};c < NUM_LINES;c++)
+ {
+ std::fill_n(mTempSamples[0].begin(), todo, 0.0f);
+ MixRowSamples(mTempSamples[0], A2B[c], mEarlyBuffer, 0, todo);
+ MixSamples(mTempSamples[0].data(), samplesOut, mEarly.CurrentGain[c],
+ mEarly.PanGain[c], todo, 0, todo);
+ }
+
+ for(ALsizei c{0};c < NUM_LINES;c++)
+ {
+ std::fill_n(mTempSamples[0].begin(), todo, 0.0f);
+ MixRowSamples(mTempSamples[0], A2B[c], mLateBuffer, 0, todo);
+ MixSamples(mTempSamples[0].data(), samplesOut, mLate.CurrentGain[c], mLate.PanGain[c],
+ todo, 0, todo);
+ }
+ }
+
+ void MixOutAmbiUp(const al::span<FloatBufferLine> samplesOut, const ALsizei todo)
+ {
+ ASSUME(todo > 0);
+
+ for(ALsizei c{0};c < NUM_LINES;c++)
+ {
+ std::fill_n(mTempSamples[0].begin(), todo, 0.0f);
+ MixRowSamples(mTempSamples[0], A2B[c], mEarlyBuffer, 0, todo);
+
+ /* Apply scaling to the B-Format's HF response to "upsample" it to
+ * higher-order output.
+ */
+ const ALfloat hfscale{(c==0) ? mOrderScales[0] : mOrderScales[1]};
+ mAmbiSplitter[0][c].applyHfScale(mTempSamples[0].data(), hfscale, todo);
+
+ MixSamples(mTempSamples[0].data(), samplesOut, mEarly.CurrentGain[c],
+ mEarly.PanGain[c], todo, 0, todo);
+ }
+
+ for(ALsizei c{0};c < NUM_LINES;c++)
+ {
+ std::fill_n(mTempSamples[0].begin(), todo, 0.0f);
+ MixRowSamples(mTempSamples[0], A2B[c], mLateBuffer, 0, todo);
+
+ const ALfloat hfscale{(c==0) ? mOrderScales[0] : mOrderScales[1]};
+ mAmbiSplitter[1][c].applyHfScale(mTempSamples[0].data(), hfscale, todo);
+
+ MixSamples(mTempSamples[0].data(), samplesOut, mLate.CurrentGain[c], mLate.PanGain[c],
+ todo, 0, todo);
+ }
+ }
+
+ bool allocLines(const ALfloat frequency);
+
+ void updateDelayLine(const ALfloat earlyDelay, const ALfloat lateDelay, const ALfloat density,
+ const ALfloat decayTime, const ALfloat frequency);
+ void update3DPanning(const ALfloat *ReflectionsPan, const ALfloat *LateReverbPan,
+ const ALfloat earlyGain, const ALfloat lateGain, const EffectTarget &target);
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ DEF_NEWDEL(ReverbState)
+};
+
+/**************************************
+ * Device Update *
+ **************************************/
+
+inline ALfloat CalcDelayLengthMult(ALfloat density)
+{ return maxf(5.0f, std::cbrt(density*DENSITY_SCALE)); }
+
+/* Given the allocated sample buffer, this function updates each delay line
+ * offset.
+ */
+inline ALvoid RealizeLineOffset(ALfloat *sampleBuffer, DelayLineI *Delay)
+{
+ union {
+ ALfloat *f;
+ ALfloat (*f4)[NUM_LINES];
+ } u;
+ u.f = &sampleBuffer[reinterpret_cast<ptrdiff_t>(Delay->Line) * NUM_LINES];
+ Delay->Line = u.f4;
+}
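+
+/* Note that until this point, Delay->Line holds an integer sample offset
+ * (stored by CalcLineLength below) rather than a real pointer; the cast here
+ * rebases that offset onto the shared buffer, e.g. an offset of 1024 samples
+ * ends up pointing at &sampleBuffer[1024*NUM_LINES].
+ */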
+
+/* Calculate the length of a delay line and store its mask and offset. */
+ALuint CalcLineLength(const ALfloat length, const ptrdiff_t offset, const ALfloat frequency,
+ const ALuint extra, DelayLineI *Delay)
+{
+ /* All line lengths are powers of 2, calculated from their lengths in
+ * seconds, rounded up.
+ */
+ auto samples = static_cast<ALuint>(float2int(std::ceil(length*frequency)));
+ samples = NextPowerOf2(samples + extra);
+
+ /* All lines share a single sample buffer. */
+ Delay->Mask = samples - 1;
+ Delay->Line = reinterpret_cast<ALfloat(*)[NUM_LINES]>(offset);
+
+ /* Return the sample count for accumulation. */
+ return samples;
+}
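+
+/* For example, at 44100Hz a 2ms line needs ceil(0.002*44100) = 89 samples,
+ * which NextPowerOf2 rounds up to 128; the mask is then 127, letting reads
+ * and writes wrap with a simple bit-and instead of a modulus.
+ */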
+
+/* Calculates the delay line metrics and allocates the shared sample buffer
+ * for all lines given the sample rate (frequency). If an allocation failure
+ * occurs, it returns false.
+ */
+bool ReverbState::allocLines(const ALfloat frequency)
+{
+    /* All delay line lengths are calculated to accommodate the full range of
+     * lengths given their respective parameters.
+ */
+ ALuint totalSamples{0u};
+
+ /* Multiplier for the maximum density value, i.e. density=1, which is
+ * actually the least density...
+ */
+ ALfloat multiplier{CalcDelayLengthMult(AL_EAXREVERB_MAX_DENSITY)};
+
+ /* The main delay length includes the maximum early reflection delay, the
+ * largest early tap width, the maximum late reverb delay, and the
+ * largest late tap width. Finally, it must also be extended by the
+ * update size (BUFFERSIZE) for block processing.
+ */
+ ALfloat length{AL_EAXREVERB_MAX_REFLECTIONS_DELAY + EARLY_TAP_LENGTHS.back()*multiplier +
+ AL_EAXREVERB_MAX_LATE_REVERB_DELAY +
+ (LATE_LINE_LENGTHS.back() - LATE_LINE_LENGTHS.front())/float{LATE_LINE_LENGTHS_size}*multiplier};
+ totalSamples += CalcLineLength(length, totalSamples, frequency, BUFFERSIZE, &mDelay);
+
+ /* The early vector all-pass line. */
+ length = EARLY_ALLPASS_LENGTHS.back() * multiplier;
+ totalSamples += CalcLineLength(length, totalSamples, frequency, 0, &mEarly.VecAp.Delay);
+
+ /* The early reflection line. */
+ length = EARLY_LINE_LENGTHS.back() * multiplier;
+ totalSamples += CalcLineLength(length, totalSamples, frequency, 0, &mEarly.Delay);
+
+ /* The late vector all-pass line. */
+ length = LATE_ALLPASS_LENGTHS.back() * multiplier;
+ totalSamples += CalcLineLength(length, totalSamples, frequency, 0, &mLate.VecAp.Delay);
+
+ /* The late delay lines are calculated from the largest maximum density
+ * line length.
+ */
+ length = LATE_LINE_LENGTHS.back() * multiplier;
+ totalSamples += CalcLineLength(length, totalSamples, frequency, 0, &mLate.Delay);
+
+ totalSamples *= NUM_LINES;
+ if(totalSamples != mSampleBuffer.size())
+ {
+ mSampleBuffer.resize(totalSamples);
+ mSampleBuffer.shrink_to_fit();
+ }
+
+ /* Clear the sample buffer. */
+ std::fill(mSampleBuffer.begin(), mSampleBuffer.end(), 0.0f);
+
+ /* Update all delays to reflect the new sample buffer. */
+ RealizeLineOffset(mSampleBuffer.data(), &mDelay);
+ RealizeLineOffset(mSampleBuffer.data(), &mEarly.VecAp.Delay);
+ RealizeLineOffset(mSampleBuffer.data(), &mEarly.Delay);
+ RealizeLineOffset(mSampleBuffer.data(), &mLate.VecAp.Delay);
+ RealizeLineOffset(mSampleBuffer.data(), &mLate.Delay);
+
+ return true;
+}
+
+ALboolean ReverbState::deviceUpdate(const ALCdevice *device)
+{
+ const auto frequency = static_cast<ALfloat>(device->Frequency);
+
+ /* Allocate the delay lines. */
+ if(!allocLines(frequency))
+ return AL_FALSE;
+
+ const ALfloat multiplier{CalcDelayLengthMult(AL_EAXREVERB_MAX_DENSITY)};
+
+    /* The late feed taps are set at a fixed position past the latest delay tap. */
+ mLateFeedTap = float2int(
+ (AL_EAXREVERB_MAX_REFLECTIONS_DELAY + EARLY_TAP_LENGTHS.back()*multiplier) * frequency);
+
+ /* Clear filters and gain coefficients since the delay lines were all just
+ * cleared (if not reallocated).
+ */
+ for(auto &filter : mFilter)
+ {
+ filter.Lp.clear();
+ filter.Hp.clear();
+ }
+
+ for(auto &coeff : mEarlyDelayCoeff)
+ std::fill(std::begin(coeff), std::end(coeff), 0.0f);
+ for(auto &coeff : mEarly.Coeff)
+ std::fill(std::begin(coeff), std::end(coeff), 0.0f);
+
+ mLate.DensityGain[0] = 0.0f;
+ mLate.DensityGain[1] = 0.0f;
+ for(auto &t60 : mLate.T60)
+ {
+ t60.MidGain[0] = 0.0f;
+ t60.MidGain[1] = 0.0f;
+ t60.HFFilter.clear();
+ t60.LFFilter.clear();
+ }
+
+ for(auto &gains : mEarly.CurrentGain)
+ std::fill(std::begin(gains), std::end(gains), 0.0f);
+ for(auto &gains : mEarly.PanGain)
+ std::fill(std::begin(gains), std::end(gains), 0.0f);
+ for(auto &gains : mLate.CurrentGain)
+ std::fill(std::begin(gains), std::end(gains), 0.0f);
+ for(auto &gains : mLate.PanGain)
+ std::fill(std::begin(gains), std::end(gains), 0.0f);
+
+ /* Reset counters and offset base. */
+ mFadeCount = 0;
+ std::fill(std::begin(mMaxUpdate), std::end(mMaxUpdate), BUFFERSIZE);
+ mOffset = 0;
+
+ if(device->mAmbiOrder > 1)
+ {
+ mMixOut = &ReverbState::MixOutAmbiUp;
+ mOrderScales = BFormatDec::GetHFOrderScales(1, device->mAmbiOrder);
+ }
+ else
+ {
+ mMixOut = &ReverbState::MixOutPlain;
+ mOrderScales.fill(1.0f);
+ }
+ mAmbiSplitter[0][0].init(400.0f / frequency);
+ std::fill(mAmbiSplitter[0].begin()+1, mAmbiSplitter[0].end(), mAmbiSplitter[0][0]);
+ std::fill(mAmbiSplitter[1].begin(), mAmbiSplitter[1].end(), mAmbiSplitter[0][0]);
+
+ return AL_TRUE;
+}
+
+/**************************************
+ * Effect Update *
+ **************************************/
+
+/* Calculate a decay coefficient given the length of each cycle and the time
+ * until the decay reaches -60 dB.
+ */
+inline ALfloat CalcDecayCoeff(const ALfloat length, const ALfloat decayTime)
+{ return std::pow(REVERB_DECAY_GAIN, length/decayTime); }
+
+/* Calculate a decay length from a coefficient and the time until the decay
+ * reaches -60 dB.
+ */
+inline ALfloat CalcDecayLength(const ALfloat coeff, const ALfloat decayTime)
+{ return std::log10(coeff) * decayTime / std::log10(REVERB_DECAY_GAIN); }
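+
+/* For example, assuming REVERB_DECAY_GAIN is the -60dB amplitude (0.001), a
+ * cycle one third as long as the decay time gets a coefficient of
+ * 0.001^(1/3) = 0.1 (-20dB per pass), and CalcDecayLength(0.1, decayTime)
+ * recovers decayTime/3.
+ */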
+
+/* Calculate an attenuation to be applied to the input of any echo models to
+ * compensate for modal density and decay time.
+ */
+inline ALfloat CalcDensityGain(const ALfloat a)
+{
+ /* The energy of a signal can be obtained by finding the area under the
+ * squared signal. This takes the form of Sum(x_n^2), where x is the
+ * amplitude for the sample n.
+ *
+ * Decaying feedback matches exponential decay of the form Sum(a^n),
+ * where a is the attenuation coefficient, and n is the sample. The area
+ * under this decay curve can be calculated as: 1 / (1 - a).
+ *
+ * Modifying the above equation to find the area under the squared curve
+ * (for energy) yields: 1 / (1 - a^2). Input attenuation can then be
+ * calculated by inverting the square root of this approximation,
+ * yielding: 1 / sqrt(1 / (1 - a^2)), simplified to: sqrt(1 - a^2).
+ */
+ return std::sqrt(1.0f - a*a);
+}
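+
+/* For example, a feedback coefficient of 0.5 gives an input attenuation of
+ * sqrt(1 - 0.25) ~= 0.866, while a slowly decaying a = 0.9 needs a stronger
+ * attenuation of sqrt(1 - 0.81) ~= 0.436.
+ */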
+
+/* Calculate the scattering matrix coefficients given a diffusion factor. */
+inline ALvoid CalcMatrixCoeffs(const ALfloat diffusion, ALfloat *x, ALfloat *y)
+{
+ /* The matrix is of order 4, so n is sqrt(4 - 1). */
+ ALfloat n{std::sqrt(3.0f)};
+ ALfloat t{diffusion * std::atan(n)};
+
+ /* Calculate the first mixing matrix coefficient. */
+ *x = std::cos(t);
+ /* Calculate the second mixing matrix coefficient. */
+ *y = std::sin(t) / n;
+}
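+
+/* At full diffusion, t = atan(sqrt(3)) = pi/3, so x = cos(pi/3) = 0.5 and
+ * y = sin(pi/3)/sqrt(3) = 0.5, satisfying x^2 + 3 y^2 = 1. At zero diffusion,
+ * t = 0 gives x = 1 and y = 0, reducing the scattering matrix to the
+ * identity.
+ */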
+
+/* Calculate the limited HF ratio for use with the late reverb low-pass
+ * filters.
+ */
+ALfloat CalcLimitedHfRatio(const ALfloat hfRatio, const ALfloat airAbsorptionGainHF,
+ const ALfloat decayTime, const ALfloat SpeedOfSound)
+{
+ /* Find the attenuation due to air absorption in dB (converting delay
+ * time to meters using the speed of sound). Then reversing the decay
+ * equation, solve for HF ratio. The delay length is cancelled out of
+ * the equation, so it can be calculated once for all lines.
+ */
+ ALfloat limitRatio{1.0f / (CalcDecayLength(airAbsorptionGainHF, decayTime) * SpeedOfSound)};
+
+ /* Using the limit calculated above, apply the upper bound to the HF ratio.
+ */
+ return minf(limitRatio, hfRatio);
+}
+
+
+/* Calculates the 3-band T60 damping coefficients for a particular delay line
+ * of specified length, using a combination of two shelf filter sections given
+ * decay times for each band split at two reference frequencies.
+ */
+void T60Filter::calcCoeffs(const ALfloat length, const ALfloat lfDecayTime,
+ const ALfloat mfDecayTime, const ALfloat hfDecayTime, const ALfloat lf0norm,
+ const ALfloat hf0norm)
+{
+ const ALfloat mfGain{CalcDecayCoeff(length, mfDecayTime)};
+ const ALfloat lfGain{maxf(CalcDecayCoeff(length, lfDecayTime)/mfGain, 0.001f)};
+ const ALfloat hfGain{maxf(CalcDecayCoeff(length, hfDecayTime)/mfGain, 0.001f)};
+
+ MidGain[1] = mfGain;
+ LFFilter.setParams(BiquadType::LowShelf, lfGain, lf0norm,
+ LFFilter.rcpQFromSlope(lfGain, 1.0f));
+ HFFilter.setParams(BiquadType::HighShelf, hfGain, hf0norm,
+ HFFilter.rcpQFromSlope(hfGain, 1.0f));
+}
+
+/* Update the early reflection line lengths and gain coefficients. */
+void EarlyReflections::updateLines(const ALfloat density, const ALfloat diffusion,
+ const ALfloat decayTime, const ALfloat frequency)
+{
+ const ALfloat multiplier{CalcDelayLengthMult(density)};
+
+ /* Calculate the all-pass feed-back/forward coefficient. */
+ VecAp.Coeff = std::sqrt(0.5f) * std::pow(diffusion, 2.0f);
+
+ for(ALsizei i{0};i < NUM_LINES;i++)
+ {
+ /* Calculate the length (in seconds) of each all-pass line. */
+ ALfloat length{EARLY_ALLPASS_LENGTHS[i] * multiplier};
+
+ /* Calculate the delay offset for each all-pass line. */
+ VecAp.Offset[i][1] = float2int(length * frequency);
+
+ /* Calculate the length (in seconds) of each delay line. */
+ length = EARLY_LINE_LENGTHS[i] * multiplier;
+
+ /* Calculate the delay offset for each delay line. */
+ Offset[i][1] = float2int(length * frequency);
+
+ /* Calculate the gain (coefficient) for each line. */
+ Coeff[i][1] = CalcDecayCoeff(length, decayTime);
+ }
+}
+
+/* Update the late reverb line lengths and T60 coefficients. */
+void LateReverb::updateLines(const ALfloat density, const ALfloat diffusion,
+ const ALfloat lfDecayTime, const ALfloat mfDecayTime, const ALfloat hfDecayTime,
+ const ALfloat lf0norm, const ALfloat hf0norm, const ALfloat frequency)
+{
+ /* Scaling factor to convert the normalized reference frequencies from
+ * representing 0...freq to 0...max_reference.
+ */
+ const ALfloat norm_weight_factor{frequency / AL_EAXREVERB_MAX_HFREFERENCE};
+
+ const ALfloat late_allpass_avg{
+ std::accumulate(LATE_ALLPASS_LENGTHS.begin(), LATE_ALLPASS_LENGTHS.end(), 0.0f) /
+ float{LATE_ALLPASS_LENGTHS_size}};
+
+ /* To compensate for changes in modal density and decay time of the late
+ * reverb signal, the input is attenuated based on the maximal energy of
+ * the outgoing signal. This approximation is used to keep the apparent
+ * energy of the signal equal for all ranges of density and decay time.
+ *
+ * The average length of the delay lines is used to calculate the
+ * attenuation coefficient.
+ */
+ const ALfloat multiplier{CalcDelayLengthMult(density)};
+ ALfloat length{std::accumulate(LATE_LINE_LENGTHS.begin(), LATE_LINE_LENGTHS.end(), 0.0f) /
+ float{LATE_LINE_LENGTHS_size} * multiplier};
+ length += late_allpass_avg * multiplier;
+ /* The density gain calculation uses an average decay time weighted by
+ * approximate bandwidth. This attempts to compensate for losses of energy
+ * that reduce decay time due to scattering into highly attenuated bands.
+ */
+ const ALfloat bandWeights[3]{
+ lf0norm*norm_weight_factor,
+ hf0norm*norm_weight_factor - lf0norm*norm_weight_factor,
+ 1.0f - hf0norm*norm_weight_factor};
+ DensityGain[1] = CalcDensityGain(
+ CalcDecayCoeff(length,
+ bandWeights[0]*lfDecayTime + bandWeights[1]*mfDecayTime + bandWeights[2]*hfDecayTime
+ )
+ );
+
+ /* Calculate the all-pass feed-back/forward coefficient. */
+ VecAp.Coeff = std::sqrt(0.5f) * std::pow(diffusion, 2.0f);
+
+ for(ALsizei i{0};i < NUM_LINES;i++)
+ {
+ /* Calculate the length (in seconds) of each all-pass line. */
+ length = LATE_ALLPASS_LENGTHS[i] * multiplier;
+
+ /* Calculate the delay offset for each all-pass line. */
+ VecAp.Offset[i][1] = float2int(length * frequency);
+
+ /* Calculate the length (in seconds) of each delay line. */
+ length = LATE_LINE_LENGTHS[i] * multiplier;
+
+ /* Calculate the delay offset for each delay line. */
+ Offset[i][1] = float2int(length*frequency + 0.5f);
+
+ /* Approximate the absorption that the vector all-pass would exhibit
+ * given the current diffusion so we don't have to process a full T60
+ * filter for each of its four lines.
+ */
+ length += lerp(LATE_ALLPASS_LENGTHS[i], late_allpass_avg, diffusion) * multiplier;
+
+ /* Calculate the T60 damping coefficients for each line. */
+ T60[i].calcCoeffs(length, lfDecayTime, mfDecayTime, hfDecayTime, lf0norm, hf0norm);
+ }
+}
+
+
+/* Update the offsets for the main effect delay line. */
+void ReverbState::updateDelayLine(const ALfloat earlyDelay, const ALfloat lateDelay,
+ const ALfloat density, const ALfloat decayTime, const ALfloat frequency)
+{
+ const ALfloat multiplier{CalcDelayLengthMult(density)};
+
+ /* Early reflection taps are decorrelated by means of an average room
+ * reflection approximation described above the definition of the taps.
+ * This approximation is linear and so the above density multiplier can
+ * be applied to adjust the width of the taps. A single-band decay
+ * coefficient is applied to simulate initial attenuation and absorption.
+ *
+ * Late reverb taps are based on the late line lengths to allow a zero-
+ * delay path and offsets that would continue the propagation naturally
+ * into the late lines.
+ */
+ for(ALsizei i{0};i < NUM_LINES;i++)
+ {
+ ALfloat length{earlyDelay + EARLY_TAP_LENGTHS[i]*multiplier};
+ mEarlyDelayTap[i][1] = float2int(length * frequency);
+
+ length = EARLY_TAP_LENGTHS[i]*multiplier;
+ mEarlyDelayCoeff[i][1] = CalcDecayCoeff(length, decayTime);
+
+ length = lateDelay + (LATE_LINE_LENGTHS[i] - LATE_LINE_LENGTHS.front()) /
+ float{LATE_LINE_LENGTHS_size} * multiplier;
+ mLateDelayTap[i][1] = mLateFeedTap + float2int(length * frequency);
+ }
+}
+
+/* Creates a transform matrix given a reverb vector. The vector pans the reverb
+ * reflections toward the given direction, using its magnitude (up to 1) as a
+ * focal strength. This function results in a B-Format transformation matrix
+ * that spatially focuses the signal in the desired direction.
+ */
+alu::Matrix GetTransformFromVector(const ALfloat *vec)
+{
+ /* Normalize the panning vector according to the N3D scale, which has an
+ * extra sqrt(3) term on the directional components. Converting from OpenAL
+ * to B-Format also requires negating X (ACN 1) and Z (ACN 3). Note however
+ * that the reverb panning vectors use left-handed coordinates, unlike the
+ * rest of OpenAL which use right-handed. This is fixed by negating Z,
+ * which cancels out with the B-Format Z negation.
+ */
+ ALfloat norm[3];
+ ALfloat mag{std::sqrt(vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2])};
+ if(mag > 1.0f)
+ {
+ norm[0] = vec[0] / mag * -al::MathDefs<float>::Sqrt3();
+ norm[1] = vec[1] / mag * al::MathDefs<float>::Sqrt3();
+ norm[2] = vec[2] / mag * al::MathDefs<float>::Sqrt3();
+ mag = 1.0f;
+ }
+ else
+ {
+ /* If the magnitude is less than or equal to 1, just apply the sqrt(3)
+ * term. There's no need to renormalize the magnitude since it would
+ * just be reapplied in the matrix.
+ */
+ norm[0] = vec[0] * -al::MathDefs<float>::Sqrt3();
+ norm[1] = vec[1] * al::MathDefs<float>::Sqrt3();
+ norm[2] = vec[2] * al::MathDefs<float>::Sqrt3();
+ }
+
+ return alu::Matrix{
+ 1.0f, 0.0f, 0.0f, 0.0f,
+ norm[0], 1.0f-mag, 0.0f, 0.0f,
+ norm[1], 0.0f, 1.0f-mag, 0.0f,
+ norm[2], 0.0f, 0.0f, 1.0f-mag
+ };
+}
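+
+/* With a zero panning vector, mag and norm are all zero and the matrix above
+ * reduces to the identity (no focusing). With a unit-length vector, the
+ * 1-mag diagonal terms vanish, so the directional output is driven entirely
+ * by the W channel panned toward the given direction.
+ */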
+
+/* Update the early and late 3D panning gains. */
+void ReverbState::update3DPanning(const ALfloat *ReflectionsPan, const ALfloat *LateReverbPan,
+ const ALfloat earlyGain, const ALfloat lateGain, const EffectTarget &target)
+{
+ /* Create matrices that transform a B-Format signal according to the
+ * panning vectors.
+ */
+ const alu::Matrix earlymat{GetTransformFromVector(ReflectionsPan)};
+ const alu::Matrix latemat{GetTransformFromVector(LateReverbPan)};
+
+ mOutTarget = target.Main->Buffer;
+ for(ALsizei i{0};i < NUM_LINES;i++)
+ {
+ const ALfloat coeffs[MAX_AMBI_CHANNELS]{earlymat[0][i], earlymat[1][i], earlymat[2][i],
+ earlymat[3][i]};
+ ComputePanGains(target.Main, coeffs, earlyGain, mEarly.PanGain[i]);
+ }
+ for(ALsizei i{0};i < NUM_LINES;i++)
+ {
+ const ALfloat coeffs[MAX_AMBI_CHANNELS]{latemat[0][i], latemat[1][i], latemat[2][i],
+ latemat[3][i]};
+ ComputePanGains(target.Main, coeffs, lateGain, mLate.PanGain[i]);
+ }
+}
+
+void ReverbState::update(const ALCcontext *Context, const ALeffectslot *Slot, const EffectProps *props, const EffectTarget target)
+{
+ const ALCdevice *Device{Context->Device};
+ const ALlistener &Listener = Context->Listener;
+ const auto frequency = static_cast<ALfloat>(Device->Frequency);
+
+ /* Calculate the master filters */
+ ALfloat hf0norm{minf(props->Reverb.HFReference / frequency, 0.49f)};
+ /* Restrict the filter gains from going below -60dB to keep the filter from
+ * killing most of the signal.
+ */
+ ALfloat gainhf{maxf(props->Reverb.GainHF, 0.001f)};
+ mFilter[0].Lp.setParams(BiquadType::HighShelf, gainhf, hf0norm,
+ mFilter[0].Lp.rcpQFromSlope(gainhf, 1.0f));
+ ALfloat lf0norm{minf(props->Reverb.LFReference / frequency, 0.49f)};
+ ALfloat gainlf{maxf(props->Reverb.GainLF, 0.001f)};
+ mFilter[0].Hp.setParams(BiquadType::LowShelf, gainlf, lf0norm,
+ mFilter[0].Hp.rcpQFromSlope(gainlf, 1.0f));
+ for(ALsizei i{1};i < NUM_LINES;i++)
+ {
+ mFilter[i].Lp.copyParamsFrom(mFilter[0].Lp);
+ mFilter[i].Hp.copyParamsFrom(mFilter[0].Hp);
+ }
+
+ /* Update the main effect delay and associated taps. */
+ updateDelayLine(props->Reverb.ReflectionsDelay, props->Reverb.LateReverbDelay,
+ props->Reverb.Density, props->Reverb.DecayTime, frequency);
+
+ /* Update the early lines. */
+ mEarly.updateLines(props->Reverb.Density, props->Reverb.Diffusion, props->Reverb.DecayTime,
+ frequency);
+
+ /* Get the mixing matrix coefficients. */
+ CalcMatrixCoeffs(props->Reverb.Diffusion, &mMixX, &mMixY);
+
+ /* If the HF limit parameter is flagged, calculate an appropriate limit
+ * based on the air absorption parameter.
+ */
+ ALfloat hfRatio{props->Reverb.DecayHFRatio};
+ if(props->Reverb.DecayHFLimit && props->Reverb.AirAbsorptionGainHF < 1.0f)
+ hfRatio = CalcLimitedHfRatio(hfRatio, props->Reverb.AirAbsorptionGainHF,
+ props->Reverb.DecayTime, Listener.Params.ReverbSpeedOfSound
+ );
+
+ /* Calculate the LF/HF decay times. */
+ const ALfloat lfDecayTime{clampf(props->Reverb.DecayTime * props->Reverb.DecayLFRatio,
+ AL_EAXREVERB_MIN_DECAY_TIME, AL_EAXREVERB_MAX_DECAY_TIME)};
+ const ALfloat hfDecayTime{clampf(props->Reverb.DecayTime * hfRatio,
+ AL_EAXREVERB_MIN_DECAY_TIME, AL_EAXREVERB_MAX_DECAY_TIME)};
+
+ /* Update the late lines. */
+ mLate.updateLines(props->Reverb.Density, props->Reverb.Diffusion, lfDecayTime,
+ props->Reverb.DecayTime, hfDecayTime, lf0norm, hf0norm, frequency);
+
+ /* Update early and late 3D panning. */
+ const ALfloat gain{props->Reverb.Gain * Slot->Params.Gain * ReverbBoost};
+ update3DPanning(props->Reverb.ReflectionsPan, props->Reverb.LateReverbPan,
+ props->Reverb.ReflectionsGain*gain, props->Reverb.LateReverbGain*gain, target);
+
+ /* Calculate the max update size from the smallest relevant delay. */
+ mMaxUpdate[1] = mini(BUFFERSIZE, mini(mEarly.Offset[0][1], mLate.Offset[0][1]));
+
+ /* Determine if delay-line cross-fading is required. Density is essentially
+     * a master control for the feedback delays, so it changes the offsets of many
+ * delay lines.
+ */
+ if(mParams.Density != props->Reverb.Density ||
+       /* Diffusion and decay times influence the decay rate (gain) of the
+ * late reverb T60 filter.
+ */
+ mParams.Diffusion != props->Reverb.Diffusion ||
+ mParams.DecayTime != props->Reverb.DecayTime ||
+ mParams.HFDecayTime != hfDecayTime ||
+ mParams.LFDecayTime != lfDecayTime ||
+ /* HF/LF References control the weighting used to calculate the density
+ * gain.
+ */
+ mParams.HFReference != props->Reverb.HFReference ||
+ mParams.LFReference != props->Reverb.LFReference)
+ mFadeCount = 0;
+ mParams.Density = props->Reverb.Density;
+ mParams.Diffusion = props->Reverb.Diffusion;
+ mParams.DecayTime = props->Reverb.DecayTime;
+ mParams.HFDecayTime = hfDecayTime;
+ mParams.LFDecayTime = lfDecayTime;
+ mParams.HFReference = props->Reverb.HFReference;
+ mParams.LFReference = props->Reverb.LFReference;
+}
+
+
+/**************************************
+ * Effect Processing *
+ **************************************/
+
+/* Applies a scattering matrix to the 4-line (vector) input. This is used
+ * both in the vector all-pass model below and in the modal feed-back
+ * delay network (FDN) mixing.
+ *
+ * The matrix is derived from a skew-symmetric matrix to form a 4D rotation
+ * matrix with a single unitary rotational parameter:
+ *
+ * [ d, a, b, c ] 1 = a^2 + b^2 + c^2 + d^2
+ * [ -a, d, c, -b ]
+ * [ -b, -c, d, a ]
+ * [ -c, b, -a, d ]
+ *
+ * The rotation is constructed from the effect's diffusion parameter,
+ * yielding:
+ *
+ * 1 = x^2 + 3 y^2
+ *
+ * Where a, b, and c are the coefficient y with differing signs, and d is the
+ * coefficient x. The final matrix is thus:
+ *
+ * [ x, y, -y, y ] n = sqrt(matrix_order - 1)
+ * [ -y, x, y, y ] t = diffusion_parameter * atan(n)
+ * [ y, -y, x, y ] x = cos(t)
+ * [ -y, -y, -y, x ] y = sin(t) / n
+ *
+ * Any square orthogonal matrix with an order that is a power of two will
+ * work (where ^T is transpose, ^-1 is inverse):
+ *
+ * M^T = M^-1
+ *
+ * Using that knowledge, finding an appropriate matrix can be accomplished
+ * naively by searching all combinations of:
+ *
+ * M = D + S - S^T
+ *
+ * Where D is a diagonal matrix (of x), and S is a triangular matrix (of y)
+ * whose combination of signs are being iterated.
+ */
+inline void VectorPartialScatter(ALfloat *RESTRICT out, const ALfloat *RESTRICT in,
+ const ALfloat xCoeff, const ALfloat yCoeff)
+{
+ out[0] = xCoeff*in[0] + yCoeff*( in[1] + -in[2] + in[3]);
+ out[1] = xCoeff*in[1] + yCoeff*(-in[0] + in[2] + in[3]);
+ out[2] = xCoeff*in[2] + yCoeff*( in[0] + -in[1] + in[3]);
+ out[3] = xCoeff*in[3] + yCoeff*(-in[0] + -in[1] + -in[2] );
+}
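+
+/* With the full-diffusion coefficients x = y = 0.5, every output is an
+ * equal-power mix of all four inputs: each row of the matrix has squared
+ * magnitude x^2 + 3 y^2 = 1 and the rows are mutually orthogonal, so the
+ * scatter preserves energy.
+ */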
+
+/* Utilizes the above, but reverses the input channels. */
+void VectorScatterRevDelayIn(const DelayLineI delay, ALint offset, const ALfloat xCoeff,
+ const ALfloat yCoeff, const ALsizei base, const al::span<const FloatBufferLine,NUM_LINES> in,
+ const ALsizei count)
+{
+ ASSUME(base >= 0);
+ ASSUME(count > 0);
+
+ for(ALsizei i{0};i < count;)
+ {
+ offset &= delay.Mask;
+ ALsizei td{mini(delay.Mask+1 - offset, count-i)};
+ do {
+ ALfloat f[NUM_LINES];
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ f[NUM_LINES-1-j] = in[j][base+i];
+ ++i;
+
+ VectorPartialScatter(delay.Line[offset++], f, xCoeff, yCoeff);
+ } while(--td);
+ }
+}
+
+/* This applies a Gerzon multiple-in/multiple-out (MIMO) vector all-pass
+ * filter to the 4-line input.
+ *
+ * It works by vectorizing a regular all-pass filter and replacing the delay
+ * element with a scattering matrix (like the one above) and a diagonal
+ * matrix of delay elements.
+ *
+ * Two static specializations are used for transitional (cross-faded) delay
+ * line processing and non-transitional processing.
+ */
+void VecAllpass::processUnfaded(const al::span<FloatBufferLine,NUM_LINES> samples, ALsizei offset,
+ const ALfloat xCoeff, const ALfloat yCoeff, const ALsizei todo)
+{
+ const DelayLineI delay{Delay};
+ const ALfloat feedCoeff{Coeff};
+
+ ASSUME(todo > 0);
+
+ ALsizei vap_offset[NUM_LINES];
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ vap_offset[j] = offset - Offset[j][0];
+ for(ALsizei i{0};i < todo;)
+ {
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ vap_offset[j] &= delay.Mask;
+ offset &= delay.Mask;
+
+ ALsizei maxoff{offset};
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ maxoff = maxi(maxoff, vap_offset[j]);
+ ALsizei td{mini(delay.Mask+1 - maxoff, todo - i)};
+
+ do {
+ ALfloat f[NUM_LINES];
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ const ALfloat input{samples[j][i]};
+ const ALfloat out{delay.Line[vap_offset[j]++][j] - feedCoeff*input};
+ f[j] = input + feedCoeff*out;
+
+ samples[j][i] = out;
+ }
+ ++i;
+
+ VectorPartialScatter(delay.Line[offset++], f, xCoeff, yCoeff);
+ } while(--td);
+ }
+}
+void VecAllpass::processFaded(const al::span<FloatBufferLine,NUM_LINES> samples, ALsizei offset,
+ const ALfloat xCoeff, const ALfloat yCoeff, ALfloat fade, const ALsizei todo)
+{
+ const DelayLineI delay{Delay};
+ const ALfloat feedCoeff{Coeff};
+
+ ASSUME(todo > 0);
+
+ fade *= 1.0f/FADE_SAMPLES;
+ ALsizei vap_offset[NUM_LINES][2];
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ vap_offset[j][0] = offset - Offset[j][0];
+ vap_offset[j][1] = offset - Offset[j][1];
+ }
+ for(ALsizei i{0};i < todo;)
+ {
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ vap_offset[j][0] &= delay.Mask;
+ vap_offset[j][1] &= delay.Mask;
+ }
+ offset &= delay.Mask;
+
+ ALsizei maxoff{offset};
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ maxoff = maxi(maxoff, maxi(vap_offset[j][0], vap_offset[j][1]));
+ ALsizei td{mini(delay.Mask+1 - maxoff, todo - i)};
+
+ do {
+ fade += FadeStep;
+ ALfloat f[NUM_LINES];
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ f[j] = delay.Line[vap_offset[j][0]++][j]*(1.0f-fade) +
+ delay.Line[vap_offset[j][1]++][j]*fade;
+
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ const ALfloat input{samples[j][i]};
+ const ALfloat out{f[j] - feedCoeff*input};
+ f[j] = input + feedCoeff*out;
+
+ samples[j][i] = out;
+ }
+ ++i;
+
+ VectorPartialScatter(delay.Line[offset++], f, xCoeff, yCoeff);
+ } while(--td);
+ }
+}
+
+/* This generates early reflections.
+ *
+ * This is done by obtaining the primary reflections (those arriving from the
+ * same direction as the source) from the main delay line. These are
+ * attenuated and all-pass filtered (based on the diffusion parameter).
+ *
+ * The early lines are then fed in reverse (according to the approximately
+ * opposite spatial location of the A-Format lines) to create the secondary
+ * reflections (those arriving from the opposite direction to the source).
+ *
+ * The early response is then completed by combining the primary reflections
+ * with the delayed and attenuated output from the early lines.
+ *
+ * Finally, the early response is reversed, scattered (based on diffusion),
+ * and fed into the late reverb section of the main delay line.
+ *
+ * Two static specializations are used for transitional (cross-faded) delay
+ * line processing and non-transitional processing.
+ */
+void EarlyReflection_Unfaded(ReverbState *State, const ALsizei offset, const ALsizei todo,
+ const ALsizei base, const al::span<FloatBufferLine,NUM_LINES> out)
+{
+ const al::span<FloatBufferLine,NUM_LINES> temps{State->mTempSamples};
+ const DelayLineI early_delay{State->mEarly.Delay};
+ const DelayLineI main_delay{State->mDelay};
+ const ALfloat mixX{State->mMixX};
+ const ALfloat mixY{State->mMixY};
+
+ ASSUME(todo > 0);
+
+ /* First, load decorrelated samples from the main delay line as the primary
+ * reflections.
+ */
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ ALsizei early_delay_tap{offset - State->mEarlyDelayTap[j][0]};
+ const ALfloat coeff{State->mEarlyDelayCoeff[j][0]};
+ for(ALsizei i{0};i < todo;)
+ {
+ early_delay_tap &= main_delay.Mask;
+ ALsizei td{mini(main_delay.Mask+1 - early_delay_tap, todo - i)};
+ do {
+ temps[j][i++] = main_delay.Line[early_delay_tap++][j] * coeff;
+ } while(--td);
+ }
+ }
+
+ /* Apply a vector all-pass, to help color the initial reflections based on
+ * the diffusion strength.
+ */
+ State->mEarly.VecAp.processUnfaded(temps, offset, mixX, mixY, todo);
+
+ /* Apply a delay and bounce to generate secondary reflections, combine with
+ * the primary reflections and write out the result for mixing.
+ */
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ ALint feedb_tap{offset - State->mEarly.Offset[j][0]};
+ const ALfloat feedb_coeff{State->mEarly.Coeff[j][0]};
+
+ ASSUME(base >= 0);
+ for(ALsizei i{0};i < todo;)
+ {
+ feedb_tap &= early_delay.Mask;
+ ALsizei td{mini(early_delay.Mask+1 - feedb_tap, todo - i)};
+ do {
+ out[j][base+i] = temps[j][i] + early_delay.Line[feedb_tap++][j]*feedb_coeff;
+ ++i;
+ } while(--td);
+ }
+ }
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ early_delay.write(offset, NUM_LINES-1-j, temps[j].data(), todo);
+
+ /* Also write the result back to the main delay line for the late reverb
+     * stage to pick up at the appropriate time, applying a scatter and
+ * bounce to improve the initial diffusion in the late reverb.
+ */
+ const ALsizei late_feed_tap{offset - State->mLateFeedTap};
+ VectorScatterRevDelayIn(main_delay, late_feed_tap, mixX, mixY, base,
+ {out.cbegin(), out.cend()}, todo);
+}
+void EarlyReflection_Faded(ReverbState *State, const ALsizei offset, const ALsizei todo,
+ const ALfloat fade, const ALsizei base, const al::span<FloatBufferLine,NUM_LINES> out)
+{
+ const al::span<FloatBufferLine,NUM_LINES> temps{State->mTempSamples};
+ const DelayLineI early_delay{State->mEarly.Delay};
+ const DelayLineI main_delay{State->mDelay};
+ const ALfloat mixX{State->mMixX};
+ const ALfloat mixY{State->mMixY};
+
+ ASSUME(todo > 0);
+
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ ALsizei early_delay_tap0{offset - State->mEarlyDelayTap[j][0]};
+ ALsizei early_delay_tap1{offset - State->mEarlyDelayTap[j][1]};
+ const ALfloat oldCoeff{State->mEarlyDelayCoeff[j][0]};
+ const ALfloat oldCoeffStep{-oldCoeff / FADE_SAMPLES};
+ const ALfloat newCoeffStep{State->mEarlyDelayCoeff[j][1] / FADE_SAMPLES};
+ ALfloat fadeCount{fade};
+
+ for(ALsizei i{0};i < todo;)
+ {
+ early_delay_tap0 &= main_delay.Mask;
+ early_delay_tap1 &= main_delay.Mask;
+ ALsizei td{mini(main_delay.Mask+1 - maxi(early_delay_tap0, early_delay_tap1), todo-i)};
+ do {
+ fadeCount += 1.0f;
+ const ALfloat fade0{oldCoeff + oldCoeffStep*fadeCount};
+ const ALfloat fade1{newCoeffStep*fadeCount};
+ temps[j][i++] =
+ main_delay.Line[early_delay_tap0++][j]*fade0 +
+ main_delay.Line[early_delay_tap1++][j]*fade1;
+ } while(--td);
+ }
+ }
+
+ State->mEarly.VecAp.processFaded(temps, offset, mixX, mixY, fade, todo);
+
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ ALint feedb_tap0{offset - State->mEarly.Offset[j][0]};
+ ALint feedb_tap1{offset - State->mEarly.Offset[j][1]};
+ const ALfloat feedb_oldCoeff{State->mEarly.Coeff[j][0]};
+ const ALfloat feedb_oldCoeffStep{-feedb_oldCoeff / FADE_SAMPLES};
+ const ALfloat feedb_newCoeffStep{State->mEarly.Coeff[j][1] / FADE_SAMPLES};
+ ALfloat fadeCount{fade};
+
+ ASSUME(base >= 0);
+ for(ALsizei i{0};i < todo;)
+ {
+ feedb_tap0 &= early_delay.Mask;
+ feedb_tap1 &= early_delay.Mask;
+ ALsizei td{mini(early_delay.Mask+1 - maxi(feedb_tap0, feedb_tap1), todo - i)};
+
+ do {
+ fadeCount += 1.0f;
+ const ALfloat fade0{feedb_oldCoeff + feedb_oldCoeffStep*fadeCount};
+ const ALfloat fade1{feedb_newCoeffStep*fadeCount};
+ out[j][base+i] = temps[j][i] +
+ early_delay.Line[feedb_tap0++][j]*fade0 +
+ early_delay.Line[feedb_tap1++][j]*fade1;
+ ++i;
+ } while(--td);
+ }
+ }
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ early_delay.write(offset, NUM_LINES-1-j, temps[j].data(), todo);
+
+ const ALsizei late_feed_tap{offset - State->mLateFeedTap};
+ VectorScatterRevDelayIn(main_delay, late_feed_tap, mixX, mixY, base,
+ {out.cbegin(), out.cend()}, todo);
+}
+
+/* This generates the reverb tail using a modified feed-back delay network
+ * (FDN).
+ *
+ * Results from the early reflections are mixed with the output from the late
+ * delay lines.
+ *
+ * The late response is then completed by T60 and all-pass filtering the mix.
+ *
+ * Finally, the lines are reversed (so they feed their opposite directions)
+ * and scattered with the FDN matrix before re-feeding the delay lines.
+ *
+ * Two variations are made, one for transitional (cross-faded) delay line
+ * processing and one for non-transitional processing.
+ */
+void LateReverb_Unfaded(ReverbState *State, const ALsizei offset, const ALsizei todo,
+ const ALsizei base, const al::span<FloatBufferLine,NUM_LINES> out)
+{
+ const al::span<FloatBufferLine,NUM_LINES> temps{State->mTempSamples};
+ const DelayLineI late_delay{State->mLate.Delay};
+ const DelayLineI main_delay{State->mDelay};
+ const ALfloat mixX{State->mMixX};
+ const ALfloat mixY{State->mMixY};
+
+ ASSUME(todo > 0);
+
+ /* First, load decorrelated samples from the main and feedback delay lines.
+ * Filter the signal to apply its frequency-dependent decay.
+ */
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ ALsizei late_delay_tap{offset - State->mLateDelayTap[j][0]};
+ ALsizei late_feedb_tap{offset - State->mLate.Offset[j][0]};
+ const ALfloat midGain{State->mLate.T60[j].MidGain[0]};
+ const ALfloat densityGain{State->mLate.DensityGain[0] * midGain};
+ for(ALsizei i{0};i < todo;)
+ {
+ late_delay_tap &= main_delay.Mask;
+ late_feedb_tap &= late_delay.Mask;
+ ALsizei td{mini(
+ mini(main_delay.Mask+1 - late_delay_tap, late_delay.Mask+1 - late_feedb_tap),
+ todo - i)};
+ do {
+ temps[j][i++] =
+ main_delay.Line[late_delay_tap++][j]*densityGain +
+ late_delay.Line[late_feedb_tap++][j]*midGain;
+ } while(--td);
+ }
+ State->mLate.T60[j].process(temps[j].data(), todo);
+ }
+
+ /* Apply a vector all-pass to improve micro-surface diffusion, and write
+ * out the results for mixing.
+ */
+ State->mLate.VecAp.processUnfaded(temps, offset, mixX, mixY, todo);
+
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ std::copy_n(temps[j].begin(), todo, out[j].begin()+base);
+
+ /* Finally, scatter and bounce the results to refeed the feedback buffer. */
+ VectorScatterRevDelayIn(late_delay, offset, mixX, mixY, base,
+ {out.cbegin(), out.cend()}, todo);
+}
+void LateReverb_Faded(ReverbState *State, const ALsizei offset, const ALsizei todo,
+ const ALfloat fade, const ALsizei base, const al::span<FloatBufferLine,NUM_LINES> out)
+{
+ const al::span<FloatBufferLine,NUM_LINES> temps{State->mTempSamples};
+ const DelayLineI late_delay{State->mLate.Delay};
+ const DelayLineI main_delay{State->mDelay};
+ const ALfloat mixX{State->mMixX};
+ const ALfloat mixY{State->mMixY};
+
+ ASSUME(todo > 0);
+
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ {
+ const ALfloat oldMidGain{State->mLate.T60[j].MidGain[0]};
+ const ALfloat midGain{State->mLate.T60[j].MidGain[1]};
+ const ALfloat oldMidStep{-oldMidGain / FADE_SAMPLES};
+ const ALfloat midStep{midGain / FADE_SAMPLES};
+ const ALfloat oldDensityGain{State->mLate.DensityGain[0] * oldMidGain};
+ const ALfloat densityGain{State->mLate.DensityGain[1] * midGain};
+ const ALfloat oldDensityStep{-oldDensityGain / FADE_SAMPLES};
+ const ALfloat densityStep{densityGain / FADE_SAMPLES};
+ ALsizei late_delay_tap0{offset - State->mLateDelayTap[j][0]};
+ ALsizei late_delay_tap1{offset - State->mLateDelayTap[j][1]};
+ ALsizei late_feedb_tap0{offset - State->mLate.Offset[j][0]};
+ ALsizei late_feedb_tap1{offset - State->mLate.Offset[j][1]};
+ ALfloat fadeCount{fade};
+
+ for(ALsizei i{0};i < todo;)
+ {
+ late_delay_tap0 &= main_delay.Mask;
+ late_delay_tap1 &= main_delay.Mask;
+ late_feedb_tap0 &= late_delay.Mask;
+ late_feedb_tap1 &= late_delay.Mask;
+ ALsizei td{mini(
+ mini(main_delay.Mask+1 - maxi(late_delay_tap0, late_delay_tap1),
+ late_delay.Mask+1 - maxi(late_feedb_tap0, late_feedb_tap1)),
+ todo - i)};
+ do {
+ fadeCount += 1.0f;
+ const ALfloat fade0{oldDensityGain + oldDensityStep*fadeCount};
+ const ALfloat fade1{densityStep*fadeCount};
+ const ALfloat gfade0{oldMidGain + oldMidStep*fadeCount};
+ const ALfloat gfade1{midStep*fadeCount};
+ temps[j][i++] =
+ main_delay.Line[late_delay_tap0++][j]*fade0 +
+ main_delay.Line[late_delay_tap1++][j]*fade1 +
+ late_delay.Line[late_feedb_tap0++][j]*gfade0 +
+ late_delay.Line[late_feedb_tap1++][j]*gfade1;
+ } while(--td);
+ }
+ State->mLate.T60[j].process(temps[j].data(), todo);
+ }
+
+ State->mLate.VecAp.processFaded(temps, offset, mixX, mixY, fade, todo);
+
+ for(ALsizei j{0};j < NUM_LINES;j++)
+ std::copy_n(temps[j].begin(), todo, out[j].begin()+base);
+
+ VectorScatterRevDelayIn(late_delay, offset, mixX, mixY, base,
+ {out.cbegin(), out.cend()}, todo);
+}
+
+void ReverbState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut)
+{
+ ALsizei fadeCount{mFadeCount};
+
+ ASSUME(samplesToDo > 0);
+
+ /* Convert B-Format to A-Format for processing. */
+ const al::span<FloatBufferLine,NUM_LINES> afmt{mTempSamples};
+ for(ALsizei c{0};c < NUM_LINES;c++)
+ {
+ std::fill_n(afmt[c].begin(), samplesToDo, 0.0f);
+ MixRowSamples(afmt[c], B2A[c], {samplesIn, samplesIn+numInput}, 0, samplesToDo);
+
+ /* Band-pass the incoming samples. */
+ mFilter[c].Lp.process(afmt[c].data(), afmt[c].data(), samplesToDo);
+ mFilter[c].Hp.process(afmt[c].data(), afmt[c].data(), samplesToDo);
+ }
+
+ /* Process reverb for these samples. */
+ for(ALsizei base{0};base < samplesToDo;)
+ {
+ ALsizei todo{samplesToDo - base};
+ /* If cross-fading, don't do more samples than there are to fade. */
+ if(FADE_SAMPLES-fadeCount > 0)
+ {
+ todo = mini(todo, FADE_SAMPLES-fadeCount);
+ todo = mini(todo, mMaxUpdate[0]);
+ }
+ todo = mini(todo, mMaxUpdate[1]);
+ ASSUME(todo > 0 && todo <= BUFFERSIZE);
+
+ const ALsizei offset{mOffset + base};
+ ASSUME(offset >= 0);
+
+ /* Feed the initial delay line. */
+ for(ALsizei c{0};c < NUM_LINES;c++)
+ mDelay.write(offset, c, afmt[c].data()+base, todo);
+
+ /* Process the samples for reverb. */
+ if(UNLIKELY(fadeCount < FADE_SAMPLES))
+ {
+ auto fade = static_cast<ALfloat>(fadeCount);
+
+ /* Generate early reflections and late reverb. */
+ EarlyReflection_Faded(this, offset, todo, fade, base, mEarlyBuffer);
+
+ LateReverb_Faded(this, offset, todo, fade, base, mLateBuffer);
+
+ /* Step fading forward. */
+ fadeCount += todo;
+ if(fadeCount >= FADE_SAMPLES)
+ {
+ /* Update the cross-fading delay line taps. */
+ fadeCount = FADE_SAMPLES;
+ for(ALsizei c{0};c < NUM_LINES;c++)
+ {
+ mEarlyDelayTap[c][0] = mEarlyDelayTap[c][1];
+ mEarlyDelayCoeff[c][0] = mEarlyDelayCoeff[c][1];
+ mEarly.VecAp.Offset[c][0] = mEarly.VecAp.Offset[c][1];
+ mEarly.Offset[c][0] = mEarly.Offset[c][1];
+ mEarly.Coeff[c][0] = mEarly.Coeff[c][1];
+ mLateDelayTap[c][0] = mLateDelayTap[c][1];
+ mLate.VecAp.Offset[c][0] = mLate.VecAp.Offset[c][1];
+ mLate.Offset[c][0] = mLate.Offset[c][1];
+ mLate.T60[c].MidGain[0] = mLate.T60[c].MidGain[1];
+ }
+ mLate.DensityGain[0] = mLate.DensityGain[1];
+ mMaxUpdate[0] = mMaxUpdate[1];
+ }
+ }
+ else
+ {
+ /* Generate early reflections and late reverb. */
+ EarlyReflection_Unfaded(this, offset, todo, base, mEarlyBuffer);
+
+ LateReverb_Unfaded(this, offset, todo, base, mLateBuffer);
+ }
+
+ base += todo;
+ }
+ mOffset = (mOffset+samplesToDo) & 0x3fffffff;
+ mFadeCount = fadeCount;
+
+ /* Finally, mix early reflections and late reverb. */
+ (this->*mMixOut)(samplesOut, samplesToDo);
+}
+
+
+void EAXReverb_setParami(EffectProps *props, ALCcontext *context, ALenum param, ALint val)
+{
+ switch(param)
+ {
+ case AL_EAXREVERB_DECAY_HFLIMIT:
+ if(!(val >= AL_EAXREVERB_MIN_DECAY_HFLIMIT && val <= AL_EAXREVERB_MAX_DECAY_HFLIMIT))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb decay hflimit out of range");
+ props->Reverb.DecayHFLimit = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid EAX reverb integer property 0x%04x",
+ param);
+ }
+}
+void EAXReverb_setParamiv(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals)
+{ EAXReverb_setParami(props, context, param, vals[0]); }
+void EAXReverb_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_EAXREVERB_DENSITY:
+ if(!(val >= AL_EAXREVERB_MIN_DENSITY && val <= AL_EAXREVERB_MAX_DENSITY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb density out of range");
+ props->Reverb.Density = val;
+ break;
+
+ case AL_EAXREVERB_DIFFUSION:
+ if(!(val >= AL_EAXREVERB_MIN_DIFFUSION && val <= AL_EAXREVERB_MAX_DIFFUSION))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb diffusion out of range");
+ props->Reverb.Diffusion = val;
+ break;
+
+ case AL_EAXREVERB_GAIN:
+ if(!(val >= AL_EAXREVERB_MIN_GAIN && val <= AL_EAXREVERB_MAX_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb gain out of range");
+ props->Reverb.Gain = val;
+ break;
+
+ case AL_EAXREVERB_GAINHF:
+ if(!(val >= AL_EAXREVERB_MIN_GAINHF && val <= AL_EAXREVERB_MAX_GAINHF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb gainhf out of range");
+ props->Reverb.GainHF = val;
+ break;
+
+ case AL_EAXREVERB_GAINLF:
+ if(!(val >= AL_EAXREVERB_MIN_GAINLF && val <= AL_EAXREVERB_MAX_GAINLF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb gainlf out of range");
+ props->Reverb.GainLF = val;
+ break;
+
+ case AL_EAXREVERB_DECAY_TIME:
+ if(!(val >= AL_EAXREVERB_MIN_DECAY_TIME && val <= AL_EAXREVERB_MAX_DECAY_TIME))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb decay time out of range");
+ props->Reverb.DecayTime = val;
+ break;
+
+ case AL_EAXREVERB_DECAY_HFRATIO:
+ if(!(val >= AL_EAXREVERB_MIN_DECAY_HFRATIO && val <= AL_EAXREVERB_MAX_DECAY_HFRATIO))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb decay hfratio out of range");
+ props->Reverb.DecayHFRatio = val;
+ break;
+
+ case AL_EAXREVERB_DECAY_LFRATIO:
+ if(!(val >= AL_EAXREVERB_MIN_DECAY_LFRATIO && val <= AL_EAXREVERB_MAX_DECAY_LFRATIO))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb decay lfratio out of range");
+ props->Reverb.DecayLFRatio = val;
+ break;
+
+ case AL_EAXREVERB_REFLECTIONS_GAIN:
+ if(!(val >= AL_EAXREVERB_MIN_REFLECTIONS_GAIN && val <= AL_EAXREVERB_MAX_REFLECTIONS_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb reflections gain out of range");
+ props->Reverb.ReflectionsGain = val;
+ break;
+
+ case AL_EAXREVERB_REFLECTIONS_DELAY:
+ if(!(val >= AL_EAXREVERB_MIN_REFLECTIONS_DELAY && val <= AL_EAXREVERB_MAX_REFLECTIONS_DELAY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb reflections delay out of range");
+ props->Reverb.ReflectionsDelay = val;
+ break;
+
+ case AL_EAXREVERB_LATE_REVERB_GAIN:
+ if(!(val >= AL_EAXREVERB_MIN_LATE_REVERB_GAIN && val <= AL_EAXREVERB_MAX_LATE_REVERB_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb late reverb gain out of range");
+ props->Reverb.LateReverbGain = val;
+ break;
+
+ case AL_EAXREVERB_LATE_REVERB_DELAY:
+ if(!(val >= AL_EAXREVERB_MIN_LATE_REVERB_DELAY && val <= AL_EAXREVERB_MAX_LATE_REVERB_DELAY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb late reverb delay out of range");
+ props->Reverb.LateReverbDelay = val;
+ break;
+
+ case AL_EAXREVERB_AIR_ABSORPTION_GAINHF:
+ if(!(val >= AL_EAXREVERB_MIN_AIR_ABSORPTION_GAINHF && val <= AL_EAXREVERB_MAX_AIR_ABSORPTION_GAINHF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb air absorption gainhf out of range");
+ props->Reverb.AirAbsorptionGainHF = val;
+ break;
+
+ case AL_EAXREVERB_ECHO_TIME:
+ if(!(val >= AL_EAXREVERB_MIN_ECHO_TIME && val <= AL_EAXREVERB_MAX_ECHO_TIME))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb echo time out of range");
+ props->Reverb.EchoTime = val;
+ break;
+
+ case AL_EAXREVERB_ECHO_DEPTH:
+ if(!(val >= AL_EAXREVERB_MIN_ECHO_DEPTH && val <= AL_EAXREVERB_MAX_ECHO_DEPTH))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb echo depth out of range");
+ props->Reverb.EchoDepth = val;
+ break;
+
+ case AL_EAXREVERB_MODULATION_TIME:
+ if(!(val >= AL_EAXREVERB_MIN_MODULATION_TIME && val <= AL_EAXREVERB_MAX_MODULATION_TIME))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb modulation time out of range");
+ props->Reverb.ModulationTime = val;
+ break;
+
+ case AL_EAXREVERB_MODULATION_DEPTH:
+ if(!(val >= AL_EAXREVERB_MIN_MODULATION_DEPTH && val <= AL_EAXREVERB_MAX_MODULATION_DEPTH))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb modulation depth out of range");
+ props->Reverb.ModulationDepth = val;
+ break;
+
+ case AL_EAXREVERB_HFREFERENCE:
+ if(!(val >= AL_EAXREVERB_MIN_HFREFERENCE && val <= AL_EAXREVERB_MAX_HFREFERENCE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb hfreference out of range");
+ props->Reverb.HFReference = val;
+ break;
+
+ case AL_EAXREVERB_LFREFERENCE:
+ if(!(val >= AL_EAXREVERB_MIN_LFREFERENCE && val <= AL_EAXREVERB_MAX_LFREFERENCE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb lfreference out of range");
+ props->Reverb.LFReference = val;
+ break;
+
+ case AL_EAXREVERB_ROOM_ROLLOFF_FACTOR:
+ if(!(val >= AL_EAXREVERB_MIN_ROOM_ROLLOFF_FACTOR && val <= AL_EAXREVERB_MAX_ROOM_ROLLOFF_FACTOR))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb room rolloff factor out of range");
+ props->Reverb.RoomRolloffFactor = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid EAX reverb float property 0x%04x",
+ param);
+ }
+}
+void EAXReverb_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{
+ switch(param)
+ {
+ case AL_EAXREVERB_REFLECTIONS_PAN:
+ if(!(std::isfinite(vals[0]) && std::isfinite(vals[1]) && std::isfinite(vals[2])))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb reflections pan out of range");
+ props->Reverb.ReflectionsPan[0] = vals[0];
+ props->Reverb.ReflectionsPan[1] = vals[1];
+ props->Reverb.ReflectionsPan[2] = vals[2];
+ break;
+ case AL_EAXREVERB_LATE_REVERB_PAN:
+ if(!(std::isfinite(vals[0]) && std::isfinite(vals[1]) && std::isfinite(vals[2])))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "EAX Reverb late reverb pan out of range");
+ props->Reverb.LateReverbPan[0] = vals[0];
+ props->Reverb.LateReverbPan[1] = vals[1];
+ props->Reverb.LateReverbPan[2] = vals[2];
+ break;
+
+ default:
+ EAXReverb_setParamf(props, context, param, vals[0]);
+ break;
+ }
+}
+
+void EAXReverb_getParami(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val)
+{
+ switch(param)
+ {
+ case AL_EAXREVERB_DECAY_HFLIMIT:
+ *val = props->Reverb.DecayHFLimit;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid EAX reverb integer property 0x%04x",
+ param);
+ }
+}
+void EAXReverb_getParamiv(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals)
+{ EAXReverb_getParami(props, context, param, vals); }
+void EAXReverb_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_EAXREVERB_DENSITY:
+ *val = props->Reverb.Density;
+ break;
+
+ case AL_EAXREVERB_DIFFUSION:
+ *val = props->Reverb.Diffusion;
+ break;
+
+ case AL_EAXREVERB_GAIN:
+ *val = props->Reverb.Gain;
+ break;
+
+ case AL_EAXREVERB_GAINHF:
+ *val = props->Reverb.GainHF;
+ break;
+
+ case AL_EAXREVERB_GAINLF:
+ *val = props->Reverb.GainLF;
+ break;
+
+ case AL_EAXREVERB_DECAY_TIME:
+ *val = props->Reverb.DecayTime;
+ break;
+
+ case AL_EAXREVERB_DECAY_HFRATIO:
+ *val = props->Reverb.DecayHFRatio;
+ break;
+
+ case AL_EAXREVERB_DECAY_LFRATIO:
+ *val = props->Reverb.DecayLFRatio;
+ break;
+
+ case AL_EAXREVERB_REFLECTIONS_GAIN:
+ *val = props->Reverb.ReflectionsGain;
+ break;
+
+ case AL_EAXREVERB_REFLECTIONS_DELAY:
+ *val = props->Reverb.ReflectionsDelay;
+ break;
+
+ case AL_EAXREVERB_LATE_REVERB_GAIN:
+ *val = props->Reverb.LateReverbGain;
+ break;
+
+ case AL_EAXREVERB_LATE_REVERB_DELAY:
+ *val = props->Reverb.LateReverbDelay;
+ break;
+
+ case AL_EAXREVERB_AIR_ABSORPTION_GAINHF:
+ *val = props->Reverb.AirAbsorptionGainHF;
+ break;
+
+ case AL_EAXREVERB_ECHO_TIME:
+ *val = props->Reverb.EchoTime;
+ break;
+
+ case AL_EAXREVERB_ECHO_DEPTH:
+ *val = props->Reverb.EchoDepth;
+ break;
+
+ case AL_EAXREVERB_MODULATION_TIME:
+ *val = props->Reverb.ModulationTime;
+ break;
+
+ case AL_EAXREVERB_MODULATION_DEPTH:
+ *val = props->Reverb.ModulationDepth;
+ break;
+
+ case AL_EAXREVERB_HFREFERENCE:
+ *val = props->Reverb.HFReference;
+ break;
+
+ case AL_EAXREVERB_LFREFERENCE:
+ *val = props->Reverb.LFReference;
+ break;
+
+ case AL_EAXREVERB_ROOM_ROLLOFF_FACTOR:
+ *val = props->Reverb.RoomRolloffFactor;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid EAX reverb float property 0x%04x",
+ param);
+ }
+}
+void EAXReverb_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{
+ switch(param)
+ {
+ case AL_EAXREVERB_REFLECTIONS_PAN:
+ vals[0] = props->Reverb.ReflectionsPan[0];
+ vals[1] = props->Reverb.ReflectionsPan[1];
+ vals[2] = props->Reverb.ReflectionsPan[2];
+ break;
+ case AL_EAXREVERB_LATE_REVERB_PAN:
+ vals[0] = props->Reverb.LateReverbPan[0];
+ vals[1] = props->Reverb.LateReverbPan[1];
+ vals[2] = props->Reverb.LateReverbPan[2];
+ break;
+
+ default:
+ EAXReverb_getParamf(props, context, param, vals);
+ break;
+ }
+}
+
+DEFINE_ALEFFECT_VTABLE(EAXReverb);
+
+
+struct ReverbStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new ReverbState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &EAXReverb_vtable; }
+};
+
+EffectProps ReverbStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Reverb.Density = AL_EAXREVERB_DEFAULT_DENSITY;
+ props.Reverb.Diffusion = AL_EAXREVERB_DEFAULT_DIFFUSION;
+ props.Reverb.Gain = AL_EAXREVERB_DEFAULT_GAIN;
+ props.Reverb.GainHF = AL_EAXREVERB_DEFAULT_GAINHF;
+ props.Reverb.GainLF = AL_EAXREVERB_DEFAULT_GAINLF;
+ props.Reverb.DecayTime = AL_EAXREVERB_DEFAULT_DECAY_TIME;
+ props.Reverb.DecayHFRatio = AL_EAXREVERB_DEFAULT_DECAY_HFRATIO;
+ props.Reverb.DecayLFRatio = AL_EAXREVERB_DEFAULT_DECAY_LFRATIO;
+ props.Reverb.ReflectionsGain = AL_EAXREVERB_DEFAULT_REFLECTIONS_GAIN;
+ props.Reverb.ReflectionsDelay = AL_EAXREVERB_DEFAULT_REFLECTIONS_DELAY;
+ props.Reverb.ReflectionsPan[0] = AL_EAXREVERB_DEFAULT_REFLECTIONS_PAN_XYZ;
+ props.Reverb.ReflectionsPan[1] = AL_EAXREVERB_DEFAULT_REFLECTIONS_PAN_XYZ;
+ props.Reverb.ReflectionsPan[2] = AL_EAXREVERB_DEFAULT_REFLECTIONS_PAN_XYZ;
+ props.Reverb.LateReverbGain = AL_EAXREVERB_DEFAULT_LATE_REVERB_GAIN;
+ props.Reverb.LateReverbDelay = AL_EAXREVERB_DEFAULT_LATE_REVERB_DELAY;
+ props.Reverb.LateReverbPan[0] = AL_EAXREVERB_DEFAULT_LATE_REVERB_PAN_XYZ;
+ props.Reverb.LateReverbPan[1] = AL_EAXREVERB_DEFAULT_LATE_REVERB_PAN_XYZ;
+ props.Reverb.LateReverbPan[2] = AL_EAXREVERB_DEFAULT_LATE_REVERB_PAN_XYZ;
+ props.Reverb.EchoTime = AL_EAXREVERB_DEFAULT_ECHO_TIME;
+ props.Reverb.EchoDepth = AL_EAXREVERB_DEFAULT_ECHO_DEPTH;
+ props.Reverb.ModulationTime = AL_EAXREVERB_DEFAULT_MODULATION_TIME;
+ props.Reverb.ModulationDepth = AL_EAXREVERB_DEFAULT_MODULATION_DEPTH;
+ props.Reverb.AirAbsorptionGainHF = AL_EAXREVERB_DEFAULT_AIR_ABSORPTION_GAINHF;
+ props.Reverb.HFReference = AL_EAXREVERB_DEFAULT_HFREFERENCE;
+ props.Reverb.LFReference = AL_EAXREVERB_DEFAULT_LFREFERENCE;
+ props.Reverb.RoomRolloffFactor = AL_EAXREVERB_DEFAULT_ROOM_ROLLOFF_FACTOR;
+ props.Reverb.DecayHFLimit = AL_EAXREVERB_DEFAULT_DECAY_HFLIMIT;
+ return props;
+}
+
+
+void StdReverb_setParami(EffectProps *props, ALCcontext *context, ALenum param, ALint val)
+{
+ switch(param)
+ {
+ case AL_REVERB_DECAY_HFLIMIT:
+ if(!(val >= AL_REVERB_MIN_DECAY_HFLIMIT && val <= AL_REVERB_MAX_DECAY_HFLIMIT))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb decay hflimit out of range");
+ props->Reverb.DecayHFLimit = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid reverb integer property 0x%04x", param);
+ }
+}
+void StdReverb_setParamiv(EffectProps *props, ALCcontext *context, ALenum param, const ALint *vals)
+{ StdReverb_setParami(props, context, param, vals[0]); }
+void StdReverb_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_REVERB_DENSITY:
+ if(!(val >= AL_REVERB_MIN_DENSITY && val <= AL_REVERB_MAX_DENSITY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb density out of range");
+ props->Reverb.Density = val;
+ break;
+
+ case AL_REVERB_DIFFUSION:
+ if(!(val >= AL_REVERB_MIN_DIFFUSION && val <= AL_REVERB_MAX_DIFFUSION))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb diffusion out of range");
+ props->Reverb.Diffusion = val;
+ break;
+
+ case AL_REVERB_GAIN:
+ if(!(val >= AL_REVERB_MIN_GAIN && val <= AL_REVERB_MAX_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb gain out of range");
+ props->Reverb.Gain = val;
+ break;
+
+ case AL_REVERB_GAINHF:
+ if(!(val >= AL_REVERB_MIN_GAINHF && val <= AL_REVERB_MAX_GAINHF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb gainhf out of range");
+ props->Reverb.GainHF = val;
+ break;
+
+ case AL_REVERB_DECAY_TIME:
+ if(!(val >= AL_REVERB_MIN_DECAY_TIME && val <= AL_REVERB_MAX_DECAY_TIME))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb decay time out of range");
+ props->Reverb.DecayTime = val;
+ break;
+
+ case AL_REVERB_DECAY_HFRATIO:
+ if(!(val >= AL_REVERB_MIN_DECAY_HFRATIO && val <= AL_REVERB_MAX_DECAY_HFRATIO))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb decay hfratio out of range");
+ props->Reverb.DecayHFRatio = val;
+ break;
+
+ case AL_REVERB_REFLECTIONS_GAIN:
+ if(!(val >= AL_REVERB_MIN_REFLECTIONS_GAIN && val <= AL_REVERB_MAX_REFLECTIONS_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb reflections gain out of range");
+ props->Reverb.ReflectionsGain = val;
+ break;
+
+ case AL_REVERB_REFLECTIONS_DELAY:
+ if(!(val >= AL_REVERB_MIN_REFLECTIONS_DELAY && val <= AL_REVERB_MAX_REFLECTIONS_DELAY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb reflections delay out of range");
+ props->Reverb.ReflectionsDelay = val;
+ break;
+
+ case AL_REVERB_LATE_REVERB_GAIN:
+ if(!(val >= AL_REVERB_MIN_LATE_REVERB_GAIN && val <= AL_REVERB_MAX_LATE_REVERB_GAIN))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb late reverb gain out of range");
+ props->Reverb.LateReverbGain = val;
+ break;
+
+ case AL_REVERB_LATE_REVERB_DELAY:
+ if(!(val >= AL_REVERB_MIN_LATE_REVERB_DELAY && val <= AL_REVERB_MAX_LATE_REVERB_DELAY))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb late reverb delay out of range");
+ props->Reverb.LateReverbDelay = val;
+ break;
+
+ case AL_REVERB_AIR_ABSORPTION_GAINHF:
+ if(!(val >= AL_REVERB_MIN_AIR_ABSORPTION_GAINHF && val <= AL_REVERB_MAX_AIR_ABSORPTION_GAINHF))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb air absorption gainhf out of range");
+ props->Reverb.AirAbsorptionGainHF = val;
+ break;
+
+ case AL_REVERB_ROOM_ROLLOFF_FACTOR:
+ if(!(val >= AL_REVERB_MIN_ROOM_ROLLOFF_FACTOR && val <= AL_REVERB_MAX_ROOM_ROLLOFF_FACTOR))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Reverb room rolloff factor out of range");
+ props->Reverb.RoomRolloffFactor = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid reverb float property 0x%04x", param);
+ }
+}
+void StdReverb_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ StdReverb_setParamf(props, context, param, vals[0]); }
+
+void StdReverb_getParami(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val)
+{
+ switch(param)
+ {
+ case AL_REVERB_DECAY_HFLIMIT:
+ *val = props->Reverb.DecayHFLimit;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid reverb integer property 0x%04x", param);
+ }
+}
+void StdReverb_getParamiv(const EffectProps *props, ALCcontext *context, ALenum param, ALint *vals)
+{ StdReverb_getParami(props, context, param, vals); }
+void StdReverb_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_REVERB_DENSITY:
+ *val = props->Reverb.Density;
+ break;
+
+ case AL_REVERB_DIFFUSION:
+ *val = props->Reverb.Diffusion;
+ break;
+
+ case AL_REVERB_GAIN:
+ *val = props->Reverb.Gain;
+ break;
+
+ case AL_REVERB_GAINHF:
+ *val = props->Reverb.GainHF;
+ break;
+
+ case AL_REVERB_DECAY_TIME:
+ *val = props->Reverb.DecayTime;
+ break;
+
+ case AL_REVERB_DECAY_HFRATIO:
+ *val = props->Reverb.DecayHFRatio;
+ break;
+
+ case AL_REVERB_REFLECTIONS_GAIN:
+ *val = props->Reverb.ReflectionsGain;
+ break;
+
+ case AL_REVERB_REFLECTIONS_DELAY:
+ *val = props->Reverb.ReflectionsDelay;
+ break;
+
+ case AL_REVERB_LATE_REVERB_GAIN:
+ *val = props->Reverb.LateReverbGain;
+ break;
+
+ case AL_REVERB_LATE_REVERB_DELAY:
+ *val = props->Reverb.LateReverbDelay;
+ break;
+
+ case AL_REVERB_AIR_ABSORPTION_GAINHF:
+ *val = props->Reverb.AirAbsorptionGainHF;
+ break;
+
+ case AL_REVERB_ROOM_ROLLOFF_FACTOR:
+ *val = props->Reverb.RoomRolloffFactor;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid reverb float property 0x%04x", param);
+ }
+}
+void StdReverb_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ StdReverb_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(StdReverb);
+
+
+struct StdReverbStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new ReverbState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &StdReverb_vtable; }
+};
+
+EffectProps StdReverbStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Reverb.Density = AL_REVERB_DEFAULT_DENSITY;
+ props.Reverb.Diffusion = AL_REVERB_DEFAULT_DIFFUSION;
+ props.Reverb.Gain = AL_REVERB_DEFAULT_GAIN;
+ props.Reverb.GainHF = AL_REVERB_DEFAULT_GAINHF;
+ props.Reverb.GainLF = 1.0f;
+ props.Reverb.DecayTime = AL_REVERB_DEFAULT_DECAY_TIME;
+ props.Reverb.DecayHFRatio = AL_REVERB_DEFAULT_DECAY_HFRATIO;
+ props.Reverb.DecayLFRatio = 1.0f;
+ props.Reverb.ReflectionsGain = AL_REVERB_DEFAULT_REFLECTIONS_GAIN;
+ props.Reverb.ReflectionsDelay = AL_REVERB_DEFAULT_REFLECTIONS_DELAY;
+ props.Reverb.ReflectionsPan[0] = 0.0f;
+ props.Reverb.ReflectionsPan[1] = 0.0f;
+ props.Reverb.ReflectionsPan[2] = 0.0f;
+ props.Reverb.LateReverbGain = AL_REVERB_DEFAULT_LATE_REVERB_GAIN;
+ props.Reverb.LateReverbDelay = AL_REVERB_DEFAULT_LATE_REVERB_DELAY;
+ props.Reverb.LateReverbPan[0] = 0.0f;
+ props.Reverb.LateReverbPan[1] = 0.0f;
+ props.Reverb.LateReverbPan[2] = 0.0f;
+ props.Reverb.EchoTime = 0.25f;
+ props.Reverb.EchoDepth = 0.0f;
+ props.Reverb.ModulationTime = 0.25f;
+ props.Reverb.ModulationDepth = 0.0f;
+ props.Reverb.AirAbsorptionGainHF = AL_REVERB_DEFAULT_AIR_ABSORPTION_GAINHF;
+ props.Reverb.HFReference = 5000.0f;
+ props.Reverb.LFReference = 250.0f;
+ props.Reverb.RoomRolloffFactor = AL_REVERB_DEFAULT_ROOM_ROLLOFF_FACTOR;
+ props.Reverb.DecayHFLimit = AL_REVERB_DEFAULT_DECAY_HFLIMIT;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *ReverbStateFactory_getFactory()
+{
+ static ReverbStateFactory ReverbFactory{};
+ return &ReverbFactory;
+}
+
+EffectStateFactory *StdReverbStateFactory_getFactory()
+{
+ static StdReverbStateFactory ReverbFactory{};
+ return &ReverbFactory;
+}
diff --git a/alc/effects/vmorpher.cpp b/alc/effects/vmorpher.cpp
new file mode 100644
index 00000000..eebba3f1
--- /dev/null
+++ b/alc/effects/vmorpher.cpp
@@ -0,0 +1,430 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2019 by Anis A. Hireche
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cmath>
+#include <cstdlib>
+#include <algorithm>
+#include <functional>
+
+#include "alcmain.h"
+#include "alcontext.h"
+#include "alAuxEffectSlot.h"
+#include "alError.h"
+#include "alu.h"
+
+namespace {
+
+#define MAX_UPDATE_SAMPLES 128
+#define NUM_FORMANTS 4
+#define NUM_FILTERS 2
+#define Q_FACTOR 5.0f
+
+#define VOWEL_A_INDEX 0
+#define VOWEL_B_INDEX 1
+
+#define WAVEFORM_FRACBITS 24
+#define WAVEFORM_FRACONE (1<<WAVEFORM_FRACBITS)
+#define WAVEFORM_FRACMASK (WAVEFORM_FRACONE-1)
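+/* The LFO phase is kept as 24-bit fixed-point. Oscillate() below advances it
+ * by the per-sample step, wraps it with WAVEFORM_FRACMASK, and the waveform
+ * functions map the wrapped phase to a blend factor in [0,1].
+ */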
+
+inline ALfloat Sin(ALsizei index)
+{
+ constexpr ALfloat scale{al::MathDefs<float>::Tau() / ALfloat{WAVEFORM_FRACONE}};
+ return std::sin(static_cast<ALfloat>(index) * scale)*0.5f + 0.5f;
+}
+
+inline ALfloat Saw(ALsizei index)
+{
+ return static_cast<ALfloat>(index) / ALfloat{WAVEFORM_FRACONE};
+}
+
+inline ALfloat Triangle(ALsizei index)
+{
+ return std::fabs(static_cast<ALfloat>(index)*(2.0f/WAVEFORM_FRACONE) - 1.0f);
+}
+
+inline ALfloat Half(ALsizei)
+{
+ return 0.5f;
+}
+
+template<ALfloat func(ALsizei)>
+void Oscillate(ALfloat *RESTRICT dst, ALsizei index, const ALsizei step, ALsizei todo)
+{
+ for(ALsizei i{0};i < todo;i++)
+ {
+ index += step;
+ index &= WAVEFORM_FRACMASK;
+ dst[i] = func(index);
+ }
+}
+
+struct FormantFilter
+{
+ ALfloat f0norm{0.0f};
+ ALfloat fGain{1.0f};
+ ALfloat s1{0.0f};
+ ALfloat s2{0.0f};
+
+ FormantFilter() = default;
+ FormantFilter(ALfloat f0norm_, ALfloat gain) : f0norm{f0norm_}, fGain{gain} { }
+
+ inline void process(const ALfloat* samplesIn, ALfloat* samplesOut, const ALsizei numInput)
+ {
+ /* A state variable filter from a topology-preserving transform.
+ * Based on a talk given by Ivan Cohen: https://www.youtube.com/watch?v=esjHXGPyrhg
+ */
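+        /* Per sample (see the loop below), with h = 1/(1 + g/Q + g*g):
+         *   H = h * (x - (1/Q + g)*s1 - s2)   high-pass
+         *   B = g*H + s1                      band-pass
+         *   L = g*B + s2                      low-pass
+         * and the states advance as s1 = g*H + B, s2 = g*B + L. Only the
+         * band-pass output is used, scaled by the formant gain.
+         */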
+ const ALfloat g = std::tan(al::MathDefs<float>::Pi() * f0norm);
+ const ALfloat h = 1.0f / (1 + (g / Q_FACTOR) + (g * g));
+
+ for (ALsizei i{0};i < numInput;i++)
+ {
+ const ALfloat H = h * (samplesIn[i] - (1.0f / Q_FACTOR + g) * s1 - s2);
+ const ALfloat B = g * H + s1;
+ const ALfloat L = g * B + s2;
+
+ s1 = g * H + B;
+ s2 = g * B + L;
+
+            // Apply the formant (band-pass) gain and accumulate into the output.
+ samplesOut[i] += B * fGain;
+ }
+ }
+
+ inline void clear()
+ {
+ s1 = 0.0f;
+ s2 = 0.0f;
+ }
+};
+
+
+struct VmorpherState final : public EffectState {
+ struct {
+ /* Effect parameters */
+ FormantFilter Formants[NUM_FILTERS][NUM_FORMANTS];
+
+ /* Effect gains for each channel */
+ ALfloat CurrentGains[MAX_OUTPUT_CHANNELS]{};
+ ALfloat TargetGains[MAX_OUTPUT_CHANNELS]{};
+ } mChans[MAX_AMBI_CHANNELS];
+
+ void (*mGetSamples)(ALfloat* RESTRICT, ALsizei, const ALsizei, ALsizei) {};
+
+ ALsizei mIndex{0};
+ ALsizei mStep{1};
+
+ /* Effects buffers */
+ ALfloat mSampleBufferA[MAX_UPDATE_SAMPLES]{};
+ ALfloat mSampleBufferB[MAX_UPDATE_SAMPLES]{};
+
+ ALboolean deviceUpdate(const ALCdevice *device) override;
+ void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target) override;
+ void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;
+
+ static std::array<FormantFilter,4> getFiltersByPhoneme(ALenum phoneme, ALfloat frequency, ALfloat pitch);
+
+ DEF_NEWDEL(VmorpherState)
+};
+
+std::array<FormantFilter,4> VmorpherState::getFiltersByPhoneme(ALenum phoneme, ALfloat frequency, ALfloat pitch)
+{
+    /* Using the soprano formant set of values to better match the mid-range
+     * frequency space.
+ *
+ * See: https://www.classes.cs.uchicago.edu/archive/1999/spring/CS295/Computing_Resources/Csound/CsManual3.48b1.HTML/Appendices/table3.html
+ */
+ switch(phoneme)
+ {
+ case AL_VOCAL_MORPHER_PHONEME_A:
+ return {{
+ {( 800 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
+ {(1150 * pitch) / frequency, 0.501187f}, /* std::pow(10.0f, -6 / 20.0f); */
+ {(2900 * pitch) / frequency, 0.025118f}, /* std::pow(10.0f, -32 / 20.0f); */
+ {(3900 * pitch) / frequency, 0.100000f} /* std::pow(10.0f, -20 / 20.0f); */
+ }};
+ case AL_VOCAL_MORPHER_PHONEME_E:
+ return {{
+ {( 350 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
+ {(2000 * pitch) / frequency, 0.100000f}, /* std::pow(10.0f, -20 / 20.0f); */
+ {(2800 * pitch) / frequency, 0.177827f}, /* std::pow(10.0f, -15 / 20.0f); */
+ {(3600 * pitch) / frequency, 0.009999f} /* std::pow(10.0f, -40 / 20.0f); */
+ }};
+ case AL_VOCAL_MORPHER_PHONEME_I:
+ return {{
+ {( 270 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
+ {(2140 * pitch) / frequency, 0.251188f}, /* std::pow(10.0f, -12 / 20.0f); */
+ {(2950 * pitch) / frequency, 0.050118f}, /* std::pow(10.0f, -26 / 20.0f); */
+ {(3900 * pitch) / frequency, 0.050118f} /* std::pow(10.0f, -26 / 20.0f); */
+ }};
+ case AL_VOCAL_MORPHER_PHONEME_O:
+ return {{
+ {( 450 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
+ {( 800 * pitch) / frequency, 0.281838f}, /* std::pow(10.0f, -11 / 20.0f); */
+ {(2830 * pitch) / frequency, 0.079432f}, /* std::pow(10.0f, -22 / 20.0f); */
+ {(3800 * pitch) / frequency, 0.079432f} /* std::pow(10.0f, -22 / 20.0f); */
+ }};
+ case AL_VOCAL_MORPHER_PHONEME_U:
+ return {{
+ {( 325 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
+ {( 700 * pitch) / frequency, 0.158489f}, /* std::pow(10.0f, -16 / 20.0f); */
+ {(2700 * pitch) / frequency, 0.017782f}, /* std::pow(10.0f, -35 / 20.0f); */
+ {(3800 * pitch) / frequency, 0.009999f} /* std::pow(10.0f, -40 / 20.0f); */
+ }};
+ }
+ return {};
+}
+
+
+ALboolean VmorpherState::deviceUpdate(const ALCdevice* /*device*/)
+{
+ for(auto &e : mChans)
+ {
+ std::for_each(std::begin(e.Formants[VOWEL_A_INDEX]), std::end(e.Formants[VOWEL_A_INDEX]),
+ std::mem_fn(&FormantFilter::clear));
+ std::for_each(std::begin(e.Formants[VOWEL_B_INDEX]), std::end(e.Formants[VOWEL_B_INDEX]),
+ std::mem_fn(&FormantFilter::clear));
+ std::fill(std::begin(e.CurrentGains), std::end(e.CurrentGains), 0.0f);
+ }
+
+ return AL_TRUE;
+}
+
+void VmorpherState::update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props, const EffectTarget target)
+{
+ const ALCdevice *device{context->Device};
+ const ALfloat frequency{static_cast<ALfloat>(device->Frequency)};
+ const ALfloat step{props->Vmorpher.Rate / static_cast<ALfloat>(device->Frequency)};
+ mStep = fastf2i(clampf(step*WAVEFORM_FRACONE, 0.0f, ALfloat{WAVEFORM_FRACONE-1}));
+
+ if(mStep == 0)
+ mGetSamples = Oscillate<Half>;
+ else if(props->Vmorpher.Waveform == AL_VOCAL_MORPHER_WAVEFORM_SINUSOID)
+ mGetSamples = Oscillate<Sin>;
+ else if(props->Vmorpher.Waveform == AL_VOCAL_MORPHER_WAVEFORM_SAWTOOTH)
+ mGetSamples = Oscillate<Saw>;
+ else /*if(props->Vmorpher.Waveform == AL_VOCAL_MORPHER_WAVEFORM_TRIANGLE)*/
+ mGetSamples = Oscillate<Triangle>;
+
+ const ALfloat pitchA{fastf2i(std::pow(2.0f, props->Vmorpher.PhonemeACoarseTuning*100.0f / 2400.0f)*FRACTIONONE) * (1.0f/FRACTIONONE)};
+ const ALfloat pitchB{fastf2i(std::pow(2.0f, props->Vmorpher.PhonemeBCoarseTuning*100.0f / 2400.0f)*FRACTIONONE) * (1.0f/FRACTIONONE)};
+
+ auto vowelA = getFiltersByPhoneme(props->Vmorpher.PhonemeA, frequency, pitchA);
+ auto vowelB = getFiltersByPhoneme(props->Vmorpher.PhonemeB, frequency, pitchB);
+
+ /* Copy the filter coefficients to the input channels. */
+ for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
+ {
+ std::copy(vowelA.begin(), vowelA.end(), std::begin(mChans[i].Formants[VOWEL_A_INDEX]));
+ std::copy(vowelB.begin(), vowelB.end(), std::begin(mChans[i].Formants[VOWEL_B_INDEX]));
+ }
+
+ mOutTarget = target.Main->Buffer;
+ for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
+ {
+ auto coeffs = GetAmbiIdentityRow(i);
+ ComputePanGains(target.Main, coeffs.data(), slot->Params.Gain, mChans[i].TargetGains);
+ }
+}
+
+void VmorpherState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn, const ALsizei numInput, const al::span<FloatBufferLine> samplesOut)
+{
+    /* This follows the EFX specification for a conformant implementation, which
+     * describes the effect as a pair of 4-band formant filters blended together
+     * using an LFO.
+     */
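+    /* Each block of up to MAX_UPDATE_SAMPLES samples: generate the LFO, run
+     * the input through the four formant filters of each vowel into separate
+     * accumulation buffers, blend the two results using the LFO as the
+     * interpolation factor, then mix the blend to the output channels.
+     */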
+ for(ALsizei base{0};base < samplesToDo;)
+ {
+ alignas(16) ALfloat lfo[MAX_UPDATE_SAMPLES];
+ const ALsizei td = mini(MAX_UPDATE_SAMPLES, samplesToDo-base);
+
+ mGetSamples(lfo, mIndex, mStep, td);
+ mIndex += (mStep * td) & WAVEFORM_FRACMASK;
+ mIndex &= WAVEFORM_FRACMASK;
+
+ ASSUME(numInput > 0);
+ for(ALsizei c{0};c < numInput;c++)
+ {
+ for (ALsizei i{0};i < td;i++)
+ {
+ mSampleBufferA[i] = 0.0f;
+ mSampleBufferB[i] = 0.0f;
+ }
+
+ auto& vowelA = mChans[c].Formants[VOWEL_A_INDEX];
+ auto& vowelB = mChans[c].Formants[VOWEL_B_INDEX];
+
+ /* Process first vowel. */
+ vowelA[0].process(&samplesIn[c][base], mSampleBufferA, td);
+ vowelA[1].process(&samplesIn[c][base], mSampleBufferA, td);
+ vowelA[2].process(&samplesIn[c][base], mSampleBufferA, td);
+ vowelA[3].process(&samplesIn[c][base], mSampleBufferA, td);
+
+ /* Process second vowel. */
+ vowelB[0].process(&samplesIn[c][base], mSampleBufferB, td);
+ vowelB[1].process(&samplesIn[c][base], mSampleBufferB, td);
+ vowelB[2].process(&samplesIn[c][base], mSampleBufferB, td);
+ vowelB[3].process(&samplesIn[c][base], mSampleBufferB, td);
+
+ alignas(16) ALfloat samplesBlended[MAX_UPDATE_SAMPLES];
+
+ for (ALsizei i{0};i < td;i++)
+ samplesBlended[i] = lerp(mSampleBufferA[i], mSampleBufferB[i], lfo[i]);
+
+ /* Now, mix the processed sound data to the output. */
+ MixSamples(samplesBlended, samplesOut, mChans[c].CurrentGains, mChans[c].TargetGains,
+ samplesToDo-base, base, td);
+ }
+
+ base += td;
+ }
+}
+
+
+void Vmorpher_setParami(EffectProps* props, ALCcontext *context, ALenum param, ALint val)
+{
+ switch(param)
+ {
+ case AL_VOCAL_MORPHER_WAVEFORM:
+ if(!(val >= AL_VOCAL_MORPHER_MIN_WAVEFORM && val <= AL_VOCAL_MORPHER_MAX_WAVEFORM))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher waveform out of range");
+ props->Vmorpher.Waveform = val;
+ break;
+
+ case AL_VOCAL_MORPHER_PHONEMEA:
+ if(!(val >= AL_VOCAL_MORPHER_MIN_PHONEMEA && val <= AL_VOCAL_MORPHER_MAX_PHONEMEA))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher phoneme-a out of range");
+ props->Vmorpher.PhonemeA = val;
+ break;
+
+ case AL_VOCAL_MORPHER_PHONEMEB:
+ if(!(val >= AL_VOCAL_MORPHER_MIN_PHONEMEB && val <= AL_VOCAL_MORPHER_MAX_PHONEMEB))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher phoneme-b out of range");
+ props->Vmorpher.PhonemeB = val;
+ break;
+
+ case AL_VOCAL_MORPHER_PHONEMEA_COARSE_TUNING:
+ if(!(val >= AL_VOCAL_MORPHER_MIN_PHONEMEA_COARSE_TUNING && val <= AL_VOCAL_MORPHER_MAX_PHONEMEA_COARSE_TUNING))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher phoneme-a coarse tuning out of range");
+ props->Vmorpher.PhonemeACoarseTuning = val;
+ break;
+
+ case AL_VOCAL_MORPHER_PHONEMEB_COARSE_TUNING:
+ if(!(val >= AL_VOCAL_MORPHER_MIN_PHONEMEB_COARSE_TUNING && val <= AL_VOCAL_MORPHER_MAX_PHONEMEB_COARSE_TUNING))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher phoneme-b coarse tuning out of range");
+ props->Vmorpher.PhonemeBCoarseTuning = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid vocal morpher integer property 0x%04x", param);
+ }
+}
+void Vmorpher_setParamiv(EffectProps*, ALCcontext *context, ALenum param, const ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid vocal morpher integer-vector property 0x%04x", param); }
+void Vmorpher_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
+{
+ switch(param)
+ {
+ case AL_VOCAL_MORPHER_RATE:
+ if(!(val >= AL_VOCAL_MORPHER_MIN_RATE && val <= AL_VOCAL_MORPHER_MAX_RATE))
+ SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher rate out of range");
+ props->Vmorpher.Rate = val;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid vocal morpher float property 0x%04x", param);
+ }
+}
+void Vmorpher_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
+{ Vmorpher_setParamf(props, context, param, vals[0]); }
+
+void Vmorpher_getParami(const EffectProps* props, ALCcontext *context, ALenum param, ALint* val)
+{
+ switch(param)
+ {
+ case AL_VOCAL_MORPHER_PHONEMEA:
+ *val = props->Vmorpher.PhonemeA;
+ break;
+
+ case AL_VOCAL_MORPHER_PHONEMEB:
+ *val = props->Vmorpher.PhonemeB;
+ break;
+
+ case AL_VOCAL_MORPHER_PHONEMEA_COARSE_TUNING:
+ *val = props->Vmorpher.PhonemeACoarseTuning;
+ break;
+
+ case AL_VOCAL_MORPHER_PHONEMEB_COARSE_TUNING:
+ *val = props->Vmorpher.PhonemeBCoarseTuning;
+ break;
+
+ case AL_VOCAL_MORPHER_WAVEFORM:
+ *val = props->Vmorpher.Waveform;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid vocal morpher integer property 0x%04x", param);
+ }
+}
+void Vmorpher_getParamiv(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
+{ alSetError(context, AL_INVALID_ENUM, "Invalid vocal morpher integer-vector property 0x%04x", param); }
+void Vmorpher_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
+{
+ switch(param)
+ {
+ case AL_VOCAL_MORPHER_RATE:
+ *val = props->Vmorpher.Rate;
+ break;
+
+ default:
+ alSetError(context, AL_INVALID_ENUM, "Invalid vocal morpher float property 0x%04x", param);
+ }
+}
+void Vmorpher_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
+{ Vmorpher_getParamf(props, context, param, vals); }
+
+DEFINE_ALEFFECT_VTABLE(Vmorpher);
+
+
+struct VmorpherStateFactory final : public EffectStateFactory {
+ EffectState *create() override { return new VmorpherState{}; }
+ EffectProps getDefaultProps() const noexcept override;
+ const EffectVtable *getEffectVtable() const noexcept override { return &Vmorpher_vtable; }
+};
+
+EffectProps VmorpherStateFactory::getDefaultProps() const noexcept
+{
+ EffectProps props{};
+ props.Vmorpher.Rate = AL_VOCAL_MORPHER_DEFAULT_RATE;
+ props.Vmorpher.PhonemeA = AL_VOCAL_MORPHER_DEFAULT_PHONEMEA;
+ props.Vmorpher.PhonemeB = AL_VOCAL_MORPHER_DEFAULT_PHONEMEB;
+ props.Vmorpher.PhonemeACoarseTuning = AL_VOCAL_MORPHER_DEFAULT_PHONEMEA_COARSE_TUNING;
+ props.Vmorpher.PhonemeBCoarseTuning = AL_VOCAL_MORPHER_DEFAULT_PHONEMEB_COARSE_TUNING;
+ props.Vmorpher.Waveform = AL_VOCAL_MORPHER_DEFAULT_WAVEFORM;
+ return props;
+}
+
+} // namespace
+
+EffectStateFactory *VmorpherStateFactory_getFactory()
+{
+ static VmorpherStateFactory VmorpherFactory{};
+ return &VmorpherFactory;
+}
diff --git a/alc/filters/biquad.cpp b/alc/filters/biquad.cpp
new file mode 100644
index 00000000..6a3cef64
--- /dev/null
+++ b/alc/filters/biquad.cpp
@@ -0,0 +1,127 @@
+
+#include "config.h"
+
+#include "biquad.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+
+#include "opthelpers.h"
+
+
+template<typename Real>
+void BiquadFilterR<Real>::setParams(BiquadType type, Real gain, Real f0norm, Real rcpQ)
+{
+ // Limit gain to -100dB
+ assert(gain > 0.00001f);
+
+ const Real w0{al::MathDefs<Real>::Tau() * f0norm};
+ const Real sin_w0{std::sin(w0)};
+ const Real cos_w0{std::cos(w0)};
+ const Real alpha{sin_w0/2.0f * rcpQ};
+
+ Real sqrtgain_alpha_2;
+ Real a[3]{ 1.0f, 0.0f, 0.0f };
+ Real b[3]{ 1.0f, 0.0f, 0.0f };
+
+ /* Calculate filter coefficients depending on filter type */
+ switch(type)
+ {
+ case BiquadType::HighShelf:
+ sqrtgain_alpha_2 = 2.0f * std::sqrt(gain) * alpha;
+ b[0] = gain*((gain+1.0f) + (gain-1.0f)*cos_w0 + sqrtgain_alpha_2);
+ b[1] = -2.0f*gain*((gain-1.0f) + (gain+1.0f)*cos_w0 );
+ b[2] = gain*((gain+1.0f) + (gain-1.0f)*cos_w0 - sqrtgain_alpha_2);
+ a[0] = (gain+1.0f) - (gain-1.0f)*cos_w0 + sqrtgain_alpha_2;
+ a[1] = 2.0f* ((gain-1.0f) - (gain+1.0f)*cos_w0 );
+ a[2] = (gain+1.0f) - (gain-1.0f)*cos_w0 - sqrtgain_alpha_2;
+ break;
+ case BiquadType::LowShelf:
+ sqrtgain_alpha_2 = 2.0f * std::sqrt(gain) * alpha;
+ b[0] = gain*((gain+1.0f) - (gain-1.0f)*cos_w0 + sqrtgain_alpha_2);
+ b[1] = 2.0f*gain*((gain-1.0f) - (gain+1.0f)*cos_w0 );
+ b[2] = gain*((gain+1.0f) - (gain-1.0f)*cos_w0 - sqrtgain_alpha_2);
+ a[0] = (gain+1.0f) + (gain-1.0f)*cos_w0 + sqrtgain_alpha_2;
+ a[1] = -2.0f* ((gain-1.0f) + (gain+1.0f)*cos_w0 );
+ a[2] = (gain+1.0f) + (gain-1.0f)*cos_w0 - sqrtgain_alpha_2;
+ break;
+ case BiquadType::Peaking:
+ gain = std::sqrt(gain);
+ b[0] = 1.0f + alpha * gain;
+ b[1] = -2.0f * cos_w0;
+ b[2] = 1.0f - alpha * gain;
+ a[0] = 1.0f + alpha / gain;
+ a[1] = -2.0f * cos_w0;
+ a[2] = 1.0f - alpha / gain;
+ break;
+
+ case BiquadType::LowPass:
+ b[0] = (1.0f - cos_w0) / 2.0f;
+ b[1] = 1.0f - cos_w0;
+ b[2] = (1.0f - cos_w0) / 2.0f;
+ a[0] = 1.0f + alpha;
+ a[1] = -2.0f * cos_w0;
+ a[2] = 1.0f - alpha;
+ break;
+ case BiquadType::HighPass:
+ b[0] = (1.0f + cos_w0) / 2.0f;
+ b[1] = -(1.0f + cos_w0);
+ b[2] = (1.0f + cos_w0) / 2.0f;
+ a[0] = 1.0f + alpha;
+ a[1] = -2.0f * cos_w0;
+ a[2] = 1.0f - alpha;
+ break;
+ case BiquadType::BandPass:
+ b[0] = alpha;
+ b[1] = 0.0f;
+ b[2] = -alpha;
+ a[0] = 1.0f + alpha;
+ a[1] = -2.0f * cos_w0;
+ a[2] = 1.0f - alpha;
+ break;
+ }
+
+ a1 = a[1] / a[0];
+ a2 = a[2] / a[0];
+ b0 = b[0] / a[0];
+ b1 = b[1] / a[0];
+ b2 = b[2] / a[0];
+}
+
+template<typename Real>
+void BiquadFilterR<Real>::process(Real *dst, const Real *src, int numsamples)
+{
+ ASSUME(numsamples > 0);
+
+ const Real b0{this->b0};
+ const Real b1{this->b1};
+ const Real b2{this->b2};
+ const Real a1{this->a1};
+ const Real a2{this->a2};
+ Real z1{this->z1};
+ Real z2{this->z2};
+
+    /* Processing loop is Transposed Direct Form II. This requires less storage
+     * compared to Direct Form I (only two delay components, instead of a four-
+     * sample history of the last two inputs and outputs), and works better for
+     * floating-point, which favors summing similarly-sized values while being
+     * less bothered by overflow.
+ *
+ * See: http://www.earlevel.com/main/2003/02/28/biquads/
+ */
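+    /* In difference-equation form (with a0 already folded into the other
+     * coefficients):
+     *   y[n] = b0*x[n] + z1
+     *   z1   = b1*x[n] - a1*y[n] + z2
+     *   z2   = b2*x[n] - a2*y[n]
+     */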
+ auto proc_sample = [b0,b1,b2,a1,a2,&z1,&z2](Real input) noexcept -> Real
+ {
+ Real output = input*b0 + z1;
+ z1 = input*b1 - output*a1 + z2;
+ z2 = input*b2 - output*a2;
+ return output;
+ };
+ std::transform(src, src+numsamples, dst, proc_sample);
+
+ this->z1 = z1;
+ this->z2 = z2;
+}
+
+template class BiquadFilterR<float>;
+template class BiquadFilterR<double>;
diff --git a/alc/filters/biquad.h b/alc/filters/biquad.h
new file mode 100644
index 00000000..893a69a9
--- /dev/null
+++ b/alc/filters/biquad.h
@@ -0,0 +1,113 @@
+#ifndef FILTERS_BIQUAD_H
+#define FILTERS_BIQUAD_H
+
+#include <cmath>
+#include <utility>
+
+#include "math_defs.h"
+
+
+/* Filters implementation is based on the "Cookbook formulae for audio
+ * EQ biquad filter coefficients" by Robert Bristow-Johnson
+ * http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
+ */
+/* Implementation note: For the shelf filters, the specified gain is for the
+ * reference frequency, which is the centerpoint of the transition band. This
+ * better matches EFX filter design. To set the gain for the shelf itself, use
+ * the square root of the desired linear gain (or halve the dB gain).
+ */
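+/* For example, a shelf intended to sit 12dB below unity gets
+ * gain = sqrt(10^(-12/20)) ~= 0.501, i.e. the -6dB linear gain.
+ */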
+
+enum class BiquadType {
+ /** EFX-style low-pass filter, specifying a gain and reference frequency. */
+ HighShelf,
+ /** EFX-style high-pass filter, specifying a gain and reference frequency. */
+ LowShelf,
+ /** Peaking filter, specifying a gain and reference frequency. */
+ Peaking,
+
+ /** Low-pass cut-off filter, specifying a cut-off frequency. */
+ LowPass,
+ /** High-pass cut-off filter, specifying a cut-off frequency. */
+ HighPass,
+ /** Band-pass filter, specifying a center frequency. */
+ BandPass,
+};
+
+template<typename Real>
+class BiquadFilterR {
+ /* Last two delayed components for direct form II. */
+ Real z1{0.0f}, z2{0.0f};
+ /* Transfer function coefficients "b" (numerator) */
+ Real b0{1.0f}, b1{0.0f}, b2{0.0f};
+ /* Transfer function coefficients "a" (denominator; a0 is pre-applied). */
+ Real a1{0.0f}, a2{0.0f};
+
+public:
+ void clear() noexcept { z1 = z2 = 0.0f; }
+
+ /**
+ * Sets the filter state for the specified filter type and its parameters.
+ *
+ * \param type The type of filter to apply.
+ * \param gain The gain for the reference frequency response. Only used by
+ * the Shelf and Peaking filter types.
+ * \param f0norm The reference frequency normal (ref_freq / sample_rate).
+ * This is the center point for the Shelf, Peaking, and
+ * BandPass filter types, or the cutoff frequency for the
+ * LowPass and HighPass filter types.
+ * \param rcpQ The reciprocal of the Q coefficient for the filter's
+ * transition band. Can be generated from rcpQFromSlope or
+ * rcpQFromBandwidth as needed.
+ */
+ void setParams(BiquadType type, Real gain, Real f0norm, Real rcpQ);
+
+ void copyParamsFrom(const BiquadFilterR &other)
+ {
+ b0 = other.b0;
+ b1 = other.b1;
+ b2 = other.b2;
+ a1 = other.a1;
+ a2 = other.a2;
+ }
+
+
+ void process(Real *dst, const Real *src, int numsamples);
+
+ /* Rather hacky. It's just here to support "manual" processing. */
+ std::pair<Real,Real> getComponents() const noexcept
+ { return {z1, z2}; }
+ void setComponents(Real z1_, Real z2_) noexcept
+ { z1 = z1_; z2 = z2_; }
+ Real processOne(const Real in, Real &z1_, Real &z2_) const noexcept
+ {
+ Real out{in*b0 + z1_};
+ z1_ = in*b1 - out*a1 + z2_;
+ z2_ = in*b2 - out*a2;
+ return out;
+ }
+
+ /**
+ * Calculates the rcpQ (i.e. 1/Q) coefficient for shelving filters, using
+ * the reference gain and shelf slope parameter.
+ * \param gain 0 < gain
+ * \param slope 0 < slope <= 1
+ */
+ static Real rcpQFromSlope(Real gain, Real slope)
+ { return std::sqrt((gain + 1.0f/gain)*(1.0f/slope - 1.0f) + 2.0f); }
+
+ /**
+ * Calculates the rcpQ (i.e. 1/Q) coefficient for filters, using the
+ * normalized reference frequency and bandwidth.
+ * \param f0norm 0 < f0norm < 0.5.
+ * \param bandwidth 0 < bandwidth
+ */
+ static Real rcpQFromBandwidth(Real f0norm, Real bandwidth)
+ {
+ const Real w0{al::MathDefs<Real>::Tau() * f0norm};
+ return 2.0f*std::sinh(std::log(Real{2.0f})/2.0f*bandwidth*w0/std::sin(w0));
+ }
+};
+
+using BiquadFilter = BiquadFilterR<float>;
+
+#endif /* FILTERS_BIQUAD_H */
diff --git a/alc/filters/nfc.cpp b/alc/filters/nfc.cpp
new file mode 100644
index 00000000..1a567f2c
--- /dev/null
+++ b/alc/filters/nfc.cpp
@@ -0,0 +1,391 @@
+
+#include "config.h"
+
+#include "nfc.h"
+
+#include <algorithm>
+
+#include "alcmain.h"
+
+
+/* Near-field control filters are the basis for handling the near-field effect.
+ * The near-field effect is a bass-boost present in the directional components
+ * of a recorded signal, created as a result of the wavefront curvature (itself
+ * a function of sound distance). Proper reproduction dictates this be
+ * compensated for using a bass-cut given the playback speaker distance, to
+ * avoid excessive bass in the playback.
+ *
+ * For real-time rendered audio, emulating the near-field effect based on the
+ * sound source's distance, and subsequently compensating for it at output
+ * based on the speaker distances, can create a more realistic perception of
+ * sound distance beyond a simple 1/r attenuation.
+ *
+ * These filters do just that. Each one applies a low-shelf filter, created as
+ * the combination of a bass-boost for a given sound source distance (near-
+ * field emulation) along with a bass-cut for a given control/speaker distance
+ * (near-field compensation).
+ *
+ * Note that it is necessary to apply a cut along with the boost, since the
+ * boost alone is unstable in higher-order ambisonics as it causes an infinite
+ * DC gain (even first-order ambisonics requires there to be no DC offset for
+ * the boost to work). Consequently, ambisonics requires a control parameter to
+ * be used to avoid an unstable boost-only filter. NFC-HOA defines this control
+ * as a reference delay, calculated with:
+ *
+ * reference_delay = control_distance / speed_of_sound
+ *
+ * This means w0 (for input) or w1 (for output) should be set to:
+ *
+ * wN = 1 / (reference_delay * sample_rate)
+ *
+ * when dealing with NFC-HOA content. For FOA input content, which does not
+ * specify a reference_delay variable, w0 should be set to 0 to apply only
+ * near-field compensation for output. It's important that w1 be a finite,
+ * positive, non-0 value or else the bass-boost will become unstable again.
+ * Also, w0 should not be too large compared to w1, to avoid excessively loud
+ * low frequencies.
+ */
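+/* As a worked example: with a speed of sound of 343m/s, a 48kHz sample rate,
+ * and a 1.5m control (speaker) distance, the reference delay is
+ * 1.5/343 ~= 4.37ms, giving w1 = 343 / (1.5 * 48000) ~= 0.00476.
+ */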
+
+namespace {
+
+constexpr float B[5][4] = {
+ { 0.0f },
+ { 1.0f },
+ { 3.0f, 3.0f },
+ { 3.6778f, 6.4595f, 2.3222f },
+ { 4.2076f, 11.4877f, 5.7924f, 9.1401f }
+};
+
+NfcFilter1 NfcFilterCreate1(const float w0, const float w1) noexcept
+{
+ NfcFilter1 nfc{};
+ float b_00, g_0;
+ float r;
+
+ nfc.base_gain = 1.0f;
+ nfc.gain = 1.0f;
+
+ /* Calculate bass-boost coefficients. */
+ r = 0.5f * w0;
+ b_00 = B[1][0] * r;
+ g_0 = 1.0f + b_00;
+
+ nfc.gain *= g_0;
+ nfc.b1 = 2.0f * b_00 / g_0;
+
+ /* Calculate bass-cut coefficients. */
+ r = 0.5f * w1;
+ b_00 = B[1][0] * r;
+ g_0 = 1.0f + b_00;
+
+ nfc.base_gain /= g_0;
+ nfc.gain /= g_0;
+ nfc.a1 = 2.0f * b_00 / g_0;
+
+ return nfc;
+}
+
+void NfcFilterAdjust1(NfcFilter1 *nfc, const float w0) noexcept
+{
+ const float r{0.5f * w0};
+ const float b_00{B[1][0] * r};
+ const float g_0{1.0f + b_00};
+
+ nfc->gain = nfc->base_gain * g_0;
+ nfc->b1 = 2.0f * b_00 / g_0;
+}
+
+
+NfcFilter2 NfcFilterCreate2(const float w0, const float w1) noexcept
+{
+ NfcFilter2 nfc{};
+ float b_10, b_11, g_1;
+ float r;
+
+ nfc.base_gain = 1.0f;
+ nfc.gain = 1.0f;
+
+ /* Calculate bass-boost coefficients. */
+ r = 0.5f * w0;
+ b_10 = B[2][0] * r;
+ b_11 = B[2][1] * r * r;
+ g_1 = 1.0f + b_10 + b_11;
+
+ nfc.gain *= g_1;
+ nfc.b1 = (2.0f*b_10 + 4.0f*b_11) / g_1;
+ nfc.b2 = 4.0f * b_11 / g_1;
+
+ /* Calculate bass-cut coefficients. */
+ r = 0.5f * w1;
+ b_10 = B[2][0] * r;
+ b_11 = B[2][1] * r * r;
+ g_1 = 1.0f + b_10 + b_11;
+
+ nfc.base_gain /= g_1;
+ nfc.gain /= g_1;
+ nfc.a1 = (2.0f*b_10 + 4.0f*b_11) / g_1;
+ nfc.a2 = 4.0f * b_11 / g_1;
+
+ return nfc;
+}
+
+void NfcFilterAdjust2(NfcFilter2 *nfc, const float w0) noexcept
+{
+ const float r{0.5f * w0};
+ const float b_10{B[2][0] * r};
+ const float b_11{B[2][1] * r * r};
+ const float g_1{1.0f + b_10 + b_11};
+
+ nfc->gain = nfc->base_gain * g_1;
+ nfc->b1 = (2.0f*b_10 + 4.0f*b_11) / g_1;
+ nfc->b2 = 4.0f * b_11 / g_1;
+}
+
+
+NfcFilter3 NfcFilterCreate3(const float w0, const float w1) noexcept
+{
+ NfcFilter3 nfc{};
+ float b_10, b_11, g_1;
+ float b_00, g_0;
+ float r;
+
+ nfc.base_gain = 1.0f;
+ nfc.gain = 1.0f;
+
+ /* Calculate bass-boost coefficients. */
+ r = 0.5f * w0;
+ b_10 = B[3][0] * r;
+ b_11 = B[3][1] * r * r;
+ b_00 = B[3][2] * r;
+ g_1 = 1.0f + b_10 + b_11;
+ g_0 = 1.0f + b_00;
+
+ nfc.gain *= g_1 * g_0;
+ nfc.b1 = (2.0f*b_10 + 4.0f*b_11) / g_1;
+ nfc.b2 = 4.0f * b_11 / g_1;
+ nfc.b3 = 2.0f * b_00 / g_0;
+
+ /* Calculate bass-cut coefficients. */
+ r = 0.5f * w1;
+ b_10 = B[3][0] * r;
+ b_11 = B[3][1] * r * r;
+ b_00 = B[3][2] * r;
+ g_1 = 1.0f + b_10 + b_11;
+ g_0 = 1.0f + b_00;
+
+ nfc.base_gain /= g_1 * g_0;
+ nfc.gain /= g_1 * g_0;
+ nfc.a1 = (2.0f*b_10 + 4.0f*b_11) / g_1;
+ nfc.a2 = 4.0f * b_11 / g_1;
+ nfc.a3 = 2.0f * b_00 / g_0;
+
+ return nfc;
+}
+
+void NfcFilterAdjust3(NfcFilter3 *nfc, const float w0) noexcept
+{
+ const float r{0.5f * w0};
+ const float b_10{B[3][0] * r};
+ const float b_11{B[3][1] * r * r};
+ const float b_00{B[3][2] * r};
+ const float g_1{1.0f + b_10 + b_11};
+ const float g_0{1.0f + b_00};
+
+ nfc->gain = nfc->base_gain * g_1 * g_0;
+ nfc->b1 = (2.0f*b_10 + 4.0f*b_11) / g_1;
+ nfc->b2 = 4.0f * b_11 / g_1;
+ nfc->b3 = 2.0f * b_00 / g_0;
+}
+
+
+NfcFilter4 NfcFilterCreate4(const float w0, const float w1) noexcept
+{
+ NfcFilter4 nfc{};
+ float b_10, b_11, g_1;
+ float b_00, b_01, g_0;
+ float r;
+
+ nfc.base_gain = 1.0f;
+ nfc.gain = 1.0f;
+
+ /* Calculate bass-boost coefficients. */
+ r = 0.5f * w0;
+ b_10 = B[4][0] * r;
+ b_11 = B[4][1] * r * r;
+ b_00 = B[4][2] * r;
+ b_01 = B[4][3] * r * r;
+ g_1 = 1.0f + b_10 + b_11;
+ g_0 = 1.0f + b_00 + b_01;
+
+ nfc.gain *= g_1 * g_0;
+ nfc.b1 = (2.0f*b_10 + 4.0f*b_11) / g_1;
+ nfc.b2 = 4.0f * b_11 / g_1;
+ nfc.b3 = (2.0f*b_00 + 4.0f*b_01) / g_0;
+ nfc.b4 = 4.0f * b_01 / g_0;
+
+ /* Calculate bass-cut coefficients. */
+ r = 0.5f * w1;
+ b_10 = B[4][0] * r;
+ b_11 = B[4][1] * r * r;
+ b_00 = B[4][2] * r;
+ b_01 = B[4][3] * r * r;
+ g_1 = 1.0f + b_10 + b_11;
+ g_0 = 1.0f + b_00 + b_01;
+
+ nfc.base_gain /= g_1 * g_0;
+ nfc.gain /= g_1 * g_0;
+ nfc.a1 = (2.0f*b_10 + 4.0f*b_11) / g_1;
+ nfc.a2 = 4.0f * b_11 / g_1;
+ nfc.a3 = (2.0f*b_00 + 4.0f*b_01) / g_0;
+ nfc.a4 = 4.0f * b_01 / g_0;
+
+ return nfc;
+}
+
+void NfcFilterAdjust4(NfcFilter4 *nfc, const float w0) noexcept
+{
+ const float r{0.5f * w0};
+ const float b_10{B[4][0] * r};
+ const float b_11{B[4][1] * r * r};
+ const float b_00{B[4][2] * r};
+ const float b_01{B[4][3] * r * r};
+ const float g_1{1.0f + b_10 + b_11};
+ const float g_0{1.0f + b_00 + b_01};
+
+ nfc->gain = nfc->base_gain * g_1 * g_0;
+ nfc->b1 = (2.0f*b_10 + 4.0f*b_11) / g_1;
+ nfc->b2 = 4.0f * b_11 / g_1;
+ nfc->b3 = (2.0f*b_00 + 4.0f*b_01) / g_0;
+ nfc->b4 = 4.0f * b_01 / g_0;
+}
+
+} // namespace
+
+void NfcFilter::init(const float w1) noexcept
+{
+ first = NfcFilterCreate1(0.0f, w1);
+ second = NfcFilterCreate2(0.0f, w1);
+ third = NfcFilterCreate3(0.0f, w1);
+ fourth = NfcFilterCreate4(0.0f, w1);
+}
+
+void NfcFilter::adjust(const float w0) noexcept
+{
+ NfcFilterAdjust1(&first, w0);
+ NfcFilterAdjust2(&second, w0);
+ NfcFilterAdjust3(&third, w0);
+ NfcFilterAdjust4(&fourth, w0);
+}
+
+
+void NfcFilter::process1(float *RESTRICT dst, const float *RESTRICT src, const int count)
+{
+ ASSUME(count > 0);
+
+ const float gain{first.gain};
+ const float b1{first.b1};
+ const float a1{first.a1};
+ float z1{first.z[0]};
+ auto proc_sample = [gain,b1,a1,&z1](const float in) noexcept -> float
+ {
+ const float y{in*gain - a1*z1};
+ const float out{y + b1*z1};
+ z1 += y;
+ return out;
+ };
+ std::transform(src, src+count, dst, proc_sample);
+ first.z[0] = z1;
+}
+
+void NfcFilter::process2(float *RESTRICT dst, const float *RESTRICT src, const int count)
+{
+ ASSUME(count > 0);
+
+ const float gain{second.gain};
+ const float b1{second.b1};
+ const float b2{second.b2};
+ const float a1{second.a1};
+ const float a2{second.a2};
+ float z1{second.z[0]};
+ float z2{second.z[1]};
+ auto proc_sample = [gain,b1,b2,a1,a2,&z1,&z2](const float in) noexcept -> float
+ {
+ const float y{in*gain - a1*z1 - a2*z2};
+ const float out{y + b1*z1 + b2*z2};
+ z2 += z1;
+ z1 += y;
+ return out;
+ };
+ std::transform(src, src+count, dst, proc_sample);
+ second.z[0] = z1;
+ second.z[1] = z2;
+}
+
+void NfcFilter::process3(float *RESTRICT dst, const float *RESTRICT src, const int count)
+{
+ ASSUME(count > 0);
+
+ const float gain{third.gain};
+ const float b1{third.b1};
+ const float b2{third.b2};
+ const float b3{third.b3};
+ const float a1{third.a1};
+ const float a2{third.a2};
+ const float a3{third.a3};
+ float z1{third.z[0]};
+ float z2{third.z[1]};
+ float z3{third.z[2]};
+ auto proc_sample = [gain,b1,b2,b3,a1,a2,a3,&z1,&z2,&z3](const float in) noexcept -> float
+ {
+ float y{in*gain - a1*z1 - a2*z2};
+ float out{y + b1*z1 + b2*z2};
+ z2 += z1;
+ z1 += y;
+
+ y = out - a3*z3;
+ out = y + b3*z3;
+ z3 += y;
+ return out;
+ };
+ std::transform(src, src+count, dst, proc_sample);
+ third.z[0] = z1;
+ third.z[1] = z2;
+ third.z[2] = z3;
+}
+
+void NfcFilter::process4(float *RESTRICT dst, const float *RESTRICT src, const int count)
+{
+ ASSUME(count > 0);
+
+ const float gain{fourth.gain};
+ const float b1{fourth.b1};
+ const float b2{fourth.b2};
+ const float b3{fourth.b3};
+ const float b4{fourth.b4};
+ const float a1{fourth.a1};
+ const float a2{fourth.a2};
+ const float a3{fourth.a3};
+ const float a4{fourth.a4};
+ float z1{fourth.z[0]};
+ float z2{fourth.z[1]};
+ float z3{fourth.z[2]};
+ float z4{fourth.z[3]};
+ auto proc_sample = [gain,b1,b2,b3,b4,a1,a2,a3,a4,&z1,&z2,&z3,&z4](const float in) noexcept -> float
+ {
+ float y{in*gain - a1*z1 - a2*z2};
+ float out{y + b1*z1 + b2*z2};
+ z2 += z1;
+ z1 += y;
+
+ y = out - a3*z3 - a4*z4;
+ out = y + b3*z3 + b4*z4;
+ z4 += z3;
+ z3 += y;
+ return out;
+ };
+ std::transform(src, src+count, dst, proc_sample);
+ fourth.z[0] = z1;
+ fourth.z[1] = z2;
+ fourth.z[2] = z3;
+ fourth.z[3] = z4;
+}
diff --git a/alc/filters/nfc.h b/alc/filters/nfc.h
new file mode 100644
index 00000000..b656850a
--- /dev/null
+++ b/alc/filters/nfc.h
@@ -0,0 +1,58 @@
+#ifndef FILTER_NFC_H
+#define FILTER_NFC_H
+
+struct NfcFilter1 {
+ float base_gain, gain;
+ float b1, a1;
+ float z[1];
+};
+struct NfcFilter2 {
+ float base_gain, gain;
+ float b1, b2, a1, a2;
+ float z[2];
+};
+struct NfcFilter3 {
+ float base_gain, gain;
+ float b1, b2, b3, a1, a2, a3;
+ float z[3];
+};
+struct NfcFilter4 {
+ float base_gain, gain;
+ float b1, b2, b3, b4, a1, a2, a3, a4;
+ float z[4];
+};
+
+class NfcFilter {
+ NfcFilter1 first;
+ NfcFilter2 second;
+ NfcFilter3 third;
+ NfcFilter4 fourth;
+
+public:
+ /* NOTE:
+ * w0 = speed_of_sound / (source_distance * sample_rate);
+ * w1 = speed_of_sound / (control_distance * sample_rate);
+ *
+ * Generally speaking, the control distance should be approximately the
+     * average speaker distance, or based on the reference delay if outputting
+ * NFC-HOA. It must not be negative, 0, or infinite. The source distance
+ * should not be too small relative to the control distance.
+ */
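+    /* Typical use (a sketch): call init(w1) once for the playback setup, call
+     * adjust(w0) whenever the source distance changes, then run each group of
+     * same-order ambisonic channels through the matching processN() method
+     * below.
+     */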
+
+ void init(const float w1) noexcept;
+ void adjust(const float w0) noexcept;
+
+ /* Near-field control filter for first-order ambisonic channels (1-3). */
+ void process1(float *RESTRICT dst, const float *RESTRICT src, const int count);
+
+ /* Near-field control filter for second-order ambisonic channels (4-8). */
+ void process2(float *RESTRICT dst, const float *RESTRICT src, const int count);
+
+ /* Near-field control filter for third-order ambisonic channels (9-15). */
+ void process3(float *RESTRICT dst, const float *RESTRICT src, const int count);
+
+ /* Near-field control filter for fourth-order ambisonic channels (16-24). */
+ void process4(float *RESTRICT dst, const float *RESTRICT src, const int count);
+};
+
+#endif /* FILTER_NFC_H */
diff --git a/alc/filters/splitter.cpp b/alc/filters/splitter.cpp
new file mode 100644
index 00000000..09e7bfe8
--- /dev/null
+++ b/alc/filters/splitter.cpp
@@ -0,0 +1,115 @@
+
+#include "config.h"
+
+#include "splitter.h"
+
+#include <cmath>
+#include <limits>
+#include <algorithm>
+
+#include "math_defs.h"
+
+template<typename Real>
+void BandSplitterR<Real>::init(Real f0norm)
+{
+ const Real w{f0norm * al::MathDefs<Real>::Tau()};
+ const Real cw{std::cos(w)};
+ if(cw > std::numeric_limits<float>::epsilon())
+ coeff = (std::sin(w) - 1.0f) / cw;
+ else
+ coeff = cw * -0.5f;
+
+ lp_z1 = 0.0f;
+ lp_z2 = 0.0f;
+ ap_z1 = 0.0f;
+}
+
+template<typename Real>
+void BandSplitterR<Real>::process(Real *hpout, Real *lpout, const Real *input, const int count)
+{
+ ASSUME(count > 0);
+
+ const Real ap_coeff{this->coeff};
+ const Real lp_coeff{this->coeff*0.5f + 0.5f};
+ Real lp_z1{this->lp_z1};
+ Real lp_z2{this->lp_z2};
+ Real ap_z1{this->ap_z1};
+ auto proc_sample = [ap_coeff,lp_coeff,&lp_z1,&lp_z2,&ap_z1,&lpout](const Real in) noexcept -> Real
+ {
+ /* Low-pass sample processing. */
+ Real d{(in - lp_z1) * lp_coeff};
+ Real lp_y{lp_z1 + d};
+ lp_z1 = lp_y + d;
+
+ d = (lp_y - lp_z2) * lp_coeff;
+ lp_y = lp_z2 + d;
+ lp_z2 = lp_y + d;
+
+ *(lpout++) = lp_y;
+
+ /* All-pass sample processing. */
+ Real ap_y{in*ap_coeff + ap_z1};
+ ap_z1 = in - ap_y*ap_coeff;
+
+ /* High-pass generated from removing low-passed output. */
+ return ap_y - lp_y;
+ };
+ std::transform(input, input+count, hpout, proc_sample);
+ this->lp_z1 = lp_z1;
+ this->lp_z2 = lp_z2;
+ this->ap_z1 = ap_z1;
+}
+
+template<typename Real>
+void BandSplitterR<Real>::applyHfScale(Real *samples, const Real hfscale, const int count)
+{
+ ASSUME(count > 0);
+
+ const Real ap_coeff{this->coeff};
+ const Real lp_coeff{this->coeff*0.5f + 0.5f};
+ Real lp_z1{this->lp_z1};
+ Real lp_z2{this->lp_z2};
+ Real ap_z1{this->ap_z1};
+ auto proc_sample = [hfscale,ap_coeff,lp_coeff,&lp_z1,&lp_z2,&ap_z1](const Real in) noexcept -> Real
+ {
+ /* Low-pass sample processing. */
+ Real d{(in - lp_z1) * lp_coeff};
+ Real lp_y{lp_z1 + d};
+ lp_z1 = lp_y + d;
+
+ d = (lp_y - lp_z2) * lp_coeff;
+ lp_y = lp_z2 + d;
+ lp_z2 = lp_y + d;
+
+ /* All-pass sample processing. */
+ Real ap_y{in*ap_coeff + ap_z1};
+ ap_z1 = in - ap_y*ap_coeff;
+
+ /* High-pass generated from removing low-passed output. */
+ return (ap_y-lp_y)*hfscale + lp_y;
+ };
+ std::transform(samples, samples+count, samples, proc_sample);
+ this->lp_z1 = lp_z1;
+ this->lp_z2 = lp_z2;
+ this->ap_z1 = ap_z1;
+}
+
+template<typename Real>
+void BandSplitterR<Real>::applyAllpass(Real *samples, const int count) const
+{
+ ASSUME(count > 0);
+
+ const Real coeff{this->coeff};
+ Real z1{0.0f};
+ auto proc_sample = [coeff,&z1](const Real in) noexcept -> Real
+ {
+ const Real out{in*coeff + z1};
+ z1 = in - out*coeff;
+ return out;
+ };
+ std::transform(samples, samples+count, samples, proc_sample);
+}
+
+
+template class BandSplitterR<float>;
+template class BandSplitterR<double>;
diff --git a/alc/filters/splitter.h b/alc/filters/splitter.h
new file mode 100644
index 00000000..927c4d17
--- /dev/null
+++ b/alc/filters/splitter.h
@@ -0,0 +1,50 @@
+#ifndef FILTER_SPLITTER_H
+#define FILTER_SPLITTER_H
+
+#include "alcmain.h"
+#include "almalloc.h"
+
+
+/* Band splitter. Splits a signal into two phase-matching frequency bands. */
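+/* The low band comes from two cascaded one-pole low-pass sections, while the
+ * high band is formed by subtracting that low band from an all-passed copy of
+ * the input; the two bands therefore stay phase-matched and sum back to an
+ * all-passed version of the original signal.
+ */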
+template<typename Real>
+class BandSplitterR {
+ Real coeff{0.0f};
+ Real lp_z1{0.0f};
+ Real lp_z2{0.0f};
+ Real ap_z1{0.0f};
+
+public:
+ BandSplitterR() = default;
+ BandSplitterR(const BandSplitterR&) = default;
+ BandSplitterR(Real f0norm) { init(f0norm); }
+
+ void init(Real f0norm);
+ void clear() noexcept { lp_z1 = lp_z2 = ap_z1 = 0.0f; }
+ void process(Real *hpout, Real *lpout, const Real *input, const int count);
+
+ void applyHfScale(Real *samples, const Real hfscale, const int count);
+
+ /* The all-pass portion of the band splitter. Applies the same phase shift
+ * without splitting the signal. Note that each use of this method is
+     * independent; it does not track history between calls.
+ */
+ void applyAllpass(Real *samples, const int count) const;
+};
+using BandSplitter = BandSplitterR<float>;
+
+
+struct FrontStablizer {
+ static constexpr size_t DelayLength{256u};
+
+ alignas(16) float DelayBuf[MAX_OUTPUT_CHANNELS][DelayLength];
+
+ BandSplitter LFilter, RFilter;
+ alignas(16) float LSplit[2][BUFFERSIZE];
+ alignas(16) float RSplit[2][BUFFERSIZE];
+
+ alignas(16) float TempBuf[BUFFERSIZE + DelayLength];
+
+ DEF_NEWDEL(FrontStablizer)
+};
+
+#endif /* FILTER_SPLITTER_H */
diff --git a/alc/fpu_modes.h b/alc/fpu_modes.h
new file mode 100644
index 00000000..5465e9cf
--- /dev/null
+++ b/alc/fpu_modes.h
@@ -0,0 +1,25 @@
+#ifndef FPU_MODES_H
+#define FPU_MODES_H
+
+class FPUCtl {
+#if defined(HAVE_SSE_INTRINSICS) || (defined(__GNUC__) && defined(HAVE_SSE))
+ unsigned int sse_state{};
+#endif
+ bool in_mode{};
+
+public:
+ FPUCtl();
+ /* HACK: 32-bit targets for GCC seem to have a problem here with certain
+ * noexcept methods (which destructors are) causing an internal compiler
+ * error. No idea why it's these methods specifically, but this is needed
+ * to get it to compile.
+ */
+ ~FPUCtl() noexcept(false) { leave(); }
+
+ FPUCtl(const FPUCtl&) = delete;
+ FPUCtl& operator=(const FPUCtl&) = delete;
+
+ void leave();
+};
+
+#endif /* FPU_MODES_H */
diff --git a/alc/helpers.cpp b/alc/helpers.cpp
new file mode 100644
index 00000000..e86af6ce
--- /dev/null
+++ b/alc/helpers.cpp
@@ -0,0 +1,851 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2011 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#ifdef _WIN32
+#ifdef __MINGW32__
+#define _WIN32_IE 0x501
+#else
+#define _WIN32_IE 0x400
+#endif
+#endif
+
+#include "config.h"
+
+#include <algorithm>
+#include <cerrno>
+#include <cstdarg>
+#include <cstdlib>
+#include <cstdio>
+#include <cstring>
+#include <mutex>
+#include <string>
+
+#ifdef HAVE_DIRENT_H
+#include <dirent.h>
+#endif
+#ifdef HAVE_PROC_PIDPATH
+#include <libproc.h>
+#endif
+
+#ifdef __FreeBSD__
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+
+#ifndef AL_NO_UID_DEFS
+#if defined(HAVE_GUIDDEF_H) || defined(HAVE_INITGUID_H)
+#define INITGUID
+#include <windows.h>
+#ifdef HAVE_GUIDDEF_H
+#include <guiddef.h>
+#else
+#include <initguid.h>
+#endif
+
+DEFINE_GUID(KSDATAFORMAT_SUBTYPE_PCM, 0x00000001, 0x0000, 0x0010, 0x80,0x00, 0x00,0xaa,0x00,0x38,0x9b,0x71);
+DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, 0x00000003, 0x0000, 0x0010, 0x80,0x00, 0x00,0xaa,0x00,0x38,0x9b,0x71);
+
+DEFINE_GUID(IID_IDirectSoundNotify, 0xb0210783, 0x89cd, 0x11d0, 0xaf,0x08, 0x00,0xa0,0xc9,0x25,0xcd,0x16);
+
+DEFINE_GUID(CLSID_MMDeviceEnumerator, 0xbcde0395, 0xe52f, 0x467c, 0x8e,0x3d, 0xc4,0x57,0x92,0x91,0x69,0x2e);
+DEFINE_GUID(IID_IMMDeviceEnumerator, 0xa95664d2, 0x9614, 0x4f35, 0xa7,0x46, 0xde,0x8d,0xb6,0x36,0x17,0xe6);
+DEFINE_GUID(IID_IAudioClient, 0x1cb9ad4c, 0xdbfa, 0x4c32, 0xb1,0x78, 0xc2,0xf5,0x68,0xa7,0x03,0xb2);
+DEFINE_GUID(IID_IAudioRenderClient, 0xf294acfc, 0x3146, 0x4483, 0xa7,0xbf, 0xad,0xdc,0xa7,0xc2,0x60,0xe2);
+DEFINE_GUID(IID_IAudioCaptureClient, 0xc8adbd64, 0xe71e, 0x48a0, 0xa4,0xde, 0x18,0x5c,0x39,0x5c,0xd3,0x17);
+
+#ifdef HAVE_WASAPI
+#include <wtypes.h>
+#include <devpropdef.h>
+#include <propkeydef.h>
+DEFINE_DEVPROPKEY(DEVPKEY_Device_FriendlyName, 0xa45c254e, 0xdf1c, 0x4efd, 0x80,0x20, 0x67,0xd1,0x46,0xa8,0x50,0xe0, 14);
+DEFINE_PROPERTYKEY(PKEY_AudioEndpoint_FormFactor, 0x1da5d803, 0xd492, 0x4edd, 0x8c,0x23, 0xe0,0xc0,0xff,0xee,0x7f,0x0e, 0);
+DEFINE_PROPERTYKEY(PKEY_AudioEndpoint_GUID, 0x1da5d803, 0xd492, 0x4edd, 0x8c, 0x23,0xe0, 0xc0,0xff,0xee,0x7f,0x0e, 4 );
+#endif
+#endif
+#endif /* AL_NO_UID_DEFS */
+
+#ifdef HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+#ifdef HAVE_INTRIN_H
+#include <intrin.h>
+#endif
+#ifdef HAVE_CPUID_H
+#include <cpuid.h>
+#endif
+#ifdef HAVE_SSE_INTRINSICS
+#include <xmmintrin.h>
+#endif
+#ifdef HAVE_SYS_SYSCONF_H
+#include <sys/sysconf.h>
+#endif
+
+#ifndef _WIN32
+#include <unistd.h>
+#elif defined(_WIN32_IE)
+#include <shlobj.h>
+#endif
+
+#include "alcmain.h"
+#include "almalloc.h"
+#include "compat.h"
+#include "cpu_caps.h"
+#include "fpu_modes.h"
+#include "logging.h"
+
+
+#if defined(HAVE_GCC_GET_CPUID) && (defined(__i386__) || defined(__x86_64__) || \
+ defined(_M_IX86) || defined(_M_X64))
+using reg_type = unsigned int;
+static inline void get_cpuid(int f, reg_type *regs)
+{ __get_cpuid(f, &regs[0], &regs[1], &regs[2], &regs[3]); }
+#define CAN_GET_CPUID
+#elif defined(HAVE_CPUID_INTRINSIC) && (defined(__i386__) || defined(__x86_64__) || \
+ defined(_M_IX86) || defined(_M_X64))
+using reg_type = int;
+static inline void get_cpuid(int f, reg_type *regs)
+{ (__cpuid)(regs, f); }
+#define CAN_GET_CPUID
+#endif
+
+int CPUCapFlags = 0;
+
+void FillCPUCaps(int capfilter)
+{
+ int caps = 0;
+
+/* FIXME: We really should get this for all available CPUs in case different
+ * CPUs have different caps (is that possible on one machine?). */
+#ifdef CAN_GET_CPUID
+ union {
+ reg_type regs[4];
+ char str[sizeof(reg_type[4])];
+ } cpuinf[3] = {{ { 0, 0, 0, 0 } }};
+
+ get_cpuid(0, cpuinf[0].regs);
+ if(cpuinf[0].regs[0] == 0)
+ ERR("Failed to get CPUID\n");
+ else
+ {
+ unsigned int maxfunc = cpuinf[0].regs[0];
+ unsigned int maxextfunc;
+
+ get_cpuid(0x80000000, cpuinf[0].regs);
+ maxextfunc = cpuinf[0].regs[0];
+
+ TRACE("Detected max CPUID function: 0x%x (ext. 0x%x)\n", maxfunc, maxextfunc);
+
+ TRACE("Vendor ID: \"%.4s%.4s%.4s\"\n", cpuinf[0].str+4, cpuinf[0].str+12, cpuinf[0].str+8);
+ if(maxextfunc >= 0x80000004)
+ {
+ get_cpuid(0x80000002, cpuinf[0].regs);
+ get_cpuid(0x80000003, cpuinf[1].regs);
+ get_cpuid(0x80000004, cpuinf[2].regs);
+ TRACE("Name: \"%.16s%.16s%.16s\"\n", cpuinf[0].str, cpuinf[1].str, cpuinf[2].str);
+ }
+
+ if(maxfunc >= 1)
+ {
+ get_cpuid(1, cpuinf[0].regs);
+ if((cpuinf[0].regs[3]&(1<<25)))
+ caps |= CPU_CAP_SSE;
+ if((caps&CPU_CAP_SSE) && (cpuinf[0].regs[3]&(1<<26)))
+ caps |= CPU_CAP_SSE2;
+ if((caps&CPU_CAP_SSE2) && (cpuinf[0].regs[2]&(1<<0)))
+ caps |= CPU_CAP_SSE3;
+ if((caps&CPU_CAP_SSE3) && (cpuinf[0].regs[2]&(1<<19)))
+ caps |= CPU_CAP_SSE4_1;
+ }
+ }
+#else
+ /* Assume support for whatever's supported if we can't check for it */
+#if defined(HAVE_SSE4_1)
+#warning "Assuming SSE 4.1 run-time support!"
+ caps |= CPU_CAP_SSE | CPU_CAP_SSE2 | CPU_CAP_SSE3 | CPU_CAP_SSE4_1;
+#elif defined(HAVE_SSE3)
+#warning "Assuming SSE 3 run-time support!"
+ caps |= CPU_CAP_SSE | CPU_CAP_SSE2 | CPU_CAP_SSE3;
+#elif defined(HAVE_SSE2)
+#warning "Assuming SSE 2 run-time support!"
+ caps |= CPU_CAP_SSE | CPU_CAP_SSE2;
+#elif defined(HAVE_SSE)
+#warning "Assuming SSE run-time support!"
+ caps |= CPU_CAP_SSE;
+#endif
+#endif
+#ifdef HAVE_NEON
+ al::ifstream file{"/proc/cpuinfo"};
+ if(!file.is_open())
+ ERR("Failed to open /proc/cpuinfo, cannot check for NEON support\n");
+ else
+ {
+ std::string features;
+
+ auto getline = [](std::istream &f, std::string &output) -> bool
+ {
+ while(f.good() && f.peek() == '\n')
+ f.ignore();
+ return std::getline(f, output) && !output.empty();
+ };
+ while(getline(file, features))
+ {
+ if(features.compare(0, 10, "Features\t:", 10) == 0)
+ break;
+ }
+ file.close();
+
+ size_t extpos{9};
+ while((extpos=features.find("neon", extpos+1)) != std::string::npos)
+ {
+ if((extpos == 0 || std::isspace(features[extpos-1])) &&
+ (extpos+4 == features.length() || std::isspace(features[extpos+4])))
+ {
+ caps |= CPU_CAP_NEON;
+ break;
+ }
+ }
+ }
+#endif
+
+ TRACE("Extensions:%s%s%s%s%s%s\n",
+ ((capfilter&CPU_CAP_SSE) ? ((caps&CPU_CAP_SSE) ? " +SSE" : " -SSE") : ""),
+ ((capfilter&CPU_CAP_SSE2) ? ((caps&CPU_CAP_SSE2) ? " +SSE2" : " -SSE2") : ""),
+ ((capfilter&CPU_CAP_SSE3) ? ((caps&CPU_CAP_SSE3) ? " +SSE3" : " -SSE3") : ""),
+ ((capfilter&CPU_CAP_SSE4_1) ? ((caps&CPU_CAP_SSE4_1) ? " +SSE4.1" : " -SSE4.1") : ""),
+ ((capfilter&CPU_CAP_NEON) ? ((caps&CPU_CAP_NEON) ? " +NEON" : " -NEON") : ""),
+ ((!capfilter) ? " -none-" : "")
+ );
+ CPUCapFlags = caps & capfilter;
+}
+
+
+FPUCtl::FPUCtl()
+{
+#if defined(HAVE_SSE_INTRINSICS)
+ this->sse_state = _mm_getcsr();
+ unsigned int sseState = this->sse_state;
+ sseState |= 0x8000; /* set flush-to-zero */
+ sseState |= 0x0040; /* set denormals-are-zero */
+ _mm_setcsr(sseState);
+
+#elif defined(__GNUC__) && defined(HAVE_SSE)
+
+ if((CPUCapFlags&CPU_CAP_SSE))
+ {
+ __asm__ __volatile__("stmxcsr %0" : "=m" (*&this->sse_state));
+ unsigned int sseState = this->sse_state;
+ sseState |= 0x8000; /* set flush-to-zero */
+ if((CPUCapFlags&CPU_CAP_SSE2))
+ sseState |= 0x0040; /* set denormals-are-zero */
+ __asm__ __volatile__("ldmxcsr %0" : : "m" (*&sseState));
+ }
+#endif
+
+ this->in_mode = true;
+}
+
+void FPUCtl::leave()
+{
+ if(!this->in_mode) return;
+
+#if defined(HAVE_SSE_INTRINSICS)
+ _mm_setcsr(this->sse_state);
+
+#elif defined(__GNUC__) && defined(HAVE_SSE)
+
+ if((CPUCapFlags&CPU_CAP_SSE))
+ __asm__ __volatile__("ldmxcsr %0" : : "m" (*&this->sse_state));
+#endif
+ this->in_mode = false;
+}
+
+
+#ifdef _WIN32
+
+namespace al {
+
+auto filebuf::underflow() -> int_type
+{
+ if(mFile != INVALID_HANDLE_VALUE && gptr() == egptr())
+ {
+ // Read in the next chunk of data, and set the pointers on success
+ DWORD got{};
+ if(ReadFile(mFile, mBuffer.data(), (DWORD)mBuffer.size(), &got, nullptr))
+ setg(mBuffer.data(), mBuffer.data(), mBuffer.data()+got);
+ }
+ if(gptr() == egptr())
+ return traits_type::eof();
+ return traits_type::to_int_type(*gptr());
+}
+
+auto filebuf::seekoff(off_type offset, std::ios_base::seekdir whence, std::ios_base::openmode mode) -> pos_type
+{
+ if(mFile == INVALID_HANDLE_VALUE || (mode&std::ios_base::out) || !(mode&std::ios_base::in))
+ return traits_type::eof();
+
+ LARGE_INTEGER fpos{};
+ switch(whence)
+ {
+ case std::ios_base::beg:
+ fpos.QuadPart = offset;
+ if(!SetFilePointerEx(mFile, fpos, &fpos, FILE_BEGIN))
+ return traits_type::eof();
+ break;
+
+ case std::ios_base::cur:
+ // If the offset remains in the current buffer range, just
+ // update the pointer.
+ if((offset >= 0 && offset < off_type(egptr()-gptr())) ||
+ (offset < 0 && -offset <= off_type(gptr()-eback())))
+ {
+ // Get the current file offset to report the correct read
+ // offset.
+ fpos.QuadPart = 0;
+ if(!SetFilePointerEx(mFile, fpos, &fpos, FILE_CURRENT))
+ return traits_type::eof();
+ setg(eback(), gptr()+offset, egptr());
+ return fpos.QuadPart - off_type(egptr()-gptr());
+ }
+ // Need to account for the file offset being at egptr() while
+ // the requested offset is relative to gptr().
+ offset -= off_type(egptr()-gptr());
+ fpos.QuadPart = offset;
+ if(!SetFilePointerEx(mFile, fpos, &fpos, FILE_CURRENT))
+ return traits_type::eof();
+ break;
+
+ case std::ios_base::end:
+ fpos.QuadPart = offset;
+ if(!SetFilePointerEx(mFile, fpos, &fpos, FILE_END))
+ return traits_type::eof();
+ break;
+
+ default:
+ return traits_type::eof();
+ }
+ setg(nullptr, nullptr, nullptr);
+ return fpos.QuadPart;
+}
+
+auto filebuf::seekpos(pos_type pos, std::ios_base::openmode mode) -> pos_type
+{
+ // Simplified version of seekoff
+ if(mFile == INVALID_HANDLE_VALUE || (mode&std::ios_base::out) || !(mode&std::ios_base::in))
+ return traits_type::eof();
+
+ LARGE_INTEGER fpos{};
+ fpos.QuadPart = pos;
+ if(!SetFilePointerEx(mFile, fpos, &fpos, FILE_BEGIN))
+ return traits_type::eof();
+
+ setg(nullptr, nullptr, nullptr);
+ return fpos.QuadPart;
+}
+
+filebuf::~filebuf()
+{
+ if(mFile != INVALID_HANDLE_VALUE)
+ CloseHandle(mFile);
+ mFile = INVALID_HANDLE_VALUE;
+}
+
+bool filebuf::open(const wchar_t *filename, std::ios_base::openmode mode)
+{
+ if((mode&std::ios_base::out) || !(mode&std::ios_base::in))
+ return false;
+ HANDLE f{CreateFileW(filename, GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL, nullptr)};
+ if(f == INVALID_HANDLE_VALUE) return false;
+
+ if(mFile != INVALID_HANDLE_VALUE)
+ CloseHandle(mFile);
+ mFile = f;
+
+ setg(nullptr, nullptr, nullptr);
+ return true;
+}
+bool filebuf::open(const char *filename, std::ios_base::openmode mode)
+{
+ std::wstring wname{utf8_to_wstr(filename)};
+ return open(wname.c_str(), mode);
+}
+
+
+ifstream::ifstream(const wchar_t *filename, std::ios_base::openmode mode)
+ : std::istream{nullptr}
+{
+ init(&mStreamBuf);
+
+ // Set the failbit if the file failed to open.
+ if((mode&std::ios_base::out) || !mStreamBuf.open(filename, mode|std::ios_base::in))
+ clear(failbit);
+}
+
+ifstream::ifstream(const char *filename, std::ios_base::openmode mode)
+ : std::istream{nullptr}
+{
+ init(&mStreamBuf);
+
+ // Set the failbit if the file failed to open.
+ if((mode&std::ios_base::out) || !mStreamBuf.open(filename, mode|std::ios_base::in))
+ clear(failbit);
+}
+
+/* This is only here to ensure the compiler doesn't define an implicit
+ * destructor, which it would try to automatically inline and then complain
+ * that it can't inline without excessive code growth.
+ */
+ifstream::~ifstream() { }
+
+} // namespace al
+
+const PathNamePair &GetProcBinary()
+{
+ static PathNamePair ret;
+ if(!ret.fname.empty() || !ret.path.empty())
+ return ret;
+
+ al::vector<WCHAR> fullpath(256);
+ DWORD len;
+ while((len=GetModuleFileNameW(nullptr, fullpath.data(), static_cast<DWORD>(fullpath.size()))) == fullpath.size())
+ fullpath.resize(fullpath.size() << 1);
+ if(len == 0)
+ {
+ ERR("Failed to get process name: error %lu\n", GetLastError());
+ return ret;
+ }
+
+ fullpath.resize(len);
+ if(fullpath.back() != 0)
+ fullpath.push_back(0);
+
+ auto sep = std::find(fullpath.rbegin()+1, fullpath.rend(), '\\');
+ sep = std::find(fullpath.rbegin()+1, sep, '/');
+ if(sep != fullpath.rend())
+ {
+ *sep = 0;
+ ret.fname = wstr_to_utf8(&*sep + 1);
+ ret.path = wstr_to_utf8(fullpath.data());
+ }
+ else
+ ret.fname = wstr_to_utf8(fullpath.data());
+
+ TRACE("Got binary: %s, %s\n", ret.path.c_str(), ret.fname.c_str());
+ return ret;
+}
+
+
+void *LoadLib(const char *name)
+{
+ std::wstring wname{utf8_to_wstr(name)};
+ return LoadLibraryW(wname.c_str());
+}
+void CloseLib(void *handle)
+{ FreeLibrary(static_cast<HMODULE>(handle)); }
+void *GetSymbol(void *handle, const char *name)
+{
+ void *ret{reinterpret_cast<void*>(GetProcAddress(static_cast<HMODULE>(handle), name))};
+ if(!ret) ERR("Failed to load %s\n", name);
+ return ret;
+}
+
+
+void al_print(FILE *logfile, const char *fmt, ...)
+{
+ al::vector<char> dynmsg;
+ char stcmsg[256];
+ char *str{stcmsg};
+
+ va_list args, args2;
+ va_start(args, fmt);
+ va_copy(args2, args);
+ int msglen{std::vsnprintf(str, sizeof(stcmsg), fmt, args)};
+ if(UNLIKELY(msglen >= 0 && static_cast<size_t>(msglen) >= sizeof(stcmsg)))
+ {
+ dynmsg.resize(static_cast<size_t>(msglen) + 1u);
+ str = dynmsg.data();
+ msglen = std::vsnprintf(str, dynmsg.size(), fmt, args2);
+ }
+ va_end(args2);
+ va_end(args);
+
+ std::wstring wstr{utf8_to_wstr(str)};
+ fprintf(logfile, "%ls", wstr.c_str());
+ fflush(logfile);
+}
+
+
+static inline int is_slash(int c)
+{ return (c == '\\' || c == '/'); }
+
+static void DirectorySearch(const char *path, const char *ext, al::vector<std::string> *const results)
+{
+ std::string pathstr{path};
+ pathstr += "\\*";
+ pathstr += ext;
+ TRACE("Searching %s\n", pathstr.c_str());
+
+ std::wstring wpath{utf8_to_wstr(pathstr.c_str())};
+ WIN32_FIND_DATAW fdata;
+ HANDLE hdl{FindFirstFileW(wpath.c_str(), &fdata)};
+ if(hdl != INVALID_HANDLE_VALUE)
+ {
+ size_t base = results->size();
+ do {
+ results->emplace_back();
+ std::string &str = results->back();
+ str = path;
+ str += '\\';
+ str += wstr_to_utf8(fdata.cFileName);
+ TRACE("Got result %s\n", str.c_str());
+ } while(FindNextFileW(hdl, &fdata));
+ FindClose(hdl);
+
+ std::sort(results->begin()+base, results->end());
+ }
+}
+
+al::vector<std::string> SearchDataFiles(const char *ext, const char *subdir)
+{
+ static std::mutex search_lock;
+ std::lock_guard<std::mutex> _{search_lock};
+
+ /* If the path is absolute, use it directly. */
+ al::vector<std::string> results;
+ if(isalpha(subdir[0]) && subdir[1] == ':' && is_slash(subdir[2]))
+ {
+ std::string path{subdir};
+ std::replace(path.begin(), path.end(), '/', '\\');
+ DirectorySearch(path.c_str(), ext, &results);
+ return results;
+ }
+ if(subdir[0] == '\\' && subdir[1] == '\\' && subdir[2] == '?' && subdir[3] == '\\')
+ {
+ DirectorySearch(subdir, ext, &results);
+ return results;
+ }
+
+ std::string path;
+
+ /* Search the app-local directory. */
+ WCHAR *cwdbuf{_wgetenv(L"ALSOFT_LOCAL_PATH")};
+ if(cwdbuf && *cwdbuf != '\0')
+ {
+ path = wstr_to_utf8(cwdbuf);
+ if(is_slash(path.back()))
+ path.pop_back();
+ }
+ else if(!(cwdbuf=_wgetcwd(nullptr, 0)))
+ path = ".";
+ else
+ {
+ path = wstr_to_utf8(cwdbuf);
+ if(is_slash(path.back()))
+ path.pop_back();
+ free(cwdbuf);
+ }
+ std::replace(path.begin(), path.end(), '/', '\\');
+ DirectorySearch(path.c_str(), ext, &results);
+
+ /* Search the local and global data dirs. */
+ static constexpr int ids[2]{ CSIDL_APPDATA, CSIDL_COMMON_APPDATA };
+ for(int id : ids)
+ {
+ WCHAR buffer[MAX_PATH];
+ if(SHGetSpecialFolderPathW(nullptr, buffer, id, FALSE) == FALSE)
+ continue;
+
+ path = wstr_to_utf8(buffer);
+ if(!is_slash(path.back()))
+ path += '\\';
+ path += subdir;
+ std::replace(path.begin(), path.end(), '/', '\\');
+
+ DirectorySearch(path.c_str(), ext, &results);
+ }
+
+ return results;
+}
+
+void SetRTPriority(void)
+{
+ bool failed = false;
+ if(RTPrioLevel > 0)
+ failed = !SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL);
+ if(failed) ERR("Failed to set priority level for thread\n");
+}
+
+#else
+
+const PathNamePair &GetProcBinary()
+{
+ static PathNamePair ret;
+ if(!ret.fname.empty() || !ret.path.empty())
+ return ret;
+
+ al::vector<char> pathname;
+#ifdef __FreeBSD__
+ size_t pathlen;
+ int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+ if(sysctl(mib, 4, nullptr, &pathlen, nullptr, 0) == -1)
+ WARN("Failed to sysctl kern.proc.pathname: %s\n", strerror(errno));
+ else
+ {
+ pathname.resize(pathlen + 1);
+ sysctl(mib, 4, pathname.data(), &pathlen, nullptr, 0);
+ pathname.resize(pathlen);
+ }
+#endif
+#ifdef HAVE_PROC_PIDPATH
+ if(pathname.empty())
+ {
+ char procpath[PROC_PIDPATHINFO_MAXSIZE]{};
+ const pid_t pid{getpid()};
+ if(proc_pidpath(pid, procpath, sizeof(procpath)) < 1)
+ ERR("proc_pidpath(%d, ...) failed: %s\n", pid, strerror(errno));
+ else
+ pathname.insert(pathname.end(), procpath, procpath+strlen(procpath));
+ }
+#endif
+ if(pathname.empty())
+ {
+ pathname.resize(256);
+
+ const char *selfname{"/proc/self/exe"};
+ ssize_t len{readlink(selfname, pathname.data(), pathname.size())};
+ if(len == -1 && errno == ENOENT)
+ {
+ selfname = "/proc/self/file";
+ len = readlink(selfname, pathname.data(), pathname.size());
+ }
+ if(len == -1 && errno == ENOENT)
+ {
+ selfname = "/proc/curproc/exe";
+ len = readlink(selfname, pathname.data(), pathname.size());
+ }
+ if(len == -1 && errno == ENOENT)
+ {
+ selfname = "/proc/curproc/file";
+ len = readlink(selfname, pathname.data(), pathname.size());
+ }
+
+ while(len > 0 && static_cast<size_t>(len) == pathname.size())
+ {
+ pathname.resize(pathname.size() << 1);
+ len = readlink(selfname, pathname.data(), pathname.size());
+ }
+ if(len <= 0)
+ {
+ WARN("Failed to readlink %s: %s\n", selfname, strerror(errno));
+ return ret;
+ }
+
+ pathname.resize(len);
+ }
+ while(!pathname.empty() && pathname.back() == 0)
+ pathname.pop_back();
+
+ auto sep = std::find(pathname.crbegin(), pathname.crend(), '/');
+ if(sep != pathname.crend())
+ {
+ ret.path = std::string(pathname.cbegin(), sep.base()-1);
+ ret.fname = std::string(sep.base(), pathname.cend());
+ }
+ else
+ ret.fname = std::string(pathname.cbegin(), pathname.cend());
+
+ TRACE("Got binary: %s, %s\n", ret.path.c_str(), ret.fname.c_str());
+ return ret;
+}
+
+
+#ifdef HAVE_DLFCN_H
+
+void *LoadLib(const char *name)
+{
+ dlerror();
+ void *handle{dlopen(name, RTLD_NOW)};
+ const char *err{dlerror()};
+ if(err) handle = nullptr;
+ return handle;
+}
+void CloseLib(void *handle)
+{ dlclose(handle); }
+void *GetSymbol(void *handle, const char *name)
+{
+ dlerror();
+ void *sym{dlsym(handle, name)};
+ const char *err{dlerror()};
+ if(err)
+ {
+ WARN("Failed to load %s: %s\n", name, err);
+ sym = nullptr;
+ }
+ return sym;
+}
+
+#endif /* HAVE_DLFCN_H */
+
+void al_print(FILE *logfile, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(logfile, fmt, ap);
+ va_end(ap);
+
+ fflush(logfile);
+}
+
+
+static void DirectorySearch(const char *path, const char *ext, al::vector<std::string> *const results)
+{
+ TRACE("Searching %s for *%s\n", path, ext);
+ DIR *dir{opendir(path)};
+ if(dir != nullptr)
+ {
+ const size_t extlen = strlen(ext);
+ size_t base = results->size();
+
+ struct dirent *dirent;
+ while((dirent=readdir(dir)) != nullptr)
+ {
+ if(strcmp(dirent->d_name, ".") == 0 || strcmp(dirent->d_name, "..") == 0)
+ continue;
+
+ size_t len{strlen(dirent->d_name)};
+ if(len <= extlen) continue;
+ if(strcasecmp(dirent->d_name+len-extlen, ext) != 0)
+ continue;
+
+ results->emplace_back();
+ std::string &str = results->back();
+ str = path;
+ if(str.back() != '/')
+ str.push_back('/');
+ str += dirent->d_name;
+ TRACE("Got result %s\n", str.c_str());
+ }
+ closedir(dir);
+
+ std::sort(results->begin()+base, results->end());
+ }
+}
+
+al::vector<std::string> SearchDataFiles(const char *ext, const char *subdir)
+{
+ static std::mutex search_lock;
+ std::lock_guard<std::mutex> _{search_lock};
+
+ al::vector<std::string> results;
+ if(subdir[0] == '/')
+ {
+ DirectorySearch(subdir, ext, &results);
+ return results;
+ }
+
+ /* Search the app-local directory. */
+ const char *str{getenv("ALSOFT_LOCAL_PATH")};
+ if(str && *str != '\0')
+ DirectorySearch(str, ext, &results);
+ else
+ {
+ al::vector<char> cwdbuf(256);
+ while(!getcwd(cwdbuf.data(), cwdbuf.size()))
+ {
+ if(errno != ERANGE)
+ {
+ cwdbuf.clear();
+ break;
+ }
+ cwdbuf.resize(cwdbuf.size() << 1);
+ }
+ if(cwdbuf.empty())
+ DirectorySearch(".", ext, &results);
+ else
+ {
+ DirectorySearch(cwdbuf.data(), ext, &results);
+ cwdbuf.clear();
+ }
+ }
+
+ // Search local data dir
+ if((str=getenv("XDG_DATA_HOME")) != nullptr && str[0] != '\0')
+ {
+ std::string path{str};
+ if(path.back() != '/')
+ path += '/';
+ path += subdir;
+ DirectorySearch(path.c_str(), ext, &results);
+ }
+ else if((str=getenv("HOME")) != nullptr && str[0] != '\0')
+ {
+ std::string path{str};
+ if(path.back() == '/')
+ path.pop_back();
+ path += "/.local/share/";
+ path += subdir;
+ DirectorySearch(path.c_str(), ext, &results);
+ }
+
+ // Search global data dirs
+ if((str=getenv("XDG_DATA_DIRS")) == nullptr || str[0] == '\0')
+ str = "/usr/local/share/:/usr/share/";
+
+ const char *next{str};
+ while((str=next) != nullptr && str[0] != '\0')
+ {
+ next = strchr(str, ':');
+
+ std::string path = (next ? std::string(str, next++) : std::string(str));
+ if(path.empty()) continue;
+
+ if(path.back() != '/')
+ path += '/';
+ path += subdir;
+
+ DirectorySearch(path.c_str(), ext, &results);
+ }
+
+ return results;
+}
+
+void SetRTPriority()
+{
+ bool failed = false;
+#if defined(HAVE_PTHREAD_SETSCHEDPARAM) && !defined(__OpenBSD__)
+ if(RTPrioLevel > 0)
+ {
+ struct sched_param param;
+ /* Use the minimum real-time priority possible for now (on Linux this
+ * should be 1 for SCHED_RR) */
+ param.sched_priority = sched_get_priority_min(SCHED_RR);
+ failed = !!pthread_setschedparam(pthread_self(), SCHED_RR, &param);
+ }
+#else
+ /* Real-time priority not available */
+ failed = (RTPrioLevel>0);
+#endif
+ if(failed)
+ ERR("Failed to set priority level for thread\n");
+}
+
+#endif
diff --git a/alc/hrtf.cpp b/alc/hrtf.cpp
new file mode 100644
index 00000000..786c4c5d
--- /dev/null
+++ b/alc/hrtf.cpp
@@ -0,0 +1,1400 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2011 by Chris Robinson
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include "hrtf.h"
+
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cctype>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <functional>
+#include <fstream>
+#include <iterator>
+#include <memory>
+#include <mutex>
+#include <new>
+#include <numeric>
+#include <utility>
+
+#include "AL/al.h"
+
+#include "alcmain.h"
+#include "alconfig.h"
+#include "almalloc.h"
+#include "alnumeric.h"
+#include "aloptional.h"
+#include "alspan.h"
+#include "compat.h"
+#include "filters/splitter.h"
+#include "logging.h"
+#include "math_defs.h"
+#include "opthelpers.h"
+
+
+struct HrtfHandle {
+ std::unique_ptr<HrtfEntry> entry;
+ al::FlexArray<char> filename;
+
+ HrtfHandle(size_t fname_len) : filename{fname_len} { }
+ HrtfHandle(const HrtfHandle&) = delete;
+ HrtfHandle& operator=(const HrtfHandle&) = delete;
+
+ static std::unique_ptr<HrtfHandle> Create(size_t fname_len);
+ static constexpr size_t Sizeof(size_t length) noexcept
+ {
+ return maxz(sizeof(HrtfHandle),
+ al::FlexArray<char>::Sizeof(length, offsetof(HrtfHandle, filename)));
+ }
+
+ DEF_PLACE_NEWDEL()
+};
+
+std::unique_ptr<HrtfHandle> HrtfHandle::Create(size_t fname_len)
+{
+ void *ptr{al_calloc(alignof(HrtfHandle), HrtfHandle::Sizeof(fname_len))};
+ return std::unique_ptr<HrtfHandle>{new (ptr) HrtfHandle{fname_len}};
+}
+
+namespace {
+
+using namespace std::placeholders;
+
+using HrtfHandlePtr = std::unique_ptr<HrtfHandle>;
+
+/* Data set limits must be the same as or more flexible than those defined in
+ * the makemhr utility.
+ */
+#define MIN_IR_SIZE (8)
+#define MAX_IR_SIZE (512)
+#define MOD_IR_SIZE (2)
+
+#define MIN_FD_COUNT (1)
+#define MAX_FD_COUNT (16)
+
+#define MIN_FD_DISTANCE (0.05f)
+#define MAX_FD_DISTANCE (2.5f)
+
+#define MIN_EV_COUNT (5)
+#define MAX_EV_COUNT (128)
+
+#define MIN_AZ_COUNT (1)
+#define MAX_AZ_COUNT (128)
+
+#define MAX_HRIR_DELAY (HRTF_HISTORY_LENGTH-1)
+
+constexpr ALchar magicMarker00[8]{'M','i','n','P','H','R','0','0'};
+constexpr ALchar magicMarker01[8]{'M','i','n','P','H','R','0','1'};
+constexpr ALchar magicMarker02[8]{'M','i','n','P','H','R','0','2'};
+
+/* First value for pass-through coefficients (remaining are 0), used for omni-
+ * directional sounds. */
+constexpr ALfloat PassthruCoeff{0.707106781187f/*sqrt(0.5)*/};
+
+std::mutex LoadedHrtfLock;
+al::vector<HrtfHandlePtr> LoadedHrtfs;
+
+
+class databuf final : public std::streambuf {
+ int_type underflow() override
+ { return traits_type::eof(); }
+
+ pos_type seekoff(off_type offset, std::ios_base::seekdir whence, std::ios_base::openmode mode) override
+ {
+ if((mode&std::ios_base::out) || !(mode&std::ios_base::in))
+ return traits_type::eof();
+
+ char_type *cur;
+ switch(whence)
+ {
+ case std::ios_base::beg:
+ if(offset < 0 || offset > egptr()-eback())
+ return traits_type::eof();
+ cur = eback() + offset;
+ break;
+
+ case std::ios_base::cur:
+ if((offset >= 0 && offset > egptr()-gptr()) ||
+ (offset < 0 && -offset > gptr()-eback()))
+ return traits_type::eof();
+ cur = gptr() + offset;
+ break;
+
+ case std::ios_base::end:
+ if(offset > 0 || -offset > egptr()-eback())
+ return traits_type::eof();
+ cur = egptr() + offset;
+ break;
+
+ default:
+ return traits_type::eof();
+ }
+
+ setg(eback(), cur, egptr());
+ return cur - eback();
+ }
+
+ pos_type seekpos(pos_type pos, std::ios_base::openmode mode) override
+ {
+ // Simplified version of seekoff
+ if((mode&std::ios_base::out) || !(mode&std::ios_base::in))
+ return traits_type::eof();
+
+ if(pos < 0 || pos > egptr()-eback())
+ return traits_type::eof();
+
+ setg(eback(), eback() + static_cast<size_t>(pos), egptr());
+ return pos;
+ }
+
+public:
+ databuf(const char_type *start, const char_type *end) noexcept
+ {
+ setg(const_cast<char_type*>(start), const_cast<char_type*>(start),
+ const_cast<char_type*>(end));
+ }
+};
+
+class idstream final : public std::istream {
+ databuf mStreamBuf;
+
+public:
+ idstream(const char *start, const char *end)
+ : std::istream{nullptr}, mStreamBuf{start, end}
+ { init(&mStreamBuf); }
+};
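+
+/* Note: databuf/idstream provide a read-only std::istream view over an
+ * in-memory byte range, letting the embedded HRTF resources be parsed by the
+ * same LoadHrtf* readers that handle on-disk files.
+ */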
+
+
+struct IdxBlend { ALsizei idx; ALfloat blend; };
+/* Calculate the elevation index given the polar elevation in radians. This
+ * will return an index between 0 and (evcount - 1).
+ */
+IdxBlend CalcEvIndex(ALsizei evcount, ALfloat ev)
+{
+ ev = (al::MathDefs<float>::Pi()*0.5f + ev) * (evcount-1) / al::MathDefs<float>::Pi();
+ ALsizei idx{float2int(ev)};
+
+ return IdxBlend{mini(idx, evcount-1), ev-idx};
+}
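+/* For example (illustrative): with evcount=19, an elevation of 0 radians (on
+ * the horizontal plane) gives ev = (pi/2)*(19-1)/pi = 9, i.e. idx=9 with a
+ * blend factor of 0.
+ */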
+
+/* Calculate the azimuth index given the polar azimuth in radians. This will
+ * return an index between 0 and (azcount - 1).
+ */
+IdxBlend CalcAzIndex(ALsizei azcount, ALfloat az)
+{
+ az = (al::MathDefs<float>::Tau()+az) * azcount / al::MathDefs<float>::Tau();
+ ALsizei idx{float2int(az)};
+
+ return IdxBlend{idx%azcount, az-idx};
+}
+
+} // namespace
+
+
+/* Calculates static HRIR coefficients and delays for the given polar elevation
+ * and azimuth in radians. The coefficients are normalized.
+ */
+void GetHrtfCoeffs(const HrtfEntry *Hrtf, ALfloat elevation, ALfloat azimuth, ALfloat distance,
+ ALfloat spread, HrirArray<ALfloat> &coeffs, ALsizei (&delays)[2])
+{
+ const ALfloat dirfact{1.0f - (spread / al::MathDefs<float>::Tau())};
+
+ const auto *field = Hrtf->field;
+ const auto *field_end = field + Hrtf->fdCount-1;
+ ALsizei ebase{0};
+ while(distance < field->distance && field != field_end)
+ {
+ ebase += field->evCount;
+ ++field;
+ }
+
+ /* Calculate the elevation indices. */
+ const auto elev0 = CalcEvIndex(field->evCount, elevation);
+ const ALsizei elev1_idx{mini(elev0.idx+1, field->evCount-1)};
+ const ALsizei ir0offset{Hrtf->elev[ebase + elev0.idx].irOffset};
+ const ALsizei ir1offset{Hrtf->elev[ebase + elev1_idx].irOffset};
+
+ /* Calculate azimuth indices. */
+ const auto az0 = CalcAzIndex(Hrtf->elev[ebase + elev0.idx].azCount, azimuth);
+ const auto az1 = CalcAzIndex(Hrtf->elev[ebase + elev1_idx].azCount, azimuth);
+
+ /* Calculate the HRIR indices to blend. */
+ ALsizei idx[4]{
+ ir0offset + az0.idx,
+ ir0offset + ((az0.idx+1) % Hrtf->elev[ebase + elev0.idx].azCount),
+ ir1offset + az1.idx,
+ ir1offset + ((az1.idx+1) % Hrtf->elev[ebase + elev1_idx].azCount)
+ };
+
+ /* Calculate bilinear blending weights, attenuated according to the
+ * directional panning factor.
+ */
+ const ALfloat blend[4]{
+ (1.0f-elev0.blend) * (1.0f-az0.blend) * dirfact,
+ (1.0f-elev0.blend) * ( az0.blend) * dirfact,
+ ( elev0.blend) * (1.0f-az1.blend) * dirfact,
+ ( elev0.blend) * ( az1.blend) * dirfact
+ };
+
+ /* Calculate the blended HRIR delays. */
+ delays[0] = fastf2i(
+ Hrtf->delays[idx[0]][0]*blend[0] + Hrtf->delays[idx[1]][0]*blend[1] +
+ Hrtf->delays[idx[2]][0]*blend[2] + Hrtf->delays[idx[3]][0]*blend[3]
+ );
+ delays[1] = fastf2i(
+ Hrtf->delays[idx[0]][1]*blend[0] + Hrtf->delays[idx[1]][1]*blend[1] +
+ Hrtf->delays[idx[2]][1]*blend[2] + Hrtf->delays[idx[3]][1]*blend[3]
+ );
+
+ const ALsizei irSize{Hrtf->irSize};
+ ASSUME(irSize >= MIN_IR_SIZE);
+
+ /* Calculate the sample offsets for the HRIR indices. */
+ idx[0] *= irSize;
+ idx[1] *= irSize;
+ idx[2] *= irSize;
+ idx[3] *= irSize;
+
+ /* Calculate the blended HRIR coefficients. */
+ ALfloat *coeffout{al::assume_aligned<16>(&coeffs[0][0])};
+ coeffout[0] = PassthruCoeff * (1.0f-dirfact);
+ coeffout[1] = PassthruCoeff * (1.0f-dirfact);
+ std::fill(coeffout+2, coeffout + irSize*2, 0.0f);
+ for(ALsizei c{0};c < 4;c++)
+ {
+ const ALfloat *srccoeffs{al::assume_aligned<16>(Hrtf->coeffs[idx[c]])};
+ const ALfloat mult{blend[c]};
+ auto blend_coeffs = [mult](const ALfloat src, const ALfloat coeff) noexcept -> ALfloat
+ { return src*mult + coeff; };
+ std::transform(srccoeffs, srccoeffs + irSize*2, coeffout, coeffout, blend_coeffs);
+ }
+}
+
+
+std::unique_ptr<DirectHrtfState> DirectHrtfState::Create(size_t num_chans)
+{
+ void *ptr{al_calloc(16, DirectHrtfState::Sizeof(num_chans))};
+ return std::unique_ptr<DirectHrtfState>{new (ptr) DirectHrtfState{num_chans}};
+}
+
+void BuildBFormatHrtf(const HrtfEntry *Hrtf, DirectHrtfState *state, const ALuint NumChannels,
+ const AngularPoint *AmbiPoints, const ALfloat (*RESTRICT AmbiMatrix)[MAX_AMBI_CHANNELS],
+ const size_t AmbiCount, const ALfloat *RESTRICT AmbiOrderHFGain)
+{
+ static constexpr int OrderFromChan[MAX_AMBI_CHANNELS]{
+ 0, 1,1,1, 2,2,2,2,2, 3,3,3,3,3,3,3,
+ };
+ /* Set this to true for dual-band HRTF processing. May require better
+ * calculation of the new IR length to deal with the head and tail
+ * generated by the HF scaling.
+ */
+ static constexpr bool DualBand{true};
+
+ ASSUME(NumChannels > 0);
+ ASSUME(AmbiCount > 0);
+
+ auto &field = Hrtf->field[0];
+ ALsizei min_delay{HRTF_HISTORY_LENGTH};
+ ALsizei max_delay{0};
+ auto idx = al::vector<ALsizei>(AmbiCount);
+ auto calc_idxs = [Hrtf,&field,&max_delay,&min_delay](const AngularPoint &pt) noexcept -> ALsizei
+ {
+ /* Calculate elevation index. */
+ const auto evidx = clampi(
+ static_cast<ALsizei>((90.0f+pt.Elev)*(field.evCount-1)/180.0f + 0.5f),
+ 0, field.evCount-1);
+
+ const ALsizei azcount{Hrtf->elev[evidx].azCount};
+ const ALsizei iroffset{Hrtf->elev[evidx].irOffset};
+
+ /* Calculate azimuth index for this elevation. */
+ const auto azidx = static_cast<ALsizei>((360.0f+pt.Azim)*azcount/360.0f + 0.5f) % azcount;
+
+ /* Calculate the index for the impulse response. */
+ ALsizei idx{iroffset + azidx};
+
+ min_delay = mini(min_delay, mini(Hrtf->delays[idx][0], Hrtf->delays[idx][1]));
+ max_delay = maxi(max_delay, maxi(Hrtf->delays[idx][0], Hrtf->delays[idx][1]));
+
+ return idx;
+ };
+ std::transform(AmbiPoints, AmbiPoints+AmbiCount, idx.begin(), calc_idxs);
+
+ /* For dual-band processing, add a 16-sample delay to compensate for the HF
+ * scale on the minimum-phase response.
+ */
+ static constexpr ALsizei base_delay{DualBand ? 16 : 0};
+ const ALdouble xover_norm{400.0 / Hrtf->sampleRate};
+ BandSplitterR<double> splitter{xover_norm};
+
+ auto tmpres = al::vector<HrirArray<ALdouble>>(NumChannels);
+ auto tmpfilt = al::vector<std::array<ALdouble,HRIR_LENGTH*4>>(3);
+ for(size_t c{0u};c < AmbiCount;++c)
+ {
+ const ALfloat (*fir)[2]{&Hrtf->coeffs[idx[c] * Hrtf->irSize]};
+ const ALsizei ldelay{Hrtf->delays[idx[c]][0] - min_delay + base_delay};
+ const ALsizei rdelay{Hrtf->delays[idx[c]][1] - min_delay + base_delay};
+
+ if(!DualBand)
+ {
+ /* For single-band decoding, apply the HF scale to the response. */
+ for(ALuint i{0u};i < NumChannels;++i)
+ {
+ const ALdouble mult{ALdouble{AmbiOrderHFGain[OrderFromChan[i]]} *
+ AmbiMatrix[c][i]};
+ const ALsizei numirs{mini(Hrtf->irSize, HRIR_LENGTH-maxi(ldelay, rdelay))};
+ ALsizei lidx{ldelay}, ridx{rdelay};
+ for(ALsizei j{0};j < numirs;++j)
+ {
+ tmpres[i][lidx++][0] += fir[j][0] * mult;
+ tmpres[i][ridx++][1] += fir[j][1] * mult;
+ }
+ }
+ continue;
+ }
+
+ /* For dual-band processing, the HRIR needs to be split into low and
+ * high frequency responses. The band-splitter alone creates frequency-
+ * dependent phase-shifts, which is not ideal. To counteract it,
+ * combine it with a backwards phase-shift.
+ */
+
+ /* Load the (left) HRIR backwards, into a temp buffer with padding. */
+ std::fill(tmpfilt[2].begin(), tmpfilt[2].end(), 0.0);
+ std::transform(fir, fir+Hrtf->irSize, tmpfilt[2].rbegin() + HRIR_LENGTH*3,
+ [](const ALfloat (&ir)[2]) noexcept -> ALdouble { return ir[0]; });
+
+ /* Apply the all-pass on the reversed signal and reverse the resulting
+ * sample array. This produces the forward response with a backwards
+ * phase-shift (+n degrees becomes -n degrees).
+ */
+ splitter.applyAllpass(tmpfilt[2].data(), static_cast<int>(tmpfilt[2].size()));
+ std::reverse(tmpfilt[2].begin(), tmpfilt[2].end());
+
+ /* Now apply the band-splitter. This applies the normal phase-shift,
+ * which cancels out with the backwards phase-shift to get the original
+ * phase on the split signal.
+ */
+ splitter.clear();
+ splitter.process(tmpfilt[0].data(), tmpfilt[1].data(), tmpfilt[2].data(),
+ static_cast<int>(tmpfilt[2].size()));
+
+ /* Apply left ear response with delay and HF scale. */
+ for(ALuint i{0u};i < NumChannels;++i)
+ {
+ const ALdouble mult{AmbiMatrix[c][i]};
+ const ALdouble hfgain{AmbiOrderHFGain[OrderFromChan[i]]};
+ ALsizei j{HRIR_LENGTH*3 - ldelay};
+ for(ALsizei lidx{0};lidx < HRIR_LENGTH;++lidx,++j)
+ tmpres[i][lidx][0] += (tmpfilt[0][j]*hfgain + tmpfilt[1][j]) * mult;
+ }
+
+ /* Now run the same process on the right HRIR. */
+ std::fill(tmpfilt[2].begin(), tmpfilt[2].end(), 0.0);
+ std::transform(fir, fir+Hrtf->irSize, tmpfilt[2].rbegin() + HRIR_LENGTH*3,
+ [](const ALfloat (&ir)[2]) noexcept -> ALdouble { return ir[1]; });
+
+ splitter.applyAllpass(tmpfilt[2].data(), static_cast<int>(tmpfilt[2].size()));
+ std::reverse(tmpfilt[2].begin(), tmpfilt[2].end());
+
+ splitter.clear();
+ splitter.process(tmpfilt[0].data(), tmpfilt[1].data(), tmpfilt[2].data(),
+ static_cast<int>(tmpfilt[2].size()));
+
+ for(ALuint i{0u};i < NumChannels;++i)
+ {
+ const ALdouble mult{AmbiMatrix[c][i]};
+ const ALdouble hfgain{AmbiOrderHFGain[OrderFromChan[i]]};
+ ALsizei j{HRIR_LENGTH*3 - rdelay};
+ for(ALsizei ridx{0};ridx < HRIR_LENGTH;++ridx,++j)
+ tmpres[i][ridx][1] += (tmpfilt[0][j]*hfgain + tmpfilt[1][j]) * mult;
+ }
+ }
+ tmpfilt.clear();
+ idx.clear();
+
+ for(ALuint i{0u};i < NumChannels;++i)
+ {
+ auto copy_arr = [](const std::array<double,2> &in) noexcept -> std::array<float,2>
+ { return std::array<float,2>{{static_cast<float>(in[0]), static_cast<float>(in[1])}}; };
+ std::transform(tmpres[i].begin(), tmpres[i].end(), state->Chan[i].Coeffs.begin(),
+ copy_arr);
+ }
+ tmpres.clear();
+
+ ALsizei max_length{HRIR_LENGTH};
+ /* Increase the IR size by double the base delay with dual-band processing
+ * to account for the head and tail from the HF response scale.
+ */
+ const ALsizei irsize{mini(Hrtf->irSize + base_delay*2, max_length)};
+ max_length = mini(max_delay-min_delay + irsize, max_length);
+
+ /* Round up to the next IR size multiple. */
+ max_length += MOD_IR_SIZE-1;
+ max_length -= max_length%MOD_IR_SIZE;
+
+ TRACE("Skipped delay: %d, max delay: %d, new FIR length: %d\n",
+ min_delay, max_delay-min_delay, max_length);
+ state->IrSize = max_length;
+}
+
+
+namespace {
+
+std::unique_ptr<HrtfEntry> CreateHrtfStore(ALuint rate, ALsizei irSize, const ALsizei fdCount,
+ const ALubyte *evCount, const ALfloat *distance, const ALushort *azCount,
+ const ALushort *irOffset, ALsizei irCount, const ALfloat (*coeffs)[2],
+ const ALubyte (*delays)[2], const char *filename)
+{
+ std::unique_ptr<HrtfEntry> Hrtf;
+
+ ALsizei evTotal{std::accumulate(evCount, evCount+fdCount, 0)};
+ size_t total{sizeof(HrtfEntry)};
+ total = RoundUp(total, alignof(HrtfEntry::Field)); /* Align for field infos */
+ total += sizeof(HrtfEntry::Field)*fdCount;
+ total = RoundUp(total, alignof(HrtfEntry::Elevation)); /* Align for elevation infos */
+ total += sizeof(Hrtf->elev[0])*evTotal;
+ total = RoundUp(total, 16); /* Align for coefficients using SIMD */
+ total += sizeof(Hrtf->coeffs[0])*irSize*irCount;
+ total += sizeof(Hrtf->delays[0])*irCount;
+
+ Hrtf.reset(new (al_calloc(16, total)) HrtfEntry{});
+ if(!Hrtf)
+ ERR("Out of memory allocating storage for %s.\n", filename);
+ else
+ {
+ InitRef(&Hrtf->ref, 1u);
+ Hrtf->sampleRate = rate;
+ Hrtf->irSize = irSize;
+ Hrtf->fdCount = fdCount;
+
+ /* Set up pointers to storage following the main HRTF struct. */
+ char *base = reinterpret_cast<char*>(Hrtf.get());
+ uintptr_t offset = sizeof(HrtfEntry);
+
+ offset = RoundUp(offset, alignof(HrtfEntry::Field)); /* Align for field infos */
+ auto field_ = reinterpret_cast<HrtfEntry::Field*>(base + offset);
+ offset += sizeof(field_[0])*fdCount;
+
+ offset = RoundUp(offset, alignof(HrtfEntry::Elevation)); /* Align for elevation infos */
+ auto elev_ = reinterpret_cast<HrtfEntry::Elevation*>(base + offset);
+ offset += sizeof(elev_[0])*evTotal;
+
+ offset = RoundUp(offset, 16); /* Align for coefficients using SIMD */
+ auto coeffs_ = reinterpret_cast<ALfloat(*)[2]>(base + offset);
+ offset += sizeof(coeffs_[0])*irSize*irCount;
+
+ auto delays_ = reinterpret_cast<ALubyte(*)[2]>(base + offset);
+ offset += sizeof(delays_[0])*irCount;
+
+ assert(offset == total);
+
+ /* Copy input data to storage. */
+ for(ALsizei i{0};i < fdCount;i++)
+ {
+ field_[i].distance = distance[i];
+ field_[i].evCount = evCount[i];
+ }
+ for(ALsizei i{0};i < evTotal;i++)
+ {
+ elev_[i].azCount = azCount[i];
+ elev_[i].irOffset = irOffset[i];
+ }
+ for(ALsizei i{0};i < irSize*irCount;i++)
+ {
+ coeffs_[i][0] = coeffs[i][0];
+ coeffs_[i][1] = coeffs[i][1];
+ }
+ for(ALsizei i{0};i < irCount;i++)
+ {
+ delays_[i][0] = delays[i][0];
+ delays_[i][1] = delays[i][1];
+ }
+
+ /* Finally, assign the storage pointers. */
+ Hrtf->field = field_;
+ Hrtf->elev = elev_;
+ Hrtf->coeffs = coeffs_;
+ Hrtf->delays = delays_;
+ }
+
+ return Hrtf;
+}
+
+ALubyte GetLE_ALubyte(std::istream &data)
+{
+ return static_cast<ALubyte>(data.get());
+}
+
+ALshort GetLE_ALshort(std::istream &data)
+{
+ int ret = data.get();
+ ret |= data.get() << 8;
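+ /* The XOR/subtract pair below sign-extends the 16-bit value, mapping the
+ * unsigned range 0..65535 to -32768..32767; GetLE_ALint24 uses the same
+ * trick with 8388608 (2^23) for 24-bit samples.
+ */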
+ return static_cast<ALshort>((ret^32768) - 32768);
+}
+
+ALushort GetLE_ALushort(std::istream &data)
+{
+ int ret = data.get();
+ ret |= data.get() << 8;
+ return static_cast<ALushort>(ret);
+}
+
+ALint GetLE_ALint24(std::istream &data)
+{
+ int ret = data.get();
+ ret |= data.get() << 8;
+ ret |= data.get() << 16;
+ return (ret^8388608) - 8388608;
+}
+
+ALuint GetLE_ALuint(std::istream &data)
+{
+ int ret = data.get();
+ ret |= data.get() << 8;
+ ret |= data.get() << 16;
+ ret |= data.get() << 24;
+ return ret;
+}
+
+std::unique_ptr<HrtfEntry> LoadHrtf00(std::istream &data, const char *filename)
+{
+ ALuint rate{GetLE_ALuint(data)};
+ ALushort irCount{GetLE_ALushort(data)};
+ ALushort irSize{GetLE_ALushort(data)};
+ ALubyte evCount{GetLE_ALubyte(data)};
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+
+ ALboolean failed{AL_FALSE};
+ if(irSize < MIN_IR_SIZE || irSize > MAX_IR_SIZE || (irSize%MOD_IR_SIZE))
+ {
+ ERR("Unsupported HRIR size: irSize=%d (%d to %d by %d)\n",
+ irSize, MIN_IR_SIZE, MAX_IR_SIZE, MOD_IR_SIZE);
+ failed = AL_TRUE;
+ }
+ if(evCount < MIN_EV_COUNT || evCount > MAX_EV_COUNT)
+ {
+ ERR("Unsupported elevation count: evCount=%d (%d to %d)\n",
+ evCount, MIN_EV_COUNT, MAX_EV_COUNT);
+ failed = AL_TRUE;
+ }
+ if(failed)
+ return nullptr;
+
+ al::vector<ALushort> evOffset(evCount);
+ for(auto &val : evOffset)
+ val = GetLE_ALushort(data);
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+ for(ALsizei i{1};i < evCount;i++)
+ {
+ if(evOffset[i] <= evOffset[i-1])
+ {
+ ERR("Invalid evOffset: evOffset[%d]=%d (last=%d)\n",
+ i, evOffset[i], evOffset[i-1]);
+ failed = AL_TRUE;
+ }
+ }
+ if(irCount <= evOffset.back())
+ {
+ ERR("Invalid evOffset: evOffset[%zu]=%d (irCount=%d)\n",
+ evOffset.size()-1, evOffset.back(), irCount);
+ failed = AL_TRUE;
+ }
+ if(failed)
+ return nullptr;
+
+ al::vector<ALushort> azCount(evCount);
+ for(ALsizei i{1};i < evCount;i++)
+ {
+ azCount[i-1] = evOffset[i] - evOffset[i-1];
+ if(azCount[i-1] < MIN_AZ_COUNT || azCount[i-1] > MAX_AZ_COUNT)
+ {
+ ERR("Unsupported azimuth count: azCount[%d]=%d (%d to %d)\n",
+ i-1, azCount[i-1], MIN_AZ_COUNT, MAX_AZ_COUNT);
+ failed = AL_TRUE;
+ }
+ }
+ azCount.back() = irCount - evOffset.back();
+ if(azCount.back() < MIN_AZ_COUNT || azCount.back() > MAX_AZ_COUNT)
+ {
+ ERR("Unsupported azimuth count: azCount[%zu]=%d (%d to %d)\n",
+ azCount.size()-1, azCount.back(), MIN_AZ_COUNT, MAX_AZ_COUNT);
+ failed = AL_TRUE;
+ }
+ if(failed)
+ return nullptr;
+
+ al::vector<std::array<ALfloat,2>> coeffs(irSize*irCount);
+ al::vector<std::array<ALubyte,2>> delays(irCount);
+ for(auto &val : coeffs)
+ val[0] = GetLE_ALshort(data) / 32768.0f;
+ for(auto &val : delays)
+ val[0] = GetLE_ALubyte(data);
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+ for(ALsizei i{0};i < irCount;i++)
+ {
+ if(delays[i][0] > MAX_HRIR_DELAY)
+ {
+ ERR("Invalid delays[%d]: %d (%d)\n", i, delays[i][0], MAX_HRIR_DELAY);
+ failed = AL_TRUE;
+ }
+ }
+ if(failed)
+ return nullptr;
+
+ /* Mirror the left ear responses to the right ear. */
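+ /* The right-ear response at the mirrored azimuth index (azcount-j) % azcount
+ * is copied from the left-ear response at index j (index 0 mirrors to itself).
+ */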
+ for(ALsizei i{0};i < evCount;i++)
+ {
+ const ALushort evoffset{evOffset[i]};
+ const ALushort azcount{azCount[i]};
+ for(ALsizei j{0};j < azcount;j++)
+ {
+ const ALsizei lidx{evoffset + j};
+ const ALsizei ridx{evoffset + ((azcount-j) % azcount)};
+
+ for(ALsizei k{0};k < irSize;k++)
+ coeffs[ridx*irSize + k][1] = coeffs[lidx*irSize + k][0];
+ delays[ridx][1] = delays[lidx][0];
+ }
+ }
+
+ static constexpr ALfloat distance{0.0f};
+ return CreateHrtfStore(rate, irSize, 1, &evCount, &distance, azCount.data(), evOffset.data(),
+ irCount, &reinterpret_cast<ALfloat(&)[2]>(coeffs[0]),
+ &reinterpret_cast<ALubyte(&)[2]>(delays[0]), filename);
+}
+
+std::unique_ptr<HrtfEntry> LoadHrtf01(std::istream &data, const char *filename)
+{
+ ALuint rate{GetLE_ALuint(data)};
+ ALushort irSize{GetLE_ALubyte(data)};
+ ALubyte evCount{GetLE_ALubyte(data)};
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+
+ ALboolean failed{AL_FALSE};
+ if(irSize < MIN_IR_SIZE || irSize > MAX_IR_SIZE || (irSize%MOD_IR_SIZE))
+ {
+ ERR("Unsupported HRIR size: irSize=%d (%d to %d by %d)\n",
+ irSize, MIN_IR_SIZE, MAX_IR_SIZE, MOD_IR_SIZE);
+ failed = AL_TRUE;
+ }
+ if(evCount < MIN_EV_COUNT || evCount > MAX_EV_COUNT)
+ {
+ ERR("Unsupported elevation count: evCount=%d (%d to %d)\n",
+ evCount, MIN_EV_COUNT, MAX_EV_COUNT);
+ failed = AL_TRUE;
+ }
+ if(failed)
+ return nullptr;
+
+ al::vector<ALushort> azCount(evCount);
+ std::generate(azCount.begin(), azCount.end(), std::bind(GetLE_ALubyte, std::ref(data)));
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+ for(ALsizei i{0};i < evCount;++i)
+ {
+ if(azCount[i] < MIN_AZ_COUNT || azCount[i] > MAX_AZ_COUNT)
+ {
+ ERR("Unsupported azimuth count: azCount[%d]=%d (%d to %d)\n",
+ i, azCount[i], MIN_AZ_COUNT, MAX_AZ_COUNT);
+ failed = AL_TRUE;
+ }
+ }
+ if(failed)
+ return nullptr;
+
+ al::vector<ALushort> evOffset(evCount);
+ evOffset[0] = 0;
+ ALushort irCount{azCount[0]};
+ for(ALsizei i{1};i < evCount;i++)
+ {
+ evOffset[i] = evOffset[i-1] + azCount[i-1];
+ irCount += azCount[i];
+ }
+
+ al::vector<std::array<ALfloat,2>> coeffs(irSize*irCount);
+ al::vector<std::array<ALubyte,2>> delays(irCount);
+ for(auto &val : coeffs)
+ val[0] = GetLE_ALshort(data) / 32768.0f;
+ for(auto &val : delays)
+ val[0] = GetLE_ALubyte(data);
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+ for(ALsizei i{0};i < irCount;i++)
+ {
+ if(delays[i][0] > MAX_HRIR_DELAY)
+ {
+ ERR("Invalid delays[%d]: %d (%d)\n", i, delays[i][0], MAX_HRIR_DELAY);
+ failed = AL_TRUE;
+ }
+ }
+ if(failed)
+ return nullptr;
+
+ /* Mirror the left ear responses to the right ear. */
+ for(ALsizei i{0};i < evCount;i++)
+ {
+ const ALushort evoffset{evOffset[i]};
+ const ALushort azcount{azCount[i]};
+ for(ALsizei j{0};j < azcount;j++)
+ {
+ const ALsizei lidx{evoffset + j};
+ const ALsizei ridx{evoffset + ((azcount-j) % azcount)};
+
+ for(ALsizei k{0};k < irSize;k++)
+ coeffs[ridx*irSize + k][1] = coeffs[lidx*irSize + k][0];
+ delays[ridx][1] = delays[lidx][0];
+ }
+ }
+
+ static constexpr ALfloat distance{0.0f};
+ return CreateHrtfStore(rate, irSize, 1, &evCount, &distance, azCount.data(), evOffset.data(),
+ irCount, &reinterpret_cast<ALfloat(&)[2]>(coeffs[0]),
+ &reinterpret_cast<ALubyte(&)[2]>(delays[0]), filename);
+}
+
+#define SAMPLETYPE_S16 0
+#define SAMPLETYPE_S24 1
+
+#define CHANTYPE_LEFTONLY 0
+#define CHANTYPE_LEFTRIGHT 1
+
+std::unique_ptr<HrtfEntry> LoadHrtf02(std::istream &data, const char *filename)
+{
+ ALuint rate{GetLE_ALuint(data)};
+ ALubyte sampleType{GetLE_ALubyte(data)};
+ ALubyte channelType{GetLE_ALubyte(data)};
+ ALushort irSize{GetLE_ALubyte(data)};
+ ALubyte fdCount{GetLE_ALubyte(data)};
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+
+ ALboolean failed{AL_FALSE};
+ if(sampleType > SAMPLETYPE_S24)
+ {
+ ERR("Unsupported sample type: %d\n", sampleType);
+ failed = AL_TRUE;
+ }
+ if(channelType > CHANTYPE_LEFTRIGHT)
+ {
+ ERR("Unsupported channel type: %d\n", channelType);
+ failed = AL_TRUE;
+ }
+
+ if(irSize < MIN_IR_SIZE || irSize > MAX_IR_SIZE || (irSize%MOD_IR_SIZE))
+ {
+ ERR("Unsupported HRIR size: irSize=%d (%d to %d by %d)\n",
+ irSize, MIN_IR_SIZE, MAX_IR_SIZE, MOD_IR_SIZE);
+ failed = AL_TRUE;
+ }
+ if(fdCount < 1 || fdCount > MAX_FD_COUNT)
+ {
+ ERR("Unsupported number of field-depths: fdCount=%d (%d to %d)\n",
+ fdCount, MIN_FD_COUNT, MAX_FD_COUNT);
+ failed = AL_TRUE;
+ }
+ if(failed)
+ return nullptr;
+
+ al::vector<ALfloat> distance(fdCount);
+ al::vector<ALubyte> evCount(fdCount);
+ al::vector<ALushort> azCount;
+ for(ALsizei f{0};f < fdCount;f++)
+ {
+ distance[f] = GetLE_ALushort(data) / 1000.0f;
+ evCount[f] = GetLE_ALubyte(data);
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+
+ if(distance[f] < MIN_FD_DISTANCE || distance[f] > MAX_FD_DISTANCE)
+ {
+ ERR("Unsupported field distance[%d]=%f (%f to %f meters)\n", f,
+ distance[f], MIN_FD_DISTANCE, MAX_FD_DISTANCE);
+ failed = AL_TRUE;
+ }
+ if(f > 0 && distance[f] <= distance[f-1])
+ {
+ ERR("Field distance[%d] is not after previous (%f > %f)\n", f, distance[f],
+ distance[f-1]);
+ failed = AL_TRUE;
+ }
+ if(evCount[f] < MIN_EV_COUNT || evCount[f] > MAX_EV_COUNT)
+ {
+ ERR("Unsupported elevation count: evCount[%d]=%d (%d to %d)\n", f,
+ evCount[f], MIN_EV_COUNT, MAX_EV_COUNT);
+ failed = AL_TRUE;
+ }
+ if(failed)
+ return nullptr;
+
+ size_t ebase{azCount.size()};
+ azCount.resize(ebase + evCount[f]);
+ std::generate(azCount.begin()+ebase, azCount.end(),
+ std::bind(GetLE_ALubyte, std::ref(data)));
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+
+ for(ALsizei e{0};e < evCount[f];e++)
+ {
+ if(azCount[ebase+e] < MIN_AZ_COUNT || azCount[ebase+e] > MAX_AZ_COUNT)
+ {
+ ERR("Unsupported azimuth count: azCount[%d][%d]=%d (%d to %d)\n", f, e,
+ azCount[ebase+e], MIN_AZ_COUNT, MAX_AZ_COUNT);
+ failed = AL_TRUE;
+ }
+ }
+ if(failed)
+ return nullptr;
+ }
+
+ al::vector<ALushort> evOffset(azCount.size());
+ evOffset[0] = 0;
+ std::partial_sum(azCount.cbegin(), azCount.cend()-1, evOffset.begin()+1);
+ const ALsizei irTotal{evOffset.back() + azCount.back()};
+
+ al::vector<std::array<ALfloat,2>> coeffs(irSize*irTotal);
+ al::vector<std::array<ALubyte,2>> delays(irTotal);
+ if(channelType == CHANTYPE_LEFTONLY)
+ {
+ if(sampleType == SAMPLETYPE_S16)
+ {
+ for(auto &val : coeffs)
+ val[0] = GetLE_ALshort(data) / 32768.0f;
+ }
+ else if(sampleType == SAMPLETYPE_S24)
+ {
+ for(auto &val : coeffs)
+ val[0] = GetLE_ALint24(data) / 8388608.0f;
+ }
+ for(auto &val : delays)
+ val[0] = GetLE_ALubyte(data);
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+ for(ALsizei i{0};i < irTotal;++i)
+ {
+ if(delays[i][0] > MAX_HRIR_DELAY)
+ {
+ ERR("Invalid delays[%d][0]: %d (%d)\n", i, delays[i][0], MAX_HRIR_DELAY);
+ failed = AL_TRUE;
+ }
+ }
+ }
+ else if(channelType == CHANTYPE_LEFTRIGHT)
+ {
+ if(sampleType == SAMPLETYPE_S16)
+ {
+ for(auto &val : coeffs)
+ {
+ val[0] = GetLE_ALshort(data) / 32768.0f;
+ val[1] = GetLE_ALshort(data) / 32768.0f;
+ }
+ }
+ else if(sampleType == SAMPLETYPE_S24)
+ {
+ for(auto &val : coeffs)
+ {
+ val[0] = GetLE_ALint24(data) / 8388608.0f;
+ val[1] = GetLE_ALint24(data) / 8388608.0f;
+ }
+ }
+ for(auto &val : delays)
+ {
+ val[0] = GetLE_ALubyte(data);
+ val[1] = GetLE_ALubyte(data);
+ }
+ if(!data || data.eof())
+ {
+ ERR("Failed reading %s\n", filename);
+ return nullptr;
+ }
+
+ for(ALsizei i{0};i < irTotal;++i)
+ {
+ if(delays[i][0] > MAX_HRIR_DELAY)
+ {
+ ERR("Invalid delays[%d][0]: %d (%d)\n", i, delays[i][0], MAX_HRIR_DELAY);
+ failed = AL_TRUE;
+ }
+ if(delays[i][1] > MAX_HRIR_DELAY)
+ {
+ ERR("Invalid delays[%d][1]: %d (%d)\n", i, delays[i][1], MAX_HRIR_DELAY);
+ failed = AL_TRUE;
+ }
+ }
+ }
+ if(failed)
+ return nullptr;
+
+ if(channelType == CHANTYPE_LEFTONLY)
+ {
+ /* Mirror the left ear responses to the right ear. */
+ ALsizei ebase{0};
+ for(ALsizei f{0};f < fdCount;f++)
+ {
+ for(ALsizei e{0};e < evCount[f];e++)
+ {
+ const ALushort evoffset{evOffset[ebase+e]};
+ const ALushort azcount{azCount[ebase+e]};
+ for(ALsizei a{0};a < azcount;a++)
+ {
+ const ALsizei lidx{evoffset + a};
+ const ALsizei ridx{evoffset + ((azcount-a) % azcount)};
+
+ for(ALsizei k{0};k < irSize;k++)
+ coeffs[ridx*irSize + k][1] = coeffs[lidx*irSize + k][0];
+ delays[ridx][1] = delays[lidx][0];
+ }
+ }
+ ebase += evCount[f];
+ }
+ }
+
+ if(fdCount > 1)
+ {
+ auto distance_ = al::vector<ALfloat>(distance.size());
+ auto evCount_ = al::vector<ALubyte>(evCount.size());
+ auto azCount_ = al::vector<ALushort>(azCount.size());
+ auto evOffset_ = al::vector<ALushort>(evOffset.size());
+ auto coeffs_ = al::vector<float2>(coeffs.size());
+ auto delays_ = al::vector<std::array<ALubyte,2>>(delays.size());
+
+ /* Simple reverse for the per-field elements. */
+ std::reverse_copy(distance.cbegin(), distance.cend(), distance_.begin());
+ std::reverse_copy(evCount.cbegin(), evCount.cend(), evCount_.begin());
+
+ /* Each field has a group of elevations, which each have an azimuth
+ * count. Reverse the order of the groups, keeping the relative order
+ * of per-group azimuth counts.
+ */
+ auto azcnt_end = azCount_.end();
+ auto copy_azs = [&azCount,&azcnt_end](const size_t ebase, const ALubyte num_evs) -> size_t
+ {
+ auto azcnt_src = azCount.begin()+ebase;
+ azcnt_end = std::copy_backward(azcnt_src, azcnt_src+num_evs, azcnt_end);
+ return ebase + num_evs;
+ };
+ std::accumulate(evCount.cbegin(), evCount.cend(), size_t{0u}, copy_azs);
+ assert(azCount_.begin() == azcnt_end);
+
+ /* Reestablish the IR offset for each elevation index, given the new
+ * ordering of elevations.
+ */
+ evOffset_[0] = 0;
+ std::partial_sum(azCount_.cbegin(), azCount_.cend()-1, evOffset_.begin()+1);
+
+ /* Reverse the order of each field's group of IRs. */
+ auto coeffs_end = coeffs_.end();
+ auto delays_end = delays_.end();
+ auto copy_irs = [irSize,&azCount,&coeffs,&delays,&coeffs_end,&delays_end](const size_t ebase, const ALubyte num_evs) -> size_t
+ {
+ const ALsizei abase{std::accumulate(azCount.cbegin(), azCount.cbegin()+ebase, 0)};
+ const ALsizei num_azs{std::accumulate(azCount.cbegin()+ebase,
+ azCount.cbegin() + (ebase+num_evs), 0)};
+
+ coeffs_end = std::copy_backward(coeffs.cbegin() + abase*irSize,
+ coeffs.cbegin() + (abase+num_azs)*irSize, coeffs_end);
+ delays_end = std::copy_backward(delays.cbegin() + abase,
+ delays.cbegin() + (abase+num_azs), delays_end);
+
+ return ebase + num_evs;
+ };
+ std::accumulate(evCount.cbegin(), evCount.cend(), size_t{0u}, copy_irs);
+ assert(coeffs_.begin() == coeffs_end);
+ assert(delays_.begin() == delays_end);
+
+ distance = std::move(distance_);
+ evCount = std::move(evCount_);
+ azCount = std::move(azCount_);
+ evOffset = std::move(evOffset_);
+ coeffs = std::move(coeffs_);
+ delays = std::move(delays_);
+ }
+
+ return CreateHrtfStore(rate, irSize, fdCount, evCount.data(), distance.data(), azCount.data(),
+ evOffset.data(), irTotal, &reinterpret_cast<ALfloat(&)[2]>(coeffs[0]),
+ &reinterpret_cast<ALubyte(&)[2]>(delays[0]), filename);
+}
+
+
+bool checkName(al::vector<EnumeratedHrtf> &list, const std::string &name)
+{
+ return std::find_if(list.cbegin(), list.cend(),
+ [&name](const EnumeratedHrtf &entry)
+ { return name == entry.name; }
+ ) != list.cend();
+}
+
+void AddFileEntry(al::vector<EnumeratedHrtf> &list, const std::string &filename)
+{
+ /* Check if this file has already been loaded globally. */
+ auto loaded_entry = LoadedHrtfs.begin();
+ for(;loaded_entry != LoadedHrtfs.end();++loaded_entry)
+ {
+ if(filename != (*loaded_entry)->filename.data())
+ continue;
+
+ /* Check if this entry has already been added to the list. */
+ auto iter = std::find_if(list.cbegin(), list.cend(),
+ [loaded_entry](const EnumeratedHrtf &entry) -> bool
+ { return loaded_entry->get() == entry.hrtf; }
+ );
+ if(iter != list.cend())
+ {
+ TRACE("Skipping duplicate file entry %s\n", filename.c_str());
+ return;
+ }
+
+ break;
+ }
+
+ if(loaded_entry == LoadedHrtfs.end())
+ {
+ TRACE("Got new file \"%s\"\n", filename.c_str());
+
+ LoadedHrtfs.emplace_back(HrtfHandle::Create(filename.length()+1));
+ loaded_entry = LoadedHrtfs.end()-1;
+ std::copy(filename.begin(), filename.end(), (*loaded_entry)->filename.begin());
+ (*loaded_entry)->filename.back() = '\0';
+ }
+
+ /* TODO: Get a human-readable name from the HRTF data (possibly coming in a
+ * format update). */
+ size_t namepos = filename.find_last_of('/')+1;
+ if(!namepos) namepos = filename.find_last_of('\\')+1;
+
+ size_t extpos{filename.find_last_of('.')};
+ if(extpos <= namepos) extpos = std::string::npos;
+
+ const std::string basename{(extpos == std::string::npos) ?
+ filename.substr(namepos) : filename.substr(namepos, extpos-namepos)};
+ std::string newname{basename};
+ int count{1};
+ while(checkName(list, newname))
+ {
+ newname = basename;
+ newname += " #";
+ newname += std::to_string(++count);
+ }
+ list.emplace_back(EnumeratedHrtf{newname, loaded_entry->get()});
+ const EnumeratedHrtf &entry = list.back();
+
+ TRACE("Adding file entry \"%s\"\n", entry.name.c_str());
+}
+
+/* Unfortunate that we have to duplicate AddFileEntry to take a memory buffer
+ * for input instead of opening the given filename.
+ */
+void AddBuiltInEntry(al::vector<EnumeratedHrtf> &list, const std::string &filename, ALuint residx)
+{
+ auto loaded_entry = LoadedHrtfs.begin();
+ for(;loaded_entry != LoadedHrtfs.end();++loaded_entry)
+ {
+ if(filename != (*loaded_entry)->filename.data())
+ continue;
+
+ /* Check if this entry has already been added to the list. */
+ auto iter = std::find_if(list.cbegin(), list.cend(),
+ [loaded_entry](const EnumeratedHrtf &entry) -> bool
+ { return loaded_entry->get() == entry.hrtf; }
+ );
+ if(iter != list.cend())
+ {
+ TRACE("Skipping duplicate file entry %s\n", filename.c_str());
+ return;
+ }
+
+ break;
+ }
+
+ if(loaded_entry == LoadedHrtfs.end())
+ {
+ TRACE("Got new file \"%s\"\n", filename.c_str());
+
+ LoadedHrtfs.emplace_back(HrtfHandle::Create(filename.length()+32));
+ loaded_entry = LoadedHrtfs.end()-1;
+ snprintf((*loaded_entry)->filename.data(), (*loaded_entry)->filename.size(), "!%u_%s",
+ residx, filename.c_str());
+ }
+
+ /* TODO: Get a human-readable name from the HRTF data (possibly coming in a
+ * format update). */
+
+ std::string newname{filename};
+ int count{1};
+ while(checkName(list, newname))
+ {
+ newname = filename;
+ newname += " #";
+ newname += std::to_string(++count);
+ }
+ list.emplace_back(EnumeratedHrtf{newname, loaded_entry->get()});
+ const EnumeratedHrtf &entry = list.back();
+
+ TRACE("Adding built-in entry \"%s\"\n", entry.name.c_str());
+}
+
+
+#define IDR_DEFAULT_44100_MHR 1
+#define IDR_DEFAULT_48000_MHR 2
+
+using ResData = al::span<const char>;
+#ifndef ALSOFT_EMBED_HRTF_DATA
+
+ResData GetResource(int /*name*/)
+{ return ResData{}; }
+
+#else
+
+#include "default-44100.mhr.h"
+#include "default-48000.mhr.h"
+
+ResData GetResource(int name)
+{
+ if(name == IDR_DEFAULT_44100_MHR)
+ return {reinterpret_cast<const char*>(hrtf_default_44100), sizeof(hrtf_default_44100)};
+ if(name == IDR_DEFAULT_48000_MHR)
+ return {reinterpret_cast<const char*>(hrtf_default_48000), sizeof(hrtf_default_48000)};
+ return ResData{};
+}
+#endif
+
+} // namespace
+
+
+al::vector<EnumeratedHrtf> EnumerateHrtf(const char *devname)
+{
+ al::vector<EnumeratedHrtf> list;
+
+ bool usedefaults{true};
+ if(auto pathopt = ConfigValueStr(devname, nullptr, "hrtf-paths"))
+ {
+ const char *pathlist{pathopt->c_str()};
+ while(pathlist && *pathlist)
+ {
+ const char *next, *end;
+
+ while(isspace(*pathlist) || *pathlist == ',')
+ pathlist++;
+ if(*pathlist == '\0')
+ continue;
+
+ next = strchr(pathlist, ',');
+ if(next)
+ end = next++;
+ else
+ {
+ end = pathlist + strlen(pathlist);
+ usedefaults = false;
+ }
+
+ while(end != pathlist && isspace(*(end-1)))
+ --end;
+ if(end != pathlist)
+ {
+ const std::string pname{pathlist, end};
+ for(const auto &fname : SearchDataFiles(".mhr", pname.c_str()))
+ AddFileEntry(list, fname);
+ }
+
+ pathlist = next;
+ }
+ }
+ else if(ConfigValueExists(devname, nullptr, "hrtf_tables"))
+ ERR("The hrtf_tables option is deprecated, please use hrtf-paths instead.\n");
+
+ if(usedefaults)
+ {
+ for(const auto &fname : SearchDataFiles(".mhr", "openal/hrtf"))
+ AddFileEntry(list, fname);
+
+ if(!GetResource(IDR_DEFAULT_44100_MHR).empty())
+ AddBuiltInEntry(list, "Built-In 44100hz", IDR_DEFAULT_44100_MHR);
+
+ if(!GetResource(IDR_DEFAULT_48000_MHR).empty())
+ AddBuiltInEntry(list, "Built-In 48000hz", IDR_DEFAULT_48000_MHR);
+ }
+
+ if(!list.empty())
+ {
+ if(auto defhrtfopt = ConfigValueStr(devname, nullptr, "default-hrtf"))
+ {
+ auto iter = std::find_if(list.begin(), list.end(),
+ [&defhrtfopt](const EnumeratedHrtf &entry) -> bool
+ { return entry.name == *defhrtfopt; }
+ );
+ if(iter == list.end())
+ WARN("Failed to find default HRTF \"%s\"\n", defhrtfopt->c_str());
+ else if(iter != list.begin())
+ {
+ EnumeratedHrtf entry{std::move(*iter)};
+ list.erase(iter);
+ list.insert(list.begin(), std::move(entry));
+ }
+ }
+ }
+
+ return list;
+}
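+
+/* A minimal configuration sketch exercising the options read above (the
+ * paths are illustrative):
+ *
+ *   hrtf-paths = /usr/share/custom-hrtf, /home/user/hrtf-sets,
+ *   default-hrtf = Built-In 48000hz
+ *
+ * Ending hrtf-paths with a comma keeps the default search locations in
+ * addition to the listed ones; without the trailing comma, usedefaults is
+ * cleared and only the listed paths are scanned.
+ */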
+
+HrtfEntry *GetLoadedHrtf(HrtfHandle *handle)
+{
+ std::lock_guard<std::mutex> _{LoadedHrtfLock};
+
+ if(handle->entry)
+ {
+ HrtfEntry *hrtf{handle->entry.get()};
+ hrtf->IncRef();
+ return hrtf;
+ }
+
+ std::unique_ptr<std::istream> stream;
+ const char *name{""};
+ ALuint residx{};
+ char ch{};
+ if(sscanf(handle->filename.data(), "!%u%c", &residx, &ch) == 2 && ch == '_')
+ {
+ name = strchr(handle->filename.data(), ch)+1;
+
+ TRACE("Loading %s...\n", name);
+ ResData res{GetResource(residx)};
+ if(res.empty())
+ {
+ ERR("Could not get resource %u, %s\n", residx, name);
+ return nullptr;
+ }
+ stream = al::make_unique<idstream>(res.begin(), res.end());
+ }
+ else
+ {
+ name = handle->filename.data();
+
+ TRACE("Loading %s...\n", handle->filename.data());
+ auto fstr = al::make_unique<al::ifstream>(handle->filename.data(), std::ios::binary);
+ if(!fstr->is_open())
+ {
+ ERR("Could not open %s\n", handle->filename.data());
+ return nullptr;
+ }
+ stream = std::move(fstr);
+ }
+
+ std::unique_ptr<HrtfEntry> hrtf;
+ char magic[sizeof(magicMarker02)];
+ stream->read(magic, sizeof(magic));
+ if(stream->gcount() < static_cast<std::streamsize>(sizeof(magicMarker02)))
+ ERR("%s data is too short (%zu bytes)\n", name, stream->gcount());
+ else if(memcmp(magic, magicMarker02, sizeof(magicMarker02)) == 0)
+ {
+ TRACE("Detected data set format v2\n");
+ hrtf = LoadHrtf02(*stream, name);
+ }
+ else if(memcmp(magic, magicMarker01, sizeof(magicMarker01)) == 0)
+ {
+ TRACE("Detected data set format v1\n");
+ hrtf = LoadHrtf01(*stream, name);
+ }
+ else if(memcmp(magic, magicMarker00, sizeof(magicMarker00)) == 0)
+ {
+ TRACE("Detected data set format v0\n");
+ hrtf = LoadHrtf00(*stream, name);
+ }
+ else
+ ERR("Invalid header in %s: \"%.8s\"\n", name, magic);
+ stream.reset();
+
+ if(!hrtf)
+ {
+ ERR("Failed to load %s\n", name);
+ return nullptr;
+ }
+
+ TRACE("Loaded HRTF support for format: %s %uhz\n",
+ DevFmtChannelsString(DevFmtStereo), hrtf->sampleRate);
+ handle->entry = std::move(hrtf);
+
+ return handle->entry.get();
+}
+
+
+void HrtfEntry::IncRef()
+{
+ auto ref = IncrementRef(&this->ref);
+ TRACEREF("HrtfEntry %p increasing refcount to %u\n", this, ref);
+}
+
+void HrtfEntry::DecRef()
+{
+ auto ref = DecrementRef(&this->ref);
+ TRACEREF("HrtfEntry %p decreasing refcount to %u\n", this, ref);
+ if(ref == 0)
+ {
+ std::lock_guard<std::mutex> _{LoadedHrtfLock};
+
+ /* Go through and clear all unused HRTFs. */
+ auto delete_unused = [](HrtfHandlePtr &handle) -> void
+ {
+ HrtfEntry *entry{handle->entry.get()};
+ if(entry && ReadRef(&entry->ref) == 0)
+ {
+ TRACE("Unloading unused HRTF %s\n", handle->filename.data());
+ handle->entry = nullptr;
+ }
+ };
+ std::for_each(LoadedHrtfs.begin(), LoadedHrtfs.end(), delete_unused);
+ }
+}
diff --git a/alc/hrtf.h b/alc/hrtf.h
new file mode 100644
index 00000000..6c41cb82
--- /dev/null
+++ b/alc/hrtf.h
@@ -0,0 +1,124 @@
+#ifndef ALC_HRTF_H
+#define ALC_HRTF_H
+
+#include <array>
+#include <cstddef>
+#include <memory>
+#include <string>
+
+#include "AL/al.h"
+
+#include "almalloc.h"
+#include "ambidefs.h"
+#include "atomic.h"
+#include "vector.h"
+
+
+struct HrtfHandle;
+
+#define HRTF_HISTORY_BITS (6)
+#define HRTF_HISTORY_LENGTH (1<<HRTF_HISTORY_BITS)
+#define HRTF_HISTORY_MASK (HRTF_HISTORY_LENGTH-1)
+
+#define HRIR_BITS (7)
+#define HRIR_LENGTH (1<<HRIR_BITS)
+#define HRIR_MASK (HRIR_LENGTH-1)
+
+
+struct HrtfEntry {
+ RefCount ref;
+
+ ALuint sampleRate;
+ ALsizei irSize;
+
+ struct Field {
+ ALfloat distance;
+ ALubyte evCount;
+ };
+ /* NOTE: Fields are stored *backwards*. field[0] is the farthest field, and
+ * field[fdCount-1] is the nearest.
+ */
+ ALsizei fdCount;
+ const Field *field;
+
+ struct Elevation {
+ ALushort azCount;
+ ALushort irOffset;
+ };
+ Elevation *elev;
+ const ALfloat (*coeffs)[2];
+ const ALubyte (*delays)[2];
+
+ void IncRef();
+ void DecRef();
+
+ DEF_PLACE_NEWDEL()
+};
+
+struct EnumeratedHrtf {
+ std::string name;
+
+ HrtfHandle *hrtf;
+};
+
+
+using float2 = std::array<float,2>;
+
+template<typename T>
+using HrirArray = std::array<std::array<T,2>,HRIR_LENGTH>;
+
+struct HrtfState {
+ alignas(16) std::array<ALfloat,HRTF_HISTORY_LENGTH> History;
+ alignas(16) HrirArray<ALfloat> Values;
+};
+
+struct HrtfFilter {
+ alignas(16) HrirArray<ALfloat> Coeffs;
+ ALsizei Delay[2];
+ ALfloat Gain;
+};
+
+struct DirectHrtfState {
+ /* HRTF filter state for dry buffer content */
+ ALsizei IrSize{0};
+ struct ChanData {
+ alignas(16) HrirArray<ALfloat> Values;
+ alignas(16) HrirArray<ALfloat> Coeffs;
+ };
+ al::FlexArray<ChanData> Chan;
+
+ DirectHrtfState(size_t numchans) : Chan{numchans} { }
+ DirectHrtfState(const DirectHrtfState&) = delete;
+ DirectHrtfState& operator=(const DirectHrtfState&) = delete;
+
+ static std::unique_ptr<DirectHrtfState> Create(size_t num_chans);
+ static constexpr size_t Sizeof(size_t numchans) noexcept
+ { return al::FlexArray<ChanData>::Sizeof(numchans, offsetof(DirectHrtfState, Chan)); }
+
+ DEF_PLACE_NEWDEL()
+};
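+
+/* A DirectHrtfState is normally obtained through Create(), which sizes the
+ * trailing Chan array for the requested channel count, e.g. (channel count
+ * illustrative):
+ *
+ *   auto state = DirectHrtfState::Create(num_output_channels);
+ */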
+
+struct AngularPoint {
+ ALfloat Elev;
+ ALfloat Azim;
+};
+
+
+al::vector<EnumeratedHrtf> EnumerateHrtf(const char *devname);
+HrtfEntry *GetLoadedHrtf(HrtfHandle *handle);
+
+void GetHrtfCoeffs(const HrtfEntry *Hrtf, ALfloat elevation, ALfloat azimuth, ALfloat distance,
+ ALfloat spread, HrirArray<ALfloat> &coeffs, ALsizei (&delays)[2]);
+
+/**
+ * Produces HRTF filter coefficients for decoding B-Format, given a set of
+ * virtual speaker positions, a matching decoding matrix, and per-order high-
+ * frequency gains for the decoder. The calculated impulse responses are
+ * ordered and scaled according to the matrix input. Note the specified virtual
+ * positions should be in degrees, not radians!
+ */
+void BuildBFormatHrtf(const HrtfEntry *Hrtf, DirectHrtfState *state, const ALuint NumChannels,
+ const AngularPoint *AmbiPoints, const ALfloat (*RESTRICT AmbiMatrix)[MAX_AMBI_CHANNELS],
+ const size_t AmbiCount, const ALfloat *RESTRICT AmbiOrderHFGain);
+
+#endif /* ALC_HRTF_H */
diff --git a/alc/inprogext.h b/alc/inprogext.h
new file mode 100644
index 00000000..15881b59
--- /dev/null
+++ b/alc/inprogext.h
@@ -0,0 +1,92 @@
+#ifndef INPROGEXT_H
+#define INPROGEXT_H
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "AL/alext.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef ALC_SOFT_loopback_bformat
+#define ALC_SOFT_loopback_bformat 1
+#define ALC_AMBISONIC_LAYOUT_SOFT 0x1997
+#define ALC_AMBISONIC_SCALING_SOFT 0x1998
+#define ALC_AMBISONIC_ORDER_SOFT 0x1999
+#define ALC_MAX_AMBISONIC_ORDER_SOFT 0x199B
+
+#define ALC_BFORMAT3D_SOFT 0x1508
+
+/* Ambisonic layouts */
+#define ALC_FUMA_SOFT 0x0000
+#define ALC_ACN_SOFT 0x0001
+
+/* Ambisonic scalings (normalization) */
+/*#define ALC_FUMA_SOFT*/
+#define ALC_SN3D_SOFT 0x0001
+#define ALC_N3D_SOFT 0x0002
+#endif
+
+#ifndef AL_SOFT_map_buffer
+#define AL_SOFT_map_buffer 1
+typedef unsigned int ALbitfieldSOFT;
+#define AL_MAP_READ_BIT_SOFT 0x00000001
+#define AL_MAP_WRITE_BIT_SOFT 0x00000002
+#define AL_MAP_PERSISTENT_BIT_SOFT 0x00000004
+#define AL_PRESERVE_DATA_BIT_SOFT 0x00000008
+typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
+typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
+typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
+typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length);
+#ifdef AL_ALEXT_PROTOTYPES
+AL_API void AL_APIENTRY alBufferStorageSOFT(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
+AL_API void* AL_APIENTRY alMapBufferSOFT(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
+AL_API void AL_APIENTRY alUnmapBufferSOFT(ALuint buffer);
+AL_API void AL_APIENTRY alFlushMappedBufferSOFT(ALuint buffer, ALsizei offset, ALsizei length);
+#endif
+#endif
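+
+/* Minimal usage sketch; the format, size, and flags are illustrative, not
+ * mandated by this header:
+ *
+ *   alBufferStorageSOFT(buffer, AL_FORMAT_MONO16, NULL, size, freq,
+ *       AL_MAP_WRITE_BIT_SOFT);
+ *   ALvoid *ptr = alMapBufferSOFT(buffer, 0, size, AL_MAP_WRITE_BIT_SOFT);
+ *   ... fill ptr with sample data ...
+ *   alUnmapBufferSOFT(buffer);
+ */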
+
+#ifndef AL_SOFT_events
+#define AL_SOFT_events 1
+#define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x1220
+#define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x1221
+#define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x1222
+#define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x1223
+#define AL_EVENT_TYPE_ERROR_SOFT 0x1224
+#define AL_EVENT_TYPE_PERFORMANCE_SOFT 0x1225
+#define AL_EVENT_TYPE_DEPRECATED_SOFT 0x1226
+#define AL_EVENT_TYPE_DISCONNECTED_SOFT 0x1227
+typedef void (AL_APIENTRY*ALEVENTPROCSOFT)(ALenum eventType, ALuint object, ALuint param,
+ ALsizei length, const ALchar *message,
+ void *userParam);
+typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count, const ALenum *types, ALboolean enable);
+typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback, void *userParam);
+typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname);
+typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
+#ifdef AL_ALEXT_PROTOTYPES
+AL_API void AL_APIENTRY alEventControlSOFT(ALsizei count, const ALenum *types, ALboolean enable);
+AL_API void AL_APIENTRY alEventCallbackSOFT(ALEVENTPROCSOFT callback, void *userParam);
+AL_API void* AL_APIENTRY alGetPointerSOFT(ALenum pname);
+AL_API void AL_APIENTRY alGetPointervSOFT(ALenum pname, void **values);
+#endif
+#endif
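+
+/* Minimal usage sketch; the callback body and chosen event types are
+ * illustrative:
+ *
+ *   void AL_APIENTRY event_cb(ALenum type, ALuint object, ALuint param,
+ *       ALsizei length, const ALchar *message, void *user)
+ *   { ... handle the event ... }
+ *
+ *   const ALenum types[2] = {AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
+ *       AL_EVENT_TYPE_DISCONNECTED_SOFT};
+ *   alEventCallbackSOFT(event_cb, NULL);
+ *   alEventControlSOFT(2, types, AL_TRUE);
+ */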
+
+#ifndef AL_SOFT_buffer_layers
+#define AL_SOFT_buffer_layers 1
+typedef void (AL_APIENTRY*LPALSOURCEQUEUEBUFFERLAYERSSOFT)(ALuint src, ALsizei nb, const ALuint *buffers);
+#ifdef AL_ALEXT_PROTOTYPES
+AL_API void AL_APIENTRY alSourceQueueBufferLayersSOFT(ALuint src, ALsizei nb, const ALuint *buffers);
+#endif
+#endif
+
+#ifndef AL_SOFT_effect_chain
+#define AL_SOFT_effect_chain 1
+#define AL_EFFECTSLOT_TARGET_SOFT 0xf000
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* INPROGEXT_H */
diff --git a/alc/logging.h b/alc/logging.h
new file mode 100644
index 00000000..0bb0c87b
--- /dev/null
+++ b/alc/logging.h
@@ -0,0 +1,64 @@
+#ifndef LOGGING_H
+#define LOGGING_H
+
+#include <stdio.h>
+
+#include "opthelpers.h"
+
+
+#ifdef __GNUC__
+#define DECL_FORMAT(x, y, z) __attribute__((format(x, (y), (z))))
+#else
+#define DECL_FORMAT(x, y, z)
+#endif
+
+
+extern FILE *gLogFile;
+
+void al_print(FILE *logfile, const char *fmt, ...) DECL_FORMAT(printf, 2,3);
+#if !defined(_WIN32)
+#define AL_PRINT(T, ...) fprintf(gLogFile, "AL lib: " T " " __VA_ARGS__)
+#else
+#define AL_PRINT(T, ...) al_print(gLogFile, "AL lib: " T " " __VA_ARGS__)
+#endif
+
+#ifdef __ANDROID__
+#include <android/log.h>
+#define LOG_ANDROID(T, ...) __android_log_print(T, "openal", "AL lib: " __VA_ARGS__)
+#else
+#define LOG_ANDROID(T, ...) ((void)0)
+#endif
+
+enum LogLevel {
+ NoLog,
+ LogError,
+ LogWarning,
+ LogTrace,
+ LogRef
+};
+extern LogLevel gLogLevel;
+
+#define TRACEREF(...) do { \
+ if(UNLIKELY(gLogLevel >= LogRef)) \
+ AL_PRINT("(--)", __VA_ARGS__); \
+} while(0)
+
+#define TRACE(...) do { \
+ if(UNLIKELY(gLogLevel >= LogTrace)) \
+ AL_PRINT("(II)", __VA_ARGS__); \
+ LOG_ANDROID(ANDROID_LOG_DEBUG, __VA_ARGS__); \
+} while(0)
+
+#define WARN(...) do { \
+ if(UNLIKELY(gLogLevel >= LogWarning)) \
+ AL_PRINT("(WW)", __VA_ARGS__); \
+ LOG_ANDROID(ANDROID_LOG_WARN, __VA_ARGS__); \
+} while(0)
+
+#define ERR(...) do { \
+ if(UNLIKELY(gLogLevel >= LogError)) \
+ AL_PRINT("(EE)", __VA_ARGS__); \
+ LOG_ANDROID(ANDROID_LOG_ERROR, __VA_ARGS__); \
+} while(0)
+
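+/* Usage sketch: these expand to printf-style calls, e.g.
+ *
+ *   TRACE("Loading %s...\n", name);
+ *   WARN("Failed to find default HRTF \"%s\"\n", hrtfname);
+ *
+ * which only produce output when gLogLevel is at least LogTrace or
+ * LogWarning respectively (the Android log entry is emitted regardless of
+ * gLogLevel).
+ */
+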
+#endif /* LOGGING_H */
diff --git a/alc/mastering.cpp b/alc/mastering.cpp
new file mode 100644
index 00000000..551fdcdf
--- /dev/null
+++ b/alc/mastering.cpp
@@ -0,0 +1,479 @@
+#include "config.h"
+
+#include <cmath>
+#include <limits>
+#include <algorithm>
+#include <functional>
+
+#include "mastering.h"
+#include "alu.h"
+#include "almalloc.h"
+#include "math_defs.h"
+
+
+/* These structures assume BUFFERSIZE is a power of 2. */
+static_assert((BUFFERSIZE & (BUFFERSIZE-1)) == 0, "BUFFERSIZE is not a power of 2");
+
+struct SlidingHold {
+ alignas(16) ALfloat mValues[BUFFERSIZE];
+ ALsizei mExpiries[BUFFERSIZE];
+ ALsizei mLowerIndex;
+ ALsizei mUpperIndex;
+ ALsizei mLength;
+};
+
+
+namespace {
+
+using namespace std::placeholders;
+
+/* This sliding hold follows the input level with an instant attack and a
+ * fixed duration hold before an instant release to the next highest level.
+ * It is a sliding window maximum (descending maxima) implementation based on
+ * Richard Harter's ascending minima algorithm available at:
+ *
+ * http://www.richardhartersworld.com/cri/2001/slidingmin.html
+ */
+ALfloat UpdateSlidingHold(SlidingHold *Hold, const ALsizei i, const ALfloat in)
+{
+ static constexpr ALsizei mask{BUFFERSIZE - 1};
+ const ALsizei length{Hold->mLength};
+ ALfloat (&values)[BUFFERSIZE] = Hold->mValues;
+ ALsizei (&expiries)[BUFFERSIZE] = Hold->mExpiries;
+ ALsizei lowerIndex{Hold->mLowerIndex};
+ ALsizei upperIndex{Hold->mUpperIndex};
+
+ ASSUME(upperIndex >= 0);
+ ASSUME(lowerIndex >= 0);
+
+ if(i >= expiries[upperIndex])
+ upperIndex = (upperIndex + 1) & mask;
+
+ if(in >= values[upperIndex])
+ {
+ values[upperIndex] = in;
+ expiries[upperIndex] = i + length;
+ lowerIndex = upperIndex;
+ }
+ else
+ {
+ do {
+ do {
+ if(!(in >= values[lowerIndex]))
+ goto found_place;
+ } while(lowerIndex--);
+ lowerIndex = mask;
+ } while(1);
+ found_place:
+
+ lowerIndex = (lowerIndex + 1) & mask;
+ values[lowerIndex] = in;
+ expiries[lowerIndex] = i + length;
+ }
+
+ Hold->mLowerIndex = lowerIndex;
+ Hold->mUpperIndex = upperIndex;
+
+ return values[upperIndex];
+}
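+
+/* Worked example with a hold length of 3: feeding the levels 5, 2, 4, 1, 1, 1
+ * through UpdateSlidingHold yields 5, 5, 5, 4, 4, 1; the 5 is held for three
+ * samples, then released to the 4 whose own hold window is still open.
+ */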
+
+void ShiftSlidingHold(SlidingHold *Hold, const ALsizei n)
+{
+ ASSUME(Hold->mUpperIndex >= 0);
+ ASSUME(Hold->mLowerIndex >= 0);
+
+ auto exp_begin = std::begin(Hold->mExpiries) + Hold->mUpperIndex;
+ auto exp_last = std::begin(Hold->mExpiries) + Hold->mLowerIndex;
+ if(exp_last < exp_begin)
+ {
+ std::transform(exp_begin, std::end(Hold->mExpiries), exp_begin,
+ std::bind(std::minus<ALsizei>{}, _1, n));
+ exp_begin = std::begin(Hold->mExpiries);
+ }
+ std::transform(exp_begin, exp_last+1, exp_begin, std::bind(std::minus<ALsizei>{}, _1, n));
+}
+
+
+/* Multichannel compression is linked via the absolute maximum of all
+ * channels.
+ */
+void LinkChannels(Compressor *Comp, const ALsizei SamplesToDo, const FloatBufferLine *OutBuffer)
+{
+ const ALsizei index{Comp->mLookAhead};
+ const ALuint numChans{Comp->mNumChans};
+
+ ASSUME(SamplesToDo > 0);
+ ASSUME(numChans > 0);
+ ASSUME(index >= 0);
+
+ auto side_begin = std::begin(Comp->mSideChain) + index;
+ std::fill(side_begin, side_begin+SamplesToDo, 0.0f);
+
+ auto fill_max = [SamplesToDo,side_begin](const FloatBufferLine &input) -> void
+ {
+ const ALfloat *RESTRICT buffer{al::assume_aligned<16>(input.data())};
+ auto max_abs = std::bind(maxf, _1, std::bind(static_cast<float(&)(float)>(std::fabs), _2));
+ std::transform(side_begin, side_begin+SamplesToDo, buffer, side_begin, max_abs);
+ };
+ std::for_each(OutBuffer, OutBuffer+numChans, fill_max);
+}
+
+/* This calculates the squared crest factor of the control signal for the
+ * basic automation of the attack/release times. As suggested by the paper,
+ * it uses an instantaneous squared peak detector and a squared RMS detector,
+ * both with 200ms release times.
+ */
+static void CrestDetector(Compressor *Comp, const ALsizei SamplesToDo)
+{
+ const ALfloat a_crest{Comp->mCrestCoeff};
+ const ALsizei index{Comp->mLookAhead};
+ ALfloat y2_peak{Comp->mLastPeakSq};
+ ALfloat y2_rms{Comp->mLastRmsSq};
+
+ ASSUME(SamplesToDo > 0);
+ ASSUME(index >= 0);
+
+ auto calc_crest = [&y2_rms,&y2_peak,a_crest](const ALfloat x_abs) noexcept -> ALfloat
+ {
+ ALfloat x2 = maxf(0.000001f, x_abs * x_abs);
+
+ y2_peak = maxf(x2, lerp(x2, y2_peak, a_crest));
+ y2_rms = lerp(x2, y2_rms, a_crest);
+ return y2_peak / y2_rms;
+ };
+ auto side_begin = std::begin(Comp->mSideChain) + index;
+ std::transform(side_begin, side_begin+SamplesToDo, std::begin(Comp->mCrestFactor), calc_crest);
+
+ Comp->mLastPeakSq = y2_peak;
+ Comp->mLastRmsSq = y2_rms;
+}
+
+/* The side-chain starts with a simple peak detector (based on the absolute
+ * value of the incoming signal) and performs most of its operations in the
+ * log domain.
+ */
+void PeakDetector(Compressor *Comp, const ALsizei SamplesToDo)
+{
+ const ALsizei index{Comp->mLookAhead};
+
+ ASSUME(SamplesToDo > 0);
+ ASSUME(index >= 0);
+
+ /* Clamp the minimum amplitude to near-zero and convert to logarithm. */
+ auto side_begin = std::begin(Comp->mSideChain) + index;
+ std::transform(side_begin, side_begin+SamplesToDo, side_begin,
+ std::bind(static_cast<float(&)(float)>(std::log), std::bind(maxf, 0.000001f, _1)));
+}
+
+/* An optional hold can be used to extend the peak detector so it can more
+ * solidly detect fast transients. This is best used when operating as a
+ * limiter.
+ */
+void PeakHoldDetector(Compressor *Comp, const ALsizei SamplesToDo)
+{
+ const ALsizei index{Comp->mLookAhead};
+
+ ASSUME(SamplesToDo > 0);
+ ASSUME(index >= 0);
+
+ SlidingHold *hold{Comp->mHold};
+ ALsizei i{0};
+ auto detect_peak = [&i,hold](const ALfloat x_abs) -> ALfloat
+ {
+ const ALfloat x_G{std::log(maxf(0.000001f, x_abs))};
+ return UpdateSlidingHold(hold, i++, x_G);
+ };
+ auto side_begin = std::begin(Comp->mSideChain) + index;
+ std::transform(side_begin, side_begin+SamplesToDo, side_begin, detect_peak);
+
+ ShiftSlidingHold(hold, SamplesToDo);
+}
+
+/* This is the heart of the feed-forward compressor. It operates in the log
+ * domain (to better match human hearing) and can apply some basic automation
+ * to knee width, attack/release times, make-up/post gain, and clipping
+ * reduction.
+ */
+void GainCompressor(Compressor *Comp, const ALsizei SamplesToDo)
+{
+ const bool autoKnee{Comp->mAuto.Knee};
+ const bool autoAttack{Comp->mAuto.Attack};
+ const bool autoRelease{Comp->mAuto.Release};
+ const bool autoPostGain{Comp->mAuto.PostGain};
+ const bool autoDeclip{Comp->mAuto.Declip};
+ const ALsizei lookAhead{Comp->mLookAhead};
+ const ALfloat threshold{Comp->mThreshold};
+ const ALfloat slope{Comp->mSlope};
+ const ALfloat attack{Comp->mAttack};
+ const ALfloat release{Comp->mRelease};
+ const ALfloat c_est{Comp->mGainEstimate};
+ const ALfloat a_adp{Comp->mAdaptCoeff};
+ const ALfloat (&crestFactor)[BUFFERSIZE] = Comp->mCrestFactor;
+ ALfloat (&sideChain)[BUFFERSIZE*2] = Comp->mSideChain;
+ ALfloat postGain{Comp->mPostGain};
+ ALfloat knee{Comp->mKnee};
+ ALfloat t_att{attack};
+ ALfloat t_rel{release - attack};
+ ALfloat a_att{std::exp(-1.0f / t_att)};
+ ALfloat a_rel{std::exp(-1.0f / t_rel)};
+ ALfloat y_1{Comp->mLastRelease};
+ ALfloat y_L{Comp->mLastAttack};
+ ALfloat c_dev{Comp->mLastGainDev};
+
+ ASSUME(SamplesToDo > 0);
+ ASSUME(lookAhead >= 0);
+
+ for(ALsizei i{0};i < SamplesToDo;i++)
+ {
+ if(autoKnee)
+ knee = maxf(0.0f, 2.5f * (c_dev + c_est));
+ const ALfloat knee_h{0.5f * knee};
+
+ /* This is the gain computer. It applies a static compression curve
+ * to the control signal.
+ */
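+        /* With W being the knee width (all values in the log domain), the
+         * curve below is
+         *   y_G = 0                         for x_over <= -W/2,
+         *   y_G = (x_over + W/2)^2 / (2*W)  for |x_over| <  W/2,
+         *   y_G = x_over                    for x_over >=  W/2.
+         */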
+ const ALfloat x_over{sideChain[lookAhead+i] - threshold};
+ const ALfloat y_G{
+ (x_over <= -knee_h) ? 0.0f :
+ (std::fabs(x_over) < knee_h) ? (x_over + knee_h) * (x_over + knee_h) / (2.0f * knee) :
+ x_over
+ };
+
+ const ALfloat y2_crest{crestFactor[i]};
+ if(autoAttack)
+ {
+ t_att = 2.0f*attack/y2_crest;
+ a_att = std::exp(-1.0f / t_att);
+ }
+ if(autoRelease)
+ {
+ t_rel = 2.0f*release/y2_crest - t_att;
+ a_rel = std::exp(-1.0f / t_rel);
+ }
+
+ /* Gain smoothing (ballistics) is done via a smooth decoupled peak
+ * detector. The attack time is subtracted from the release time
+ * above to compensate for the chained operating mode.
+ */
+ const ALfloat x_L{-slope * y_G};
+ y_1 = maxf(x_L, lerp(x_L, y_1, a_rel));
+ y_L = lerp(y_1, y_L, a_att);
+
+ /* Knee width and make-up gain automation make use of a smoothed
+ * measurement of deviation between the control signal and estimate.
+ * The estimate is also used to bias the measurement to hot-start its
+ * average.
+ */
+ c_dev = lerp(-(y_L+c_est), c_dev, a_adp);
+
+ if(autoPostGain)
+ {
+ /* Clipping reduction is only viable when make-up gain is being
+ * automated. It modifies the deviation to further attenuate the
+ * control signal when clipping is detected. The adaptation time
+ * is sufficiently long enough to suppress further clipping at the
+ * same output level.
+ */
+ if(autoDeclip)
+ c_dev = maxf(c_dev, sideChain[i] - y_L - threshold - c_est);
+
+ postGain = -(c_dev + c_est);
+ }
+
+ sideChain[i] = std::exp(postGain - y_L);
+ }
+
+ Comp->mLastRelease = y_1;
+ Comp->mLastAttack = y_L;
+ Comp->mLastGainDev = c_dev;
+}
+
+/* Combined with the hold time, a look-ahead delay can improve handling of
+ * fast transients by allowing the envelope time to converge prior to
+ * reaching the offending impulse. This is best used when operating as a
+ * limiter.
+ */
+void SignalDelay(Compressor *Comp, const ALsizei SamplesToDo, FloatBufferLine *OutBuffer)
+{
+ const ALuint numChans{Comp->mNumChans};
+ const ALsizei lookAhead{Comp->mLookAhead};
+
+ ASSUME(SamplesToDo > 0);
+ ASSUME(numChans > 0);
+ ASSUME(lookAhead > 0);
+
+ for(ALuint c{0};c < numChans;c++)
+ {
+ ALfloat *inout{al::assume_aligned<16>(OutBuffer[c].data())};
+ ALfloat *delaybuf{al::assume_aligned<16>(Comp->mDelay[c].data())};
+
+ auto inout_end = inout + SamplesToDo;
+ if(LIKELY(SamplesToDo >= lookAhead))
+ {
+ auto delay_end = std::rotate(inout, inout_end - lookAhead, inout_end);
+ std::swap_ranges(inout, delay_end, delaybuf);
+ }
+ else
+ {
+ auto delay_start = std::swap_ranges(inout, inout_end, delaybuf);
+ std::rotate(delaybuf, delay_start, delaybuf + lookAhead);
+ }
+ }
+}
+
+} // namespace
+
+/* The compressor is initialized with the following settings:
+ *
+ * NumChans - Number of channels to process.
+ * SampleRate - Sample rate to process.
+ * AutoKnee - Whether to automate the knee width parameter.
+ * AutoAttack - Whether to automate the attack time parameter.
+ * AutoRelease - Whether to automate the release time parameter.
+ * AutoPostGain - Whether to automate the make-up (post) gain parameter.
+ * AutoDeclip - Whether to automate clipping reduction. Ignored when
+ * not automating make-up gain.
+ * LookAheadTime - Look-ahead time (in seconds).
+ * HoldTime - Peak hold-time (in seconds).
+ * PreGainDb - Gain applied before detection (in dB).
+ * PostGainDb - Make-up gain applied after compression (in dB).
+ * ThresholdDb - Triggering threshold (in dB).
+ * Ratio - Compression ratio (x:1). Set to INFINITY for true
+ * limiting. Ignored when automating knee width.
+ * KneeDb - Knee width (in dB). Ignored when automating knee
+ * width.
+ * AttackTimeMin - Attack time (in seconds). Acts as a maximum when
+ * automating attack time.
+ * ReleaseTimeMin - Release time (in seconds). Acts as a maximum when
+ * automating release time.
+ */
+std::unique_ptr<Compressor> CompressorInit(const ALuint NumChans, const ALuint SampleRate,
+ const ALboolean AutoKnee, const ALboolean AutoAttack, const ALboolean AutoRelease,
+ const ALboolean AutoPostGain, const ALboolean AutoDeclip, const ALfloat LookAheadTime,
+ const ALfloat HoldTime, const ALfloat PreGainDb, const ALfloat PostGainDb,
+ const ALfloat ThresholdDb, const ALfloat Ratio, const ALfloat KneeDb, const ALfloat AttackTime,
+ const ALfloat ReleaseTime)
+{
+ const auto lookAhead = static_cast<ALsizei>(
+ clampf(std::round(LookAheadTime*SampleRate), 0.0f, BUFFERSIZE-1));
+ const auto hold = static_cast<ALsizei>(
+ clampf(std::round(HoldTime*SampleRate), 0.0f, BUFFERSIZE-1));
+
+ size_t size{sizeof(Compressor)};
+ if(lookAhead > 0)
+ {
+ size += sizeof(*Compressor::mDelay) * NumChans;
+ /* The sliding hold implementation doesn't handle a length of 1. A 1-
+         * sample hold is useless anyway, as it would only ever give back what
+         * was just given to it.
+ */
+ if(hold > 1)
+ size += sizeof(*Compressor::mHold);
+ }
+
+ auto Comp = std::unique_ptr<Compressor>{new (al_calloc(16, size)) Compressor{}};
+ Comp->mNumChans = NumChans;
+ Comp->mSampleRate = SampleRate;
+ Comp->mAuto.Knee = AutoKnee != AL_FALSE;
+ Comp->mAuto.Attack = AutoAttack != AL_FALSE;
+ Comp->mAuto.Release = AutoRelease != AL_FALSE;
+ Comp->mAuto.PostGain = AutoPostGain != AL_FALSE;
+ Comp->mAuto.Declip = AutoPostGain && AutoDeclip;
+ Comp->mLookAhead = lookAhead;
+ Comp->mPreGain = std::pow(10.0f, PreGainDb / 20.0f);
+ Comp->mPostGain = PostGainDb * std::log(10.0f) / 20.0f;
+ Comp->mThreshold = ThresholdDb * std::log(10.0f) / 20.0f;
+ Comp->mSlope = 1.0f / maxf(1.0f, Ratio) - 1.0f;
+ Comp->mKnee = maxf(0.0f, KneeDb * std::log(10.0f) / 20.0f);
+ Comp->mAttack = maxf(1.0f, AttackTime * SampleRate);
+ Comp->mRelease = maxf(1.0f, ReleaseTime * SampleRate);
+
+ /* Knee width automation actually treats the compressor as a limiter. By
+ * varying the knee width, it can effectively be seen as applying
+ * compression over a wide range of ratios.
+ */
+ if(AutoKnee)
+ Comp->mSlope = -1.0f;
+
+ if(lookAhead > 0)
+ {
+ if(hold > 1)
+ {
+ Comp->mHold = ::new (static_cast<void*>(Comp.get() + 1)) SlidingHold{};
+ Comp->mHold->mValues[0] = -std::numeric_limits<float>::infinity();
+ Comp->mHold->mExpiries[0] = hold;
+ Comp->mHold->mLength = hold;
+ Comp->mDelay = ::new (static_cast<void*>(Comp->mHold + 1)) FloatBufferLine[NumChans];
+ }
+ else
+ {
+ Comp->mDelay = ::new (static_cast<void*>(Comp.get() + 1)) FloatBufferLine[NumChans];
+ }
+ }
+
+ Comp->mCrestCoeff = std::exp(-1.0f / (0.200f * SampleRate)); // 200ms
+ Comp->mGainEstimate = Comp->mThreshold * -0.5f * Comp->mSlope;
+ Comp->mAdaptCoeff = std::exp(-1.0f / (2.0f * SampleRate)); // 2s
+
+ return Comp;
+}
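+
+/* Usage sketch (the parameter values are illustrative): a 2-channel, 48kHz
+ * look-ahead limiter with 5ms of look-ahead and hold:
+ *
+ *   auto Limiter = CompressorInit(2, 48000, AL_TRUE, AL_TRUE, AL_TRUE,
+ *       AL_TRUE, AL_TRUE, 0.005f, 0.005f, 0.0f, 0.0f, -0.05f, INFINITY,
+ *       0.0f, 0.020f, 0.200f);
+ *   ...
+ *   Limiter->process(SamplesToDo, OutBuffer);
+ */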
+
+Compressor::~Compressor()
+{
+ if(mHold)
+ al::destroy_at(mHold);
+ mHold = nullptr;
+ if(mDelay)
+ al::destroy_n(mDelay, mNumChans);
+ mDelay = nullptr;
+}
+
+
+void Compressor::process(const ALsizei SamplesToDo, FloatBufferLine *OutBuffer)
+{
+ const ALuint numChans{mNumChans};
+
+ ASSUME(SamplesToDo > 0);
+ ASSUME(numChans > 0);
+
+ const ALfloat preGain{mPreGain};
+ if(preGain != 1.0f)
+ {
+ auto apply_gain = [SamplesToDo,preGain](FloatBufferLine &input) noexcept -> void
+ {
+ ALfloat *buffer{al::assume_aligned<16>(input.data())};
+ std::transform(buffer, buffer+SamplesToDo, buffer,
+ std::bind(std::multiplies<float>{}, _1, preGain));
+ };
+ std::for_each(OutBuffer, OutBuffer+numChans, apply_gain);
+ }
+
+ LinkChannels(this, SamplesToDo, OutBuffer);
+
+ if(mAuto.Attack || mAuto.Release)
+ CrestDetector(this, SamplesToDo);
+
+ if(mHold)
+ PeakHoldDetector(this, SamplesToDo);
+ else
+ PeakDetector(this, SamplesToDo);
+
+ GainCompressor(this, SamplesToDo);
+
+ if(mDelay)
+ SignalDelay(this, SamplesToDo, OutBuffer);
+
+ const ALfloat (&sideChain)[BUFFERSIZE*2] = mSideChain;
+ auto apply_comp = [SamplesToDo,&sideChain](FloatBufferLine &input) noexcept -> void
+ {
+ ALfloat *buffer{al::assume_aligned<16>(input.data())};
+ const ALfloat *gains{al::assume_aligned<16>(&sideChain[0])};
+ std::transform(gains, gains+SamplesToDo, buffer, buffer,
+ std::bind(std::multiplies<float>{}, _1, _2));
+ };
+ std::for_each(OutBuffer, OutBuffer+numChans, apply_comp);
+
+ ASSUME(mLookAhead >= 0);
+ auto side_begin = std::begin(mSideChain) + SamplesToDo;
+ std::copy(side_begin, side_begin+mLookAhead, std::begin(mSideChain));
+}
diff --git a/alc/mastering.h b/alc/mastering.h
new file mode 100644
index 00000000..34dc8dcb
--- /dev/null
+++ b/alc/mastering.h
@@ -0,0 +1,104 @@
+#ifndef MASTERING_H
+#define MASTERING_H
+
+#include <memory>
+
+#include "AL/al.h"
+
+#include "almalloc.h"
+/* For FloatBufferLine/BUFFERSIZE. */
+#include "alcmain.h"
+
+
+struct SlidingHold;
+
+/* General topology and basic automation was based on the following paper:
+ *
+ * D. Giannoulis, M. Massberg and J. D. Reiss,
+ * "Parameter Automation in a Dynamic Range Compressor,"
+ * Journal of the Audio Engineering Society, v61 (10), Oct. 2013
+ *
+ * Available (along with supplemental reading) at:
+ *
+ * http://c4dm.eecs.qmul.ac.uk/audioengineering/compressors/
+ */
+struct Compressor {
+ ALuint mNumChans{0u};
+ ALuint mSampleRate{0u};
+
+ struct {
+ bool Knee : 1;
+ bool Attack : 1;
+ bool Release : 1;
+ bool PostGain : 1;
+ bool Declip : 1;
+ } mAuto{};
+
+ ALsizei mLookAhead{0};
+
+ ALfloat mPreGain{0.0f};
+ ALfloat mPostGain{0.0f};
+
+ ALfloat mThreshold{0.0f};
+ ALfloat mSlope{0.0f};
+ ALfloat mKnee{0.0f};
+
+ ALfloat mAttack{0.0f};
+ ALfloat mRelease{0.0f};
+
+ alignas(16) ALfloat mSideChain[2*BUFFERSIZE]{};
+ alignas(16) ALfloat mCrestFactor[BUFFERSIZE]{};
+
+ SlidingHold *mHold{nullptr};
+ FloatBufferLine *mDelay{nullptr};
+
+ ALfloat mCrestCoeff{0.0f};
+ ALfloat mGainEstimate{0.0f};
+ ALfloat mAdaptCoeff{0.0f};
+
+ ALfloat mLastPeakSq{0.0f};
+ ALfloat mLastRmsSq{0.0f};
+ ALfloat mLastRelease{0.0f};
+ ALfloat mLastAttack{0.0f};
+ ALfloat mLastGainDev{0.0f};
+
+
+ ~Compressor();
+ void process(const ALsizei SamplesToDo, FloatBufferLine *OutBuffer);
+ ALsizei getLookAhead() const noexcept { return mLookAhead; }
+
+ DEF_PLACE_NEWDEL()
+};
+
+/* The compressor is initialized with the following settings:
+ *
+ * NumChans - Number of channels to process.
+ * SampleRate - Sample rate to process.
+ * AutoKnee - Whether to automate the knee width parameter.
+ * AutoAttack - Whether to automate the attack time parameter.
+ * AutoRelease - Whether to automate the release time parameter.
+ * AutoPostGain - Whether to automate the make-up (post) gain parameter.
+ * AutoDeclip - Whether to automate clipping reduction. Ignored when
+ * not automating make-up gain.
+ * LookAheadTime - Look-ahead time (in seconds).
+ * HoldTime - Peak hold-time (in seconds).
+ * PreGainDb - Gain applied before detection (in dB).
+ * PostGainDb - Make-up gain applied after compression (in dB).
+ * ThresholdDb - Triggering threshold (in dB).
+ * Ratio          - Compression ratio (x:1). Set to INFINITY for true
+ * limiting. Ignored when automating knee width.
+ * KneeDb - Knee width (in dB). Ignored when automating knee
+ * width.
+ * AttackTimeMin - Attack time (in seconds). Acts as a maximum when
+ * automating attack time.
+ * ReleaseTimeMin - Release time (in seconds). Acts as a maximum when
+ * automating release time.
+ */
+std::unique_ptr<Compressor> CompressorInit(const ALuint NumChans, const ALuint SampleRate,
+ const ALboolean AutoKnee, const ALboolean AutoAttack, const ALboolean AutoRelease,
+ const ALboolean AutoPostGain, const ALboolean AutoDeclip, const ALfloat LookAheadTime,
+ const ALfloat HoldTime, const ALfloat PreGainDb, const ALfloat PostGainDb,
+ const ALfloat ThresholdDb, const ALfloat Ratio, const ALfloat KneeDb, const ALfloat AttackTime,
+ const ALfloat ReleaseTime);
+
+#endif /* MASTERING_H */
diff --git a/alc/mixer/defs.h b/alc/mixer/defs.h
new file mode 100644
index 00000000..3e5d1125
--- /dev/null
+++ b/alc/mixer/defs.h
@@ -0,0 +1,59 @@
+#ifndef MIXER_DEFS_H
+#define MIXER_DEFS_H
+
+#include "AL/alc.h"
+#include "AL/al.h"
+
+#include "alcmain.h"
+#include "alu.h"
+#include "alspan.h"
+
+
+struct MixGains;
+struct MixHrtfFilter;
+struct HrtfState;
+struct DirectHrtfState;
+
+
+struct CTag { };
+struct SSETag { };
+struct SSE2Tag { };
+struct SSE3Tag { };
+struct SSE4Tag { };
+struct NEONTag { };
+
+struct CopyTag { };
+struct PointTag { };
+struct LerpTag { };
+struct CubicTag { };
+struct BSincTag { };
+
+template<typename TypeTag, typename InstTag>
+const ALfloat *Resample_(const InterpState *state, const ALfloat *RESTRICT src, ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen);
+
+template<typename InstTag>
+void Mix_(const ALfloat *data, const al::span<FloatBufferLine> OutBuffer, ALfloat *CurrentGains, const ALfloat *TargetGains, const ALsizei Counter, const ALsizei OutPos, const ALsizei BufferSize);
+template<typename InstTag>
+void MixRow_(FloatBufferLine &OutBuffer, const ALfloat *Gains, const al::span<const FloatBufferLine> InSamples, const ALsizei InPos, const ALsizei BufferSize);
+
+template<typename InstTag>
+void MixHrtf_(FloatBufferLine &LeftOut, FloatBufferLine &RightOut, const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize, MixHrtfFilter *hrtfparams, const ALsizei BufferSize);
+template<typename InstTag>
+void MixHrtfBlend_(FloatBufferLine &LeftOut, FloatBufferLine &RightOut, const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize, const HrtfFilter *oldparams, MixHrtfFilter *newparams, const ALsizei BufferSize);
+template<typename InstTag>
+void MixDirectHrtf_(FloatBufferLine &LeftOut, FloatBufferLine &RightOut, const al::span<const FloatBufferLine> InSamples, float2 *AccumSamples, DirectHrtfState *State, const ALsizei BufferSize);
+
+/* Vectorized resampler helpers */
+inline void InitiatePositionArrays(ALsizei frac, ALint increment, ALsizei *RESTRICT frac_arr, ALsizei *RESTRICT pos_arr, ALsizei size)
+{
+ pos_arr[0] = 0;
+ frac_arr[0] = frac;
+ for(ALsizei i{1};i < size;i++)
+ {
+ ALint frac_tmp = frac_arr[i-1] + increment;
+ pos_arr[i] = pos_arr[i-1] + (frac_tmp>>FRACTIONBITS);
+ frac_arr[i] = frac_tmp&FRACTIONMASK;
+ }
+}
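+
+/* For example, with frac=0 and an increment corresponding to 1.5 samples, the
+ * four lanes come out as pos_arr = {0, 1, 3, 4} and
+ * frac_arr = {0, FRACTIONONE/2, 0, FRACTIONONE/2}.
+ */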
+
+#endif /* MIXER_DEFS_H */
diff --git a/alc/mixer/hrtfbase.h b/alc/mixer/hrtfbase.h
new file mode 100644
index 00000000..a76bd62e
--- /dev/null
+++ b/alc/mixer/hrtfbase.h
@@ -0,0 +1,138 @@
+#ifndef MIXER_HRTFBASE_H
+#define MIXER_HRTFBASE_H
+
+#include <algorithm>
+
+#include "alu.h"
+#include "../hrtf.h"
+#include "opthelpers.h"
+
+
+using ApplyCoeffsT = void(ALsizei Offset, float2 *RESTRICT Values, const ALsizei irSize,
+ const HrirArray<ALfloat> &Coeffs, const ALfloat left, const ALfloat right);
+
+template<ApplyCoeffsT &ApplyCoeffs>
+inline void MixHrtfBase(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *RESTRICT AccumSamples, const ALsizei OutPos,
+ const ALsizei IrSize, MixHrtfFilter *hrtfparams, const ALsizei BufferSize)
+{
+ ASSUME(OutPos >= 0);
+ ASSUME(IrSize >= 4);
+ ASSUME(BufferSize > 0);
+
+ const auto &Coeffs = *hrtfparams->Coeffs;
+ const ALfloat gainstep{hrtfparams->GainStep};
+ const ALfloat gain{hrtfparams->Gain};
+
+ ALsizei Delay[2]{
+ HRTF_HISTORY_LENGTH - hrtfparams->Delay[0],
+ HRTF_HISTORY_LENGTH - hrtfparams->Delay[1] };
+ ASSUME(Delay[0] >= 0 && Delay[1] >= 0);
+ ALfloat stepcount{0.0f};
+ for(ALsizei i{0};i < BufferSize;++i)
+ {
+ const ALfloat g{gain + gainstep*stepcount};
+ const ALfloat left{InSamples[Delay[0]++] * g};
+ const ALfloat right{InSamples[Delay[1]++] * g};
+ ApplyCoeffs(i, AccumSamples+i, IrSize, Coeffs, left, right);
+
+ stepcount += 1.0f;
+ }
+
+ for(ALsizei i{0};i < BufferSize;++i)
+ LeftOut[OutPos+i] += AccumSamples[i][0];
+ for(ALsizei i{0};i < BufferSize;++i)
+ RightOut[OutPos+i] += AccumSamples[i][1];
+
+ hrtfparams->Gain = gain + gainstep*stepcount;
+}
+
+template<ApplyCoeffsT &ApplyCoeffs>
+inline void MixHrtfBlendBase(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *RESTRICT AccumSamples, const ALsizei OutPos,
+ const ALsizei IrSize, const HrtfFilter *oldparams, MixHrtfFilter *newparams,
+ const ALsizei BufferSize)
+{
+ const auto &OldCoeffs = oldparams->Coeffs;
+ const ALfloat oldGain{oldparams->Gain};
+ const ALfloat oldGainStep{-oldGain / static_cast<ALfloat>(BufferSize)};
+ const auto &NewCoeffs = *newparams->Coeffs;
+ const ALfloat newGainStep{newparams->GainStep};
+
+ ASSUME(OutPos >= 0);
+ ASSUME(IrSize >= 4);
+ ASSUME(BufferSize > 0);
+
+ ALsizei Delay[2]{
+ HRTF_HISTORY_LENGTH - oldparams->Delay[0],
+ HRTF_HISTORY_LENGTH - oldparams->Delay[1] };
+ ASSUME(Delay[0] >= 0 && Delay[1] >= 0);
+ ALfloat stepcount{0.0f};
+ for(ALsizei i{0};i < BufferSize;++i)
+ {
+ const ALfloat g{oldGain + oldGainStep*stepcount};
+ const ALfloat left{InSamples[Delay[0]++] * g};
+ const ALfloat right{InSamples[Delay[1]++] * g};
+ ApplyCoeffs(i, AccumSamples+i, IrSize, OldCoeffs, left, right);
+
+ stepcount += 1.0f;
+ }
+
+ Delay[0] = HRTF_HISTORY_LENGTH - newparams->Delay[0];
+ Delay[1] = HRTF_HISTORY_LENGTH - newparams->Delay[1];
+ ASSUME(Delay[0] >= 0 && Delay[1] >= 0);
+ stepcount = 0.0f;
+ for(ALsizei i{0};i < BufferSize;++i)
+ {
+ const ALfloat g{newGainStep*stepcount};
+ const ALfloat left{InSamples[Delay[0]++] * g};
+ const ALfloat right{InSamples[Delay[1]++] * g};
+ ApplyCoeffs(i, AccumSamples+i, IrSize, NewCoeffs, left, right);
+
+ stepcount += 1.0f;
+ }
+
+ for(ALsizei i{0};i < BufferSize;++i)
+ LeftOut[OutPos+i] += AccumSamples[i][0];
+ for(ALsizei i{0};i < BufferSize;++i)
+ RightOut[OutPos+i] += AccumSamples[i][1];
+
+ newparams->Gain = newGainStep*stepcount;
+}
+
+template<ApplyCoeffsT &ApplyCoeffs>
+inline void MixDirectHrtfBase(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const al::span<const FloatBufferLine> InSamples, float2 *RESTRICT AccumSamples,
+ DirectHrtfState *State, const ALsizei BufferSize)
+{
+ ASSUME(BufferSize > 0);
+
+ const ALsizei IrSize{State->IrSize};
+ ASSUME(IrSize >= 4);
+
+ auto chanstate = State->Chan.begin();
+ for(const FloatBufferLine &input : InSamples)
+ {
+ const auto &Coeffs = chanstate->Coeffs;
+
+ auto accum_iter = std::copy_n(chanstate->Values.begin(),
+ chanstate->Values.size(), AccumSamples);
+ std::fill_n(accum_iter, BufferSize, float2{});
+
+ for(ALsizei i{0};i < BufferSize;++i)
+ {
+ const ALfloat insample{input[i]};
+ ApplyCoeffs(i, AccumSamples+i, IrSize, Coeffs, insample, insample);
+ }
+ for(ALsizei i{0};i < BufferSize;++i)
+ LeftOut[i] += AccumSamples[i][0];
+ for(ALsizei i{0};i < BufferSize;++i)
+ RightOut[i] += AccumSamples[i][1];
+
+ std::copy_n(AccumSamples + BufferSize, chanstate->Values.size(),
+ chanstate->Values.begin());
+ ++chanstate;
+ }
+}
+
+#endif /* MIXER_HRTFBASE_H */
diff --git a/alc/mixer/mixer_c.cpp b/alc/mixer/mixer_c.cpp
new file mode 100644
index 00000000..47c4a6f4
--- /dev/null
+++ b/alc/mixer/mixer_c.cpp
@@ -0,0 +1,208 @@
+#include "config.h"
+
+#include <cassert>
+
+#include <limits>
+
+#include "alcmain.h"
+#include "alu.h"
+#include "alSource.h"
+#include "alAuxEffectSlot.h"
+#include "defs.h"
+#include "hrtfbase.h"
+
+
+namespace {
+
+inline ALfloat do_point(const InterpState&, const ALfloat *RESTRICT vals, const ALsizei)
+{ return vals[0]; }
+inline ALfloat do_lerp(const InterpState&, const ALfloat *RESTRICT vals, const ALsizei frac)
+{ return lerp(vals[0], vals[1], frac * (1.0f/FRACTIONONE)); }
+inline ALfloat do_cubic(const InterpState&, const ALfloat *RESTRICT vals, const ALsizei frac)
+{ return cubic(vals[0], vals[1], vals[2], vals[3], frac * (1.0f/FRACTIONONE)); }
+inline ALfloat do_bsinc(const InterpState &istate, const ALfloat *RESTRICT vals, const ALsizei frac)
+{
+ ASSUME(istate.bsinc.m > 0);
+
+ // Calculate the phase index and factor.
+#define FRAC_PHASE_BITDIFF (FRACTIONBITS-BSINC_PHASE_BITS)
+ const ALsizei pi{frac >> FRAC_PHASE_BITDIFF};
+ const ALfloat pf{(frac & ((1<<FRAC_PHASE_BITDIFF)-1)) * (1.0f/(1<<FRAC_PHASE_BITDIFF))};
+#undef FRAC_PHASE_BITDIFF
+
+ const ALfloat *fil{istate.bsinc.filter + istate.bsinc.m*pi*4};
+ const ALfloat *scd{fil + istate.bsinc.m};
+ const ALfloat *phd{scd + istate.bsinc.m};
+ const ALfloat *spd{phd + istate.bsinc.m};
+
+ // Apply the scale and phase interpolated filter.
+ ALfloat r{0.0f};
+ for(ALsizei j_f{0};j_f < istate.bsinc.m;j_f++)
+ r += (fil[j_f] + istate.bsinc.sf*scd[j_f] + pf*(phd[j_f] + istate.bsinc.sf*spd[j_f])) * vals[j_f];
+ return r;
+}
+
+using SamplerT = ALfloat(const InterpState&, const ALfloat*RESTRICT, const ALsizei);
+template<SamplerT &Sampler>
+const ALfloat *DoResample(const InterpState *state, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei numsamples)
+{
+ ASSUME(numsamples > 0);
+ ASSUME(increment > 0);
+ ASSUME(frac >= 0);
+
+ const InterpState istate{*state};
+ auto proc_sample = [&src,&frac,istate,increment]() -> ALfloat
+ {
+ const ALfloat ret{Sampler(istate, src, frac)};
+
+ frac += increment;
+ src += frac>>FRACTIONBITS;
+ frac &= FRACTIONMASK;
+
+ return ret;
+ };
+ std::generate_n(dst, numsamples, proc_sample);
+
+ return dst;
+}
+
+} // namespace
+
+template<>
+const ALfloat *Resample_<CopyTag,CTag>(const InterpState*, const ALfloat *RESTRICT src, ALsizei,
+ ALint, ALfloat *RESTRICT dst, ALsizei dstlen)
+{
+ ASSUME(dstlen > 0);
+#if defined(HAVE_SSE) || defined(HAVE_NEON)
+ /* Avoid copying the source data if it's aligned like the destination. */
+ if((reinterpret_cast<intptr_t>(src)&15) == (reinterpret_cast<intptr_t>(dst)&15))
+ return src;
+#endif
+ std::copy_n(src, dstlen, dst);
+ return dst;
+}
+
+template<>
+const ALfloat *Resample_<PointTag,CTag>(const InterpState *state, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen)
+{ return DoResample<do_point>(state, src, frac, increment, dst, dstlen); }
+
+template<>
+const ALfloat *Resample_<LerpTag,CTag>(const InterpState *state, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen)
+{ return DoResample<do_lerp>(state, src, frac, increment, dst, dstlen); }
+
+template<>
+const ALfloat *Resample_<CubicTag,CTag>(const InterpState *state, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen)
+{ return DoResample<do_cubic>(state, src-1, frac, increment, dst, dstlen); }
+
+template<>
+const ALfloat *Resample_<BSincTag,CTag>(const InterpState *state, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen)
+{ return DoResample<do_bsinc>(state, src-state->bsinc.l, frac, increment, dst, dstlen); }
+
+
+static inline void ApplyCoeffs(ALsizei /*Offset*/, float2 *RESTRICT Values, const ALsizei IrSize,
+ const HrirArray<ALfloat> &Coeffs, const ALfloat left, const ALfloat right)
+{
+ ASSUME(IrSize >= 2);
+ for(ALsizei c{0};c < IrSize;++c)
+ {
+ Values[c][0] += Coeffs[c][0] * left;
+ Values[c][1] += Coeffs[c][1] * right;
+ }
+}
+
+template<>
+void MixHrtf_<CTag>(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize,
+ MixHrtfFilter *hrtfparams, const ALsizei BufferSize)
+{
+ MixHrtfBase<ApplyCoeffs>(LeftOut, RightOut, InSamples, AccumSamples, OutPos, IrSize,
+ hrtfparams, BufferSize);
+}
+
+template<>
+void MixHrtfBlend_<CTag>(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize,
+ const HrtfFilter *oldparams, MixHrtfFilter *newparams, const ALsizei BufferSize)
+{
+ MixHrtfBlendBase<ApplyCoeffs>(LeftOut, RightOut, InSamples, AccumSamples, OutPos, IrSize,
+ oldparams, newparams, BufferSize);
+}
+
+template<>
+void MixDirectHrtf_<CTag>(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const al::span<const FloatBufferLine> InSamples, float2 *AccumSamples, DirectHrtfState *State,
+ const ALsizei BufferSize)
+{
+ MixDirectHrtfBase<ApplyCoeffs>(LeftOut, RightOut, InSamples, AccumSamples, State, BufferSize);
+}
+
+
+template<>
+void Mix_<CTag>(const ALfloat *data, const al::span<FloatBufferLine> OutBuffer,
+ ALfloat *CurrentGains, const ALfloat *TargetGains, const ALsizei Counter, const ALsizei OutPos,
+ const ALsizei BufferSize)
+{
+ ASSUME(BufferSize > 0);
+
+ const ALfloat delta{(Counter > 0) ? 1.0f / static_cast<ALfloat>(Counter) : 0.0f};
+ for(FloatBufferLine &output : OutBuffer)
+ {
+ ALfloat *RESTRICT dst{output.data()+OutPos};
+ ALfloat gain{*CurrentGains};
+ const ALfloat diff{*TargetGains - gain};
+
+ ALsizei pos{0};
+ if(std::fabs(diff) > std::numeric_limits<float>::epsilon())
+ {
+ ALsizei minsize{mini(BufferSize, Counter)};
+ const ALfloat step{diff * delta};
+ ALfloat step_count{0.0f};
+ for(;pos < minsize;pos++)
+ {
+ dst[pos] += data[pos] * (gain + step*step_count);
+ step_count += 1.0f;
+ }
+ if(pos == Counter)
+ gain = *TargetGains;
+ else
+ gain += step*step_count;
+ *CurrentGains = gain;
+ }
+ ++CurrentGains;
+ ++TargetGains;
+
+ if(!(std::fabs(gain) > GAIN_SILENCE_THRESHOLD))
+ continue;
+ for(;pos < BufferSize;pos++)
+ dst[pos] += data[pos]*gain;
+ }
+}
+
+/* Basically the inverse of the above. Rather than one input going to multiple
+ * outputs (each with its own gain), it's multiple inputs (each with its own
+ * gain) going to one output. This applies one row (vs one column) of a matrix
+ * transform. And as the matrices are more or less static once set up, no
+ * stepping is necessary.
+ */
+template<>
+void MixRow_<CTag>(FloatBufferLine &OutBuffer, const ALfloat *Gains,
+ const al::span<const FloatBufferLine> InSamples, const ALsizei InPos, const ALsizei BufferSize)
+{
+ ASSUME(BufferSize > 0);
+
+ for(const FloatBufferLine &input : InSamples)
+ {
+ const ALfloat *RESTRICT src{input.data()+InPos};
+ const ALfloat gain{*(Gains++)};
+ if(!(std::fabs(gain) > GAIN_SILENCE_THRESHOLD))
+ continue;
+
+ for(ALsizei i{0};i < BufferSize;i++)
+ OutBuffer[i] += src[i] * gain;
+ }
+}
diff --git a/alc/mixer/mixer_neon.cpp b/alc/mixer/mixer_neon.cpp
new file mode 100644
index 00000000..fa487d97
--- /dev/null
+++ b/alc/mixer/mixer_neon.cpp
@@ -0,0 +1,307 @@
+#include "config.h"
+
+#include <arm_neon.h>
+
+#include <limits>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "alcmain.h"
+#include "alu.h"
+#include "hrtf.h"
+#include "defs.h"
+#include "hrtfbase.h"
+
+
+
+template<>
+const ALfloat *Resample_<LerpTag,NEONTag>(const InterpState*, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen)
+{
+ const int32x4_t increment4 = vdupq_n_s32(increment*4);
+ const float32x4_t fracOne4 = vdupq_n_f32(1.0f/FRACTIONONE);
+ const int32x4_t fracMask4 = vdupq_n_s32(FRACTIONMASK);
+ alignas(16) ALsizei pos_[4], frac_[4];
+ int32x4_t pos4, frac4;
+ ALsizei todo, pos, i;
+
+ ASSUME(frac >= 0);
+ ASSUME(increment > 0);
+ ASSUME(dstlen > 0);
+
+ InitiatePositionArrays(frac, increment, frac_, pos_, 4);
+ frac4 = vld1q_s32(frac_);
+ pos4 = vld1q_s32(pos_);
+
+ todo = dstlen & ~3;
+ for(i = 0;i < todo;i += 4)
+ {
+ const int pos0 = vgetq_lane_s32(pos4, 0);
+ const int pos1 = vgetq_lane_s32(pos4, 1);
+ const int pos2 = vgetq_lane_s32(pos4, 2);
+ const int pos3 = vgetq_lane_s32(pos4, 3);
+ const float32x4_t val1 = (float32x4_t){src[pos0], src[pos1], src[pos2], src[pos3]};
+ const float32x4_t val2 = (float32x4_t){src[pos0+1], src[pos1+1], src[pos2+1], src[pos3+1]};
+
+ /* val1 + (val2-val1)*mu */
+ const float32x4_t r0 = vsubq_f32(val2, val1);
+ const float32x4_t mu = vmulq_f32(vcvtq_f32_s32(frac4), fracOne4);
+ const float32x4_t out = vmlaq_f32(val1, mu, r0);
+
+ vst1q_f32(&dst[i], out);
+
+ frac4 = vaddq_s32(frac4, increment4);
+ pos4 = vaddq_s32(pos4, vshrq_n_s32(frac4, FRACTIONBITS));
+ frac4 = vandq_s32(frac4, fracMask4);
+ }
+
+ /* NOTE: These four elements represent the position *after* the last four
+ * samples, so the lowest element is the next position to resample.
+ */
+ pos = vgetq_lane_s32(pos4, 0);
+ frac = vgetq_lane_s32(frac4, 0);
+
+ for(;i < dstlen;++i)
+ {
+ dst[i] = lerp(src[pos], src[pos+1], frac * (1.0f/FRACTIONONE));
+
+ frac += increment;
+ pos += frac>>FRACTIONBITS;
+ frac &= FRACTIONMASK;
+ }
+ return dst;
+}
+
+template<>
+const ALfloat *Resample_<BSincTag,NEONTag>(const InterpState *state, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen)
+{
+ const ALfloat *const filter = state->bsinc.filter;
+ const float32x4_t sf4 = vdupq_n_f32(state->bsinc.sf);
+ const ALsizei m = state->bsinc.m;
+ const float32x4_t *fil, *scd, *phd, *spd;
+ ALsizei pi, i, j, offset;
+ float32x4_t r4;
+ ALfloat pf;
+
+ ASSUME(m > 0);
+ ASSUME(dstlen > 0);
+ ASSUME(increment > 0);
+ ASSUME(frac >= 0);
+
+ src -= state->bsinc.l;
+ for(i = 0;i < dstlen;i++)
+ {
+ // Calculate the phase index and factor.
+#define FRAC_PHASE_BITDIFF (FRACTIONBITS-BSINC_PHASE_BITS)
+ pi = frac >> FRAC_PHASE_BITDIFF;
+ pf = (frac & ((1<<FRAC_PHASE_BITDIFF)-1)) * (1.0f/(1<<FRAC_PHASE_BITDIFF));
+#undef FRAC_PHASE_BITDIFF
+
+ offset = m*pi*4;
+ fil = (const float32x4_t*)(filter + offset); offset += m;
+ scd = (const float32x4_t*)(filter + offset); offset += m;
+ phd = (const float32x4_t*)(filter + offset); offset += m;
+ spd = (const float32x4_t*)(filter + offset);
+
+ // Apply the scale and phase interpolated filter.
+ r4 = vdupq_n_f32(0.0f);
+ {
+ const ALsizei count = m >> 2;
+ const float32x4_t pf4 = vdupq_n_f32(pf);
+
+ ASSUME(count > 0);
+
+ for(j = 0;j < count;j++)
+ {
+ /* f = ((fil + sf*scd) + pf*(phd + sf*spd)) */
+ const float32x4_t f4 = vmlaq_f32(
+ vmlaq_f32(fil[j], sf4, scd[j]),
+ pf4, vmlaq_f32(phd[j], sf4, spd[j])
+ );
+ /* r += f*src */
+ r4 = vmlaq_f32(r4, f4, vld1q_f32(&src[j*4]));
+ }
+ }
+ r4 = vaddq_f32(r4, vcombine_f32(vrev64_f32(vget_high_f32(r4)),
+ vrev64_f32(vget_low_f32(r4))));
+ dst[i] = vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
+
+ frac += increment;
+ src += frac>>FRACTIONBITS;
+ frac &= FRACTIONMASK;
+ }
+ return dst;
+}
+
+
+static inline void ApplyCoeffs(ALsizei /*Offset*/, float2 *RESTRICT Values, const ALsizei IrSize,
+ const HrirArray<ALfloat> &Coeffs, const ALfloat left, const ALfloat right)
+{
+ ASSUME(IrSize >= 2);
+
+ float32x4_t leftright4;
+ {
+ float32x2_t leftright2 = vdup_n_f32(0.0);
+ leftright2 = vset_lane_f32(left, leftright2, 0);
+ leftright2 = vset_lane_f32(right, leftright2, 1);
+ leftright4 = vcombine_f32(leftright2, leftright2);
+ }
+
+ for(ALsizei c{0};c < IrSize;c += 2)
+ {
+ float32x4_t vals = vcombine_f32(vld1_f32((float32_t*)&Values[c ][0]),
+ vld1_f32((float32_t*)&Values[c+1][0]));
+ float32x4_t coefs = vld1q_f32((float32_t*)&Coeffs[c][0]);
+
+ vals = vmlaq_f32(vals, coefs, leftright4);
+
+ vst1_f32((float32_t*)&Values[c ][0], vget_low_f32(vals));
+ vst1_f32((float32_t*)&Values[c+1][0], vget_high_f32(vals));
+ }
+}
+
+template<>
+void MixHrtf_<NEONTag>(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize,
+ MixHrtfFilter *hrtfparams, const ALsizei BufferSize)
+{
+ MixHrtfBase<ApplyCoeffs>(LeftOut, RightOut, InSamples, AccumSamples, OutPos, IrSize,
+ hrtfparams, BufferSize);
+}
+
+template<>
+void MixHrtfBlend_<NEONTag>(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize,
+ const HrtfFilter *oldparams, MixHrtfFilter *newparams, const ALsizei BufferSize)
+{
+ MixHrtfBlendBase<ApplyCoeffs>(LeftOut, RightOut, InSamples, AccumSamples, OutPos, IrSize,
+ oldparams, newparams, BufferSize);
+}
+
+template<>
+void MixDirectHrtf_<NEONTag>(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const al::span<const FloatBufferLine> InSamples, float2 *AccumSamples, DirectHrtfState *State,
+ const ALsizei BufferSize)
+{
+ MixDirectHrtfBase<ApplyCoeffs>(LeftOut, RightOut, InSamples, AccumSamples, State, BufferSize);
+}
+
+
+template<>
+void Mix_<NEONTag>(const ALfloat *data, const al::span<FloatBufferLine> OutBuffer,
+ ALfloat *CurrentGains, const ALfloat *TargetGains, const ALsizei Counter, const ALsizei OutPos,
+ const ALsizei BufferSize)
+{
+ ASSUME(BufferSize > 0);
+
+ const ALfloat delta{(Counter > 0) ? 1.0f/(ALfloat)Counter : 0.0f};
+ for(FloatBufferLine &output : OutBuffer)
+ {
+ ALfloat *RESTRICT dst{al::assume_aligned<16>(output.data()+OutPos)};
+ ALfloat gain{*CurrentGains};
+ const ALfloat diff{*TargetGains - gain};
+
+ ALsizei pos{0};
+ if(std::fabs(diff) > std::numeric_limits<float>::epsilon())
+ {
+ ALsizei minsize{mini(BufferSize, Counter)};
+ const ALfloat step{diff * delta};
+ ALfloat step_count{0.0f};
+ /* Mix with applying gain steps in aligned multiples of 4. */
+ if(LIKELY(minsize > 3))
+ {
+ const float32x4_t four4{vdupq_n_f32(4.0f)};
+ const float32x4_t step4{vdupq_n_f32(step)};
+ const float32x4_t gain4{vdupq_n_f32(gain)};
+ float32x4_t step_count4{vsetq_lane_f32(0.0f,
+ vsetq_lane_f32(1.0f,
+ vsetq_lane_f32(2.0f,
+ vsetq_lane_f32(3.0f, vdupq_n_f32(0.0f), 3),
+ 2), 1), 0
+ )};
+ ALsizei todo{minsize >> 2};
+
+ do {
+ const float32x4_t val4 = vld1q_f32(&data[pos]);
+ float32x4_t dry4 = vld1q_f32(&dst[pos]);
+ dry4 = vmlaq_f32(dry4, val4, vmlaq_f32(gain4, step4, step_count4));
+ step_count4 = vaddq_f32(step_count4, four4);
+ vst1q_f32(&dst[pos], dry4);
+ pos += 4;
+ } while(--todo);
+ /* NOTE: step_count4 now represents the next four counts after
+ * the last four mixed samples, so the lowest element
+ * represents the next step count to apply.
+ */
+ step_count = vgetq_lane_f32(step_count4, 0);
+ }
+ /* Mix with applying left over gain steps that aren't aligned multiples of 4. */
+ for(;pos < minsize;pos++)
+ {
+ dst[pos] += data[pos]*(gain + step*step_count);
+ step_count += 1.0f;
+ }
+ if(pos == Counter)
+ gain = *TargetGains;
+ else
+ gain += step*step_count;
+ *CurrentGains = gain;
+
+ /* Mix until pos is aligned with 4 or the mix is done. */
+ minsize = mini(BufferSize, (pos+3)&~3);
+ for(;pos < minsize;pos++)
+ dst[pos] += data[pos]*gain;
+ }
+ ++CurrentGains;
+ ++TargetGains;
+
+ if(!(std::fabs(gain) > GAIN_SILENCE_THRESHOLD))
+ continue;
+ if(LIKELY(BufferSize-pos > 3))
+ {
+ ALsizei todo{(BufferSize-pos) >> 2};
+ const float32x4_t gain4 = vdupq_n_f32(gain);
+ do {
+ const float32x4_t val4 = vld1q_f32(&data[pos]);
+ float32x4_t dry4 = vld1q_f32(&dst[pos]);
+ dry4 = vmlaq_f32(dry4, val4, gain4);
+ vst1q_f32(&dst[pos], dry4);
+ pos += 4;
+ } while(--todo);
+ }
+ for(;pos < BufferSize;pos++)
+ dst[pos] += data[pos]*gain;
+ }
+}
+
+template<>
+void MixRow_<NEONTag>(FloatBufferLine &OutBuffer, const ALfloat *Gains,
+ const al::span<const FloatBufferLine> InSamples, const ALsizei InPos, const ALsizei BufferSize)
+{
+ ASSUME(BufferSize > 0);
+
+ for(const FloatBufferLine &input : InSamples)
+ {
+ const ALfloat *RESTRICT src{al::assume_aligned<16>(input.data()+InPos)};
+ const ALfloat gain{*(Gains++)};
+ if(!(std::fabs(gain) > GAIN_SILENCE_THRESHOLD))
+ continue;
+
+ ALsizei pos{0};
+ if(LIKELY(BufferSize > 3))
+ {
+ ALsizei todo{BufferSize >> 2};
+ float32x4_t gain4{vdupq_n_f32(gain)};
+ do {
+ const float32x4_t val4 = vld1q_f32(&src[pos]);
+ float32x4_t dry4 = vld1q_f32(&OutBuffer[pos]);
+ dry4 = vmlaq_f32(dry4, val4, gain4);
+ vst1q_f32(&OutBuffer[pos], dry4);
+ pos += 4;
+ } while(--todo);
+ }
+ for(;pos < BufferSize;pos++)
+ OutBuffer[pos] += src[pos]*gain;
+ }
+}
diff --git a/alc/mixer/mixer_sse.cpp b/alc/mixer/mixer_sse.cpp
new file mode 100644
index 00000000..b763fdbd
--- /dev/null
+++ b/alc/mixer/mixer_sse.cpp
@@ -0,0 +1,262 @@
+#include "config.h"
+
+#include <xmmintrin.h>
+
+#include <limits>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "alcmain.h"
+#include "alu.h"
+
+#include "alSource.h"
+#include "alAuxEffectSlot.h"
+#include "defs.h"
+#include "hrtfbase.h"
+
+
+template<>
+const ALfloat *Resample_<BSincTag,SSETag>(const InterpState *state, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen)
+{
+ const ALfloat *const filter{state->bsinc.filter};
+ const __m128 sf4{_mm_set1_ps(state->bsinc.sf)};
+ const ALsizei m{state->bsinc.m};
+
+ ASSUME(m > 0);
+ ASSUME(dstlen > 0);
+ ASSUME(increment > 0);
+ ASSUME(frac >= 0);
+
+ src -= state->bsinc.l;
+ for(ALsizei i{0};i < dstlen;i++)
+ {
+ // Calculate the phase index and factor.
+#define FRAC_PHASE_BITDIFF (FRACTIONBITS-BSINC_PHASE_BITS)
+ const ALsizei pi{frac >> FRAC_PHASE_BITDIFF};
+ const ALfloat pf{(frac & ((1<<FRAC_PHASE_BITDIFF)-1)) * (1.0f/(1<<FRAC_PHASE_BITDIFF))};
+#undef FRAC_PHASE_BITDIFF
+
+ ALsizei offset{m*pi*4};
+ const __m128 *fil{reinterpret_cast<const __m128*>(filter + offset)}; offset += m;
+ const __m128 *scd{reinterpret_cast<const __m128*>(filter + offset)}; offset += m;
+ const __m128 *phd{reinterpret_cast<const __m128*>(filter + offset)}; offset += m;
+ const __m128 *spd{reinterpret_cast<const __m128*>(filter + offset)};
+
+ // Apply the scale and phase interpolated filter.
+ __m128 r4{_mm_setzero_ps()};
+ {
+ const ALsizei count{m >> 2};
+ const __m128 pf4{_mm_set1_ps(pf)};
+
+ ASSUME(count > 0);
+
+#define MLA4(x, y, z) _mm_add_ps(x, _mm_mul_ps(y, z))
+ for(ALsizei j{0};j < count;j++)
+ {
+ /* f = ((fil + sf*scd) + pf*(phd + sf*spd)) */
+ const __m128 f4 = MLA4(
+ MLA4(fil[j], sf4, scd[j]),
+ pf4, MLA4(phd[j], sf4, spd[j])
+ );
+ /* r += f*src */
+ r4 = MLA4(r4, f4, _mm_loadu_ps(&src[j*4]));
+ }
+#undef MLA4
+ }
+ r4 = _mm_add_ps(r4, _mm_shuffle_ps(r4, r4, _MM_SHUFFLE(0, 1, 2, 3)));
+ r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));
+ dst[i] = _mm_cvtss_f32(r4);
+
+ frac += increment;
+ src += frac>>FRACTIONBITS;
+ frac &= FRACTIONMASK;
+ }
+ return dst;
+}
+
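+/* For reference, a scalar sketch of the filter evaluation above (an
+ * illustration only, not code from this file), where fil/scd/phd/spd denote
+ * the same four filter rows viewed as plain floats:
+ *
+ *     ALfloat out{0.0f};
+ *     for(ALsizei j{0};j < m;++j)
+ *         out += ((fil[j] + sf*scd[j]) + pf*(phd[j] + sf*spd[j])) * src[j];
+ *     dst[i] = out;
+ *
+ * The SSE loop computes four taps per iteration, and the final shuffle/movehl
+ * pair horizontally adds the four partial sums into dst[i].
+ */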
+
+static inline void ApplyCoeffs(ALsizei Offset, float2 *RESTRICT Values, const ALsizei IrSize,
+ const HrirArray<ALfloat> &Coeffs, const ALfloat left, const ALfloat right)
+{
+ const __m128 lrlr{_mm_setr_ps(left, right, left, right)};
+
+ ASSUME(IrSize >= 2);
+
+ if((Offset&1))
+ {
+ __m128 imp0, imp1;
+ __m128 coeffs{_mm_load_ps(&Coeffs[0][0])};
+ __m128 vals{_mm_loadl_pi(_mm_setzero_ps(), reinterpret_cast<__m64*>(&Values[0][0]))};
+ imp0 = _mm_mul_ps(lrlr, coeffs);
+ vals = _mm_add_ps(imp0, vals);
+ _mm_storel_pi(reinterpret_cast<__m64*>(&Values[0][0]), vals);
+ ALsizei i{1};
+ for(;i < IrSize-1;i += 2)
+ {
+ coeffs = _mm_load_ps(&Coeffs[i+1][0]);
+ vals = _mm_load_ps(&Values[i][0]);
+ imp1 = _mm_mul_ps(lrlr, coeffs);
+ imp0 = _mm_shuffle_ps(imp0, imp1, _MM_SHUFFLE(1, 0, 3, 2));
+ vals = _mm_add_ps(imp0, vals);
+ _mm_store_ps(&Values[i][0], vals);
+ imp0 = imp1;
+ }
+ vals = _mm_loadl_pi(vals, reinterpret_cast<__m64*>(&Values[i][0]));
+ imp0 = _mm_movehl_ps(imp0, imp0);
+ vals = _mm_add_ps(imp0, vals);
+ _mm_storel_pi(reinterpret_cast<__m64*>(&Values[i][0]), vals);
+ }
+ else
+ {
+ for(ALsizei i{0};i < IrSize;i += 2)
+ {
+ __m128 coeffs{_mm_load_ps(&Coeffs[i][0])};
+ __m128 vals{_mm_load_ps(&Values[i][0])};
+ vals = _mm_add_ps(vals, _mm_mul_ps(lrlr, coeffs));
+ _mm_store_ps(&Values[i][0], vals);
+ }
+ }
+}
+
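+/* For reference, ApplyCoeffs above accumulates one left/right input pair into
+ * the HRIR accumulation buffer; a scalar sketch (illustration only) would be:
+ *
+ *     for(ALsizei i{0};i < IrSize;++i)
+ *     {
+ *         Values[i][0] += Coeffs[i][0]*left;
+ *         Values[i][1] += Coeffs[i][1]*right;
+ *     }
+ *
+ * The two branches only seem to differ in alignment handling: with an odd
+ * Offset, Values[0] is not 16-byte aligned, so the first pair is handled with
+ * a 64-bit load/store and the imp0/imp1 shuffle realigns the products so the
+ * remaining accesses stay aligned.
+ */
+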
+template<>
+void MixHrtf_<SSETag>(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize,
+ MixHrtfFilter *hrtfparams, const ALsizei BufferSize)
+{
+ MixHrtfBase<ApplyCoeffs>(LeftOut, RightOut, InSamples, AccumSamples, OutPos, IrSize,
+ hrtfparams, BufferSize);
+}
+
+template<>
+void MixHrtfBlend_<SSETag>(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const ALfloat *InSamples, float2 *AccumSamples, const ALsizei OutPos, const ALsizei IrSize,
+ const HrtfFilter *oldparams, MixHrtfFilter *newparams, const ALsizei BufferSize)
+{
+ MixHrtfBlendBase<ApplyCoeffs>(LeftOut, RightOut, InSamples, AccumSamples, OutPos, IrSize,
+ oldparams, newparams, BufferSize);
+}
+
+template<>
+void MixDirectHrtf_<SSETag>(FloatBufferLine &LeftOut, FloatBufferLine &RightOut,
+ const al::span<const FloatBufferLine> InSamples, float2 *AccumSamples, DirectHrtfState *State,
+ const ALsizei BufferSize)
+{
+ MixDirectHrtfBase<ApplyCoeffs>(LeftOut, RightOut, InSamples, AccumSamples, State, BufferSize);
+}
+
+
+template<>
+void Mix_<SSETag>(const ALfloat *data, const al::span<FloatBufferLine> OutBuffer,
+ ALfloat *CurrentGains, const ALfloat *TargetGains, const ALsizei Counter, const ALsizei OutPos,
+ const ALsizei BufferSize)
+{
+ ASSUME(BufferSize > 0);
+
+ const ALfloat delta{(Counter > 0) ? 1.0f / static_cast<ALfloat>(Counter) : 0.0f};
+ for(FloatBufferLine &output : OutBuffer)
+ {
+ ALfloat *RESTRICT dst{al::assume_aligned<16>(output.data()+OutPos)};
+ ALfloat gain{*CurrentGains};
+ const ALfloat diff{*TargetGains - gain};
+
+ ALsizei pos{0};
+ if(std::fabs(diff) > std::numeric_limits<float>::epsilon())
+ {
+ ALsizei minsize{mini(BufferSize, Counter)};
+ const ALfloat step{diff * delta};
+ ALfloat step_count{0.0f};
+            /* Mix in aligned chunks of 4 samples, applying the per-sample gain steps. */
+ if(LIKELY(minsize > 3))
+ {
+ const __m128 four4{_mm_set1_ps(4.0f)};
+ const __m128 step4{_mm_set1_ps(step)};
+ const __m128 gain4{_mm_set1_ps(gain)};
+ __m128 step_count4{_mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f)};
+ ALsizei todo{minsize >> 2};
+ do {
+ const __m128 val4{_mm_load_ps(&data[pos])};
+ __m128 dry4{_mm_load_ps(&dst[pos])};
+#define MLA4(x, y, z) _mm_add_ps(x, _mm_mul_ps(y, z))
+ /* dry += val * (gain + step*step_count) */
+ dry4 = MLA4(dry4, val4, MLA4(gain4, step4, step_count4));
+#undef MLA4
+ _mm_store_ps(&dst[pos], dry4);
+ step_count4 = _mm_add_ps(step_count4, four4);
+ pos += 4;
+ } while(--todo);
+ /* NOTE: step_count4 now represents the next four counts after
+ * the last four mixed samples, so the lowest element
+ * represents the next step count to apply.
+ */
+ step_count = _mm_cvtss_f32(step_count4);
+ }
+            /* Mix the remaining samples individually, applying the gain steps left over from the aligned chunk of 4. */
+ for(;pos < minsize;pos++)
+ {
+ dst[pos] += data[pos]*(gain + step*step_count);
+ step_count += 1.0f;
+ }
+ if(pos == Counter)
+ gain = *TargetGains;
+ else
+ gain += step*step_count;
+ *CurrentGains = gain;
+
+ /* Mix until pos is aligned with 4 or the mix is done. */
+ minsize = mini(BufferSize, (pos+3)&~3);
+ for(;pos < minsize;pos++)
+ dst[pos] += data[pos]*gain;
+ }
+ ++CurrentGains;
+ ++TargetGains;
+
+ if(!(std::fabs(gain) > GAIN_SILENCE_THRESHOLD))
+ continue;
+ if(LIKELY(BufferSize-pos > 3))
+ {
+ ALsizei todo{(BufferSize-pos) >> 2};
+ const __m128 gain4{_mm_set1_ps(gain)};
+ do {
+ const __m128 val4{_mm_load_ps(&data[pos])};
+ __m128 dry4{_mm_load_ps(&dst[pos])};
+ dry4 = _mm_add_ps(dry4, _mm_mul_ps(val4, gain4));
+ _mm_store_ps(&dst[pos], dry4);
+ pos += 4;
+ } while(--todo);
+ }
+ for(;pos < BufferSize;pos++)
+ dst[pos] += data[pos]*gain;
+ }
+}
+
+template<>
+void MixRow_<SSETag>(FloatBufferLine &OutBuffer, const ALfloat *Gains,
+ const al::span<const FloatBufferLine> InSamples, const ALsizei InPos, const ALsizei BufferSize)
+{
+ ASSUME(BufferSize > 0);
+
+ for(const FloatBufferLine &input : InSamples)
+ {
+ const ALfloat *RESTRICT src{al::assume_aligned<16>(input.data()+InPos)};
+ const ALfloat gain{*(Gains++)};
+ if(!(std::fabs(gain) > GAIN_SILENCE_THRESHOLD))
+ continue;
+
+ ALsizei pos{0};
+ if(LIKELY(BufferSize > 3))
+ {
+ ALsizei todo{BufferSize >> 2};
+ const __m128 gain4 = _mm_set1_ps(gain);
+ do {
+ const __m128 val4{_mm_load_ps(&src[pos])};
+ __m128 dry4{_mm_load_ps(&OutBuffer[pos])};
+ dry4 = _mm_add_ps(dry4, _mm_mul_ps(val4, gain4));
+ _mm_store_ps(&OutBuffer[pos], dry4);
+ pos += 4;
+ } while(--todo);
+ }
+ for(;pos < BufferSize;pos++)
+ OutBuffer[pos] += src[pos]*gain;
+ }
+}
diff --git a/alc/mixer/mixer_sse2.cpp b/alc/mixer/mixer_sse2.cpp
new file mode 100644
index 00000000..b5d00106
--- /dev/null
+++ b/alc/mixer/mixer_sse2.cpp
@@ -0,0 +1,84 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2014 by Timothy Arceri <[email protected]>.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <xmmintrin.h>
+#include <emmintrin.h>
+
+#include "alu.h"
+#include "defs.h"
+
+
+template<>
+const ALfloat *Resample_<LerpTag,SSE2Tag>(const InterpState*, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen)
+{
+ const __m128i increment4{_mm_set1_epi32(increment*4)};
+ const __m128 fracOne4{_mm_set1_ps(1.0f/FRACTIONONE)};
+ const __m128i fracMask4{_mm_set1_epi32(FRACTIONMASK)};
+
+ ASSUME(frac > 0);
+ ASSUME(increment > 0);
+ ASSUME(dstlen >= 0);
+
+ alignas(16) ALsizei pos_[4], frac_[4];
+ InitiatePositionArrays(frac, increment, frac_, pos_, 4);
+ __m128i frac4{_mm_setr_epi32(frac_[0], frac_[1], frac_[2], frac_[3])};
+ __m128i pos4{_mm_setr_epi32(pos_[0], pos_[1], pos_[2], pos_[3])};
+
+ const ALsizei todo{dstlen & ~3};
+ for(ALsizei i{0};i < todo;i += 4)
+ {
+ const int pos0{_mm_cvtsi128_si32(_mm_shuffle_epi32(pos4, _MM_SHUFFLE(0, 0, 0, 0)))};
+ const int pos1{_mm_cvtsi128_si32(_mm_shuffle_epi32(pos4, _MM_SHUFFLE(1, 1, 1, 1)))};
+ const int pos2{_mm_cvtsi128_si32(_mm_shuffle_epi32(pos4, _MM_SHUFFLE(2, 2, 2, 2)))};
+ const int pos3{_mm_cvtsi128_si32(_mm_shuffle_epi32(pos4, _MM_SHUFFLE(3, 3, 3, 3)))};
+ const __m128 val1{_mm_setr_ps(src[pos0 ], src[pos1 ], src[pos2 ], src[pos3 ])};
+ const __m128 val2{_mm_setr_ps(src[pos0+1], src[pos1+1], src[pos2+1], src[pos3+1])};
+
+ /* val1 + (val2-val1)*mu */
+ const __m128 r0{_mm_sub_ps(val2, val1)};
+ const __m128 mu{_mm_mul_ps(_mm_cvtepi32_ps(frac4), fracOne4)};
+ const __m128 out{_mm_add_ps(val1, _mm_mul_ps(mu, r0))};
+
+ _mm_store_ps(&dst[i], out);
+
+ frac4 = _mm_add_epi32(frac4, increment4);
+ pos4 = _mm_add_epi32(pos4, _mm_srli_epi32(frac4, FRACTIONBITS));
+ frac4 = _mm_and_si128(frac4, fracMask4);
+ }
+
+ /* NOTE: These four elements represent the position *after* the last four
+ * samples, so the lowest element is the next position to resample.
+ */
+ ALsizei pos{_mm_cvtsi128_si32(pos4)};
+ frac = _mm_cvtsi128_si32(frac4);
+
+ for(ALsizei i{todo};i < dstlen;++i)
+ {
+ dst[i] = lerp(src[pos], src[pos+1], frac * (1.0f/FRACTIONONE));
+
+ frac += increment;
+ pos += frac>>FRACTIONBITS;
+ frac &= FRACTIONMASK;
+ }
+ return dst;
+}
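+
+/* For reference (illustration only): each of the four SIMD lanes carries an
+ * independent (pos, frac) pair, pre-stepped by InitiatePositionArrays so that
+ * lane n effectively holds the state after n scalar advances of
+ *
+ *     frac += increment;
+ *     pos  += frac>>FRACTIONBITS;
+ *     frac &= FRACTIONMASK;
+ *
+ * Adding increment*4 per iteration keeps all four lanes in lock-step, which
+ * is why only the lowest lane needs to be read back for the scalar tail loop.
+ */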
diff --git a/alc/mixer/mixer_sse3.cpp b/alc/mixer/mixer_sse3.cpp
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/alc/mixer/mixer_sse3.cpp
diff --git a/alc/mixer/mixer_sse41.cpp b/alc/mixer/mixer_sse41.cpp
new file mode 100644
index 00000000..7efbda7b
--- /dev/null
+++ b/alc/mixer/mixer_sse41.cpp
@@ -0,0 +1,85 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 2014 by Timothy Arceri <[email protected]>.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <xmmintrin.h>
+#include <emmintrin.h>
+#include <smmintrin.h>
+
+#include "alu.h"
+#include "defs.h"
+
+
+template<>
+const ALfloat *Resample_<LerpTag,SSE4Tag>(const InterpState*, const ALfloat *RESTRICT src,
+ ALsizei frac, ALint increment, ALfloat *RESTRICT dst, ALsizei dstlen)
+{
+ const __m128i increment4{_mm_set1_epi32(increment*4)};
+ const __m128 fracOne4{_mm_set1_ps(1.0f/FRACTIONONE)};
+ const __m128i fracMask4{_mm_set1_epi32(FRACTIONMASK)};
+
+ ASSUME(frac > 0);
+ ASSUME(increment > 0);
+ ASSUME(dstlen >= 0);
+
+ alignas(16) ALsizei pos_[4], frac_[4];
+ InitiatePositionArrays(frac, increment, frac_, pos_, 4);
+ __m128i frac4{_mm_setr_epi32(frac_[0], frac_[1], frac_[2], frac_[3])};
+ __m128i pos4{_mm_setr_epi32(pos_[0], pos_[1], pos_[2], pos_[3])};
+
+ const ALsizei todo{dstlen & ~3};
+ for(ALsizei i{0};i < todo;i += 4)
+ {
+ const int pos0{_mm_extract_epi32(pos4, 0)};
+ const int pos1{_mm_extract_epi32(pos4, 1)};
+ const int pos2{_mm_extract_epi32(pos4, 2)};
+ const int pos3{_mm_extract_epi32(pos4, 3)};
+ const __m128 val1{_mm_setr_ps(src[pos0 ], src[pos1 ], src[pos2 ], src[pos3 ])};
+ const __m128 val2{_mm_setr_ps(src[pos0+1], src[pos1+1], src[pos2+1], src[pos3+1])};
+
+ /* val1 + (val2-val1)*mu */
+ const __m128 r0{_mm_sub_ps(val2, val1)};
+ const __m128 mu{_mm_mul_ps(_mm_cvtepi32_ps(frac4), fracOne4)};
+ const __m128 out{_mm_add_ps(val1, _mm_mul_ps(mu, r0))};
+
+ _mm_store_ps(&dst[i], out);
+
+ frac4 = _mm_add_epi32(frac4, increment4);
+ pos4 = _mm_add_epi32(pos4, _mm_srli_epi32(frac4, FRACTIONBITS));
+ frac4 = _mm_and_si128(frac4, fracMask4);
+ }
+
+ /* NOTE: These four elements represent the position *after* the last four
+ * samples, so the lowest element is the next position to resample.
+ */
+ ALsizei pos{_mm_cvtsi128_si32(pos4)};
+ frac = _mm_cvtsi128_si32(frac4);
+
+ for(ALsizei i{todo};i < dstlen;++i)
+ {
+ dst[i] = lerp(src[pos], src[pos+1], frac * (1.0f/FRACTIONONE));
+
+ frac += increment;
+ pos += frac>>FRACTIONBITS;
+ frac &= FRACTIONMASK;
+ }
+ return dst;
+}
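+
+/* Note (illustration only): this specialization appears identical to the SSE2
+ * variant above except for how the four positions are read back out of pos4.
+ * SSE4.1 can extract any lane directly:
+ *
+ *     const int pos1{_mm_extract_epi32(pos4, 1)};
+ *
+ * while the SSE2 path has to shuffle the lane into place first:
+ *
+ *     const int pos1{_mm_cvtsi128_si32(_mm_shuffle_epi32(pos4, _MM_SHUFFLE(1, 1, 1, 1)))};
+ */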
diff --git a/alc/mixvoice.cpp b/alc/mixvoice.cpp
new file mode 100644
index 00000000..be872f6d
--- /dev/null
+++ b/alc/mixvoice.cpp
@@ -0,0 +1,954 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <numeric>
+#include <string>
+#include <utility>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+
+#include "alBuffer.h"
+#include "alcmain.h"
+#include "alSource.h"
+#include "albyte.h"
+#include "alconfig.h"
+#include "alcontext.h"
+#include "alnumeric.h"
+#include "aloptional.h"
+#include "alspan.h"
+#include "alu.h"
+#include "cpu_caps.h"
+#include "filters/biquad.h"
+#include "filters/nfc.h"
+#include "filters/splitter.h"
+#include "hrtf.h"
+#include "inprogext.h"
+#include "logging.h"
+#include "mixer/defs.h"
+#include "opthelpers.h"
+#include "ringbuffer.h"
+#include "threads.h"
+#include "vector.h"
+
+
+static_assert((INT_MAX>>FRACTIONBITS)/MAX_PITCH > BUFFERSIZE,
+ "MAX_PITCH and/or BUFFERSIZE are too large for FRACTIONBITS!");
+
+/* BSinc24 requires up to 23 extra samples before the current position, and 24 after. */
+static_assert(MAX_RESAMPLE_PADDING >= 24, "MAX_RESAMPLE_PADDING must be at least 24!");
+
+
+Resampler ResamplerDefault = LinearResampler;
+
+MixerFunc MixSamples = Mix_<CTag>;
+RowMixerFunc MixRowSamples = MixRow_<CTag>;
+static HrtfMixerFunc MixHrtfSamples = MixHrtf_<CTag>;
+static HrtfMixerBlendFunc MixHrtfBlendSamples = MixHrtfBlend_<CTag>;
+
+static MixerFunc SelectMixer()
+{
+#ifdef HAVE_NEON
+ if((CPUCapFlags&CPU_CAP_NEON))
+ return Mix_<NEONTag>;
+#endif
+#ifdef HAVE_SSE
+ if((CPUCapFlags&CPU_CAP_SSE))
+ return Mix_<SSETag>;
+#endif
+ return Mix_<CTag>;
+}
+
+static RowMixerFunc SelectRowMixer()
+{
+#ifdef HAVE_NEON
+ if((CPUCapFlags&CPU_CAP_NEON))
+ return MixRow_<NEONTag>;
+#endif
+#ifdef HAVE_SSE
+ if((CPUCapFlags&CPU_CAP_SSE))
+ return MixRow_<SSETag>;
+#endif
+ return MixRow_<CTag>;
+}
+
+static inline HrtfMixerFunc SelectHrtfMixer()
+{
+#ifdef HAVE_NEON
+ if((CPUCapFlags&CPU_CAP_NEON))
+ return MixHrtf_<NEONTag>;
+#endif
+#ifdef HAVE_SSE
+ if((CPUCapFlags&CPU_CAP_SSE))
+ return MixHrtf_<SSETag>;
+#endif
+ return MixHrtf_<CTag>;
+}
+
+static inline HrtfMixerBlendFunc SelectHrtfBlendMixer()
+{
+#ifdef HAVE_NEON
+ if((CPUCapFlags&CPU_CAP_NEON))
+ return MixHrtfBlend_<NEONTag>;
+#endif
+#ifdef HAVE_SSE
+ if((CPUCapFlags&CPU_CAP_SSE))
+ return MixHrtfBlend_<SSETag>;
+#endif
+ return MixHrtfBlend_<CTag>;
+}
+
+ResamplerFunc SelectResampler(Resampler resampler)
+{
+ switch(resampler)
+ {
+ case PointResampler:
+ return Resample_<PointTag,CTag>;
+ case LinearResampler:
+#ifdef HAVE_NEON
+ if((CPUCapFlags&CPU_CAP_NEON))
+ return Resample_<LerpTag,NEONTag>;
+#endif
+#ifdef HAVE_SSE4_1
+ if((CPUCapFlags&CPU_CAP_SSE4_1))
+ return Resample_<LerpTag,SSE4Tag>;
+#endif
+#ifdef HAVE_SSE2
+ if((CPUCapFlags&CPU_CAP_SSE2))
+ return Resample_<LerpTag,SSE2Tag>;
+#endif
+ return Resample_<LerpTag,CTag>;
+ case FIR4Resampler:
+ return Resample_<CubicTag,CTag>;
+ case BSinc12Resampler:
+ case BSinc24Resampler:
+#ifdef HAVE_NEON
+ if((CPUCapFlags&CPU_CAP_NEON))
+ return Resample_<BSincTag,NEONTag>;
+#endif
+#ifdef HAVE_SSE
+ if((CPUCapFlags&CPU_CAP_SSE))
+ return Resample_<BSincTag,SSETag>;
+#endif
+ return Resample_<BSincTag,CTag>;
+ }
+
+ return Resample_<PointTag,CTag>;
+}
+
+
+void aluInitMixer()
+{
+ if(auto resopt = ConfigValueStr(nullptr, nullptr, "resampler"))
+ {
+ const char *str{resopt->c_str()};
+ if(strcasecmp(str, "point") == 0 || strcasecmp(str, "none") == 0)
+ ResamplerDefault = PointResampler;
+ else if(strcasecmp(str, "linear") == 0)
+ ResamplerDefault = LinearResampler;
+ else if(strcasecmp(str, "cubic") == 0)
+ ResamplerDefault = FIR4Resampler;
+ else if(strcasecmp(str, "bsinc12") == 0)
+ ResamplerDefault = BSinc12Resampler;
+ else if(strcasecmp(str, "bsinc24") == 0)
+ ResamplerDefault = BSinc24Resampler;
+ else if(strcasecmp(str, "bsinc") == 0)
+ {
+ WARN("Resampler option \"%s\" is deprecated, using bsinc12\n", str);
+ ResamplerDefault = BSinc12Resampler;
+ }
+ else if(strcasecmp(str, "sinc4") == 0 || strcasecmp(str, "sinc8") == 0)
+ {
+ WARN("Resampler option \"%s\" is deprecated, using cubic\n", str);
+ ResamplerDefault = FIR4Resampler;
+ }
+ else
+ {
+ char *end;
+ long n = strtol(str, &end, 0);
+ if(*end == '\0' && (n == PointResampler || n == LinearResampler || n == FIR4Resampler))
+ ResamplerDefault = static_cast<Resampler>(n);
+ else
+ WARN("Invalid resampler: %s\n", str);
+ }
+ }
+
+ MixHrtfBlendSamples = SelectHrtfBlendMixer();
+ MixHrtfSamples = SelectHrtfMixer();
+ MixSamples = SelectMixer();
+ MixRowSamples = SelectRowMixer();
+}
+
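+/* The "resampler" option read above comes from the config file's default
+ * section. A hypothetical alsoft.conf excerpt (the option name and accepted
+ * values follow the checks above; the exact section header is an assumption
+ * about the config layout):
+ *
+ *     [general]
+ *     resampler = bsinc12   # or point, linear, cubic, bsinc24
+ */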
+
+namespace {
+
+/* A quick'n'dirty lookup table to decode a muLaw-encoded byte sample into a
+ * signed 16-bit sample */
+constexpr ALshort muLawDecompressionTable[256] = {
+ -32124,-31100,-30076,-29052,-28028,-27004,-25980,-24956,
+ -23932,-22908,-21884,-20860,-19836,-18812,-17788,-16764,
+ -15996,-15484,-14972,-14460,-13948,-13436,-12924,-12412,
+ -11900,-11388,-10876,-10364, -9852, -9340, -8828, -8316,
+ -7932, -7676, -7420, -7164, -6908, -6652, -6396, -6140,
+ -5884, -5628, -5372, -5116, -4860, -4604, -4348, -4092,
+ -3900, -3772, -3644, -3516, -3388, -3260, -3132, -3004,
+ -2876, -2748, -2620, -2492, -2364, -2236, -2108, -1980,
+ -1884, -1820, -1756, -1692, -1628, -1564, -1500, -1436,
+ -1372, -1308, -1244, -1180, -1116, -1052, -988, -924,
+ -876, -844, -812, -780, -748, -716, -684, -652,
+ -620, -588, -556, -524, -492, -460, -428, -396,
+ -372, -356, -340, -324, -308, -292, -276, -260,
+ -244, -228, -212, -196, -180, -164, -148, -132,
+ -120, -112, -104, -96, -88, -80, -72, -64,
+ -56, -48, -40, -32, -24, -16, -8, 0,
+ 32124, 31100, 30076, 29052, 28028, 27004, 25980, 24956,
+ 23932, 22908, 21884, 20860, 19836, 18812, 17788, 16764,
+ 15996, 15484, 14972, 14460, 13948, 13436, 12924, 12412,
+ 11900, 11388, 10876, 10364, 9852, 9340, 8828, 8316,
+ 7932, 7676, 7420, 7164, 6908, 6652, 6396, 6140,
+ 5884, 5628, 5372, 5116, 4860, 4604, 4348, 4092,
+ 3900, 3772, 3644, 3516, 3388, 3260, 3132, 3004,
+ 2876, 2748, 2620, 2492, 2364, 2236, 2108, 1980,
+ 1884, 1820, 1756, 1692, 1628, 1564, 1500, 1436,
+ 1372, 1308, 1244, 1180, 1116, 1052, 988, 924,
+ 876, 844, 812, 780, 748, 716, 684, 652,
+ 620, 588, 556, 524, 492, 460, 428, 396,
+ 372, 356, 340, 324, 308, 292, 276, 260,
+ 244, 228, 212, 196, 180, 164, 148, 132,
+ 120, 112, 104, 96, 88, 80, 72, 64,
+ 56, 48, 40, 32, 24, 16, 8, 0
+};
+
+/* A quick'n'dirty lookup table to decode an aLaw-encoded byte sample into a
+ * signed 16-bit sample */
+constexpr ALshort aLawDecompressionTable[256] = {
+ -5504, -5248, -6016, -5760, -4480, -4224, -4992, -4736,
+ -7552, -7296, -8064, -7808, -6528, -6272, -7040, -6784,
+ -2752, -2624, -3008, -2880, -2240, -2112, -2496, -2368,
+ -3776, -3648, -4032, -3904, -3264, -3136, -3520, -3392,
+ -22016,-20992,-24064,-23040,-17920,-16896,-19968,-18944,
+ -30208,-29184,-32256,-31232,-26112,-25088,-28160,-27136,
+ -11008,-10496,-12032,-11520, -8960, -8448, -9984, -9472,
+ -15104,-14592,-16128,-15616,-13056,-12544,-14080,-13568,
+ -344, -328, -376, -360, -280, -264, -312, -296,
+ -472, -456, -504, -488, -408, -392, -440, -424,
+ -88, -72, -120, -104, -24, -8, -56, -40,
+ -216, -200, -248, -232, -152, -136, -184, -168,
+ -1376, -1312, -1504, -1440, -1120, -1056, -1248, -1184,
+ -1888, -1824, -2016, -1952, -1632, -1568, -1760, -1696,
+ -688, -656, -752, -720, -560, -528, -624, -592,
+ -944, -912, -1008, -976, -816, -784, -880, -848,
+ 5504, 5248, 6016, 5760, 4480, 4224, 4992, 4736,
+ 7552, 7296, 8064, 7808, 6528, 6272, 7040, 6784,
+ 2752, 2624, 3008, 2880, 2240, 2112, 2496, 2368,
+ 3776, 3648, 4032, 3904, 3264, 3136, 3520, 3392,
+ 22016, 20992, 24064, 23040, 17920, 16896, 19968, 18944,
+ 30208, 29184, 32256, 31232, 26112, 25088, 28160, 27136,
+ 11008, 10496, 12032, 11520, 8960, 8448, 9984, 9472,
+ 15104, 14592, 16128, 15616, 13056, 12544, 14080, 13568,
+ 344, 328, 376, 360, 280, 264, 312, 296,
+ 472, 456, 504, 488, 408, 392, 440, 424,
+ 88, 72, 120, 104, 24, 8, 56, 40,
+ 216, 200, 248, 232, 152, 136, 184, 168,
+ 1376, 1312, 1504, 1440, 1120, 1056, 1248, 1184,
+ 1888, 1824, 2016, 1952, 1632, 1568, 1760, 1696,
+ 688, 656, 752, 720, 560, 528, 624, 592,
+ 944, 912, 1008, 976, 816, 784, 880, 848
+};
+
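+/* As a worked example (illustration only): a muLaw byte of 0x00 decodes to
+ * -32124 in the table above, which LoadSample<FmtMulaw> below scales by
+ * 1/32768 to roughly -0.980 in the mixer's float domain.
+ */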
+
+void SendSourceStoppedEvent(ALCcontext *context, ALuint id)
+{
+ ALbitfieldSOFT enabledevt{context->EnabledEvts.load(std::memory_order_acquire)};
+ if(!(enabledevt&EventType_SourceStateChange)) return;
+
+ RingBuffer *ring{context->AsyncEvents.get()};
+ auto evt_vec = ring->getWriteVector();
+ if(evt_vec.first.len < 1) return;
+
+ AsyncEvent *evt{new (evt_vec.first.buf) AsyncEvent{EventType_SourceStateChange}};
+ evt->u.srcstate.id = id;
+ evt->u.srcstate.state = AL_STOPPED;
+
+ ring->writeAdvance(1);
+ context->EventSem.post();
+}
+
+
+const ALfloat *DoFilters(BiquadFilter *lpfilter, BiquadFilter *hpfilter, ALfloat *dst,
+ const ALfloat *src, ALsizei numsamples, int type)
+{
+ switch(type)
+ {
+ case AF_None:
+ lpfilter->clear();
+ hpfilter->clear();
+ break;
+
+ case AF_LowPass:
+ lpfilter->process(dst, src, numsamples);
+ hpfilter->clear();
+ return dst;
+ case AF_HighPass:
+ lpfilter->clear();
+ hpfilter->process(dst, src, numsamples);
+ return dst;
+
+ case AF_BandPass:
+ lpfilter->process(dst, src, numsamples);
+ hpfilter->process(dst, dst, numsamples);
+ return dst;
+ }
+ return src;
+}
+
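+/* A minimal usage sketch for DoFilters (illustration only; 'scratch' and
+ * 'input' are hypothetical buffers): the band-pass case runs the low-pass and
+ * high-pass in series over the scratch buffer, while AF_None clears both
+ * filters and returns the source unmodified:
+ *
+ *     const ALfloat *out{DoFilters(&parms.LowPass, &parms.HighPass, scratch,
+ *         input, count, AF_BandPass)};
+ */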
+
+/* Base template left undefined. Should be marked =delete, but Clang 3.8.1
+ * chokes on that given the inline specializations.
+ */
+template<FmtType T>
+inline ALfloat LoadSample(typename FmtTypeTraits<T>::Type val);
+
+template<> inline ALfloat LoadSample<FmtUByte>(FmtTypeTraits<FmtUByte>::Type val)
+{ return (val-128) * (1.0f/128.0f); }
+template<> inline ALfloat LoadSample<FmtShort>(FmtTypeTraits<FmtShort>::Type val)
+{ return val * (1.0f/32768.0f); }
+template<> inline ALfloat LoadSample<FmtFloat>(FmtTypeTraits<FmtFloat>::Type val)
+{ return val; }
+template<> inline ALfloat LoadSample<FmtDouble>(FmtTypeTraits<FmtDouble>::Type val)
+{ return static_cast<ALfloat>(val); }
+template<> inline ALfloat LoadSample<FmtMulaw>(FmtTypeTraits<FmtMulaw>::Type val)
+{ return muLawDecompressionTable[val] * (1.0f/32768.0f); }
+template<> inline ALfloat LoadSample<FmtAlaw>(FmtTypeTraits<FmtAlaw>::Type val)
+{ return aLawDecompressionTable[val] * (1.0f/32768.0f); }
+
+template<FmtType T>
+inline void LoadSampleArray(ALfloat *RESTRICT dst, const al::byte *src, ALint srcstep,
+ const ptrdiff_t samples)
+{
+ using SampleType = typename FmtTypeTraits<T>::Type;
+
+ const SampleType *RESTRICT ssrc{reinterpret_cast<const SampleType*>(src)};
+ for(ALsizei i{0};i < samples;i++)
+ dst[i] += LoadSample<T>(ssrc[i*srcstep]);
+}
+
+void LoadSamples(ALfloat *RESTRICT dst, const al::byte *src, ALint srcstep, FmtType srctype,
+ const ptrdiff_t samples)
+{
+#define HANDLE_FMT(T) case T: LoadSampleArray<T>(dst, src, srcstep, samples); break
+ switch(srctype)
+ {
+ HANDLE_FMT(FmtUByte);
+ HANDLE_FMT(FmtShort);
+ HANDLE_FMT(FmtFloat);
+ HANDLE_FMT(FmtDouble);
+ HANDLE_FMT(FmtMulaw);
+ HANDLE_FMT(FmtAlaw);
+ }
+#undef HANDLE_FMT
+}
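+
+/* A usage sketch for LoadSamples (illustration only): to pull one channel out
+ * of interleaved 16-bit data, point src at that channel's first sample and
+ * step by the channel count. Note that LoadSampleArray adds into dst rather
+ * than overwriting it, so dst is expected to be pre-cleared:
+ *
+ *     LoadSamples(dst, data + chan*sizeof(ALshort), NumChannels, FmtShort, n);
+ */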
+
+ALfloat *LoadBufferStatic(ALbufferlistitem *BufferListItem, ALbufferlistitem *&BufferLoopItem,
+ const ALsizei NumChannels, const ALsizei SampleSize, const ALsizei chan, ALsizei DataPosInt,
+ al::span<ALfloat> SrcBuffer)
+{
+ /* TODO: For static sources, loop points are taken from the first buffer
+ * (should be adjusted by any buffer offset, to possibly be added later).
+ */
+ const ALbuffer *Buffer0{BufferListItem->buffers[0]};
+ const ALsizei LoopStart{Buffer0->LoopStart};
+ const ALsizei LoopEnd{Buffer0->LoopEnd};
+ ASSUME(LoopStart >= 0);
+ ASSUME(LoopEnd > LoopStart);
+
+ /* If current pos is beyond the loop range, do not loop */
+ if(!BufferLoopItem || DataPosInt >= LoopEnd)
+ {
+ BufferLoopItem = nullptr;
+
+ auto load_buffer = [DataPosInt,NumChannels,SampleSize,chan,SrcBuffer](size_t CompLen, const ALbuffer *buffer) -> size_t
+ {
+ if(DataPosInt >= buffer->SampleLen)
+ return CompLen;
+
+ /* Load what's left to play from the buffer */
+ const size_t DataSize{std::min<size_t>(SrcBuffer.size(),
+ buffer->SampleLen - DataPosInt)};
+ CompLen = std::max(CompLen, DataSize);
+
+ const al::byte *Data{buffer->mData.data()};
+ Data += (DataPosInt*NumChannels + chan)*SampleSize;
+
+ LoadSamples(SrcBuffer.data(), Data, NumChannels, buffer->mFmtType, DataSize);
+ return CompLen;
+ };
+ /* It's impossible to have a buffer list item with no entries. */
+ ASSUME(BufferListItem->num_buffers > 0);
+ auto buffers_end = BufferListItem->buffers + BufferListItem->num_buffers;
+ SrcBuffer = SrcBuffer.subspan(std::accumulate(BufferListItem->buffers, buffers_end,
+ size_t{0u}, load_buffer));
+ }
+ else
+ {
+ const al::span<ALfloat> SrcData{SrcBuffer.first(
+ std::min<size_t>(SrcBuffer.size(), LoopEnd - DataPosInt))};
+
+ auto load_buffer = [DataPosInt,NumChannels,SampleSize,chan,SrcData](size_t CompLen, const ALbuffer *buffer) -> size_t
+ {
+ if(DataPosInt >= buffer->SampleLen)
+ return CompLen;
+
+ /* Load what's left of this loop iteration */
+ const size_t DataSize{std::min<size_t>(SrcData.size(),
+ buffer->SampleLen - DataPosInt)};
+ CompLen = std::max(CompLen, DataSize);
+
+ const al::byte *Data{buffer->mData.data()};
+ Data += (DataPosInt*NumChannels + chan)*SampleSize;
+
+ LoadSamples(SrcData.data(), Data, NumChannels, buffer->mFmtType, DataSize);
+ return CompLen;
+ };
+ ASSUME(BufferListItem->num_buffers > 0);
+ auto buffers_end = BufferListItem->buffers + BufferListItem->num_buffers;
+ SrcBuffer = SrcBuffer.subspan(std::accumulate(BufferListItem->buffers, buffers_end,
+ size_t{0u}, load_buffer));
+
+ const auto LoopSize = static_cast<size_t>(LoopEnd - LoopStart);
+ while(!SrcBuffer.empty())
+ {
+ const al::span<ALfloat> SrcData{SrcBuffer.first(
+ std::min<size_t>(SrcBuffer.size(), LoopSize))};
+
+ auto load_buffer_loop = [LoopStart,NumChannels,SampleSize,chan,SrcData](size_t CompLen, const ALbuffer *buffer) -> size_t
+ {
+ if(LoopStart >= buffer->SampleLen)
+ return CompLen;
+
+ const size_t DataSize{std::min<size_t>(SrcData.size(),
+ buffer->SampleLen-LoopStart)};
+ CompLen = std::max(CompLen, DataSize);
+
+ const al::byte *Data{buffer->mData.data()};
+ Data += (LoopStart*NumChannels + chan)*SampleSize;
+
+ LoadSamples(SrcData.data(), Data, NumChannels, buffer->mFmtType, DataSize);
+ return CompLen;
+ };
+ SrcBuffer = SrcBuffer.subspan(std::accumulate(BufferListItem->buffers, buffers_end,
+ size_t{0u}, load_buffer_loop));
+ }
+ }
+ return SrcBuffer.begin();
+}
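+
+/* A worked example of the looping branch above (hypothetical numbers): with
+ * LoopStart=1000, LoopEnd=5000 and DataPosInt=4500, the first pass loads the
+ * 500 samples remaining up to LoopEnd, then the while loop refills the rest
+ * of SrcBuffer in chunks of LoopSize=4000 starting from LoopStart until the
+ * span is exhausted.
+ */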
+
+ALfloat *LoadBufferQueue(ALbufferlistitem *BufferListItem, ALbufferlistitem *BufferLoopItem,
+ const ALsizei NumChannels, const ALsizei SampleSize, const ALsizei chan, ALsizei DataPosInt,
+ al::span<ALfloat> SrcBuffer)
+{
+ /* Crawl the buffer queue to fill in the temp buffer */
+ while(BufferListItem && !SrcBuffer.empty())
+ {
+ if(DataPosInt >= BufferListItem->max_samples)
+ {
+ DataPosInt -= BufferListItem->max_samples;
+ BufferListItem = BufferListItem->next.load(std::memory_order_acquire);
+ if(!BufferListItem) BufferListItem = BufferLoopItem;
+ continue;
+ }
+
+ auto load_buffer = [DataPosInt,NumChannels,SampleSize,chan,SrcBuffer](size_t CompLen, const ALbuffer *buffer) -> size_t
+ {
+ if(!buffer) return CompLen;
+ if(DataPosInt >= buffer->SampleLen)
+ return CompLen;
+
+ const size_t DataSize{std::min<size_t>(SrcBuffer.size(), buffer->SampleLen-DataPosInt)};
+ CompLen = std::max(CompLen, DataSize);
+
+ const al::byte *Data{buffer->mData.data()};
+ Data += (DataPosInt*NumChannels + chan)*SampleSize;
+
+ LoadSamples(SrcBuffer.data(), Data, NumChannels, buffer->mFmtType, DataSize);
+ return CompLen;
+ };
+ ASSUME(BufferListItem->num_buffers > 0);
+ auto buffers_end = BufferListItem->buffers + BufferListItem->num_buffers;
+ SrcBuffer = SrcBuffer.subspan(std::accumulate(BufferListItem->buffers, buffers_end,
+ size_t{0u}, load_buffer));
+
+ if(SrcBuffer.empty())
+ break;
+ DataPosInt = 0;
+ BufferListItem = BufferListItem->next.load(std::memory_order_acquire);
+ if(!BufferListItem) BufferListItem = BufferLoopItem;
+ }
+
+ return SrcBuffer.begin();
+}
+
+} // namespace
+
+void MixVoice(ALvoice *voice, ALvoice::State vstate, const ALuint SourceID, ALCcontext *Context, const ALsizei SamplesToDo)
+{
+ static constexpr ALfloat SilentTarget[MAX_OUTPUT_CHANNELS]{};
+
+ ASSUME(SamplesToDo > 0);
+
+ /* Get voice info */
+ const bool isstatic{(voice->mFlags&VOICE_IS_STATIC) != 0};
+ ALsizei DataPosInt{static_cast<ALsizei>(voice->mPosition.load(std::memory_order_relaxed))};
+ ALsizei DataPosFrac{voice->mPositionFrac.load(std::memory_order_relaxed)};
+ ALbufferlistitem *BufferListItem{voice->mCurrentBuffer.load(std::memory_order_relaxed)};
+ ALbufferlistitem *BufferLoopItem{voice->mLoopBuffer.load(std::memory_order_relaxed)};
+ const ALsizei NumChannels{voice->mNumChannels};
+ const ALsizei SampleSize{voice->mSampleSize};
+ const ALint increment{voice->mStep};
+
+ ASSUME(DataPosInt >= 0);
+ ASSUME(DataPosFrac >= 0);
+ ASSUME(NumChannels > 0);
+ ASSUME(SampleSize > 0);
+ ASSUME(increment > 0);
+
+ ALCdevice *Device{Context->Device};
+ const ALsizei NumSends{Device->NumAuxSends};
+ const ALsizei IrSize{Device->mHrtf ? Device->mHrtf->irSize : 0};
+
+ ASSUME(NumSends >= 0);
+ ASSUME(IrSize >= 0);
+
+ ResamplerFunc Resample{(increment == FRACTIONONE && DataPosFrac == 0) ?
+ Resample_<CopyTag,CTag> : voice->mResampler};
+
+ ALsizei Counter{(voice->mFlags&VOICE_IS_FADING) ? SamplesToDo : 0};
+ if(!Counter)
+ {
+ /* No fading, just overwrite the old/current params. */
+ for(ALsizei chan{0};chan < NumChannels;chan++)
+ {
+ ALvoice::ChannelData &chandata = voice->mChans[chan];
+ DirectParams &parms = chandata.mDryParams;
+ if(!(voice->mFlags&VOICE_HAS_HRTF))
+ std::copy(std::begin(parms.Gains.Target), std::end(parms.Gains.Target),
+ std::begin(parms.Gains.Current));
+ else
+ parms.Hrtf.Old = parms.Hrtf.Target;
+ for(ALsizei send{0};send < NumSends;++send)
+ {
+ if(voice->mSend[send].Buffer.empty())
+ continue;
+
+ SendParams &parms = chandata.mWetParams[send];
+ std::copy(std::begin(parms.Gains.Target), std::end(parms.Gains.Target),
+ std::begin(parms.Gains.Current));
+ }
+ }
+ }
+ else if((voice->mFlags&VOICE_HAS_HRTF))
+ {
+ for(ALsizei chan{0};chan < NumChannels;chan++)
+ {
+ DirectParams &parms = voice->mChans[chan].mDryParams;
+ if(!(parms.Hrtf.Old.Gain > GAIN_SILENCE_THRESHOLD))
+ {
+ /* The old HRTF params are silent, so overwrite the old
+ * coefficients with the new, and reset the old gain to 0. The
+ * future mix will then fade from silence.
+ */
+ parms.Hrtf.Old = parms.Hrtf.Target;
+ parms.Hrtf.Old.Gain = 0.0f;
+ }
+ }
+ }
+
+ ALsizei buffers_done{0};
+ ALsizei OutPos{0};
+ do {
+ /* Figure out how many buffer samples will be needed */
+ ALsizei DstBufferSize{SamplesToDo - OutPos};
+
+ /* Calculate the last written dst sample pos. */
+ int64_t DataSize64{DstBufferSize - 1};
+ /* Calculate the last read src sample pos. */
+ DataSize64 = (DataSize64*increment + DataPosFrac) >> FRACTIONBITS;
+ /* +1 to get the src sample count, include padding. */
+ DataSize64 += 1 + MAX_RESAMPLE_PADDING*2;
+
+ auto SrcBufferSize = static_cast<ALuint>(
+ mini64(DataSize64, BUFFERSIZE + MAX_RESAMPLE_PADDING*2 + 1));
+ if(SrcBufferSize > BUFFERSIZE + MAX_RESAMPLE_PADDING*2)
+ {
+ SrcBufferSize = BUFFERSIZE + MAX_RESAMPLE_PADDING*2;
+ /* If the source buffer got saturated, we can't fill the desired
+ * dst size. Figure out how many samples we can actually mix from
+ * this.
+ */
+ DataSize64 = SrcBufferSize - MAX_RESAMPLE_PADDING*2;
+ DataSize64 = ((DataSize64<<FRACTIONBITS) - DataPosFrac + increment-1) / increment;
+ DstBufferSize = static_cast<ALsizei>(mini64(DataSize64, DstBufferSize));
+
+ /* Some mixers like having a multiple of 4, so try to give that
+ * unless this is the last update.
+ */
+ if(DstBufferSize < SamplesToDo-OutPos)
+ DstBufferSize &= ~3;
+ }
+
+ for(ALsizei chan{0};chan < NumChannels;chan++)
+ {
+ ALvoice::ChannelData &chandata = voice->mChans[chan];
+ const al::span<ALfloat> SrcData{Device->SourceData, SrcBufferSize};
+
+ /* Load the previous samples into the source data first, and clear the rest. */
+ auto srciter = std::copy_n(chandata.mPrevSamples.begin(), MAX_RESAMPLE_PADDING,
+ SrcData.begin());
+ std::fill(srciter, SrcData.end(), 0.0f);
+
+ if(UNLIKELY(!BufferListItem))
+ srciter = std::copy(chandata.mPrevSamples.begin()+MAX_RESAMPLE_PADDING,
+ chandata.mPrevSamples.end(), srciter);
+ else if(isstatic)
+ srciter = LoadBufferStatic(BufferListItem, BufferLoopItem, NumChannels,
+ SampleSize, chan, DataPosInt, {srciter, SrcData.end()});
+ else
+ srciter = LoadBufferQueue(BufferListItem, BufferLoopItem, NumChannels,
+ SampleSize, chan, DataPosInt, {srciter, SrcData.end()});
+
+ if(UNLIKELY(srciter != SrcData.end()))
+ {
+ /* If the source buffer wasn't filled, copy the last sample for
+ * the remaining buffer. Ideally it should have ended with
+ * silence, but if not the gain fading should help avoid clicks
+ * from sudden amplitude changes.
+ */
+ const ALfloat sample{*(srciter-1)};
+ std::fill(srciter, SrcData.end(), sample);
+ }
+
+ /* Store the last source samples used for next time. */
+ std::copy_n(&SrcData[(increment*DstBufferSize + DataPosFrac)>>FRACTIONBITS],
+ chandata.mPrevSamples.size(), chandata.mPrevSamples.begin());
+
+ /* Resample, then apply ambisonic upsampling as needed. */
+ const ALfloat *ResampledData{Resample(&voice->mResampleState,
+ &SrcData[MAX_RESAMPLE_PADDING], DataPosFrac, increment,
+ Device->ResampledData, DstBufferSize)};
+ if((voice->mFlags&VOICE_IS_AMBISONIC))
+ {
+ const ALfloat hfscale{chandata.mAmbiScale};
+ /* Beware the evil const_cast. It's safe since it's pointing to
+ * either SourceData or ResampledData (both non-const), but the
+ * resample method takes the source as const float* and may
+ * return it without copying to output, making it currently
+ * unavoidable.
+ */
+ chandata.mAmbiSplitter.applyHfScale(const_cast<ALfloat*>(ResampledData), hfscale,
+ DstBufferSize);
+ }
+
+ /* Now filter and mix to the appropriate outputs. */
+ {
+ DirectParams &parms = chandata.mDryParams;
+ const ALfloat *samples{DoFilters(&parms.LowPass, &parms.HighPass,
+ Device->FilteredData, ResampledData, DstBufferSize,
+ voice->mDirect.FilterType)};
+
+ if((voice->mFlags&VOICE_HAS_HRTF))
+ {
+ const int OutLIdx{GetChannelIdxByName(Device->RealOut, FrontLeft)};
+ const int OutRIdx{GetChannelIdxByName(Device->RealOut, FrontRight)};
+ ASSUME(OutLIdx >= 0 && OutRIdx >= 0);
+
+ auto &HrtfSamples = Device->HrtfSourceData;
+ auto &AccumSamples = Device->HrtfAccumData;
+ const ALfloat TargetGain{UNLIKELY(vstate == ALvoice::Stopping) ? 0.0f :
+ parms.Hrtf.Target.Gain};
+ ALsizei fademix{0};
+
+ /* Copy the HRTF history and new input samples into a temp
+ * buffer.
+ */
+ auto src_iter = std::copy(parms.Hrtf.State.History.begin(),
+ parms.Hrtf.State.History.end(), std::begin(HrtfSamples));
+ std::copy_n(samples, DstBufferSize, src_iter);
+ /* Copy the last used samples back into the history buffer
+ * for later.
+ */
+ std::copy_n(std::begin(HrtfSamples) + DstBufferSize,
+ parms.Hrtf.State.History.size(), parms.Hrtf.State.History.begin());
+
+ /* Copy the current filtered values being accumulated into
+ * the temp buffer.
+ */
+ auto accum_iter = std::copy_n(parms.Hrtf.State.Values.begin(),
+ parms.Hrtf.State.Values.size(), std::begin(AccumSamples));
+
+ /* Clear the accumulation buffer that will start getting
+ * filled in.
+ */
+ std::fill_n(accum_iter, DstBufferSize, float2{});
+
+ /* If fading, the old gain is not silence, and this is the
+ * first mixing pass, fade between the IRs.
+ */
+ if(Counter && (parms.Hrtf.Old.Gain > GAIN_SILENCE_THRESHOLD) && OutPos == 0)
+ {
+ fademix = mini(DstBufferSize, 128);
+
+ ALfloat gain{TargetGain};
+
+ /* The new coefficients need to fade in completely
+ * since they're replacing the old ones. To keep the
+ * gain fading consistent, interpolate between the old
+ * and new target gains given how much of the fade time
+ * this mix handles.
+ */
+ if(LIKELY(Counter > fademix))
+ {
+ const ALfloat a{static_cast<ALfloat>(fademix) /
+ static_cast<ALfloat>(Counter)};
+ gain = lerp(parms.Hrtf.Old.Gain, TargetGain, a);
+ }
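+                    /* For instance (hypothetical numbers): with Counter=1024
+                     * and fademix=128, a = 128/1024 = 0.125, so this pass
+                     * blends toward a gain one eighth of the way from the old
+                     * gain to the new target while the old IR fades out.
+                     */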
+ MixHrtfFilter hrtfparams;
+ hrtfparams.Coeffs = &parms.Hrtf.Target.Coeffs;
+ hrtfparams.Delay[0] = parms.Hrtf.Target.Delay[0];
+ hrtfparams.Delay[1] = parms.Hrtf.Target.Delay[1];
+ hrtfparams.Gain = 0.0f;
+ hrtfparams.GainStep = gain / static_cast<ALfloat>(fademix);
+
+ MixHrtfBlendSamples(voice->mDirect.Buffer[OutLIdx],
+ voice->mDirect.Buffer[OutRIdx], HrtfSamples, AccumSamples, OutPos,
+ IrSize, &parms.Hrtf.Old, &hrtfparams, fademix);
+ /* Update the old parameters with the result. */
+ parms.Hrtf.Old = parms.Hrtf.Target;
+ if(fademix < Counter)
+ parms.Hrtf.Old.Gain = hrtfparams.Gain;
+ else
+ parms.Hrtf.Old.Gain = TargetGain;
+ }
+
+ if(LIKELY(fademix < DstBufferSize))
+ {
+ const ALsizei todo{DstBufferSize - fademix};
+ ALfloat gain{TargetGain};
+
+ /* Interpolate the target gain if the gain fading lasts
+ * longer than this mix.
+ */
+ if(Counter > DstBufferSize)
+ {
+ const ALfloat a{static_cast<ALfloat>(todo) /
+ static_cast<ALfloat>(Counter-fademix)};
+ gain = lerp(parms.Hrtf.Old.Gain, TargetGain, a);
+ }
+
+ MixHrtfFilter hrtfparams;
+ hrtfparams.Coeffs = &parms.Hrtf.Target.Coeffs;
+ hrtfparams.Delay[0] = parms.Hrtf.Target.Delay[0];
+ hrtfparams.Delay[1] = parms.Hrtf.Target.Delay[1];
+ hrtfparams.Gain = parms.Hrtf.Old.Gain;
+ hrtfparams.GainStep = (gain - parms.Hrtf.Old.Gain) /
+ static_cast<ALfloat>(todo);
+ MixHrtfSamples(voice->mDirect.Buffer[OutLIdx],
+ voice->mDirect.Buffer[OutRIdx], HrtfSamples+fademix,
+ AccumSamples+fademix, OutPos+fademix, IrSize, &hrtfparams, todo);
+ /* Store the interpolated gain or the final target gain
+ * depending if the fade is done.
+ */
+ if(DstBufferSize < Counter)
+ parms.Hrtf.Old.Gain = gain;
+ else
+ parms.Hrtf.Old.Gain = TargetGain;
+ }
+
+ /* Copy the new in-progress accumulation values back for
+ * the next mix.
+ */
+ std::copy_n(std::begin(AccumSamples) + DstBufferSize,
+ parms.Hrtf.State.Values.size(), parms.Hrtf.State.Values.begin());
+ }
+ else if((voice->mFlags&VOICE_HAS_NFC))
+ {
+ const ALfloat *TargetGains{UNLIKELY(vstate == ALvoice::Stopping) ?
+ SilentTarget : parms.Gains.Target};
+
+ const size_t outcount{Device->NumChannelsPerOrder[0]};
+ MixSamples(samples, voice->mDirect.Buffer.first(outcount), parms.Gains.Current,
+ TargetGains, Counter, OutPos, DstBufferSize);
+
+ ALfloat (&nfcsamples)[BUFFERSIZE] = Device->NfcSampleData;
+ size_t chanoffset{outcount};
+ using FilterProc = void (NfcFilter::*)(float*,const float*,int);
+ auto apply_nfc = [voice,&parms,samples,TargetGains,DstBufferSize,Counter,OutPos,&chanoffset,&nfcsamples](const FilterProc process, const size_t outcount) -> void
+ {
+ if(outcount < 1) return;
+ (parms.NFCtrlFilter.*process)(nfcsamples, samples, DstBufferSize);
+ MixSamples(nfcsamples, voice->mDirect.Buffer.subspan(chanoffset, outcount),
+ parms.Gains.Current+chanoffset, TargetGains+chanoffset, Counter,
+ OutPos, DstBufferSize);
+ chanoffset += outcount;
+ };
+ apply_nfc(&NfcFilter::process1, Device->NumChannelsPerOrder[1]);
+ apply_nfc(&NfcFilter::process2, Device->NumChannelsPerOrder[2]);
+ apply_nfc(&NfcFilter::process3, Device->NumChannelsPerOrder[3]);
+ }
+ else
+ {
+ const ALfloat *TargetGains{UNLIKELY(vstate == ALvoice::Stopping) ?
+ SilentTarget : parms.Gains.Target};
+ MixSamples(samples, voice->mDirect.Buffer, parms.Gains.Current, TargetGains,
+ Counter, OutPos, DstBufferSize);
+ }
+ }
+
+ ALfloat (&FilterBuf)[BUFFERSIZE] = Device->FilteredData;
+ for(ALsizei send{0};send < NumSends;++send)
+ {
+ if(voice->mSend[send].Buffer.empty())
+ continue;
+
+ SendParams &parms = chandata.mWetParams[send];
+ const ALfloat *samples{DoFilters(&parms.LowPass, &parms.HighPass,
+ FilterBuf, ResampledData, DstBufferSize, voice->mSend[send].FilterType)};
+
+ const ALfloat *TargetGains{UNLIKELY(vstate==ALvoice::Stopping) ? SilentTarget :
+ parms.Gains.Target};
+ MixSamples(samples, voice->mSend[send].Buffer, parms.Gains.Current, TargetGains,
+ Counter, OutPos, DstBufferSize);
+            }
+ }
+ /* Update positions */
+ DataPosFrac += increment*DstBufferSize;
+ DataPosInt += DataPosFrac>>FRACTIONBITS;
+ DataPosFrac &= FRACTIONMASK;
+
+ OutPos += DstBufferSize;
+ Counter = maxi(DstBufferSize, Counter) - DstBufferSize;
+
+ if(UNLIKELY(!BufferListItem))
+ {
+ /* Do nothing extra when there's no buffers. */
+ }
+ else if(isstatic)
+ {
+ if(BufferLoopItem)
+ {
+ /* Handle looping static source */
+ const ALbuffer *Buffer{BufferListItem->buffers[0]};
+ const ALsizei LoopStart{Buffer->LoopStart};
+ const ALsizei LoopEnd{Buffer->LoopEnd};
+ if(DataPosInt >= LoopEnd)
+ {
+ assert(LoopEnd > LoopStart);
+ DataPosInt = ((DataPosInt-LoopStart)%(LoopEnd-LoopStart)) + LoopStart;
+ }
+ }
+ else
+ {
+ /* Handle non-looping static source */
+ if(DataPosInt >= BufferListItem->max_samples)
+ {
+ if(LIKELY(vstate == ALvoice::Playing))
+ vstate = ALvoice::Stopped;
+ BufferListItem = nullptr;
+ break;
+ }
+ }
+ }
+ else while(1)
+ {
+ /* Handle streaming source */
+ if(BufferListItem->max_samples > DataPosInt)
+ break;
+
+ DataPosInt -= BufferListItem->max_samples;
+
+ buffers_done += BufferListItem->num_buffers;
+ BufferListItem = BufferListItem->next.load(std::memory_order_relaxed);
+ if(!BufferListItem && !(BufferListItem=BufferLoopItem))
+ {
+ if(LIKELY(vstate == ALvoice::Playing))
+ vstate = ALvoice::Stopped;
+ break;
+ }
+ }
+ } while(OutPos < SamplesToDo);
+
+ voice->mFlags |= VOICE_IS_FADING;
+
+ /* Don't update positions and buffers if we were stopping. */
+ if(UNLIKELY(vstate == ALvoice::Stopping))
+ {
+ voice->mPlayState.store(ALvoice::Stopped, std::memory_order_release);
+ return;
+ }
+
+ /* Update voice info */
+ voice->mPosition.store(DataPosInt, std::memory_order_relaxed);
+ voice->mPositionFrac.store(DataPosFrac, std::memory_order_relaxed);
+ voice->mCurrentBuffer.store(BufferListItem, std::memory_order_relaxed);
+ if(vstate == ALvoice::Stopped)
+ {
+ voice->mLoopBuffer.store(nullptr, std::memory_order_relaxed);
+ voice->mSourceID.store(0u, std::memory_order_relaxed);
+ }
+ std::atomic_thread_fence(std::memory_order_release);
+
+ /* Send any events now, after the position/buffer info was updated. */
+ ALbitfieldSOFT enabledevt{Context->EnabledEvts.load(std::memory_order_acquire)};
+ if(buffers_done > 0 && (enabledevt&EventType_BufferCompleted))
+ {
+ RingBuffer *ring{Context->AsyncEvents.get()};
+ auto evt_vec = ring->getWriteVector();
+ if(evt_vec.first.len > 0)
+ {
+ AsyncEvent *evt{new (evt_vec.first.buf) AsyncEvent{EventType_BufferCompleted}};
+ evt->u.bufcomp.id = SourceID;
+ evt->u.bufcomp.count = buffers_done;
+ ring->writeAdvance(1);
+ Context->EventSem.post();
+ }
+ }
+
+ if(vstate == ALvoice::Stopped)
+ {
+ /* If the voice just ended, set it to Stopping so the next render
+ * ensures any residual noise fades to 0 amplitude.
+ */
+ voice->mPlayState.store(ALvoice::Stopping, std::memory_order_release);
+ SendSourceStoppedEvent(Context, SourceID);
+ }
+}
diff --git a/alc/panning.cpp b/alc/panning.cpp
new file mode 100644
index 00000000..3a67e33a
--- /dev/null
+++ b/alc/panning.cpp
@@ -0,0 +1,964 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2010 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+#include <cctype>
+#include <cassert>
+
+#include <cmath>
+#include <chrono>
+#include <numeric>
+#include <algorithm>
+#include <functional>
+
+#include "alcmain.h"
+#include "alAuxEffectSlot.h"
+#include "alu.h"
+#include "alconfig.h"
+#include "ambdec.h"
+#include "bformatdec.h"
+#include "filters/splitter.h"
+#include "uhjfilter.h"
+#include "bs2b.h"
+
+#include "alspan.h"
+
+
+constexpr std::array<float,MAX_AMBI_CHANNELS> AmbiScale::FromN3D;
+constexpr std::array<float,MAX_AMBI_CHANNELS> AmbiScale::FromSN3D;
+constexpr std::array<float,MAX_AMBI_CHANNELS> AmbiScale::FromFuMa;
+constexpr std::array<int,MAX_AMBI_CHANNELS> AmbiIndex::FromFuMa;
+constexpr std::array<int,MAX_AMBI_CHANNELS> AmbiIndex::FromACN;
+constexpr std::array<int,MAX_AMBI2D_CHANNELS> AmbiIndex::From2D;
+constexpr std::array<int,MAX_AMBI_CHANNELS> AmbiIndex::From3D;
+
+
+namespace {
+
+using namespace std::placeholders;
+using std::chrono::seconds;
+using std::chrono::nanoseconds;
+
+inline const char *GetLabelFromChannel(Channel channel)
+{
+ switch(channel)
+ {
+ case FrontLeft: return "front-left";
+ case FrontRight: return "front-right";
+ case FrontCenter: return "front-center";
+ case LFE: return "lfe";
+ case BackLeft: return "back-left";
+ case BackRight: return "back-right";
+ case BackCenter: return "back-center";
+ case SideLeft: return "side-left";
+ case SideRight: return "side-right";
+
+ case UpperFrontLeft: return "upper-front-left";
+ case UpperFrontRight: return "upper-front-right";
+ case UpperBackLeft: return "upper-back-left";
+ case UpperBackRight: return "upper-back-right";
+ case LowerFrontLeft: return "lower-front-left";
+ case LowerFrontRight: return "lower-front-right";
+ case LowerBackLeft: return "lower-back-left";
+ case LowerBackRight: return "lower-back-right";
+
+ case Aux0: return "aux-0";
+ case Aux1: return "aux-1";
+ case Aux2: return "aux-2";
+ case Aux3: return "aux-3";
+ case Aux4: return "aux-4";
+ case Aux5: return "aux-5";
+ case Aux6: return "aux-6";
+ case Aux7: return "aux-7";
+ case Aux8: return "aux-8";
+ case Aux9: return "aux-9";
+ case Aux10: return "aux-10";
+ case Aux11: return "aux-11";
+ case Aux12: return "aux-12";
+ case Aux13: return "aux-13";
+ case Aux14: return "aux-14";
+ case Aux15: return "aux-15";
+
+ case MaxChannels: break;
+ }
+ return "(unknown)";
+}
+
+
+void AllocChannels(ALCdevice *device, const ALuint main_chans, const ALuint real_chans)
+{
+ TRACE("Channel config, Main: %u, Real: %u\n", main_chans, real_chans);
+
+ /* Allocate extra channels for any post-filter output. */
+ const ALuint num_chans{main_chans + real_chans};
+
+ TRACE("Allocating %u channels, %zu bytes\n", num_chans,
+ num_chans*sizeof(device->MixBuffer[0]));
+ device->MixBuffer.resize(num_chans);
+ al::span<FloatBufferLine> buffer{device->MixBuffer.data(), device->MixBuffer.size()};
+
+ device->Dry.Buffer = buffer.first(main_chans);
+ buffer = buffer.subspan(main_chans);
+ if(real_chans != 0)
+ {
+ device->RealOut.Buffer = buffer.first(real_chans);
+ buffer = buffer.subspan(real_chans);
+ }
+ else
+ device->RealOut.Buffer = device->Dry.Buffer;
+}
+
+
+struct ChannelMap {
+ Channel ChanName;
+ ALfloat Config[MAX_AMBI2D_CHANNELS];
+};
+
+bool MakeSpeakerMap(ALCdevice *device, const AmbDecConf *conf, ALsizei (&speakermap)[MAX_OUTPUT_CHANNELS])
+{
+ auto map_spkr = [device](const AmbDecConf::SpeakerConf &speaker) -> ALsizei
+ {
+ /* NOTE: AmbDec does not define any standard speaker names, however
+         * for this to work we have to be able to find the output channel
+ * the speaker definition corresponds to. Therefore, OpenAL Soft
+ * requires these channel labels to be recognized:
+ *
+ * LF = Front left
+ * RF = Front right
+ * LS = Side left
+ * RS = Side right
+ * LB = Back left
+ * RB = Back right
+ * CE = Front center
+ * CB = Back center
+ *
+ * Additionally, surround51 will acknowledge back speakers for side
+ * channels, and surround51rear will acknowledge side speakers for
+ * back channels, to avoid issues with an ambdec expecting 5.1 to
+ * use the side channels when the device is configured for back,
+ * and vice-versa.
+ */
+ Channel ch{};
+ if(speaker.Name == "LF")
+ ch = FrontLeft;
+ else if(speaker.Name == "RF")
+ ch = FrontRight;
+ else if(speaker.Name == "CE")
+ ch = FrontCenter;
+ else if(speaker.Name == "LS")
+ {
+ if(device->FmtChans == DevFmtX51Rear)
+ ch = BackLeft;
+ else
+ ch = SideLeft;
+ }
+ else if(speaker.Name == "RS")
+ {
+ if(device->FmtChans == DevFmtX51Rear)
+ ch = BackRight;
+ else
+ ch = SideRight;
+ }
+ else if(speaker.Name == "LB")
+ {
+ if(device->FmtChans == DevFmtX51)
+ ch = SideLeft;
+ else
+ ch = BackLeft;
+ }
+ else if(speaker.Name == "RB")
+ {
+ if(device->FmtChans == DevFmtX51)
+ ch = SideRight;
+ else
+ ch = BackRight;
+ }
+ else if(speaker.Name == "CB")
+ ch = BackCenter;
+ else
+ {
+ const char *name{speaker.Name.c_str()};
+ unsigned int n;
+ char c;
+
+ if(sscanf(name, "AUX%u%c", &n, &c) == 1 && n < 16)
+ ch = static_cast<Channel>(Aux0+n);
+ else
+ {
+ ERR("AmbDec speaker label \"%s\" not recognized\n", name);
+ return -1;
+ }
+ }
+ const int chidx{GetChannelIdxByName(device->RealOut, ch)};
+ if(chidx == -1)
+ ERR("Failed to lookup AmbDec speaker label %s\n", speaker.Name.c_str());
+ return chidx;
+ };
+ std::transform(conf->Speakers.begin(), conf->Speakers.end(), std::begin(speakermap), map_spkr);
+ /* Return success if no invalid entries are found. */
+ auto speakermap_end = std::begin(speakermap) + conf->Speakers.size();
+ return std::find(std::begin(speakermap), speakermap_end, -1) == speakermap_end;
+}
+
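+/* For example, a speaker labeled "AUX3" in the ambdec config maps to the Aux3
+ * output channel through the sscanf() fallback above (it only matches when no
+ * character follows the number, so "AUX3x" is rejected), while any other
+ * unrecognized label logs an error and marks that map entry as -1.
+ */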
+
+constexpr ChannelMap MonoCfg[1] = {
+ { FrontCenter, { 1.0f } },
+}, StereoCfg[2] = {
+ { FrontLeft, { 5.00000000e-1f, 2.88675135e-1f, 5.52305643e-2f } },
+ { FrontRight, { 5.00000000e-1f, -2.88675135e-1f, 5.52305643e-2f } },
+}, QuadCfg[4] = {
+ { BackLeft, { 3.53553391e-1f, 2.04124145e-1f, -2.04124145e-1f } },
+ { FrontLeft, { 3.53553391e-1f, 2.04124145e-1f, 2.04124145e-1f } },
+ { FrontRight, { 3.53553391e-1f, -2.04124145e-1f, 2.04124145e-1f } },
+ { BackRight, { 3.53553391e-1f, -2.04124145e-1f, -2.04124145e-1f } },
+}, X51SideCfg[4] = {
+ { SideLeft, { 3.33000782e-1f, 1.89084803e-1f, -2.00042375e-1f, -2.12307769e-2f, -1.14579885e-2f } },
+ { FrontLeft, { 1.88542860e-1f, 1.27709292e-1f, 1.66295695e-1f, 7.30571517e-2f, 2.10901184e-2f } },
+ { FrontRight, { 1.88542860e-1f, -1.27709292e-1f, 1.66295695e-1f, -7.30571517e-2f, 2.10901184e-2f } },
+ { SideRight, { 3.33000782e-1f, -1.89084803e-1f, -2.00042375e-1f, 2.12307769e-2f, -1.14579885e-2f } },
+}, X51RearCfg[4] = {
+ { BackLeft, { 3.33000782e-1f, 1.89084803e-1f, -2.00042375e-1f, -2.12307769e-2f, -1.14579885e-2f } },
+ { FrontLeft, { 1.88542860e-1f, 1.27709292e-1f, 1.66295695e-1f, 7.30571517e-2f, 2.10901184e-2f } },
+ { FrontRight, { 1.88542860e-1f, -1.27709292e-1f, 1.66295695e-1f, -7.30571517e-2f, 2.10901184e-2f } },
+ { BackRight, { 3.33000782e-1f, -1.89084803e-1f, -2.00042375e-1f, 2.12307769e-2f, -1.14579885e-2f } },
+}, X61Cfg[6] = {
+ { SideLeft, { 2.04460341e-1f, 2.17177926e-1f, -4.39996780e-2f, -2.60790269e-2f, -6.87239792e-2f } },
+ { FrontLeft, { 1.58923161e-1f, 9.21772680e-2f, 1.59658796e-1f, 6.66278083e-2f, 3.84686854e-2f } },
+ { FrontRight, { 1.58923161e-1f, -9.21772680e-2f, 1.59658796e-1f, -6.66278083e-2f, 3.84686854e-2f } },
+ { SideRight, { 2.04460341e-1f, -2.17177926e-1f, -4.39996780e-2f, 2.60790269e-2f, -6.87239792e-2f } },
+ { BackCenter, { 2.50001688e-1f, 0.00000000e+0f, -2.50000094e-1f, 0.00000000e+0f, 6.05133395e-2f } },
+}, X71Cfg[6] = {
+ { BackLeft, { 2.04124145e-1f, 1.08880247e-1f, -1.88586120e-1f, -1.29099444e-1f, 7.45355993e-2f, 3.73460789e-2f, 0.00000000e+0f } },
+ { SideLeft, { 2.04124145e-1f, 2.17760495e-1f, 0.00000000e+0f, 0.00000000e+0f, -1.49071198e-1f, -3.73460789e-2f, 0.00000000e+0f } },
+ { FrontLeft, { 2.04124145e-1f, 1.08880247e-1f, 1.88586120e-1f, 1.29099444e-1f, 7.45355993e-2f, 3.73460789e-2f, 0.00000000e+0f } },
+ { FrontRight, { 2.04124145e-1f, -1.08880247e-1f, 1.88586120e-1f, -1.29099444e-1f, 7.45355993e-2f, -3.73460789e-2f, 0.00000000e+0f } },
+ { SideRight, { 2.04124145e-1f, -2.17760495e-1f, 0.00000000e+0f, 0.00000000e+0f, -1.49071198e-1f, 3.73460789e-2f, 0.00000000e+0f } },
+ { BackRight, { 2.04124145e-1f, -1.08880247e-1f, -1.88586120e-1f, 1.29099444e-1f, 7.45355993e-2f, -3.73460789e-2f, 0.00000000e+0f } },
+};
+
+void InitNearFieldCtrl(ALCdevice *device, ALfloat ctrl_dist, ALsizei order,
+ const al::span<const ALuint,MAX_AMBI_ORDER+1> chans_per_order)
+{
+ /* NFC is only used when AvgSpeakerDist is greater than 0. */
+ const char *devname{device->DeviceName.c_str()};
+ if(!GetConfigValueBool(devname, "decoder", "nfc", 0) || !(ctrl_dist > 0.0f))
+ return;
+
+ device->AvgSpeakerDist = clampf(ctrl_dist, 0.1f, 10.0f);
+ TRACE("Using near-field reference distance: %.2f meters\n", device->AvgSpeakerDist);
+
+ auto iter = std::copy(chans_per_order.begin(), chans_per_order.begin()+order+1,
+ std::begin(device->NumChannelsPerOrder));
+ std::fill(iter, std::end(device->NumChannelsPerOrder), 0u);
+}
+
+void InitDistanceComp(ALCdevice *device, const AmbDecConf *conf, const ALsizei (&speakermap)[MAX_OUTPUT_CHANNELS])
+{
+ auto get_max = std::bind(maxf, _1,
+ std::bind(std::mem_fn(&AmbDecConf::SpeakerConf::Distance), _2));
+ const ALfloat maxdist{
+ std::accumulate(conf->Speakers.begin(), conf->Speakers.end(), float{0.0f}, get_max)};
+
+ const char *devname{device->DeviceName.c_str()};
+ if(!GetConfigValueBool(devname, "decoder", "distance-comp", 1) || !(maxdist > 0.0f))
+ return;
+
+ const auto distSampleScale = static_cast<ALfloat>(device->Frequency)/SPEEDOFSOUNDMETRESPERSEC;
+ const auto ChanDelay = device->ChannelDelay.as_span();
+ size_t total{0u};
+ for(size_t i{0u};i < conf->Speakers.size();i++)
+ {
+ const AmbDecConf::SpeakerConf &speaker = conf->Speakers[i];
+ const ALsizei chan{speakermap[i]};
+
+        /* Distance compensation only delays in steps of the sample rate, which
+         * is a bit less accurate since the delay time is rounded to the
+         * nearest whole sample, but it's far simpler as it doesn't have to
+         * deal with phase offsets. At 48kHz, for instance, the distance delay
+         * advances in steps of about 7 millimeters.
+ */
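+        /* For example, at 48kHz a speaker sitting 0.5 meters closer than the
+         * farthest one gets floor(0.5 * 48000/343.3 + 0.5) = 70 samples of
+         * delay (using the ~343.3 m/s speed of sound constant).
+         */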
+ ALfloat delay{std::floor((maxdist - speaker.Distance)*distSampleScale + 0.5f)};
+ if(delay > ALfloat{MAX_DELAY_LENGTH-1})
+ {
+ ERR("Delay for speaker \"%s\" exceeds buffer length (%f > %d)\n",
+ speaker.Name.c_str(), delay, MAX_DELAY_LENGTH-1);
+ delay = ALfloat{MAX_DELAY_LENGTH-1};
+ }
+
+ ChanDelay[chan].Length = static_cast<ALsizei>(delay);
+ ChanDelay[chan].Gain = speaker.Distance / maxdist;
+ TRACE("Channel %u \"%s\" distance compensation: %d samples, %f gain\n", chan,
+ speaker.Name.c_str(), ChanDelay[chan].Length, ChanDelay[chan].Gain);
+
+ /* Round up to the next 4th sample, so each channel buffer starts
+ * 16-byte aligned.
+ */
+ total += RoundUp(ChanDelay[chan].Length, 4);
+ }
+
+ if(total > 0)
+ {
+ device->ChannelDelay.setSampleCount(total);
+ ChanDelay[0].Buffer = device->ChannelDelay.getSamples();
+ auto set_bufptr = [](const DistanceComp::DistData &last, const DistanceComp::DistData &cur) -> DistanceComp::DistData
+ {
+ DistanceComp::DistData ret{cur};
+ ret.Buffer = last.Buffer + RoundUp(last.Length, 4);
+ return ret;
+ };
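+        /* For example, delay lengths of 3 and 5 samples produce buffer
+         * offsets of 0 and RoundUp(3,4) = 4 samples, and a following channel
+         * would start at 4 + RoundUp(5,4) = 12, keeping each buffer 16-byte
+         * (4-float) aligned.
+         */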
+ std::partial_sum(ChanDelay.begin(), ChanDelay.end(), ChanDelay.begin(), set_bufptr);
+ }
+}
+
+
+auto GetAmbiScales(AmbiNorm scaletype) noexcept -> const std::array<float,MAX_AMBI_CHANNELS>&
+{
+ if(scaletype == AmbiNorm::FuMa) return AmbiScale::FromFuMa;
+ if(scaletype == AmbiNorm::SN3D) return AmbiScale::FromSN3D;
+ return AmbiScale::FromN3D;
+}
+
+auto GetAmbiLayout(AmbiLayout layouttype) noexcept -> const std::array<int,MAX_AMBI_CHANNELS>&
+{
+ if(layouttype == AmbiLayout::FuMa) return AmbiIndex::FromFuMa;
+ return AmbiIndex::FromACN;
+}
+
+
+void InitPanning(ALCdevice *device)
+{
+ al::span<const ChannelMap> chanmap;
+ ALuint coeffcount{};
+
+ switch(device->FmtChans)
+ {
+ case DevFmtMono:
+ chanmap = MonoCfg;
+ coeffcount = 1;
+ break;
+
+ case DevFmtStereo:
+ chanmap = StereoCfg;
+ coeffcount = 3;
+ break;
+
+ case DevFmtQuad:
+ chanmap = QuadCfg;
+ coeffcount = 3;
+ break;
+
+ case DevFmtX51:
+ chanmap = X51SideCfg;
+ coeffcount = 5;
+ break;
+
+ case DevFmtX51Rear:
+ chanmap = X51RearCfg;
+ coeffcount = 5;
+ break;
+
+ case DevFmtX61:
+ chanmap = X61Cfg;
+ coeffcount = 5;
+ break;
+
+ case DevFmtX71:
+ chanmap = X71Cfg;
+ coeffcount = 7;
+ break;
+
+ case DevFmtAmbi3D:
+ break;
+ }
+
+ if(device->FmtChans == DevFmtAmbi3D)
+ {
+ const char *devname{device->DeviceName.c_str()};
+ const std::array<int,MAX_AMBI_CHANNELS> &acnmap = GetAmbiLayout(device->mAmbiLayout);
+ const std::array<float,MAX_AMBI_CHANNELS> &n3dscale = GetAmbiScales(device->mAmbiScale);
+
+ /* For DevFmtAmbi3D, the ambisonic order is already set. */
+ const size_t count{AmbiChannelsFromOrder(device->mAmbiOrder)};
+ std::transform(acnmap.begin(), acnmap.begin()+count, std::begin(device->Dry.AmbiMap),
+ [&n3dscale](const ALsizei &acn) noexcept -> BFChannelConfig
+ { return BFChannelConfig{1.0f/n3dscale[acn], acn}; }
+ );
+ AllocChannels(device, static_cast<ALuint>(count), 0);
+
+ ALfloat nfc_delay{ConfigValueFloat(devname, "decoder", "nfc-ref-delay").value_or(0.0f)};
+ if(nfc_delay > 0.0f)
+ {
+ static constexpr ALuint chans_per_order[MAX_AMBI_ORDER+1]{ 1, 3, 5, 7 };
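+            /* The config value is a delay in seconds; e.g. a value of 0.003
+             * maps to roughly 0.003 * 343 ~= 1 meter of reference distance.
+             */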
+ InitNearFieldCtrl(device, nfc_delay * SPEEDOFSOUNDMETRESPERSEC, device->mAmbiOrder,
+ chans_per_order);
+ }
+ }
+ else
+ {
+ ChannelDec chancoeffs[MAX_OUTPUT_CHANNELS]{};
+ ALsizei idxmap[MAX_OUTPUT_CHANNELS]{};
+ for(size_t i{0u};i < chanmap.size();++i)
+ {
+ const ALint idx{GetChannelIdxByName(device->RealOut, chanmap[i].ChanName)};
+ if(idx < 0)
+ {
+ ERR("Failed to find %s channel in device\n",
+ GetLabelFromChannel(chanmap[i].ChanName));
+ continue;
+ }
+ idxmap[i] = idx;
+ std::copy_n(chanmap[i].Config, coeffcount, chancoeffs[i]);
+ }
+
+ /* For non-DevFmtAmbi3D, set the ambisonic order given the mixing
+ * channel count. Built-in speaker decoders are always 2D, so just
+ * reverse that calculation.
+ */
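+        /* E.g. a 5-coefficient (second-order 2D) layout gives an order of
+         * (5-1)/2 = 2, and a 7-coefficient layout gives (7-1)/2 = 3.
+         */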
+ device->mAmbiOrder = static_cast<ALsizei>((coeffcount-1) / 2);
+
+ std::transform(AmbiIndex::From2D.begin(), AmbiIndex::From2D.begin()+coeffcount,
+ std::begin(device->Dry.AmbiMap),
+ [](const ALsizei &index) noexcept { return BFChannelConfig{1.0f, index}; }
+ );
+ AllocChannels(device, coeffcount, device->channelsFromFmt());
+
+ TRACE("Enabling %s-order%s ambisonic decoder\n",
+ (coeffcount > 5) ? "third" :
+ (coeffcount > 3) ? "second" : "first",
+ ""
+ );
+ device->AmbiDecoder = al::make_unique<BFormatDec>(coeffcount,
+ static_cast<ALsizei>(chanmap.size()), chancoeffs, idxmap);
+ }
+}
+
+void InitCustomPanning(ALCdevice *device, bool hqdec, const AmbDecConf *conf, const ALsizei (&speakermap)[MAX_OUTPUT_CHANNELS])
+{
+ static constexpr ALuint chans_per_order2d[MAX_AMBI_ORDER+1] = { 1, 2, 2, 2 };
+ static constexpr ALuint chans_per_order3d[MAX_AMBI_ORDER+1] = { 1, 3, 5, 7 };
+
+ if(!hqdec && conf->FreqBands != 1)
+ ERR("Basic renderer uses the high-frequency matrix as single-band (xover_freq = %.0fhz)\n",
+ conf->XOverFreq);
+
+ ALsizei order{(conf->ChanMask > AMBI_2ORDER_MASK) ? 3 :
+ (conf->ChanMask > AMBI_1ORDER_MASK) ? 2 : 1};
+ device->mAmbiOrder = order;
+
+ ALuint count;
+ if((conf->ChanMask&AMBI_PERIPHONIC_MASK))
+ {
+ count = static_cast<ALuint>(AmbiChannelsFromOrder(order));
+ std::transform(AmbiIndex::From3D.begin(), AmbiIndex::From3D.begin()+count,
+ std::begin(device->Dry.AmbiMap),
+ [](const ALsizei &index) noexcept { return BFChannelConfig{1.0f, index}; }
+ );
+ }
+ else
+ {
+ count = static_cast<ALuint>(Ambi2DChannelsFromOrder(order));
+ std::transform(AmbiIndex::From2D.begin(), AmbiIndex::From2D.begin()+count,
+ std::begin(device->Dry.AmbiMap),
+ [](const ALsizei &index) noexcept { return BFChannelConfig{1.0f, index}; }
+ );
+ }
+ AllocChannels(device, count, device->channelsFromFmt());
+
+ TRACE("Enabling %s-band %s-order%s ambisonic decoder\n",
+ (!hqdec || conf->FreqBands == 1) ? "single" : "dual",
+ (conf->ChanMask > AMBI_2ORDER_MASK) ? "third" :
+ (conf->ChanMask > AMBI_1ORDER_MASK) ? "second" : "first",
+ (conf->ChanMask&AMBI_PERIPHONIC_MASK) ? " periphonic" : ""
+ );
+ device->AmbiDecoder = al::make_unique<BFormatDec>(conf, hqdec, count, device->Frequency,
+ speakermap);
+
+ auto accum_spkr_dist = std::bind(std::plus<float>{}, _1,
+ std::bind(std::mem_fn(&AmbDecConf::SpeakerConf::Distance), _2));
+ const ALfloat avg_dist{
+ std::accumulate(conf->Speakers.begin(), conf->Speakers.end(), float{0.0f},
+ accum_spkr_dist) / static_cast<ALfloat>(conf->Speakers.size())
+ };
+ InitNearFieldCtrl(device, avg_dist, order,
+ (conf->ChanMask&AMBI_PERIPHONIC_MASK) ? chans_per_order3d : chans_per_order2d);
+
+ InitDistanceComp(device, conf, speakermap);
+}
+
+void InitHrtfPanning(ALCdevice *device)
+{
+ /* NOTE: In degrees, and azimuth goes clockwise. */
+ static constexpr AngularPoint AmbiPoints[]{
+ { 35.264390f, -45.000000f },
+ { 35.264390f, 45.000000f },
+ { 35.264390f, 135.000000f },
+ { 35.264390f, -135.000000f },
+ { -35.264390f, -45.000000f },
+ { -35.264390f, 45.000000f },
+ { -35.264390f, 135.000000f },
+ { -35.264390f, -135.000000f },
+ { 0.000000f, -20.905157f },
+ { 0.000000f, 20.905157f },
+ { 0.000000f, 159.094843f },
+ { 0.000000f, -159.094843f },
+ { 20.905157f, -90.000000f },
+ { -20.905157f, -90.000000f },
+ { -20.905157f, 90.000000f },
+ { 20.905157f, 90.000000f },
+ { 69.094843f, 0.000000f },
+ { -69.094843f, 0.000000f },
+ { -69.094843f, 180.000000f },
+ { 69.094843f, 180.000000f },
+ };
+ static constexpr ALfloat AmbiMatrix[][MAX_AMBI_CHANNELS]{
+ { 5.00000000e-02f, 5.00000000e-02f, 5.00000000e-02f, 5.00000000e-02f, 6.45497224e-02f, 6.45497224e-02f, 0.00000000e+00f, 6.45497224e-02f, 0.00000000e+00f, 1.48264644e-02f, 6.33865691e-02f, 1.01126676e-01f, -7.36485380e-02f, -1.09260065e-02f, 7.08683387e-02f, -1.01622099e-01f },
+ { 5.00000000e-02f, -5.00000000e-02f, 5.00000000e-02f, 5.00000000e-02f, -6.45497224e-02f, -6.45497224e-02f, 0.00000000e+00f, 6.45497224e-02f, 0.00000000e+00f, -1.48264644e-02f, -6.33865691e-02f, -1.01126676e-01f, -7.36485380e-02f, -1.09260065e-02f, 7.08683387e-02f, -1.01622099e-01f },
+ { 5.00000000e-02f, -5.00000000e-02f, 5.00000000e-02f, -5.00000000e-02f, 6.45497224e-02f, -6.45497224e-02f, 0.00000000e+00f, -6.45497224e-02f, 0.00000000e+00f, -1.48264644e-02f, 6.33865691e-02f, -1.01126676e-01f, -7.36485380e-02f, 1.09260065e-02f, 7.08683387e-02f, 1.01622099e-01f },
+ { 5.00000000e-02f, 5.00000000e-02f, 5.00000000e-02f, -5.00000000e-02f, -6.45497224e-02f, 6.45497224e-02f, 0.00000000e+00f, -6.45497224e-02f, 0.00000000e+00f, 1.48264644e-02f, -6.33865691e-02f, 1.01126676e-01f, -7.36485380e-02f, 1.09260065e-02f, 7.08683387e-02f, 1.01622099e-01f },
+ { 5.00000000e-02f, 5.00000000e-02f, -5.00000000e-02f, 5.00000000e-02f, 6.45497224e-02f, -6.45497224e-02f, 0.00000000e+00f, -6.45497224e-02f, 0.00000000e+00f, 1.48264644e-02f, -6.33865691e-02f, 1.01126676e-01f, 7.36485380e-02f, -1.09260065e-02f, -7.08683387e-02f, -1.01622099e-01f },
+ { 5.00000000e-02f, -5.00000000e-02f, -5.00000000e-02f, 5.00000000e-02f, -6.45497224e-02f, 6.45497224e-02f, 0.00000000e+00f, -6.45497224e-02f, 0.00000000e+00f, -1.48264644e-02f, 6.33865691e-02f, -1.01126676e-01f, 7.36485380e-02f, -1.09260065e-02f, -7.08683387e-02f, -1.01622099e-01f },
+ { 5.00000000e-02f, -5.00000000e-02f, -5.00000000e-02f, -5.00000000e-02f, 6.45497224e-02f, 6.45497224e-02f, 0.00000000e+00f, 6.45497224e-02f, 0.00000000e+00f, -1.48264644e-02f, -6.33865691e-02f, -1.01126676e-01f, 7.36485380e-02f, 1.09260065e-02f, -7.08683387e-02f, 1.01622099e-01f },
+ { 5.00000000e-02f, 5.00000000e-02f, -5.00000000e-02f, -5.00000000e-02f, -6.45497224e-02f, -6.45497224e-02f, 0.00000000e+00f, 6.45497224e-02f, 0.00000000e+00f, 1.48264644e-02f, 6.33865691e-02f, 1.01126676e-01f, 7.36485380e-02f, 1.09260065e-02f, -7.08683387e-02f, 1.01622099e-01f },
+ { 5.00000000e-02f, 3.09016994e-02f, 0.00000000e+00f, 8.09016994e-02f, 6.45497224e-02f, 0.00000000e+00f, -5.59016994e-02f, 0.00000000e+00f, 7.21687836e-02f, 7.76323754e-02f, 0.00000000e+00f, -1.49775925e-01f, 0.00000000e+00f, -2.95083663e-02f, 0.00000000e+00f, 7.76323754e-02f },
+ { 5.00000000e-02f, -3.09016994e-02f, 0.00000000e+00f, 8.09016994e-02f, -6.45497224e-02f, 0.00000000e+00f, -5.59016994e-02f, 0.00000000e+00f, 7.21687836e-02f, -7.76323754e-02f, 0.00000000e+00f, 1.49775925e-01f, 0.00000000e+00f, -2.95083663e-02f, 0.00000000e+00f, 7.76323754e-02f },
+ { 5.00000000e-02f, -3.09016994e-02f, 0.00000000e+00f, -8.09016994e-02f, 6.45497224e-02f, 0.00000000e+00f, -5.59016994e-02f, 0.00000000e+00f, 7.21687836e-02f, -7.76323754e-02f, 0.00000000e+00f, 1.49775925e-01f, 0.00000000e+00f, 2.95083663e-02f, 0.00000000e+00f, -7.76323754e-02f },
+ { 5.00000000e-02f, 3.09016994e-02f, 0.00000000e+00f, -8.09016994e-02f, -6.45497224e-02f, 0.00000000e+00f, -5.59016994e-02f, 0.00000000e+00f, 7.21687836e-02f, 7.76323754e-02f, 0.00000000e+00f, -1.49775925e-01f, 0.00000000e+00f, 2.95083663e-02f, 0.00000000e+00f, -7.76323754e-02f },
+ { 5.00000000e-02f, 8.09016994e-02f, 3.09016994e-02f, 0.00000000e+00f, 0.00000000e+00f, 6.45497224e-02f, -3.45491503e-02f, 0.00000000e+00f, -8.44966837e-02f, -4.79794466e-02f, 0.00000000e+00f, -6.77901327e-02f, 3.03448665e-02f, 0.00000000e+00f, -1.65948192e-01f, 0.00000000e+00f },
+ { 5.00000000e-02f, 8.09016994e-02f, -3.09016994e-02f, 0.00000000e+00f, 0.00000000e+00f, -6.45497224e-02f, -3.45491503e-02f, 0.00000000e+00f, -8.44966837e-02f, -4.79794466e-02f, 0.00000000e+00f, -6.77901327e-02f, -3.03448665e-02f, 0.00000000e+00f, 1.65948192e-01f, 0.00000000e+00f },
+ { 5.00000000e-02f, -8.09016994e-02f, -3.09016994e-02f, 0.00000000e+00f, 0.00000000e+00f, 6.45497224e-02f, -3.45491503e-02f, 0.00000000e+00f, -8.44966837e-02f, 4.79794466e-02f, 0.00000000e+00f, 6.77901327e-02f, -3.03448665e-02f, 0.00000000e+00f, 1.65948192e-01f, 0.00000000e+00f },
+ { 5.00000000e-02f, -8.09016994e-02f, 3.09016994e-02f, 0.00000000e+00f, 0.00000000e+00f, -6.45497224e-02f, -3.45491503e-02f, 0.00000000e+00f, -8.44966837e-02f, 4.79794466e-02f, 0.00000000e+00f, 6.77901327e-02f, 3.03448665e-02f, 0.00000000e+00f, -1.65948192e-01f, 0.00000000e+00f },
+ { 5.00000000e-02f, 0.00000000e+00f, 8.09016994e-02f, 3.09016994e-02f, 0.00000000e+00f, 0.00000000e+00f, 9.04508497e-02f, 6.45497224e-02f, 1.23279000e-02f, 0.00000000e+00f, 0.00000000e+00f, 0.00000000e+00f, 7.94438918e-02f, 1.12611206e-01f, -2.42115150e-02f, 1.25611822e-01f },
+ { 5.00000000e-02f, 0.00000000e+00f, -8.09016994e-02f, 3.09016994e-02f, 0.00000000e+00f, 0.00000000e+00f, 9.04508497e-02f, -6.45497224e-02f, 1.23279000e-02f, 0.00000000e+00f, 0.00000000e+00f, 0.00000000e+00f, -7.94438918e-02f, 1.12611206e-01f, 2.42115150e-02f, 1.25611822e-01f },
+ { 5.00000000e-02f, 0.00000000e+00f, -8.09016994e-02f, -3.09016994e-02f, 0.00000000e+00f, 0.00000000e+00f, 9.04508497e-02f, 6.45497224e-02f, 1.23279000e-02f, 0.00000000e+00f, 0.00000000e+00f, 0.00000000e+00f, -7.94438918e-02f, -1.12611206e-01f, 2.42115150e-02f, -1.25611822e-01f },
+ { 5.00000000e-02f, 0.00000000e+00f, 8.09016994e-02f, -3.09016994e-02f, 0.00000000e+00f, 0.00000000e+00f, 9.04508497e-02f, -6.45497224e-02f, 1.23279000e-02f, 0.00000000e+00f, 0.00000000e+00f, 0.00000000e+00f, 7.94438918e-02f, -1.12611206e-01f, -2.42115150e-02f, -1.25611822e-01f }
+ };
+ static constexpr ALfloat AmbiOrderHFGain1O[MAX_AMBI_ORDER+1]{
+ 3.16227766e+00f, 1.82574186e+00f
+ }, AmbiOrderHFGain2O[MAX_AMBI_ORDER+1]{
+ 2.35702260e+00f, 1.82574186e+00f, 9.42809042e-01f
+ }, AmbiOrderHFGain3O[MAX_AMBI_ORDER+1]{
+ 1.86508671e+00f, 1.60609389e+00f, 1.14205530e+00f, 5.68379553e-01f
+ };
+ static constexpr ALuint ChansPerOrder[MAX_AMBI_ORDER+1]{ 1, 3, 5, 7 };
+ const ALfloat *AmbiOrderHFGain{AmbiOrderHFGain1O};
+
+ static_assert(al::size(AmbiPoints) == al::size(AmbiMatrix), "Ambisonic HRTF mismatch");
+
+ /* Don't bother with HOA when using full HRTF rendering. Nothing needs it,
+ * and it eases the CPU/memory load.
+ */
+ device->mRenderMode = HrtfRender;
+ ALsizei ambi_order{1};
+ if(auto modeopt = ConfigValueStr(device->DeviceName.c_str(), nullptr, "hrtf-mode"))
+ {
+ const char *mode{modeopt->c_str()};
+ if(strcasecmp(mode, "basic") == 0)
+ {
+ ERR("HRTF mode \"%s\" deprecated, substituting \"%s\"\n", mode, "ambi2");
+ mode = "ambi2";
+ }
+
+ if(strcasecmp(mode, "full") == 0)
+ device->mRenderMode = HrtfRender;
+ else if(strcasecmp(mode, "ambi1") == 0)
+ {
+ device->mRenderMode = NormalRender;
+ ambi_order = 1;
+ }
+ else if(strcasecmp(mode, "ambi2") == 0)
+ {
+ device->mRenderMode = NormalRender;
+ ambi_order = 2;
+ }
+ else if(strcasecmp(mode, "ambi3") == 0)
+ {
+ device->mRenderMode = NormalRender;
+ ambi_order = 3;
+ }
+ else
+ ERR("Unexpected hrtf-mode: %s\n", mode);
+ }
+ TRACE("%s HRTF rendering enabled, using \"%s\"\n",
+ (device->mRenderMode == HrtfRender) ? "Full" :
+ (ambi_order >= 3) ? "Third-Order" :
+ (ambi_order == 2) ? "Second-Order" :
+ (ambi_order == 1) ? "First-Order" : "Unknown",
+ device->HrtfName.c_str());
+
+ if(ambi_order >= 3)
+ AmbiOrderHFGain = AmbiOrderHFGain3O;
+ else if(ambi_order == 2)
+ AmbiOrderHFGain = AmbiOrderHFGain2O;
+ else if(ambi_order == 1)
+ AmbiOrderHFGain = AmbiOrderHFGain1O;
+ device->mAmbiOrder = ambi_order;
+
+ const size_t count{AmbiChannelsFromOrder(ambi_order)};
+ device->mHrtfState = DirectHrtfState::Create(count);
+
+ std::transform(AmbiIndex::From3D.begin(), AmbiIndex::From3D.begin()+count,
+ std::begin(device->Dry.AmbiMap),
+ [](const ALsizei &index) noexcept { return BFChannelConfig{1.0f, index}; }
+ );
+ AllocChannels(device, static_cast<ALuint>(count), device->channelsFromFmt());
+
+ BuildBFormatHrtf(device->mHrtf, device->mHrtfState.get(), static_cast<ALuint>(count),
+ AmbiPoints, AmbiMatrix, al::size(AmbiPoints), AmbiOrderHFGain);
+
+ HrtfEntry *Hrtf{device->mHrtf};
+ InitNearFieldCtrl(device, Hrtf->field[0].distance, ambi_order, ChansPerOrder);
+}
+
+void InitUhjPanning(ALCdevice *device)
+{
+ /* UHJ is always 2D first-order. */
+ static constexpr size_t count{Ambi2DChannelsFromOrder(1)};
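+    /* That's 3 ambisonic channels (W, X and Y); the FuMa-to-ACN index map and
+     * scaling are applied below.
+     */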
+
+ device->mAmbiOrder = 1;
+
+ auto acnmap_end = AmbiIndex::FromFuMa.begin() + count;
+ std::transform(AmbiIndex::FromFuMa.begin(), acnmap_end, std::begin(device->Dry.AmbiMap),
+ [](const ALsizei &acn) noexcept -> BFChannelConfig
+ { return BFChannelConfig{1.0f/AmbiScale::FromFuMa[acn], acn}; }
+ );
+ AllocChannels(device, ALuint{count}, device->channelsFromFmt());
+}
+
+} // namespace
+
+void aluInitRenderer(ALCdevice *device, ALint hrtf_id, HrtfRequestMode hrtf_appreq, HrtfRequestMode hrtf_userreq)
+{
+ /* Hold the HRTF the device last used, in case it's used again. */
+ HrtfEntry *old_hrtf{device->mHrtf};
+
+ device->mHrtfState = nullptr;
+ device->mHrtf = nullptr;
+ device->HrtfName.clear();
+ device->mRenderMode = NormalRender;
+
+ if(device->FmtChans != DevFmtStereo)
+ {
+ if(old_hrtf)
+ old_hrtf->DecRef();
+ old_hrtf = nullptr;
+ if(hrtf_appreq == Hrtf_Enable)
+ device->HrtfStatus = ALC_HRTF_UNSUPPORTED_FORMAT_SOFT;
+
+ const char *layout{nullptr};
+ switch(device->FmtChans)
+ {
+ case DevFmtQuad: layout = "quad"; break;
+ case DevFmtX51: /* fall-through */
+ case DevFmtX51Rear: layout = "surround51"; break;
+ case DevFmtX61: layout = "surround61"; break;
+ case DevFmtX71: layout = "surround71"; break;
+ /* Mono, Stereo, and Ambisonics output don't use custom decoders. */
+ case DevFmtMono:
+ case DevFmtStereo:
+ case DevFmtAmbi3D:
+ break;
+ }
+
+ const char *devname{device->DeviceName.c_str()};
+ ALsizei speakermap[MAX_OUTPUT_CHANNELS];
+ AmbDecConf *pconf{nullptr};
+ AmbDecConf conf{};
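+        /* A custom decoder is selected through the config, e.g. (illustrative
+         * path):
+         *
+         *   [decoder]
+         *   surround51 = /path/to/speakers.ambdec
+         */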
+ if(layout)
+ {
+ if(auto decopt = ConfigValueStr(devname, "decoder", layout))
+ {
+ if(!conf.load(decopt->c_str()))
+ ERR("Failed to load layout file %s\n", decopt->c_str());
+ else if(conf.Speakers.size() > MAX_OUTPUT_CHANNELS)
+ ERR("Unsupported speaker count %zu (max %d)\n", conf.Speakers.size(),
+ MAX_OUTPUT_CHANNELS);
+ else if(conf.ChanMask > AMBI_3ORDER_MASK)
+ ERR("Unsupported channel mask 0x%04x (max 0x%x)\n", conf.ChanMask,
+ AMBI_3ORDER_MASK);
+ else if(MakeSpeakerMap(device, &conf, speakermap))
+ pconf = &conf;
+ }
+ }
+
+ if(!pconf)
+ InitPanning(device);
+ else
+ {
+ int hqdec{GetConfigValueBool(devname, "decoder", "hq-mode", 0)};
+ InitCustomPanning(device, !!hqdec, pconf, speakermap);
+ }
+ if(device->AmbiDecoder)
+ device->PostProcess = ProcessAmbiDec;
+ return;
+ }
+
+ bool headphones{device->IsHeadphones != AL_FALSE};
+ if(device->Type != Loopback)
+ {
+ if(auto modeopt = ConfigValueStr(device->DeviceName.c_str(), nullptr, "stereo-mode"))
+ {
+ const char *mode{modeopt->c_str()};
+ if(strcasecmp(mode, "headphones") == 0)
+ headphones = true;
+ else if(strcasecmp(mode, "speakers") == 0)
+ headphones = false;
+ else if(strcasecmp(mode, "auto") != 0)
+ ERR("Unexpected stereo-mode: %s\n", mode);
+ }
+ }
+
+ if(hrtf_userreq == Hrtf_Default)
+ {
+ bool usehrtf = (headphones && hrtf_appreq != Hrtf_Disable) ||
+ (hrtf_appreq == Hrtf_Enable);
+ if(!usehrtf) goto no_hrtf;
+
+ device->HrtfStatus = ALC_HRTF_ENABLED_SOFT;
+ if(headphones && hrtf_appreq != Hrtf_Disable)
+ device->HrtfStatus = ALC_HRTF_HEADPHONES_DETECTED_SOFT;
+ }
+ else
+ {
+ if(hrtf_userreq != Hrtf_Enable)
+ {
+ if(hrtf_appreq == Hrtf_Enable)
+ device->HrtfStatus = ALC_HRTF_DENIED_SOFT;
+ goto no_hrtf;
+ }
+ device->HrtfStatus = ALC_HRTF_REQUIRED_SOFT;
+ }
+
+ if(device->HrtfList.empty())
+ device->HrtfList = EnumerateHrtf(device->DeviceName.c_str());
+
+ if(hrtf_id >= 0 && static_cast<size_t>(hrtf_id) < device->HrtfList.size())
+ {
+ const EnumeratedHrtf &entry = device->HrtfList[hrtf_id];
+ HrtfEntry *hrtf{GetLoadedHrtf(entry.hrtf)};
+ if(hrtf && hrtf->sampleRate == device->Frequency)
+ {
+ device->mHrtf = hrtf;
+ device->HrtfName = entry.name;
+ }
+ else if(hrtf)
+ hrtf->DecRef();
+ }
+
+ if(!device->mHrtf)
+ {
+ auto find_hrtf = [device](const EnumeratedHrtf &entry) -> bool
+ {
+ HrtfEntry *hrtf{GetLoadedHrtf(entry.hrtf)};
+ if(!hrtf) return false;
+ if(hrtf->sampleRate != device->Frequency)
+ {
+ hrtf->DecRef();
+ return false;
+ }
+ device->mHrtf = hrtf;
+ device->HrtfName = entry.name;
+ return true;
+ };
+ std::find_if(device->HrtfList.cbegin(), device->HrtfList.cend(), find_hrtf);
+ }
+
+ if(device->mHrtf)
+ {
+ if(old_hrtf)
+ old_hrtf->DecRef();
+ old_hrtf = nullptr;
+
+ InitHrtfPanning(device);
+ device->PostProcess = ProcessHrtf;
+ return;
+ }
+ device->HrtfStatus = ALC_HRTF_UNSUPPORTED_FORMAT_SOFT;
+
+no_hrtf:
+ if(old_hrtf)
+ old_hrtf->DecRef();
+ old_hrtf = nullptr;
+
+ device->mRenderMode = StereoPair;
+
+ if(device->Type != Loopback)
+ {
+ if(auto cflevopt = ConfigValueInt(device->DeviceName.c_str(), nullptr, "cf_level"))
+ {
+ if(*cflevopt > 0 && *cflevopt <= 6)
+ {
+ device->Bs2b = al::make_unique<bs2b>();
+ bs2b_set_params(device->Bs2b.get(), *cflevopt, device->Frequency);
+ TRACE("BS2B enabled\n");
+ InitPanning(device);
+ device->PostProcess = ProcessBs2b;
+ return;
+ }
+ }
+ }
+
+ if(auto encopt = ConfigValueStr(device->DeviceName.c_str(), nullptr, "stereo-encoding"))
+ {
+ const char *mode{encopt->c_str()};
+ if(strcasecmp(mode, "uhj") == 0)
+ device->mRenderMode = NormalRender;
+ else if(strcasecmp(mode, "panpot") != 0)
+ ERR("Unexpected stereo-encoding: %s\n", mode);
+ }
+ if(device->mRenderMode == NormalRender)
+ {
+ device->Uhj_Encoder = al::make_unique<Uhj2Encoder>();
+ TRACE("UHJ enabled\n");
+ InitUhjPanning(device);
+ device->PostProcess = ProcessUhj;
+ return;
+ }
+
+ TRACE("Stereo rendering\n");
+ InitPanning(device);
+ device->PostProcess = ProcessAmbiDec;
+}
+
+
+void aluInitEffectPanning(ALeffectslot *slot, ALCdevice *device)
+{
+ const size_t count{AmbiChannelsFromOrder(device->mAmbiOrder)};
+ slot->MixBuffer.resize(count);
+ slot->MixBuffer.shrink_to_fit();
+
+ auto acnmap_end = AmbiIndex::From3D.begin() + count;
+ auto iter = std::transform(AmbiIndex::From3D.begin(), acnmap_end, slot->Wet.AmbiMap.begin(),
+ [](const ALsizei &acn) noexcept -> BFChannelConfig
+ { return BFChannelConfig{1.0f, acn}; }
+ );
+ std::fill(iter, slot->Wet.AmbiMap.end(), BFChannelConfig{});
+ slot->Wet.Buffer = {slot->MixBuffer.data(), slot->MixBuffer.size()};
+}
+
+
+void CalcAmbiCoeffs(const ALfloat y, const ALfloat z, const ALfloat x, const ALfloat spread,
+ ALfloat (&coeffs)[MAX_AMBI_CHANNELS])
+{
+ /* Zeroth-order */
+ coeffs[0] = 1.0f; /* ACN 0 = 1 */
+ /* First-order */
+ coeffs[1] = 1.732050808f * y; /* ACN 1 = sqrt(3) * Y */
+ coeffs[2] = 1.732050808f * z; /* ACN 2 = sqrt(3) * Z */
+ coeffs[3] = 1.732050808f * x; /* ACN 3 = sqrt(3) * X */
+ /* Second-order */
+ coeffs[4] = 3.872983346f * x * y; /* ACN 4 = sqrt(15) * X * Y */
+ coeffs[5] = 3.872983346f * y * z; /* ACN 5 = sqrt(15) * Y * Z */
+ coeffs[6] = 1.118033989f * (z*z*3.0f - 1.0f); /* ACN 6 = sqrt(5)/2 * (3*Z*Z - 1) */
+ coeffs[7] = 3.872983346f * x * z; /* ACN 7 = sqrt(15) * X * Z */
+ coeffs[8] = 1.936491673f * (x*x - y*y); /* ACN 8 = sqrt(15)/2 * (X*X - Y*Y) */
+ /* Third-order */
+ coeffs[9] = 2.091650066f * y * (x*x*3.0f - y*y); /* ACN 9 = sqrt(35/8) * Y * (3*X*X - Y*Y) */
+ coeffs[10] = 10.246950766f * z * x * y; /* ACN 10 = sqrt(105) * Z * X * Y */
+ coeffs[11] = 1.620185175f * y * (z*z*5.0f - 1.0f); /* ACN 11 = sqrt(21/8) * Y * (5*Z*Z - 1) */
+ coeffs[12] = 1.322875656f * z * (z*z*5.0f - 3.0f); /* ACN 12 = sqrt(7)/2 * Z * (5*Z*Z - 3) */
+ coeffs[13] = 1.620185175f * x * (z*z*5.0f - 1.0f); /* ACN 13 = sqrt(21/8) * X * (5*Z*Z - 1) */
+ coeffs[14] = 5.123475383f * z * (x*x - y*y); /* ACN 14 = sqrt(105)/2 * Z * (X*X - Y*Y) */
+ coeffs[15] = 2.091650066f * x * (x*x - y*y*3.0f); /* ACN 15 = sqrt(35/8) * X * (X*X - 3*Y*Y) */
+ /* Fourth-order */
+ /* ACN 16 = sqrt(35)*3/2 * X * Y * (X*X - Y*Y) */
+ /* ACN 17 = sqrt(35/2)*3/2 * (3*X*X - Y*Y) * Y * Z */
+ /* ACN 18 = sqrt(5)*3/2 * X * Y * (7*Z*Z - 1) */
+ /* ACN 19 = sqrt(5/2)*3/2 * Y * Z * (7*Z*Z - 3) */
+ /* ACN 20 = 3/8 * (35*Z*Z*Z*Z - 30*Z*Z + 3) */
+ /* ACN 21 = sqrt(5/2)*3/2 * X * Z * (7*Z*Z - 3) */
+ /* ACN 22 = sqrt(5)*3/4 * (X*X - Y*Y) * (7*Z*Z - 1) */
+ /* ACN 23 = sqrt(35/2)*3/2 * (X*X - 3*Y*Y) * X * Z */
+ /* ACN 24 = sqrt(35)*3/8 * (X*X*X*X - 6*X*X*Y*Y + Y*Y*Y*Y) */
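+    /* For example, a unit direction along +X (x=1, y=z=0) yields coeffs[0]=1,
+     * coeffs[3]=sqrt(3), coeffs[6]=-sqrt(5)/2, coeffs[8]=sqrt(15)/2,
+     * coeffs[13]=-sqrt(21/8) and coeffs[15]=sqrt(35/8), with every Y- or
+     * Z-dependent term evaluating to zero.
+     */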
+
+ if(spread > 0.0f)
+ {
+        /* Implement the spread by modeling a spherical source that subtends
+         * the given spread angle. See:
+ * http://www.ppsloan.org/publications/StupidSH36.pdf - Appendix A3
+ *
+ * When adjusted for N3D normalization instead of SN3D, these
+ * calculations are:
+ *
+ * ZH0 = -sqrt(pi) * (-1+ca);
+ * ZH1 = 0.5*sqrt(pi) * sa*sa;
+ * ZH2 = -0.5*sqrt(pi) * ca*(-1+ca)*(ca+1);
+ * ZH3 = -0.125*sqrt(pi) * (-1+ca)*(ca+1)*(5*ca*ca - 1);
+ * ZH4 = -0.125*sqrt(pi) * ca*(-1+ca)*(ca+1)*(7*ca*ca - 3);
+ * ZH5 = -0.0625*sqrt(pi) * (-1+ca)*(ca+1)*(21*ca*ca*ca*ca - 14*ca*ca + 1);
+ *
+ * The gain of the source is compensated for size, so that the
+ * loudness doesn't depend on the spread. Thus:
+ *
+ * ZH0 = 1.0f;
+ * ZH1 = 0.5f * (ca+1.0f);
+ * ZH2 = 0.5f * (ca+1.0f)*ca;
+ * ZH3 = 0.125f * (ca+1.0f)*(5.0f*ca*ca - 1.0f);
+ * ZH4 = 0.125f * (ca+1.0f)*(7.0f*ca*ca - 3.0f)*ca;
+ * ZH5 = 0.0625f * (ca+1.0f)*(21.0f*ca*ca*ca*ca - 14.0f*ca*ca + 1.0f);
+ */
+ ALfloat ca = std::cos(spread * 0.5f);
+ /* Increase the source volume by up to +3dB for a full spread. */
+ ALfloat scale = std::sqrt(1.0f + spread/al::MathDefs<float>::Tau());
+
+ ALfloat ZH0_norm = scale;
+ ALfloat ZH1_norm = 0.5f * (ca+1.f) * scale;
+ ALfloat ZH2_norm = 0.5f * (ca+1.f)*ca * scale;
+ ALfloat ZH3_norm = 0.125f * (ca+1.f)*(5.f*ca*ca-1.f) * scale;
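+        /* For a full spread (spread == Tau), ca = cos(pi) = -1, so every
+         * higher-order ZH term collapses to zero and only the W component
+         * remains, boosted by scale = sqrt(2) (about +3dB).
+         */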
+
+ /* Zeroth-order */
+ coeffs[0] *= ZH0_norm;
+ /* First-order */
+ coeffs[1] *= ZH1_norm;
+ coeffs[2] *= ZH1_norm;
+ coeffs[3] *= ZH1_norm;
+ /* Second-order */
+ coeffs[4] *= ZH2_norm;
+ coeffs[5] *= ZH2_norm;
+ coeffs[6] *= ZH2_norm;
+ coeffs[7] *= ZH2_norm;
+ coeffs[8] *= ZH2_norm;
+ /* Third-order */
+ coeffs[9] *= ZH3_norm;
+ coeffs[10] *= ZH3_norm;
+ coeffs[11] *= ZH3_norm;
+ coeffs[12] *= ZH3_norm;
+ coeffs[13] *= ZH3_norm;
+ coeffs[14] *= ZH3_norm;
+ coeffs[15] *= ZH3_norm;
+ }
+}
+
+void ComputePanGains(const MixParams *mix, const ALfloat *RESTRICT coeffs, ALfloat ingain, ALfloat (&gains)[MAX_OUTPUT_CHANNELS])
+{
+ auto ambimap = mix->AmbiMap.cbegin();
+
+ auto iter = std::transform(ambimap, ambimap+mix->Buffer.size(), std::begin(gains),
+ [coeffs,ingain](const BFChannelConfig &chanmap) noexcept -> ALfloat
+ {
+ ASSUME(chanmap.Index >= 0);
+ return chanmap.Scale * coeffs[chanmap.Index] * ingain;
+ }
+ );
+ std::fill(iter, std::end(gains), 0.0f);
+}
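+
+/* A rough usage sketch (variable names are illustrative): the coefficients
+ * from CalcAmbiCoeffs feed ComputePanGains to produce per-channel gains for a
+ * mix target such as the device's dry buffer:
+ *
+ *   ALfloat coeffs[MAX_AMBI_CHANNELS];
+ *   CalcAmbiCoeffs(y, z, x, spread, coeffs);
+ *
+ *   ALfloat gains[MAX_OUTPUT_CHANNELS];
+ *   ComputePanGains(&device->Dry, coeffs, source_gain, gains);
+ */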
diff --git a/alc/ringbuffer.cpp b/alc/ringbuffer.cpp
new file mode 100644
index 00000000..6ef576a5
--- /dev/null
+++ b/alc/ringbuffer.cpp
@@ -0,0 +1,253 @@
+/**
+ * OpenAL cross platform audio library
+ * Copyright (C) 1999-2007 by authors.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Or go to http://www.gnu.org/copyleft/lgpl.html
+ */
+
+#include "config.h"
+
+#include <cstring>
+#include <cstdlib>
+#include <climits>
+
+#include <algorithm>
+
+#include "ringbuffer.h"
+#include "atomic.h"
+#include "threads.h"
+#include "almalloc.h"
+#include "compat.h"
+
+
+RingBufferPtr CreateRingBuffer(size_t sz, size_t elem_sz, int limit_writes)
+{
+ size_t power_of_two{0u};
+ if(sz > 0)
+ {
+ power_of_two = sz;
+ power_of_two |= power_of_two>>1;
+ power_of_two |= power_of_two>>2;
+ power_of_two |= power_of_two>>4;
+ power_of_two |= power_of_two>>8;
+ power_of_two |= power_of_two>>16;
+#if SIZE_MAX > UINT_MAX
+ power_of_two |= power_of_two>>32;
+#endif
+ }
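+    /* E.g. sz = 1000 (0x3E8) smears to 0x3FF = 1023, and sz = 1024 (already a
+     * power of two) smears to 2047; the increment below then gives 1024 and
+     * 2048 respectively, so the requested count always fits.
+     */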
+ ++power_of_two;
+ if(power_of_two < sz) return nullptr;
+
+ const size_t bufbytes{power_of_two * elem_sz};
+ RingBufferPtr rb{new (al_calloc(16, sizeof(*rb) + bufbytes)) RingBuffer{bufbytes}};
+ rb->mWriteSize = limit_writes ? sz : (power_of_two-1);
+ rb->mSizeMask = power_of_two - 1;
+ rb->mElemSize = elem_sz;
+
+ return rb;
+}
+
+void RingBuffer::reset() noexcept
+{
+ mWritePtr.store(0, std::memory_order_relaxed);
+ mReadPtr.store(0, std::memory_order_relaxed);
+ std::fill_n(mBuffer.begin(), (mSizeMask+1)*mElemSize, al::byte{});
+}
+
+
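+/* The space calculations work modulo the power-of-two buffer size. For
+ * example, with mSizeMask = 7 (capacity 8) and mWriteSize = 7, a read offset
+ * of 2 and a write offset of 5 give readSpace() = (5-2)&7 = 3 and
+ * writeSpace() = ((2+7-7)-5-1)&7 = 4, so the readable and writable counts
+ * never sum to more than mWriteSize.
+ */
+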
+size_t RingBuffer::readSpace() const noexcept
+{
+ size_t w = mWritePtr.load(std::memory_order_acquire);
+ size_t r = mReadPtr.load(std::memory_order_acquire);
+ return (w-r) & mSizeMask;
+}
+
+size_t RingBuffer::writeSpace() const noexcept
+{
+ size_t w = mWritePtr.load(std::memory_order_acquire);
+ size_t r = mReadPtr.load(std::memory_order_acquire) + mWriteSize - mSizeMask;
+ return (r-w-1) & mSizeMask;
+}
+
+
+size_t RingBuffer::read(void *dest, size_t cnt) noexcept
+{
+ const size_t free_cnt{readSpace()};
+ if(free_cnt == 0) return 0;
+
+ const size_t to_read{std::min(cnt, free_cnt)};
+ size_t read_ptr{mReadPtr.load(std::memory_order_relaxed) & mSizeMask};
+
+ size_t n1, n2;
+ const size_t cnt2{read_ptr + to_read};
+ if(cnt2 > mSizeMask+1)
+ {
+ n1 = mSizeMask+1 - read_ptr;
+ n2 = cnt2 & mSizeMask;
+ }
+ else
+ {
+ n1 = to_read;
+ n2 = 0;
+ }
+
+ auto outiter = std::copy_n(mBuffer.begin() + read_ptr*mElemSize, n1*mElemSize,
+ static_cast<al::byte*>(dest));
+ read_ptr += n1;
+ if(n2 > 0)
+ {
+ std::copy_n(mBuffer.begin(), n2*mElemSize, outiter);
+ read_ptr += n2;
+ }
+ mReadPtr.store(read_ptr, std::memory_order_release);
+ return to_read;
+}
+
+size_t RingBuffer::peek(void *dest, size_t cnt) const noexcept
+{
+ const size_t free_cnt{readSpace()};
+ if(free_cnt == 0) return 0;
+
+ const size_t to_read{std::min(cnt, free_cnt)};
+ size_t read_ptr{mReadPtr.load(std::memory_order_relaxed) & mSizeMask};
+
+ size_t n1, n2;
+ const size_t cnt2{read_ptr + to_read};
+ if(cnt2 > mSizeMask+1)
+ {
+ n1 = mSizeMask+1 - read_ptr;
+ n2 = cnt2 & mSizeMask;
+ }
+ else
+ {
+ n1 = to_read;
+ n2 = 0;
+ }
+
+ auto outiter = std::copy_n(mBuffer.begin() + read_ptr*mElemSize, n1*mElemSize,
+ static_cast<al::byte*>(dest));
+ if(n2 > 0)
+ std::copy_n(mBuffer.begin(), n2*mElemSize, outiter);
+ return to_read;
+}
+
+size_t RingBuffer::write(const void *src, size_t cnt) noexcept
+{
+ const size_t free_cnt{writeSpace()};
+ if(free_cnt == 0) return 0;
+
+ const size_t to_write{std::min(cnt, free_cnt)};
+ size_t write_ptr{mWritePtr.load(std::memory_order_relaxed) & mSizeMask};
+
+ size_t n1, n2;
+ const size_t cnt2{write_ptr + to_write};
+ if(cnt2 > mSizeMask+1)
+ {
+ n1 = mSizeMask+1 - write_ptr;
+ n2 = cnt2 & mSizeMask;
+ }
+ else
+ {
+ n1 = to_write;
+ n2 = 0;
+ }
+
+ auto srcbytes = static_cast<const al::byte*>(src);
+ std::copy_n(srcbytes, n1*mElemSize, mBuffer.begin() + write_ptr*mElemSize);
+ write_ptr += n1;
+ if(n2 > 0)
+ {
+ std::copy_n(srcbytes + n1*mElemSize, n2*mElemSize, mBuffer.begin());
+ write_ptr += n2;
+ }
+ mWritePtr.store(write_ptr, std::memory_order_release);
+ return to_write;
+}
+
+
+void RingBuffer::readAdvance(size_t cnt) noexcept
+{
+ mReadPtr.fetch_add(cnt, std::memory_order_acq_rel);
+}
+
+void RingBuffer::writeAdvance(size_t cnt) noexcept
+{
+ mWritePtr.fetch_add(cnt, std::memory_order_acq_rel);
+}
+
+
+ll_ringbuffer_data_pair RingBuffer::getReadVector() const noexcept
+{
+ ll_ringbuffer_data_pair ret;
+
+ size_t w{mWritePtr.load(std::memory_order_acquire)};
+ size_t r{mReadPtr.load(std::memory_order_acquire)};
+ w &= mSizeMask;
+ r &= mSizeMask;
+ const size_t free_cnt{(w-r) & mSizeMask};
+
+ const size_t cnt2{r + free_cnt};
+ if(cnt2 > mSizeMask+1)
+ {
+ /* Two part vector: the rest of the buffer after the current read ptr,
+ * plus some from the start of the buffer. */
+ ret.first.buf = const_cast<al::byte*>(mBuffer.data() + r*mElemSize);
+ ret.first.len = mSizeMask+1 - r;
+ ret.second.buf = const_cast<al::byte*>(mBuffer.data());
+ ret.second.len = cnt2 & mSizeMask;
+ }
+ else
+ {
+ /* Single part vector: just the rest of the buffer */
+ ret.first.buf = const_cast<al::byte*>(mBuffer.data() + r*mElemSize);
+ ret.first.len = free_cnt;
+ ret.second.buf = nullptr;
+ ret.second.len = 0;
+ }
+
+ return ret;
+}
+
+ll_ringbuffer_data_pair RingBuffer::getWriteVector() const noexcept
+{
+ ll_ringbuffer_data_pair ret;
+
+ size_t w{mWritePtr.load(std::memory_order_acquire)};
+ size_t r{mReadPtr.load(std::memory_order_acquire) + mWriteSize - mSizeMask};
+ w &= mSizeMask;
+ r &= mSizeMask;
+ const size_t free_cnt{(r-w-1) & mSizeMask};
+
+ const size_t cnt2{w + free_cnt};
+ if(cnt2 > mSizeMask+1)
+ {
+ /* Two part vector: the rest of the buffer after the current write ptr,
+ * plus some from the start of the buffer. */
+ ret.first.buf = const_cast<al::byte*>(mBuffer.data() + w*mElemSize);
+ ret.first.len = mSizeMask+1 - w;
+ ret.second.buf = const_cast<al::byte*>(mBuffer.data());
+ ret.second.len = cnt2 & mSizeMask;
+ }
+ else
+ {
+ ret.first.buf = const_cast<al::byte*>(mBuffer.data() + w*mElemSize);
+ ret.first.len = free_cnt;
+ ret.second.buf = nullptr;
+ ret.second.len = 0;
+ }
+
+ return ret;
+}
diff --git a/alc/ringbuffer.h b/alc/ringbuffer.h
new file mode 100644
index 00000000..84139b66
--- /dev/null
+++ b/alc/ringbuffer.h
@@ -0,0 +1,99 @@
+#ifndef RINGBUFFER_H
+#define RINGBUFFER_H
+
+#include <stddef.h>
+
+#include <atomic>
+#include <memory>
+#include <utility>
+
+#include "albyte.h"
+#include "almalloc.h"
+
+
+/* NOTE: This lockless ringbuffer implementation is copied from JACK, extended
+ * to include an element size. Consequently, parameters and return values for a
+ * size or count are in 'elements', not bytes. Additionally, it only supports
+ * single-consumer/single-provider operation.
+ */
+
+struct ll_ringbuffer_data {
+ al::byte *buf;
+ size_t len;
+};
+using ll_ringbuffer_data_pair = std::pair<ll_ringbuffer_data,ll_ringbuffer_data>;
+
+
+struct RingBuffer {
+ std::atomic<size_t> mWritePtr{0u};
+ std::atomic<size_t> mReadPtr{0u};
+ size_t mWriteSize{0u};
+ size_t mSizeMask{0u};
+ size_t mElemSize{0u};
+
+ al::FlexArray<al::byte, 16> mBuffer;
+
+ RingBuffer(const size_t count) : mBuffer{count} { }
+ RingBuffer(const RingBuffer&) = delete;
+ RingBuffer& operator=(const RingBuffer&) = delete;
+
+ /** Reset the read and write pointers to zero. This is not thread safe. */
+ void reset() noexcept;
+
+ /**
+ * The non-copying data reader. Returns two ringbuffer data pointers that
+     * hold the current readable data. If the readable data is in one
+     * segment, the second segment has zero length.
+ */
+ ll_ringbuffer_data_pair getReadVector() const noexcept;
+ /**
+ * The non-copying data writer. Returns two ringbuffer data pointers that
+     * hold the current writeable data. If the writeable data is in one
+     * segment, the second segment has zero length.
+ */
+ ll_ringbuffer_data_pair getWriteVector() const noexcept;
+
+ /**
+ * Return the number of elements available for reading. This is the number
+ * of elements in front of the read pointer and behind the write pointer.
+ */
+ size_t readSpace() const noexcept;
+ /**
+ * The copying data reader. Copy at most `cnt' elements into `dest'.
+ * Returns the actual number of elements copied.
+ */
+ size_t read(void *dest, size_t cnt) noexcept;
+ /**
+ * The copying data reader w/o read pointer advance. Copy at most `cnt'
+ * elements into `dest'. Returns the actual number of elements copied.
+ */
+ size_t peek(void *dest, size_t cnt) const noexcept;
+ /** Advance the read pointer `cnt' places. */
+ void readAdvance(size_t cnt) noexcept;
+
+ /**
+ * Return the number of elements available for writing. This is the number
+ * of elements in front of the write pointer and behind the read pointer.
+ */
+ size_t writeSpace() const noexcept;
+ /**
+ * The copying data writer. Copy at most `cnt' elements from `src'. Returns
+ * the actual number of elements copied.
+ */
+ size_t write(const void *src, size_t cnt) noexcept;
+ /** Advance the write pointer `cnt' places. */
+ void writeAdvance(size_t cnt) noexcept;
+
+ DEF_PLACE_NEWDEL()
+};
+using RingBufferPtr = std::unique_ptr<RingBuffer>;
+
+
+/**
+ * Create a new ringbuffer to hold at least `sz' elements of `elem_sz' bytes.
+ * The number of elements is rounded up to the next power of two (even if it is
+ * already a power of two, to ensure the requested amount can be written).
+ */
+RingBufferPtr CreateRingBuffer(size_t sz, size_t elem_sz, int limit_writes);
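+
+/* Example (sketch, names illustrative): a queue of up to 256 stereo float
+ * frames, filled by a producer thread and drained by a single consumer:
+ *
+ *   RingBufferPtr rb{CreateRingBuffer(256, 2*sizeof(float), false)};
+ *
+ *   float frame[2]{};
+ *   rb->write(frame, 1);    // producer side
+ *   if(rb->readSpace() > 0)
+ *       rb->read(frame, 1); // consumer side
+ */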
+
+#endif /* RINGBUFFER_H */
diff --git a/alc/uhjfilter.cpp b/alc/uhjfilter.cpp
new file mode 100644
index 00000000..55999647
--- /dev/null
+++ b/alc/uhjfilter.cpp
@@ -0,0 +1,131 @@
+
+#include "config.h"
+
+#include "uhjfilter.h"
+
+#include <algorithm>
+
+#include "alu.h"
+
+namespace {
+
+/* This is the maximum number of samples processed for each inner loop
+ * iteration. */
+#define MAX_UPDATE_SAMPLES 128
+
+
+constexpr ALfloat Filter1CoeffSqr[4] = {
+ 0.479400865589f, 0.876218493539f, 0.976597589508f, 0.997499255936f
+};
+constexpr ALfloat Filter2CoeffSqr[4] = {
+ 0.161758498368f, 0.733028932341f, 0.945349700329f, 0.990599156685f
+};
+
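+/* A single second-order allpass section. Each call applies
+ *
+ *   y[n] = aa*x[n] + aa*y[n-2] - x[n-2]
+ *
+ * i.e. H(z) = (aa - z^-2) / (1 - aa*z^-2), which leaves the magnitude of all
+ * frequencies unchanged while shifting their phase. Chaining four sections
+ * with the coefficients above gives the wideband phase-shift behavior
+ * described in uhjfilter.h.
+ */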
+void allpass_process(AllPassState *state, ALfloat *dst, const ALfloat *src, const ALfloat aa, ALsizei todo)
+{
+ ALfloat z1{state->z[0]};
+ ALfloat z2{state->z[1]};
+ auto proc_sample = [aa,&z1,&z2](ALfloat input) noexcept -> ALfloat
+ {
+ ALfloat output = input*aa + z1;
+ z1 = z2; z2 = output*aa - input;
+ return output;
+ };
+ std::transform(src, src+todo, dst, proc_sample);
+ state->z[0] = z1;
+ state->z[1] = z2;
+}
+
+} // namespace
+
+
+/* NOTE: There seems to be a bit of an inconsistency in how this encoding is
+ * supposed to work. Some references, such as
+ *
+ * http://members.tripod.com/martin_leese/Ambisonic/UHJ_file_format.html
+ *
+ * specify a pre-scaling of sqrt(2) on the W channel input, while other
+ * references, such as
+ *
+ * https://en.wikipedia.org/wiki/Ambisonic_UHJ_format#Encoding.5B1.5D
+ * and
+ * https://wiki.xiph.org/Ambisonics#UHJ_format
+ *
+ * do not. The sqrt(2) scaling is in line with B-Format decoder coefficients
+ * which include such a scaling for the W channel input; however, the original
+ * source for this equation is a 1985 paper by Michael Gerzon, which apparently
+ * does not include the scaling. Applying the extra scaling creates a louder
+ * result with a narrower stereo image compared to not scaling, and I don't
+ * know which is the intended result.
+ */
+
+void Uhj2Encoder::encode(FloatBufferLine &LeftOut, FloatBufferLine &RightOut, FloatBufferLine *InSamples, const ALsizei SamplesToDo)
+{
+ alignas(16) ALfloat D[MAX_UPDATE_SAMPLES], S[MAX_UPDATE_SAMPLES];
+ alignas(16) ALfloat temp[MAX_UPDATE_SAMPLES];
+
+ ASSUME(SamplesToDo > 0);
+
+ auto winput = InSamples[0].cbegin();
+ auto xinput = InSamples[1].cbegin();
+ auto yinput = InSamples[2].cbegin();
+ for(ALsizei base{0};base < SamplesToDo;)
+ {
+ const ALsizei todo{mini(SamplesToDo - base, MAX_UPDATE_SAMPLES)};
+ ASSUME(todo > 0);
+
+ /* D = 0.6554516*Y */
+ std::transform(yinput, yinput+todo, std::begin(temp),
+ [](const float y) noexcept -> float { return 0.6554516f*y; });
+ allpass_process(&mFilter1_Y[0], temp, temp, Filter1CoeffSqr[0], todo);
+ allpass_process(&mFilter1_Y[1], temp, temp, Filter1CoeffSqr[1], todo);
+ allpass_process(&mFilter1_Y[2], temp, temp, Filter1CoeffSqr[2], todo);
+ allpass_process(&mFilter1_Y[3], temp, temp, Filter1CoeffSqr[3], todo);
+ /* NOTE: Filter1 requires a 1 sample delay for the final output, so
+ * take the last processed sample from the previous run as the first
+ * output sample.
+ */
+ D[0] = mLastY;
+ for(ALsizei i{1};i < todo;i++)
+ D[i] = temp[i-1];
+ mLastY = temp[todo-1];
+
+ /* D += j(-0.3420201*W + 0.5098604*X) */
+ std::transform(winput, winput+todo, xinput, std::begin(temp),
+ [](const float w, const float x) noexcept -> float
+ { return -0.3420201f*w + 0.5098604f*x; });
+ allpass_process(&mFilter2_WX[0], temp, temp, Filter2CoeffSqr[0], todo);
+ allpass_process(&mFilter2_WX[1], temp, temp, Filter2CoeffSqr[1], todo);
+ allpass_process(&mFilter2_WX[2], temp, temp, Filter2CoeffSqr[2], todo);
+ allpass_process(&mFilter2_WX[3], temp, temp, Filter2CoeffSqr[3], todo);
+ for(ALsizei i{0};i < todo;i++)
+ D[i] += temp[i];
+
+ /* S = 0.9396926*W + 0.1855740*X */
+ std::transform(winput, winput+todo, xinput, std::begin(temp),
+ [](const float w, const float x) noexcept -> float
+ { return 0.9396926f*w + 0.1855740f*x; });
+ allpass_process(&mFilter1_WX[0], temp, temp, Filter1CoeffSqr[0], todo);
+ allpass_process(&mFilter1_WX[1], temp, temp, Filter1CoeffSqr[1], todo);
+ allpass_process(&mFilter1_WX[2], temp, temp, Filter1CoeffSqr[2], todo);
+ allpass_process(&mFilter1_WX[3], temp, temp, Filter1CoeffSqr[3], todo);
+ S[0] = mLastWX;
+ for(ALsizei i{1};i < todo;i++)
+ S[i] = temp[i-1];
+ mLastWX = temp[todo-1];
+
+ /* Left = (S + D)/2.0 */
+ ALfloat *RESTRICT left = al::assume_aligned<16>(LeftOut.data()+base);
+ for(ALsizei i{0};i < todo;i++)
+ left[i] += (S[i] + D[i]) * 0.5f;
+ /* Right = (S - D)/2.0 */
+ ALfloat *RESTRICT right = al::assume_aligned<16>(RightOut.data()+base);
+ for(ALsizei i{0};i < todo;i++)
+ right[i] += (S[i] - D[i]) * 0.5f;
+
+ winput += todo;
+ xinput += todo;
+ yinput += todo;
+ base += todo;
+ }
+}
diff --git a/alc/uhjfilter.h b/alc/uhjfilter.h
new file mode 100644
index 00000000..53e4f89e
--- /dev/null
+++ b/alc/uhjfilter.h
@@ -0,0 +1,54 @@
+#ifndef UHJFILTER_H
+#define UHJFILTER_H
+
+#include "AL/al.h"
+
+#include "alcmain.h"
+#include "almalloc.h"
+
+
+struct AllPassState {
+ ALfloat z[2]{0.0f, 0.0f};
+};
+
+/* Encoding 2-channel UHJ from B-Format is done as:
+ *
+ * S = 0.9396926*W + 0.1855740*X
+ * D = j(-0.3420201*W + 0.5098604*X) + 0.6554516*Y
+ *
+ * Left = (S + D)/2.0
+ * Right = (S - D)/2.0
+ *
+ * where j is a wide-band +90 degree phase shift.
+ *
+ * The phase shift is done using a Hilbert transform, described here:
+ * https://web.archive.org/web/20060708031958/http://www.biochem.oulu.fi/~oniemita/dsp/hilbert/
+ * It works using 2 sets of 4 chained filters. The first filter chain produces
+ * a phase shift of varying magnitude over a wide range of frequencies, while
+ * the second filter chain produces a phase shift 90 degrees ahead of the
+ * first over the same range.
+ *
+ * Combining these two stages requires the use of three filter chains. S-
+ * channel output uses a Filter1 chain on the W and X channel mix, while the D-
+ * channel output uses a Filter1 chain on the Y channel plus a Filter2 chain on
+ * the W and X channel mix. This results in the W and X input mix on the D-
+ * channel output having the required +90 degree phase shift relative to the
+ * other inputs.
+ */
+
+struct Uhj2Encoder {
+ AllPassState mFilter1_Y[4];
+ AllPassState mFilter2_WX[4];
+ AllPassState mFilter1_WX[4];
+ ALfloat mLastY{0.0f}, mLastWX{0.0f};
+
+ /* Encodes a 2-channel UHJ (stereo-compatible) signal from a B-Format input
+ * signal. The input must use FuMa channel ordering and scaling.
+ */
+ void encode(FloatBufferLine &LeftOut, FloatBufferLine &RightOut, FloatBufferLine *InSamples,
+ const ALsizei SamplesToDo);
+
+ DEF_NEWDEL(Uhj2Encoder)
+};
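+
+/* Usage sketch (names illustrative): given a pointer to three FuMa-ordered,
+ * FuMa-scaled B-Format lines bformat[0..2] = W, X, Y and two output lines for
+ * the real left and right channels:
+ *
+ *   Uhj2Encoder enc;
+ *   enc.encode(leftout, rightout, bformat, todo);
+ */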
+
+#endif /* UHJFILTER_H */
diff --git a/alc/vector.h b/alc/vector.h
new file mode 100644
index 00000000..1b69d6a7
--- /dev/null
+++ b/alc/vector.h
@@ -0,0 +1,15 @@
+#ifndef AL_VECTOR_H
+#define AL_VECTOR_H
+
+#include <vector>
+
+#include "almalloc.h"
+
+namespace al {
+
+template<typename T, size_t alignment=alignof(T)>
+using vector = std::vector<T, al::allocator<T, alignment>>;
+
+} // namespace al
+
+#endif /* AL_VECTOR_H */