path: root/OpenAL32/alSource.c
author     Chris Robinson <[email protected]>    2014-11-22 04:20:17 -0800
committer  Chris Robinson <[email protected]>    2014-11-22 04:20:17 -0800
commit     a27e5e16523e1f6f166410e9992fc40886064eca (patch)
tree       a53e47af1b8afb1afa6c497247c9c462889d9067 /OpenAL32/alSource.c
parent     38383671d7d2a4f143f0ac84b48e8a02b91a1ba2 (diff)
Use a different method for HRTF mixing
This new method mixes sources normally into a 14-channel buffer with the channels placed all around the listener. HRTF is then applied to the channels given their positions and written to a 2-channel buffer, which gets written out to the device.

This method has the benefit that HRTF processing becomes more scalable. The costly HRTF filters are applied to the 14-channel buffer after the mix is done, turning it into a post-process with a fixed overhead. Mixing sources is done with normal non-HRTF methods, so increasing the number of playing sources only incurs normal mixing costs.

Another benefit is that it improves B-Format playback since the soundfield gets mixed into speakers covering all three dimensions, which then get filtered based on their locations.

The main downside to this is that the spatial resolution of the HRTF dataset does not play a big role anymore. However, the hope is that with ambisonics-based panning, the perceptual position of panned sounds will still be good. It is also an option to increase the number of virtual channels for systems that can handle it, or maybe even decrease it for weaker systems.
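To illustrate the idea, a rough sketch of such a post-process is shown below. This is not the code from this commit: NUM_VIRTUAL_CHANNELS, HRIR_LENGTH, VirtualChannelHrir and ApplyHrtfPostProcess are illustrative assumptions, and the real mixer would keep per-channel filter history across buffer updates, which is omitted here. Each virtual channel carries a normal (non-HRTF) mix; its fixed HRIR pair is then convolved into the 2-channel output once per update, independent of how many sources were mixed in.

#include <stddef.h>

#define NUM_VIRTUAL_CHANNELS 14  /* assumed: virtual speakers placed around the listener */
#define HRIR_LENGTH          32  /* assumed: taps per head-related impulse response */

/* Fixed HRIR pair for one virtual channel, chosen from that channel's position. */
typedef struct {
    float left[HRIR_LENGTH];
    float right[HRIR_LENGTH];
} VirtualChannelHrir;

/* Convolve each virtual channel with its HRIR pair and accumulate the result
 * into the stereo output. Filter history from previous updates is ignored for
 * brevity; samples before the start of the buffer are treated as silence. */
static void ApplyHrtfPostProcess(const float *virt[NUM_VIRTUAL_CHANNELS],
                                 const VirtualChannelHrir hrirs[NUM_VIRTUAL_CHANNELS],
                                 float *out_left, float *out_right,
                                 size_t num_samples)
{
    size_t c, i, j;
    for(c = 0;c < NUM_VIRTUAL_CHANNELS;c++)
    {
        for(i = 0;i < num_samples;i++)
        {
            float l = 0.0f, r = 0.0f;
            for(j = 0;j < HRIR_LENGTH && j <= i;j++)
            {
                l += virt[c][i-j] * hrirs[c].left[j];
                r += virt[c][i-j] * hrirs[c].right[j];
            }
            out_left[i]  += l;
            out_right[i] += r;
        }
    }
}

The per-update cost is NUM_VIRTUAL_CHANNELS * HRIR_LENGTH * num_samples multiply-adds no matter how many sources were mixed into the virtual channels, which is the fixed overhead the message refers to; whether to carry FIR history across updates or use a frequency-domain convolution is a separate implementation choice.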
Diffstat (limited to 'OpenAL32/alSource.c')
-rw-r--r--  OpenAL32/alSource.c  15
1 file changed, 2 insertions, 13 deletions
diff --git a/OpenAL32/alSource.c b/OpenAL32/alSource.c
index a716eb74..12bd9436 100644
--- a/OpenAL32/alSource.c
+++ b/OpenAL32/alSource.c
@@ -2597,23 +2597,12 @@ ALvoid SetSourceState(ALsource *Source, ALCcontext *Context, ALenum state)
             voice->Source = Source;
         }

-        voice->Direct.Moving  = AL_FALSE;
+        voice->Direct.Moving = AL_FALSE;
         voice->Direct.Counter = 0;
-        for(i = 0;i < MAX_INPUT_CHANNELS;i++)
-        {
-            ALsizei j;
-            for(j = 0;j < SRC_HISTORY_LENGTH;j++)
-                voice->Direct.Mix.Hrtf.State[i].History[j] = 0.0f;
-            for(j = 0;j < HRIR_LENGTH;j++)
-            {
-                voice->Direct.Mix.Hrtf.State[i].Values[j][0] = 0.0f;
-                voice->Direct.Mix.Hrtf.State[i].Values[j][1] = 0.0f;
-            }
-        }
         for(i = 0;i < (ALsizei)device->NumAuxSends;i++)
         {
-            voice->Send[i].Counter = 0;
             voice->Send[i].Moving = AL_FALSE;
+            voice->Send[i].Counter = 0;
         }

         if(BufferList->buffer->FmtChannels == FmtMono)