author    Chris Robinson <[email protected]>    2019-02-21 17:48:08 -0800
committer Chris Robinson <[email protected]>    2019-02-21 17:48:08 -0800
commit    7a9d67934fbbe9ce8220e23d82fd0b5bababee50 (patch)
tree      32669171d90b3095b32e3dd0f9a0880e4f5f84ac
parent    1eea3cb2d1e555752b8096dd4f235c1e56242452 (diff)
Mix B-Format sources directly to the dry buffer
With this change, FOAOut is used only for reverb output.
-rw-r--r--  Alc/alu.cpp       34
-rw-r--r--  Alc/mixvoice.cpp  21
2 files changed, 34 insertions, 21 deletions
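
For context, a minimal sketch of what the routing change amounts to (the type and function names below are illustrative, not from the codebase): a B-Format voice's direct output now targets the device's main ambisonic dry buffer, leaving FOAOut in use only as the reverb output target.

    // Illustrative sketch only; the real members live on ALCdevice/ALvoice in alu.cpp.
    struct MixTarget {
        float (*Buffer)[1024]; // per-channel sample buffers
        int NumChannels;
    };

    struct DeviceSketch {
        MixTarget Dry;    // main (possibly higher-order) ambisonic mix buffer
        MixTarget FOAOut; // first-order buffer; after this change, reverb-only
    };

    struct VoiceSketch {
        struct { float (*Buffer)[1024]; int Channels; } Direct;
    };

    // Before this commit, B-Format sources always rendered to FOAOut; now they
    // render straight to the dry buffer like any other source.
    void RouteBFormatDirect(VoiceSketch &voice, const DeviceSketch &dev)
    {
        voice.Direct.Buffer = dev.Dry.Buffer;        // was dev.FOAOut.Buffer
        voice.Direct.Channels = dev.Dry.NumChannels; // was dev.FOAOut.NumChannels
    }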
diff --git a/Alc/alu.cpp b/Alc/alu.cpp
index 0eac5547..95889626 100644
--- a/Alc/alu.cpp
+++ b/Alc/alu.cpp
@@ -630,11 +630,8 @@ void CalcPanningAndFilters(ALvoice *voice, const ALfloat Azi, const ALfloat Elev
voice->Flags |= VOICE_HAS_NFC;
}
- /* Always render B-Format sources to the FOA output, to ensure
- * smooth changes if it switches between panned and unpanned.
- */
- voice->Direct.Buffer = Device->FOAOut.Buffer;
- voice->Direct.Channels = Device->FOAOut.NumChannels;
+ voice->Direct.Buffer = Device->Dry.Buffer;
+ voice->Direct.Channels = Device->Dry.NumChannels;
/* A scalar of 1.5 for plain stereo results in +/-60 degrees being
* moved to +/-90 degrees for direct right and left speaker
@@ -646,7 +643,7 @@ void CalcPanningAndFilters(ALvoice *voice, const ALfloat Azi, const ALfloat Elev
/* NOTE: W needs to be scaled due to FuMa normalization. */
const ALfloat &scale0 = AmbiScale::FromFuMa[0];
- ComputePanGains(&Device->FOAOut, coeffs, DryGain*scale0,
+ ComputePanGains(&Device->Dry, coeffs, DryGain*scale0,
voice->Direct.Params[0].Gains.Target);
for(ALsizei i{0};i < NumSends;i++)
{
@@ -697,26 +694,25 @@ void CalcPanningAndFilters(ALvoice *voice, const ALfloat Azi, const ALfloat Elev
const ALfloat &yscale = AmbiScale::FromFuMa[1];
const ALfloat &zscale = AmbiScale::FromFuMa[2];
const ALfloat &xscale = AmbiScale::FromFuMa[3];
- const alu::Matrix matrix{
- // ACN0 ACN1 ACN2 ACN3
- wscale, 0.0f, 0.0f, 0.0f, // FuMa W
- 0.0f, -N[0]*xscale, N[1]*xscale, -N[2]*xscale, // FuMa X
- 0.0f, U[0]*yscale, -U[1]*yscale, U[2]*yscale, // FuMa Y
- 0.0f, -V[0]*zscale, V[1]*zscale, -V[2]*zscale // FuMa Z
+ const ALfloat matrix[4][MAX_AMBI_CHANNELS]{
+ // ACN0 ACN1 ACN2 ACN3
+ { wscale, 0.0f, 0.0f, 0.0f }, // FuMa W
+ { 0.0f, -N[0]*xscale, N[1]*xscale, -N[2]*xscale }, // FuMa X
+ { 0.0f, U[0]*yscale, -U[1]*yscale, U[2]*yscale }, // FuMa Y
+ { 0.0f, -V[0]*zscale, V[1]*zscale, -V[2]*zscale } // FuMa Z
};
- voice->Direct.Buffer = Device->FOAOut.Buffer;
- voice->Direct.Channels = Device->FOAOut.NumChannels;
+ voice->Direct.Buffer = Device->Dry.Buffer;
+ voice->Direct.Channels = Device->Dry.NumChannels;
for(ALsizei c{0};c < num_channels;c++)
- ComputePanGains(&Device->FOAOut, matrix[c].data(), DryGain,
- voice->Direct.Params[c].Gains.Target);
+ ComputePanGains(&Device->Dry, matrix[c], DryGain,
+ voice->Direct.Params[c].Gains.Target);
for(ALsizei i{0};i < NumSends;i++)
{
if(const ALeffectslot *Slot{SendSlots[i]})
for(ALsizei c{0};c < num_channels;c++)
- ComputePanningGainsBF(Slot->ChanMap, Slot->WetBuffer.size(), matrix[c].data(),
- WetGain[i], voice->Send[i].Params[c].Gains.Target
- );
+ ComputePanningGainsBF(Slot->ChanMap, Slot->WetBuffer.size(), matrix[c],
+ WetGain[i], voice->Send[i].Params[c].Gains.Target);
}
}
}
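
A rough picture of what the remapping matrix above feeds into, assuming (as a simplification) that ComputePanGains() on the ambisonic dry buffer maps each source coefficient one-to-one onto an output channel and scales it by the given gain; the helper below is hypothetical, not the library's implementation.

    #include <algorithm>
    #include <cstddef>

    constexpr std::size_t MaxAmbiChans = 16; // stand-in for MAX_AMBI_CHANNELS

    // Hypothetical simplification of ComputePanGains() for an ambisonic target:
    // the row matrix[c] already remaps FuMa channel c into ACN order with the
    // FromFuMa scales applied, so each coefficient only needs the overall gain.
    void PanGainsSketch(const float (&coeffs)[MaxAmbiChans], const float gain,
                        const std::size_t numOutChans, float (&target)[MaxAmbiChans])
    {
        for(std::size_t i{0};i < numOutChans;++i)
            target[i] = coeffs[i] * gain;
        std::fill(target+numOutChans, target+MaxAmbiChans, 0.0f);
    }

For the unpanned W-only case earlier in the hunk, the coefficients come from the panning direction instead, and the FuMa W scale is folded into the gain as DryGain*scale0.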
diff --git a/Alc/mixvoice.cpp b/Alc/mixvoice.cpp
index a0518ed5..78ceea26 100644
--- a/Alc/mixvoice.cpp
+++ b/Alc/mixvoice.cpp
@@ -539,13 +539,30 @@ ALboolean MixSource(ALvoice *voice, const ALuint SourceID, ALCcontext *Context,
/* Store the last source samples used for next time. */
std::copy_n(&SrcData[(increment*DstBufferSize + DataPosFrac)>>FRACTIONBITS],
- voice->PrevSamples[chan].size(), std::begin(voice->PrevSamples[chan]));
+ voice->PrevSamples[chan].size(), std::begin(voice->PrevSamples[chan]));
- /* Now resample, then filter and mix to the appropriate outputs. */
+ /* Resample, then apply ambisonic upsampling as needed. */
const ALfloat *ResampledData{Resample(&voice->ResampleState,
&SrcData[MAX_RESAMPLE_PADDING], DataPosFrac, increment,
Device->TempBuffer[RESAMPLED_BUF], DstBufferSize
)};
+ if((voice->Flags&VOICE_IS_AMBISONIC))
+ {
+ /* TODO: This does not properly handle HOA sources. Only first-order
+ * sources are currently possible, but supporting higher orders would
+ * be desirable in the future.
+ */
+ const ALfloat hfscale{(chan==0) ? voice->AmbiScales[0] : voice->AmbiScales[1]};
+ ALfloat (&hfbuf)[BUFFERSIZE] = Device->TempBuffer[SOURCE_DATA_BUF];
+ ALfloat (&lfbuf)[BUFFERSIZE] = Device->TempBuffer[RESAMPLED_BUF];
+
+ voice->AmbiSplitter[chan].process(hfbuf, lfbuf, ResampledData, DstBufferSize);
+ MixRowSamples(lfbuf, &hfscale, &hfbuf, 1, 0, DstBufferSize);
+
+ ResampledData = lfbuf;
+ }
+
+ /* Now filter and mix to the appropriate outputs. */
{
DirectParams &parms = voice->Direct.Params[chan];
const ALfloat *samples{DoFilters(&parms.LowPass, &parms.HighPass,
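
The new VOICE_IS_AMBISONIC path splits each resampled B-Format channel into low and high bands, scales the high band by a per-channel factor (AmbiScales[0] for W, AmbiScales[1] for the rest), and adds it back onto the low band, so first-order content decodes sensibly through the higher-order dry mix. Below is a simplified, self-contained sketch of that shape; the one-pole splitter is only a stand-in for the library's band splitter, not the same filter.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Simplified band splitter: a one-pole low-pass plus its complementary
    // high-pass, driven by a normalized crossover (crossover / sample rate).
    struct SplitterSketch {
        float coeff;
        float z1{0.0f};

        explicit SplitterSketch(float crossoverOverSampleRate)
          : coeff{std::exp(-6.2831853f * crossoverOverSampleRate)} { }

        void process(float *hpout, float *lpout, const float *input, std::size_t count)
        {
            for(std::size_t i{0};i < count;++i)
            {
                z1 = input[i] + coeff*(z1 - input[i]); // one-pole low band
                lpout[i] = z1;
                hpout[i] = input[i] - z1;              // complementary high band
            }
        }
    };

    // Shape of the upsampling step: split, scale the high band, recombine.
    // In the diff, the recombination is what MixRowSamples() does in place
    // on the low-band buffer (RESAMPLED_BUF).
    void UpsampleChannelSketch(SplitterSketch &splitter, const float hfscale,
                               float *samples, const std::size_t count)
    {
        std::vector<float> hf(count), lf(count);
        splitter.process(hf.data(), lf.data(), samples, count);
        for(std::size_t i{0};i < count;++i)
            samples[i] = lf[i] + hfscale*hf[i];
    }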