/**
 * OpenAL cross platform audio library
 * Copyright (C) 2019 by Anis A. Hireche
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 *  Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 * Or go to http://www.gnu.org/copyleft/lgpl.html
 */

#include "config.h"

#include <cmath>
#include <cstdlib>
#include <algorithm>
#include <functional>

#include "al/auxeffectslot.h"
#include "alcmain.h"
#include "alcontext.h"
#include "alu.h"

namespace {

#define MAX_UPDATE_SAMPLES 128
#define NUM_FORMANTS       4
#define NUM_FILTERS        2
#define Q_FACTOR           5.0f

#define VOWEL_A_INDEX      0
#define VOWEL_B_INDEX      1

#define WAVEFORM_FRACBITS  24
#define WAVEFORM_FRACONE   (1<<WAVEFORM_FRACBITS)
#define WAVEFORM_FRACMASK  (WAVEFORM_FRACONE-1)

inline ALfloat Sin(ALsizei index)
{
    constexpr ALfloat scale{al::MathDefs<float>::Tau() / ALfloat{WAVEFORM_FRACONE}};
    return std::sin(static_cast<ALfloat>(index) * scale)*0.5f + 0.5f;
}

inline ALfloat Saw(ALsizei index)
{
    return static_cast<ALfloat>(index) / ALfloat{WAVEFORM_FRACONE};
}

inline ALfloat Triangle(ALsizei index)
{
    return std::fabs(static_cast<ALfloat>(index)*(2.0f/WAVEFORM_FRACONE) - 1.0f);
}

inline ALfloat Half(ALsizei)
{
    return 0.5f;
}

template<ALfloat func(ALsizei)>
void Oscillate(ALfloat *RESTRICT dst, ALsizei index, const ALsizei step, ALsizei todo)
{
    for(ALsizei i{0};i < todo;i++)
    {
        index += step;
        index &= WAVEFORM_FRACMASK;
        dst[i] = func(index);
    }
}

struct FormantFilter
{
    ALfloat f0norm{0.0f};
    ALfloat fGain{1.0f};
    ALfloat s1{0.0f};
    ALfloat s2{0.0f};

    FormantFilter() = default;
    FormantFilter(ALfloat f0norm_, ALfloat gain) : f0norm{f0norm_}, fGain{gain} { }

    inline void process(const ALfloat *samplesIn, ALfloat *samplesOut, const ALsizei numInput)
    {
        /* A state variable filter from a topology-preserving transform.
         * Based on a talk given by Ivan Cohen:
         * https://www.youtube.com/watch?v=esjHXGPyrhg
         */
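        /* Per sample x, with g = tan(pi*f0norm) and Q = Q_FACTOR, the
         * transform produces the highpass (H), bandpass (B) and lowpass (L)
         * outputs of the filter:
         *   H = (x - (1/Q + g)*s1 - s2) / (1 + g/Q + g*g)
         *   B = g*H + s1
         *   L = g*B + s2
         * and updates the two states as s1 = g*H + B, s2 = g*B + L. Only the
         * bandpass output is used here, scaled by the formant's peak gain and
         * accumulated into the output buffer.
         */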
        const ALfloat g = std::tan(al::MathDefs<float>::Pi() * f0norm);
        const ALfloat h = 1.0f / (1 + (g / Q_FACTOR) + (g * g));

        for(ALsizei i{0};i < numInput;i++)
        {
            const ALfloat H = h * (samplesIn[i] - (1.0f / Q_FACTOR + g) * s1 - s2);
            const ALfloat B = g * H + s1;
            const ALfloat L = g * B + s2;

            s1 = g * H + B;
            s2 = g * B + L;

            // Apply peak and accumulate samples.
            samplesOut[i] += B * fGain;
        }
    }

    inline void clear()
    {
        s1 = 0.0f;
        s2 = 0.0f;
    }
};


struct VmorpherState final : public EffectState {
    struct {
        /* Effect parameters */
        FormantFilter Formants[NUM_FILTERS][NUM_FORMANTS];

        /* Effect gains for each channel */
        ALfloat CurrentGains[MAX_OUTPUT_CHANNELS]{};
        ALfloat TargetGains[MAX_OUTPUT_CHANNELS]{};
    } mChans[MAX_AMBI_CHANNELS];

    void (*mGetSamples)(ALfloat *RESTRICT, ALsizei, const ALsizei, ALsizei){};

    ALsizei mIndex{0};
    ALsizei mStep{1};

    /* Effects buffers */
    ALfloat mSampleBufferA[MAX_UPDATE_SAMPLES]{};
    ALfloat mSampleBufferB[MAX_UPDATE_SAMPLES]{};

    ALboolean deviceUpdate(const ALCdevice *device) override;
    void update(const ALCcontext *context, const ALeffectslot *slot, const EffectProps *props,
        const EffectTarget target) override;
    void process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn,
        const ALsizei numInput, const al::span<FloatBufferLine> samplesOut) override;

    static std::array<FormantFilter,NUM_FORMANTS> getFiltersByPhoneme(ALenum phoneme,
        ALfloat frequency, ALfloat pitch);

    DEF_NEWDEL(VmorpherState)
};

std::array<FormantFilter,NUM_FORMANTS> VmorpherState::getFiltersByPhoneme(ALenum phoneme,
    ALfloat frequency, ALfloat pitch)
{
    /* Using soprano formant set of values to better match mid-range frequency
     * space.
     *
     * See: https://www.classes.cs.uchicago.edu/archive/1999/spring/CS295/Computing_Resources/Csound/CsManual3.48b1.HTML/Appendices/table3.html
     */
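    /* Each initializer below is {f0norm, gain}: the formant's center
     * frequency from the table (in Hz) is scaled by the phoneme's pitch shift
     * and normalized to the device sample rate, while the table's dB level is
     * converted to a linear peak gain as 10^(dB/20) (the precomputed values,
     * with the originating expression noted alongside).
     */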
    switch(phoneme)
    {
    case AL_VOCAL_MORPHER_PHONEME_A:
        return {{
            {( 800 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {(1150 * pitch) / frequency, 0.501187f}, /* std::pow(10.0f,  -6 / 20.0f); */
            {(2900 * pitch) / frequency, 0.025118f}, /* std::pow(10.0f, -32 / 20.0f); */
            {(3900 * pitch) / frequency, 0.100000f}  /* std::pow(10.0f, -20 / 20.0f); */
        }};
    case AL_VOCAL_MORPHER_PHONEME_E:
        return {{
            {( 350 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {(2000 * pitch) / frequency, 0.100000f}, /* std::pow(10.0f, -20 / 20.0f); */
            {(2800 * pitch) / frequency, 0.177827f}, /* std::pow(10.0f, -15 / 20.0f); */
            {(3600 * pitch) / frequency, 0.009999f}  /* std::pow(10.0f, -40 / 20.0f); */
        }};
    case AL_VOCAL_MORPHER_PHONEME_I:
        return {{
            {( 270 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {(2140 * pitch) / frequency, 0.251188f}, /* std::pow(10.0f, -12 / 20.0f); */
            {(2950 * pitch) / frequency, 0.050118f}, /* std::pow(10.0f, -26 / 20.0f); */
            {(3900 * pitch) / frequency, 0.050118f}  /* std::pow(10.0f, -26 / 20.0f); */
        }};
    case AL_VOCAL_MORPHER_PHONEME_O:
        return {{
            {( 450 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {( 800 * pitch) / frequency, 0.281838f}, /* std::pow(10.0f, -11 / 20.0f); */
            {(2830 * pitch) / frequency, 0.079432f}, /* std::pow(10.0f, -22 / 20.0f); */
            {(3800 * pitch) / frequency, 0.079432f}  /* std::pow(10.0f, -22 / 20.0f); */
        }};
    case AL_VOCAL_MORPHER_PHONEME_U:
        return {{
            {( 325 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f,   0 / 20.0f); */
            {( 700 * pitch) / frequency, 0.158489f}, /* std::pow(10.0f, -16 / 20.0f); */
            {(2700 * pitch) / frequency, 0.017782f}, /* std::pow(10.0f, -35 / 20.0f); */
            {(3800 * pitch) / frequency, 0.009999f}  /* std::pow(10.0f, -40 / 20.0f); */
        }};
    }
    return {};
}


ALboolean VmorpherState::deviceUpdate(const ALCdevice* /*device*/)
{
    for(auto &e : mChans)
    {
        std::for_each(std::begin(e.Formants[VOWEL_A_INDEX]), std::end(e.Formants[VOWEL_A_INDEX]),
            std::mem_fn(&FormantFilter::clear));
        std::for_each(std::begin(e.Formants[VOWEL_B_INDEX]), std::end(e.Formants[VOWEL_B_INDEX]),
            std::mem_fn(&FormantFilter::clear));
        std::fill(std::begin(e.CurrentGains), std::end(e.CurrentGains), 0.0f);
    }
    return AL_TRUE;
}

void VmorpherState::update(const ALCcontext *context, const ALeffectslot *slot,
    const EffectProps *props, const EffectTarget target)
{
    const ALCdevice *device{context->mDevice};
    const ALfloat frequency{static_cast<ALfloat>(device->Frequency)};
    const ALfloat step{props->Vmorpher.Rate / static_cast<ALfloat>(device->Frequency)};
    mStep = fastf2i(clampf(step*WAVEFORM_FRACONE, 0.0f, ALfloat{WAVEFORM_FRACONE-1}));

    if(mStep == 0)
        mGetSamples = Oscillate<Half>;
    else if(props->Vmorpher.Waveform == AL_VOCAL_MORPHER_WAVEFORM_SINUSOID)
        mGetSamples = Oscillate<Sin>;
    else if(props->Vmorpher.Waveform == AL_VOCAL_MORPHER_WAVEFORM_SAWTOOTH)
        mGetSamples = Oscillate<Saw>;
    else /*if(props->Vmorpher.Waveform == AL_VOCAL_MORPHER_WAVEFORM_TRIANGLE)*/
        mGetSamples = Oscillate<Triangle>;

    const ALfloat pitchA{fastf2i(
        std::pow(2.0f, props->Vmorpher.PhonemeACoarseTuning*100.0f / 2400.0f)*FRACTIONONE) *
        (1.0f/FRACTIONONE)};
    const ALfloat pitchB{fastf2i(
        std::pow(2.0f, props->Vmorpher.PhonemeBCoarseTuning*100.0f / 2400.0f)*FRACTIONONE) *
        (1.0f/FRACTIONONE)};

    auto vowelA = getFiltersByPhoneme(props->Vmorpher.PhonemeA, frequency, pitchA);
    auto vowelB = getFiltersByPhoneme(props->Vmorpher.PhonemeB, frequency, pitchB);

    /* Copy the filter coefficients to the input channels. */
    for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
    {
        std::copy(vowelA.begin(), vowelA.end(), std::begin(mChans[i].Formants[VOWEL_A_INDEX]));
        std::copy(vowelB.begin(), vowelB.end(), std::begin(mChans[i].Formants[VOWEL_B_INDEX]));
    }

    mOutTarget = target.Main->Buffer;
    for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
    {
        auto coeffs = GetAmbiIdentityRow(i);
        ComputePanGains(target.Main, coeffs.data(), slot->Params.Gain, mChans[i].TargetGains);
    }
}

void VmorpherState::process(const ALsizei samplesToDo, const FloatBufferLine *RESTRICT samplesIn,
    const ALsizei numInput, const al::span<FloatBufferLine> samplesOut)
{
    /* Following the EFX specification for a conformant implementation, which
     * describes the effect as a pair of 4-band formant filters blended
     * together using an LFO.
     */
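    /* In short: for every input channel, the two 4-formant banks (vowel A and
     * vowel B) filter the same input in parallel, and their summed outputs
     * are cross-faded by the LFO before being panned to the output:
     *   blended[i] = lerp(sumA[i], sumB[i], lfo[i])
     * The LFO and filters are run in blocks of at most MAX_UPDATE_SAMPLES
     * samples.
     */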
    for(ALsizei base{0};base < samplesToDo;)
    {
        alignas(16) ALfloat lfo[MAX_UPDATE_SAMPLES];
        const ALsizei td = mini(MAX_UPDATE_SAMPLES, samplesToDo-base);

        mGetSamples(lfo, mIndex, mStep, td);
        mIndex += (mStep * td) & WAVEFORM_FRACMASK;
        mIndex &= WAVEFORM_FRACMASK;

        ASSUME(numInput > 0);
        for(ALsizei c{0};c < numInput;c++)
        {
            for(ALsizei i{0};i < td;i++)
            {
                mSampleBufferA[i] = 0.0f;
                mSampleBufferB[i] = 0.0f;
            }

            auto &vowelA = mChans[c].Formants[VOWEL_A_INDEX];
            auto &vowelB = mChans[c].Formants[VOWEL_B_INDEX];

            /* Process first vowel. */
            vowelA[0].process(&samplesIn[c][base], mSampleBufferA, td);
            vowelA[1].process(&samplesIn[c][base], mSampleBufferA, td);
            vowelA[2].process(&samplesIn[c][base], mSampleBufferA, td);
            vowelA[3].process(&samplesIn[c][base], mSampleBufferA, td);

            /* Process second vowel. */
            vowelB[0].process(&samplesIn[c][base], mSampleBufferB, td);
            vowelB[1].process(&samplesIn[c][base], mSampleBufferB, td);
            vowelB[2].process(&samplesIn[c][base], mSampleBufferB, td);
            vowelB[3].process(&samplesIn[c][base], mSampleBufferB, td);

            alignas(16) ALfloat samplesBlended[MAX_UPDATE_SAMPLES];
            for(ALsizei i{0};i < td;i++)
                samplesBlended[i] = lerp(mSampleBufferA[i], mSampleBufferB[i], lfo[i]);

            /* Now, mix the processed sound data to the output. */
            MixSamples(samplesBlended, samplesOut, mChans[c].CurrentGains, mChans[c].TargetGains,
                samplesToDo-base, base, td);
        }

        base += td;
    }
}


void Vmorpher_setParami(EffectProps *props, ALCcontext *context, ALenum param, ALint val)
{
    switch(param)
    {
        case AL_VOCAL_MORPHER_WAVEFORM:
            if(!(val >= AL_VOCAL_MORPHER_MIN_WAVEFORM && val <= AL_VOCAL_MORPHER_MAX_WAVEFORM))
                SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher waveform out of range");
            props->Vmorpher.Waveform = val;
            break;

        case AL_VOCAL_MORPHER_PHONEMEA:
            if(!(val >= AL_VOCAL_MORPHER_MIN_PHONEMEA && val <= AL_VOCAL_MORPHER_MAX_PHONEMEA))
                SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher phoneme-a out of range");
            props->Vmorpher.PhonemeA = val;
            break;

        case AL_VOCAL_MORPHER_PHONEMEB:
            if(!(val >= AL_VOCAL_MORPHER_MIN_PHONEMEB && val <= AL_VOCAL_MORPHER_MAX_PHONEMEB))
                SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher phoneme-b out of range");
            props->Vmorpher.PhonemeB = val;
            break;

        case AL_VOCAL_MORPHER_PHONEMEA_COARSE_TUNING:
            if(!(val >= AL_VOCAL_MORPHER_MIN_PHONEMEA_COARSE_TUNING && val <= AL_VOCAL_MORPHER_MAX_PHONEMEA_COARSE_TUNING))
                SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher phoneme-a coarse tuning out of range");
            props->Vmorpher.PhonemeACoarseTuning = val;
            break;

        case AL_VOCAL_MORPHER_PHONEMEB_COARSE_TUNING:
            if(!(val >= AL_VOCAL_MORPHER_MIN_PHONEMEB_COARSE_TUNING && val <= AL_VOCAL_MORPHER_MAX_PHONEMEB_COARSE_TUNING))
                SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher phoneme-b coarse tuning out of range");
            props->Vmorpher.PhonemeBCoarseTuning = val;
            break;

        default:
            context->setError(AL_INVALID_ENUM, "Invalid vocal morpher integer property 0x%04x",
                param);
    }
}
void Vmorpher_setParamiv(EffectProps*, ALCcontext *context, ALenum param, const ALint*)
{ context->setError(AL_INVALID_ENUM, "Invalid vocal morpher integer-vector property 0x%04x", param); }
void Vmorpher_setParamf(EffectProps *props, ALCcontext *context, ALenum param, ALfloat val)
{
    switch(param)
    {
        case AL_VOCAL_MORPHER_RATE:
            if(!(val >= AL_VOCAL_MORPHER_MIN_RATE && val <= AL_VOCAL_MORPHER_MAX_RATE))
                SETERR_RETURN(context, AL_INVALID_VALUE,, "Vocal morpher rate out of range");
            props->Vmorpher.Rate = val;
            break;

        default:
            context->setError(AL_INVALID_ENUM, "Invalid vocal morpher float property 0x%04x",
                param);
    }
}
void Vmorpher_setParamfv(EffectProps *props, ALCcontext *context, ALenum param, const ALfloat *vals)
{ Vmorpher_setParamf(props, context, param, vals[0]); }

void Vmorpher_getParami(const EffectProps *props, ALCcontext *context, ALenum param, ALint *val)
{
    switch(param)
    {
        case AL_VOCAL_MORPHER_PHONEMEA:
            *val = props->Vmorpher.PhonemeA;
            break;

        case AL_VOCAL_MORPHER_PHONEMEB:
            *val = props->Vmorpher.PhonemeB;
            break;

        case AL_VOCAL_MORPHER_PHONEMEA_COARSE_TUNING:
            *val = props->Vmorpher.PhonemeACoarseTuning;
            break;

        case AL_VOCAL_MORPHER_PHONEMEB_COARSE_TUNING:
            *val = props->Vmorpher.PhonemeBCoarseTuning;
            break;

        case AL_VOCAL_MORPHER_WAVEFORM:
            *val = props->Vmorpher.Waveform;
            break;

        default:
            context->setError(AL_INVALID_ENUM, "Invalid vocal morpher integer property 0x%04x",
                param);
    }
}
void Vmorpher_getParamiv(const EffectProps*, ALCcontext *context, ALenum param, ALint*)
{ context->setError(AL_INVALID_ENUM, "Invalid vocal morpher integer-vector property 0x%04x", param); }
void Vmorpher_getParamf(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *val)
{
    switch(param)
    {
        case AL_VOCAL_MORPHER_RATE:
            *val = props->Vmorpher.Rate;
            break;

        default:
            context->setError(AL_INVALID_ENUM, "Invalid vocal morpher float property 0x%04x",
                param);
    }
}
void Vmorpher_getParamfv(const EffectProps *props, ALCcontext *context, ALenum param, ALfloat *vals)
{ Vmorpher_getParamf(props, context, param, vals); }

DEFINE_ALEFFECT_VTABLE(Vmorpher);


struct VmorpherStateFactory final : public EffectStateFactory {
    EffectState *create() override { return new VmorpherState{}; }
    EffectProps getDefaultProps() const noexcept override;
    const EffectVtable *getEffectVtable() const noexcept override { return &Vmorpher_vtable; }
};

EffectProps VmorpherStateFactory::getDefaultProps() const noexcept
{
    EffectProps props{};
    props.Vmorpher.Rate                 = AL_VOCAL_MORPHER_DEFAULT_RATE;
    props.Vmorpher.PhonemeA             = AL_VOCAL_MORPHER_DEFAULT_PHONEMEA;
    props.Vmorpher.PhonemeB             = AL_VOCAL_MORPHER_DEFAULT_PHONEMEB;
    props.Vmorpher.PhonemeACoarseTuning = AL_VOCAL_MORPHER_DEFAULT_PHONEMEA_COARSE_TUNING;
    props.Vmorpher.PhonemeBCoarseTuning = AL_VOCAL_MORPHER_DEFAULT_PHONEMEB_COARSE_TUNING;
    props.Vmorpher.Waveform             = AL_VOCAL_MORPHER_DEFAULT_WAVEFORM;
    return props;
}

} // namespace

EffectStateFactory *VmorpherStateFactory_getFactory()
{
    static VmorpherStateFactory VmorpherFactory{};
    return &VmorpherFactory;
}
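
/* Illustrative only: a rough sketch (not part of this implementation) of how
 * an application might select and configure this effect with standard EFX
 * calls, assuming the EFX extension and its function pointers are available
 * and `source` is an existing source:
 *
 *   ALuint effect{}, slot{};
 *   alGenEffects(1, &effect);
 *   alEffecti(effect, AL_EFFECT_TYPE, AL_EFFECT_VOCAL_MORPHER);
 *   alEffecti(effect, AL_VOCAL_MORPHER_PHONEMEA, AL_VOCAL_MORPHER_PHONEME_A);
 *   alEffecti(effect, AL_VOCAL_MORPHER_PHONEMEB, AL_VOCAL_MORPHER_PHONEME_I);
 *   alEffecti(effect, AL_VOCAL_MORPHER_WAVEFORM, AL_VOCAL_MORPHER_WAVEFORM_SINUSOID);
 *   alEffectf(effect, AL_VOCAL_MORPHER_RATE, AL_VOCAL_MORPHER_DEFAULT_RATE);
 *
 *   alGenAuxiliaryEffectSlots(1, &slot);
 *   alAuxiliaryEffectSloti(slot, AL_EFFECTSLOT_EFFECT, static_cast<ALint>(effect));
 *   alSource3i(source, AL_AUXILIARY_SEND_FILTER, static_cast<ALint>(slot), 0, AL_FILTER_NULL);
 */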