author     Chris Robinson <[email protected]>    2014-02-23 21:17:09 -0800
committer  Chris Robinson <[email protected]>    2014-02-23 21:17:09 -0800
commit     9a4ded249114007d07b8f96277a1fd50f2f48e6f
tree       5321731ad58fd0ad00471532e25407dafab44515 /Alc
parent     c68ce288d06372ff5cfd4c060c1f7b00058ca4d9
Revert "Apply HRTF coefficient stepping separately"
This reverts commit 25b9c3d0c15e959d544f5d0ac7ea507ea5f6d69f.
Conflicts:
Alc/mixer_neon.c
Unfortunately this also undoes the Neon-enhanced ApplyCoeffsStep method.
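
For reference, the Neon-enhanced stepping that is lost here advanced the interpolated HRIR coefficient pairs with 128-bit vector adds, two taps per iteration; the lines removed from Alc/mixer_neon.c in the diff below show the idiom. A minimal standalone sketch of that technique (the helper name StepCoeffsNeon is illustrative, not from the tree, and IrSize is assumed to be even):

    #include <arm_neon.h>

    /* Step two HRIR taps per iteration (four floats: the left/right
     * coefficient pairs for taps c and c+1), mirroring the
     * vld1q/vaddq/vst1q sequence removed from Alc/mixer_neon.c. */
    static inline void StepCoeffsNeon(unsigned int IrSize,
                                      float (*Coeffs)[2],
                                      const float (*CoeffStep)[2])
    {
        unsigned int c;
        for(c = 0;c < IrSize;c += 2)
        {
            float32x4_t coeffs = vld1q_f32(&Coeffs[c][0]);
            float32x4_t deltas = vld1q_f32(&CoeffStep[c][0]);
            coeffs = vaddq_f32(coeffs, deltas);
            vst1q_f32(&Coeffs[c][0], coeffs);
        }
    }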
Diffstat (limited to 'Alc')
-rw-r--r--  Alc/mixer_c.c    |  9
-rw-r--r--  Alc/mixer_inc.c  |  9
-rw-r--r--  Alc/mixer_neon.c | 15
-rw-r--r--  Alc/mixer_sse.c  | 60
4 files changed, 74 insertions(+), 19 deletions(-)
diff --git a/Alc/mixer_c.c b/Alc/mixer_c.c
index 36d8bf5a..34309b12 100644
--- a/Alc/mixer_c.c
+++ b/Alc/mixer_c.c
@@ -46,13 +46,18 @@ DECL_TEMPLATE(cubic32)
 
 #undef DECL_TEMPLATE
 
-static inline void ApplyCoeffsStep(const ALuint IrSize,
+static inline void ApplyCoeffsStep(ALuint Offset, ALfloat (*restrict Values)[2],
+                                   const ALuint IrSize,
                                    ALfloat (*restrict Coeffs)[2],
-                                   const ALfloat (*restrict CoeffStep)[2])
+                                   const ALfloat (*restrict CoeffStep)[2],
+                                   ALfloat left, ALfloat right)
 {
     ALuint c;
     for(c = 0;c < IrSize;c++)
     {
+        const ALuint off = (Offset+c)&HRIR_MASK;
+        Values[off][0] += Coeffs[c][0] * left;
+        Values[off][1] += Coeffs[c][1] * right;
         Coeffs[c][0] += CoeffStep[c][0];
         Coeffs[c][1] += CoeffStep[c][1];
     }
diff --git a/Alc/mixer_inc.c b/Alc/mixer_inc.c
index 08a7b264..da8a7b31 100644
--- a/Alc/mixer_inc.c
+++ b/Alc/mixer_inc.c
@@ -19,9 +19,11 @@
 #define MixDirect_Hrtf MERGE2(MixDirect_Hrtf_,SUFFIX)
 
-static inline void ApplyCoeffsStep(const ALuint irSize,
+static inline void ApplyCoeffsStep(ALuint Offset, ALfloat (*restrict Values)[2],
+                                   const ALuint irSize,
                                    ALfloat (*restrict Coeffs)[2],
-                                   const ALfloat (*restrict CoeffStep)[2]);
+                                   const ALfloat (*restrict CoeffStep)[2],
+                                   ALfloat left, ALfloat right);
 static inline void ApplyCoeffs(ALuint Offset, ALfloat (*restrict Values)[2],
                                const ALuint irSize, ALfloat (*restrict Coeffs)[2],
@@ -91,10 +93,9 @@ void MixDirect_Hrtf(const DirectParams *params, const ALfloat *restrict data, AL
         Values[(Offset+IrSize)&HRIR_MASK][1] = 0.0f;
         Offset++;
 
-        ApplyCoeffs(Offset, Values, IrSize, Coeffs, left, right);
+        ApplyCoeffsStep(Offset, Values, IrSize, Coeffs, CoeffStep, left, right);
         DryBuffer[FrontLeft][OutPos] += Values[Offset&HRIR_MASK][0];
         DryBuffer[FrontRight][OutPos] += Values[Offset&HRIR_MASK][1];
-        ApplyCoeffsStep(IrSize, Coeffs, CoeffStep);
 
         OutPos++;
         Counter--;
diff --git a/Alc/mixer_neon.c b/Alc/mixer_neon.c
index 0aa450ad..65e702eb 100644
--- a/Alc/mixer_neon.c
+++ b/Alc/mixer_neon.c
@@ -10,19 +10,22 @@
 #include "alu.h"
 
-static inline void ApplyCoeffsStep(const ALuint IrSize,
+static inline void ApplyCoeffsStep(ALuint Offset, ALfloat (*restrict Values)[2],
+                                   const ALuint IrSize,
                                    ALfloat (*restrict Coeffs)[2],
-                                   const ALfloat (*restrict CoeffStep)[2])
+                                   const ALfloat (*restrict CoeffStep)[2],
+                                   ALfloat left, ALfloat right)
 {
     float32x4_t coeffs, deltas;
     ALuint c;
     for(c = 0;c < IrSize;c += 2)
     {
-        coeffs = vld1q_f32(&Coeffs[c][0]);
-        deltas = vld1q_f32(&CoeffStep[c][0]);
-        coeffs = vaddq_f32(coeffs, deltas);
-        vst1q_f32(&Coeffs[c][0], coeffs);
+        const ALuint off = (Offset+c)&HRIR_MASK;
+        Values[off][0] += Coeffs[c][0] * left;
+        Values[off][1] += Coeffs[c][1] * right;
+        Coeffs[c][0] += CoeffStep[c][0];
+        Coeffs[c][1] += CoeffStep[c][1];
     }
 }
diff --git a/Alc/mixer_sse.c b/Alc/mixer_sse.c
index 719ebd23..56a4bdae 100644
--- a/Alc/mixer_sse.c
+++ b/Alc/mixer_sse.c
@@ -21,19 +21,65 @@
 #include "mixer_defs.h"
 
-static inline void ApplyCoeffsStep(const ALuint IrSize,
+static inline void ApplyCoeffsStep(ALuint Offset, ALfloat (*restrict Values)[2],
+                                   const ALuint IrSize,
                                    ALfloat (*restrict Coeffs)[2],
-                                   const ALfloat (*restrict CoeffStep)[2])
+                                   const ALfloat (*restrict CoeffStep)[2],
+                                   ALfloat left, ALfloat right)
 {
-    __m128 coeffs, deltas;
+    const __m128 lrlr = { left, right, left, right };
+    __m128 coeffs, deltas, imp0, imp1;
+    __m128 vals = _mm_setzero_ps();
     ALuint i;
 
-    for(i = 0;i < IrSize;i += 2)
+    if((Offset&1))
     {
-        coeffs = _mm_load_ps(&Coeffs[i][0]);
-        deltas = _mm_load_ps(&CoeffStep[i][0]);
+        const ALuint o0 = Offset&HRIR_MASK;
+        const ALuint o1 = (Offset+IrSize-1)&HRIR_MASK;
+
+        coeffs = _mm_load_ps(&Coeffs[0][0]);
+        deltas = _mm_load_ps(&CoeffStep[0][0]);
+        vals = _mm_loadl_pi(vals, (__m64*)&Values[o0][0]);
+        imp0 = _mm_mul_ps(lrlr, coeffs);
         coeffs = _mm_add_ps(coeffs, deltas);
-        _mm_store_ps(&Coeffs[i][0], coeffs);
+        vals = _mm_add_ps(imp0, vals);
+        _mm_store_ps(&Coeffs[0][0], coeffs);
+        _mm_storel_pi((__m64*)&Values[o0][0], vals);
+        for(i = 1;i < IrSize-1;i += 2)
+        {
+            const ALuint o2 = (Offset+i)&HRIR_MASK;
+
+            coeffs = _mm_load_ps(&Coeffs[i+1][0]);
+            deltas = _mm_load_ps(&CoeffStep[i+1][0]);
+            vals = _mm_load_ps(&Values[o2][0]);
+            imp1 = _mm_mul_ps(lrlr, coeffs);
+            coeffs = _mm_add_ps(coeffs, deltas);
+            imp0 = _mm_shuffle_ps(imp0, imp1, _MM_SHUFFLE(1, 0, 3, 2));
+            vals = _mm_add_ps(imp0, vals);
+            _mm_store_ps(&Coeffs[i+1][0], coeffs);
+            _mm_store_ps(&Values[o2][0], vals);
+            imp0 = imp1;
+        }
+        vals = _mm_loadl_pi(vals, (__m64*)&Values[o1][0]);
+        imp0 = _mm_movehl_ps(imp0, imp0);
+        vals = _mm_add_ps(imp0, vals);
+        _mm_storel_pi((__m64*)&Values[o1][0], vals);
+    }
+    else
+    {
+        for(i = 0;i < IrSize;i += 2)
+        {
+            const ALuint o = (Offset + i)&HRIR_MASK;
+
+            coeffs = _mm_load_ps(&Coeffs[i][0]);
+            deltas = _mm_load_ps(&CoeffStep[i][0]);
+            vals = _mm_load_ps(&Values[o][0]);
+            imp0 = _mm_mul_ps(lrlr, coeffs);
+            coeffs = _mm_add_ps(coeffs, deltas);
+            vals = _mm_add_ps(imp0, vals);
+            _mm_store_ps(&Coeffs[i][0], coeffs);
+            _mm_store_ps(&Values[o][0], vals);
+        }
     }
 }
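
A note on the restored SSE path, which is the least obvious part of the revert: Values[] holds interleaved left/right pairs, so &Values[o][0] is only 16-byte aligned when o is even. With an odd Offset the aligned _mm_load_ps/_mm_store_ps accesses would straddle pair boundaries, which appears to be why that branch peels the first and last pairs off with _mm_loadl_pi/_mm_storel_pi and realigns the coefficient products across iterations with _mm_shuffle_ps/_mm_movehl_ps. The even-Offset lane reduces to the sketch below; HRIR_LENGTH is an assumed stand-in for the mixer's real ring size, and the helper name is illustrative rather than taken from the tree.

    #include <xmmintrin.h>

    #define HRIR_LENGTH 32                 /* assumed power-of-two ring size */
    #define HRIR_MASK   (HRIR_LENGTH-1)

    /* Even-Offset SSE lane: accumulate two left/right coefficient pairs into
     * the circular Values buffer and step the coefficients, two taps per
     * iteration.  Assumes Values, Coeffs and CoeffStep are 16-byte aligned
     * and IrSize is even, as the mixer arranges. */
    static inline void ApplyCoeffsStepAligned(unsigned int Offset, float (*Values)[2],
                                              unsigned int IrSize,
                                              float (*Coeffs)[2],
                                              const float (*CoeffStep)[2],
                                              float left, float right)
    {
        const __m128 lrlr = _mm_setr_ps(left, right, left, right);
        unsigned int i;
        for(i = 0;i < IrSize;i += 2)
        {
            /* o is even here, so the 4-float load never splits a pair or
             * wraps mid-vector. */
            const unsigned int o = (Offset+i)&HRIR_MASK;
            __m128 coeffs = _mm_load_ps(&Coeffs[i][0]);
            __m128 deltas = _mm_load_ps(&CoeffStep[i][0]);
            __m128 vals   = _mm_load_ps(&Values[o][0]);
            vals   = _mm_add_ps(vals, _mm_mul_ps(lrlr, coeffs)); /* accumulate output */
            coeffs = _mm_add_ps(coeffs, deltas);                 /* step coefficients */
            _mm_store_ps(&Values[o][0], vals);
            _mm_store_ps(&Coeffs[i][0], coeffs);
        }
    }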