Diffstat (limited to 'Alc')
-rw-r--r--  Alc/mixer.c       | 36
-rw-r--r--  Alc/mixer_c.c     |  6
-rw-r--r--  Alc/mixer_defs.h  |  7
-rw-r--r--  Alc/mixer_sse3.c  | 66
-rw-r--r--  Alc/mixer_sse41.c | 72
5 files changed, 170 insertions(+), 17 deletions(-)
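
This change replaces the 6-point sinc resampler (FIR6) with an 8-point one (FIR8) and adds SSE3 and SSE4.1 fast paths for it. Assuming the option string parsed in aluInitMixer() comes from the usual OpenAL Soft configuration lookup (an assumption; the lookup itself is outside this diff), the new filter would be selected with a config line such as:

resampler = sinc8
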
diff --git a/Alc/mixer.c b/Alc/mixer.c
index 8061ab73..2f38ef3f 100644
--- a/Alc/mixer.c
+++ b/Alc/mixer.c
@@ -50,7 +50,7 @@ enum Resampler {
PointResampler,
LinearResampler,
FIR4Resampler,
- FIR6Resampler,
+ FIR8Resampler,
ResamplerMax,
};
@@ -65,7 +65,7 @@ static const ALsizei ResamplerPadding[ResamplerMax][2] = {
{0, 0}, /* Point */
{0, 1}, /* Linear */
{1, 2}, /* FIR4 */
- {2, 3}, /* FIR6 */
+ {3, 4}, /* FIR8 */
};
@@ -127,8 +127,16 @@ static inline ResamplerFunc SelectResampler(enum Resampler resampler)
return Resample_fir4_32_SSE3;
#endif
return Resample_fir4_32_C;
- case FIR6Resampler:
- return Resample_fir6_32_C;
+ case FIR8Resampler:
+#ifdef HAVE_SSE4_1
+ if((CPUCapFlags&CPU_CAP_SSE4_1))
+ return Resample_fir8_32_SSE41;
+#endif
+#ifdef HAVE_SSE3
+ if((CPUCapFlags&CPU_CAP_SSE3))
+ return Resample_fir8_32_SSE3;
+#endif
+ return Resample_fir8_32_C;
case ResamplerMax:
/* Shouldn't happen */
break;
@@ -161,8 +169,8 @@ void aluInitMixer(void)
DefaultResampler = LinearResampler;
else if(strcasecmp(str, "sinc4") == 0)
DefaultResampler = FIR4Resampler;
- else if(strcasecmp(str, "sinc6") == 0)
- DefaultResampler = FIR6Resampler;
+ else if(strcasecmp(str, "sinc8") == 0)
+ DefaultResampler = FIR8Resampler;
else if(strcasecmp(str, "cubic") == 0)
{
WARN("Resampler option \"cubic\" is deprecated, using sinc4\n");
@@ -179,16 +187,18 @@ void aluInitMixer(void)
}
}
- if(DefaultResampler == FIR6Resampler)
+ if(DefaultResampler == FIR8Resampler)
for(i = 0;i < FRACTIONONE;i++)
{
ALdouble mu = (ALdouble)i / FRACTIONONE;
- ResampleCoeffs.FIR6[i][0] = lanc(3.0, mu - -2.0);
- ResampleCoeffs.FIR6[i][1] = lanc(3.0, mu - -1.0);
- ResampleCoeffs.FIR6[i][2] = lanc(3.0, mu - 0.0);
- ResampleCoeffs.FIR6[i][3] = lanc(3.0, mu - 1.0);
- ResampleCoeffs.FIR6[i][4] = lanc(3.0, mu - 2.0);
- ResampleCoeffs.FIR6[i][5] = lanc(3.0, mu - 3.0);
+ ResampleCoeffs.FIR8[i][0] = lanc(4.0, mu - -3.0);
+ ResampleCoeffs.FIR8[i][1] = lanc(4.0, mu - -2.0);
+ ResampleCoeffs.FIR8[i][2] = lanc(4.0, mu - -1.0);
+ ResampleCoeffs.FIR8[i][3] = lanc(4.0, mu - 0.0);
+ ResampleCoeffs.FIR8[i][4] = lanc(4.0, mu - 1.0);
+ ResampleCoeffs.FIR8[i][5] = lanc(4.0, mu - 2.0);
+ ResampleCoeffs.FIR8[i][6] = lanc(4.0, mu - 3.0);
+ ResampleCoeffs.FIR8[i][7] = lanc(4.0, mu - 4.0);
}
else if(DefaultResampler == FIR4Resampler)
for(i = 0;i < FRACTIONONE;i++)
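
Note: the lanc() helper called above is not part of this diff; it is defined earlier in mixer.c. A minimal sketch of the Lanczos kernel it is assumed to implement, lanc(r, x) = sinc(x)*sinc(x/r) with compact support on [-r, r]:

#include <math.h>

static double lanc(double r, double x)
{
    if(x == 0.0) return 1.0;          /* limit of sin(t)/t at t = 0 */
    if(fabs(x) >= r) return 0.0;      /* kernel is zero outside [-r, r] */
    return r*sin(M_PI*x)*sin(M_PI*x/r) / (M_PI*M_PI * x*x);
}

With r = 4.0 and offsets running from mu - -3.0 through mu - 4.0, each table row holds the 8 kernel samples centered on the fractional position mu, so ResampleCoeffs.FIR8 is an 8-tap Lanczos-windowed sinc filter, one row per fractional step.
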
diff --git a/Alc/mixer_c.c b/Alc/mixer_c.c
index 0d3e99a6..c9fa730a 100644
--- a/Alc/mixer_c.c
+++ b/Alc/mixer_c.c
@@ -14,8 +14,8 @@ static inline ALfloat lerp32(const ALfloat *vals, ALuint frac)
{ return lerp(vals[0], vals[1], frac * (1.0f/FRACTIONONE)); }
static inline ALfloat fir4_32(const ALfloat *vals, ALuint frac)
{ return resample_fir4(vals[-1], vals[0], vals[1], vals[2], frac); }
-static inline ALfloat fir6_32(const ALfloat *vals, ALuint frac)
-{ return resample_fir6(vals[-2], vals[-1], vals[0], vals[1], vals[2], vals[3], frac); }
+static inline ALfloat fir8_32(const ALfloat *vals, ALuint frac)
+{ return resample_fir8(vals[-3], vals[-2], vals[-1], vals[0], vals[1], vals[2], vals[3], vals[4], frac); }
const ALfloat *Resample_copy32_C(const ALfloat *src, ALuint UNUSED(frac),
ALuint UNUSED(increment), ALfloat *restrict dst, ALuint numsamples)
@@ -48,7 +48,7 @@ const ALfloat *Resample_##Sampler##_C(const ALfloat *src, ALuint frac, \
DECL_TEMPLATE(point32)
DECL_TEMPLATE(lerp32)
DECL_TEMPLATE(fir4_32)
-DECL_TEMPLATE(fir6_32)
+DECL_TEMPLATE(fir8_32)
#undef DECL_TEMPLATE
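
The scalar kernel resample_fir8() referenced here is likewise outside this diff; it would live alongside resample_fir4() in a shared mixer header. A plausible sketch, assuming it indexes the coefficient table built in aluInitMixer():

static inline ALfloat resample_fir8(ALfloat val0, ALfloat val1, ALfloat val2, ALfloat val3,
                                    ALfloat val4, ALfloat val5, ALfloat val6, ALfloat val7,
                                    ALuint frac)
{
    /* One row of 8 precomputed Lanczos coefficients per fractional step. */
    const ALfloat *k = ResampleCoeffs.FIR8[frac];
    return k[0]*val0 + k[1]*val1 + k[2]*val2 + k[3]*val3 +
           k[4]*val4 + k[5]*val5 + k[6]*val6 + k[7]*val7;
}

fir8_32() feeds it vals[-3] through vals[4], which is why ResamplerPadding[FIR8Resampler] grows to {3, 4}: three samples of history before the read position and four ahead of it.
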
diff --git a/Alc/mixer_defs.h b/Alc/mixer_defs.h
index bba8e9a1..9f3b4a6f 100644
--- a/Alc/mixer_defs.h
+++ b/Alc/mixer_defs.h
@@ -16,7 +16,7 @@ const ALfloat *Resample_copy32_C(const ALfloat *src, ALuint frac, ALuint increme
const ALfloat *Resample_point32_C(const ALfloat *src, ALuint frac, ALuint increment, ALfloat *restrict dst, ALuint dstlen);
const ALfloat *Resample_lerp32_C(const ALfloat *src, ALuint frac, ALuint increment, ALfloat *restrict dst, ALuint dstlen);
const ALfloat *Resample_fir4_32_C(const ALfloat *src, ALuint frac, ALuint increment, ALfloat *restrict dst, ALuint dstlen);
-const ALfloat *Resample_fir6_32_C(const ALfloat *src, ALuint frac, ALuint increment, ALfloat *restrict dst, ALuint dstlen);
+const ALfloat *Resample_fir8_32_C(const ALfloat *src, ALuint frac, ALuint increment, ALfloat *restrict dst, ALuint dstlen);
/* C mixers */
@@ -60,6 +60,11 @@ const ALfloat *Resample_fir4_32_SSE3(const ALfloat *src, ALuint frac, ALuint inc
const ALfloat *Resample_fir4_32_SSE41(const ALfloat *src, ALuint frac, ALuint increment,
ALfloat *restrict dst, ALuint numsamples);
+const ALfloat *Resample_fir8_32_SSE3(const ALfloat *src, ALuint frac, ALuint increment,
+ ALfloat *restrict dst, ALuint numsamples);
+const ALfloat *Resample_fir8_32_SSE41(const ALfloat *src, ALuint frac, ALuint increment,
+ ALfloat *restrict dst, ALuint numsamples);
+
/* Neon mixers */
void MixHrtf_Neon(ALfloat (*restrict OutBuffer)[BUFFERSIZE], const ALfloat *data,
ALuint Counter, ALuint Offset, ALuint OutPos, const ALuint IrSize,
diff --git a/Alc/mixer_sse3.c b/Alc/mixer_sse3.c
index ced90593..dbf963ff 100644
--- a/Alc/mixer_sse3.c
+++ b/Alc/mixer_sse3.c
@@ -91,3 +91,69 @@ const ALfloat *Resample_fir4_32_SSE3(const ALfloat *src, ALuint frac, ALuint inc
}
return dst;
}
+
+const ALfloat *Resample_fir8_32_SSE3(const ALfloat *src, ALuint frac, ALuint increment,
+ ALfloat *restrict dst, ALuint numsamples)
+{
+ const __m128i increment4 = _mm_set1_epi32(increment*4);
+ const __m128i fracMask4 = _mm_set1_epi32(FRACTIONMASK);
+ alignas(16) union { ALuint i[4]; float f[4]; } pos_;
+ alignas(16) union { ALuint i[4]; float f[4]; } frac_;
+ __m128i frac4, pos4;
+ ALuint pos;
+ ALuint i, j;
+
+ InitiatePositionArrays(frac, increment, frac_.i, pos_.i, 4);
+
+ frac4 = _mm_castps_si128(_mm_load_ps(frac_.f));
+ pos4 = _mm_castps_si128(_mm_load_ps(pos_.f));
+
+ src -= 3;
+ for(i = 0;numsamples-i > 3;i += 4)
+ {
+ __m128 out[2];
+ for(j = 0;j < 8;j+=4)
+ {
+ const __m128 val0 = _mm_loadu_ps(&src[pos_.i[0]+j]);
+ const __m128 val1 = _mm_loadu_ps(&src[pos_.i[1]+j]);
+ const __m128 val2 = _mm_loadu_ps(&src[pos_.i[2]+j]);
+ const __m128 val3 = _mm_loadu_ps(&src[pos_.i[3]+j]);
+ __m128 k0 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[0]][j]);
+ __m128 k1 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[1]][j]);
+ __m128 k2 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[2]][j]);
+ __m128 k3 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[3]][j]);
+
+ k0 = _mm_mul_ps(k0, val0);
+ k1 = _mm_mul_ps(k1, val1);
+ k2 = _mm_mul_ps(k2, val2);
+ k3 = _mm_mul_ps(k3, val3);
+ k0 = _mm_hadd_ps(k0, k1);
+ k2 = _mm_hadd_ps(k2, k3);
+ out[j>>2] = _mm_hadd_ps(k0, k2);
+ }
+
+ out[0] = _mm_add_ps(out[0], out[1]);
+ _mm_store_ps(&dst[i], out[0]);
+
+ frac4 = _mm_add_epi32(frac4, increment4);
+ pos4 = _mm_add_epi32(pos4, _mm_srli_epi32(frac4, FRACTIONBITS));
+ frac4 = _mm_and_si128(frac4, fracMask4);
+
+ _mm_store_ps(pos_.f, _mm_castsi128_ps(pos4));
+ _mm_store_ps(frac_.f, _mm_castsi128_ps(frac4));
+ }
+
+ pos = pos_.i[0];
+ frac = frac_.i[0];
+
+ for(;i < numsamples;i++)
+ {
+ dst[i] = resample_fir8(src[pos ], src[pos+1], src[pos+2], src[pos+3],
+ src[pos+4], src[pos+5], src[pos+6], src[pos+7], frac);
+
+ frac += increment;
+ pos += frac>>FRACTIONBITS;
+ frac &= FRACTIONMASK;
+ }
+ return dst;
+}
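
InitiatePositionArrays() is shared SIMD setup that this diff relies on but does not define. A sketch of its assumed behavior, seeding the four lanes with the read position and fractional phase of four consecutive output samples:

static inline void InitiatePositionArrays(ALuint frac, ALuint increment,
                                          ALuint *frac_arr, ALuint *pos_arr,
                                          ALuint size)
{
    ALuint i;
    pos_arr[0] = 0;
    frac_arr[0] = frac;
    for(i = 1;i < size;i++)
    {
        /* Step each lane by one output sample's worth of fixed-point phase. */
        ALuint frac_tmp = frac_arr[i-1] + increment;
        pos_arr[i] = pos_arr[i-1] + (frac_tmp>>FRACTIONBITS);
        frac_arr[i] = frac_tmp&FRACTIONMASK;
    }
}

Each iteration of the main loop above then produces four output samples at once: the j loop computes the low and high 4-tap halves of each lane's 8-tap dot product, and the three _mm_hadd_ps calls fold the four per-lane partial sums into one 4-wide result.
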
diff --git a/Alc/mixer_sse41.c b/Alc/mixer_sse41.c
index 90438e13..8fd5c4b5 100644
--- a/Alc/mixer_sse41.c
+++ b/Alc/mixer_sse41.c
@@ -147,3 +147,75 @@ const ALfloat *Resample_fir4_32_SSE41(const ALfloat *src, ALuint frac, ALuint in
}
return dst;
}
+
+const ALfloat *Resample_fir8_32_SSE41(const ALfloat *src, ALuint frac, ALuint increment,
+ ALfloat *restrict dst, ALuint numsamples)
+{
+ const __m128i increment4 = _mm_set1_epi32(increment*4);
+ const __m128i fracMask4 = _mm_set1_epi32(FRACTIONMASK);
+ alignas(16) union { ALuint i[4]; float f[4]; } pos_;
+ alignas(16) union { ALuint i[4]; float f[4]; } frac_;
+ __m128i frac4, pos4;
+ ALuint pos;
+ ALuint i, j;
+
+ InitiatePositionArrays(frac, increment, frac_.i, pos_.i, 4);
+
+ frac4 = _mm_castps_si128(_mm_load_ps(frac_.f));
+ pos4 = _mm_castps_si128(_mm_load_ps(pos_.f));
+
+ src -= 3;
+ for(i = 0;numsamples-i > 3;i += 4)
+ {
+ __m128 out[2];
+ for(j = 0;j < 8;j+=4)
+ {
+ const __m128 val0 = _mm_loadu_ps(&src[pos_.i[0]+j]);
+ const __m128 val1 = _mm_loadu_ps(&src[pos_.i[1]+j]);
+ const __m128 val2 = _mm_loadu_ps(&src[pos_.i[2]+j]);
+ const __m128 val3 = _mm_loadu_ps(&src[pos_.i[3]+j]);
+ __m128 k0 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[0]][j]);
+ __m128 k1 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[1]][j]);
+ __m128 k2 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[2]][j]);
+ __m128 k3 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[3]][j]);
+
+ k0 = _mm_mul_ps(k0, val0);
+ k1 = _mm_mul_ps(k1, val1);
+ k2 = _mm_mul_ps(k2, val2);
+ k3 = _mm_mul_ps(k3, val3);
+ k0 = _mm_hadd_ps(k0, k1);
+ k2 = _mm_hadd_ps(k2, k3);
+ out[j>>2] = _mm_hadd_ps(k0, k2);
+ }
+
+ out[0] = _mm_add_ps(out[0], out[1]);
+ _mm_store_ps(&dst[i], out[0]);
+
+ frac4 = _mm_add_epi32(frac4, increment4);
+ pos4 = _mm_add_epi32(pos4, _mm_srli_epi32(frac4, FRACTIONBITS));
+ frac4 = _mm_and_si128(frac4, fracMask4);
+
+ pos_.i[0] = _mm_extract_epi32(pos4, 0);
+ pos_.i[1] = _mm_extract_epi32(pos4, 1);
+ pos_.i[2] = _mm_extract_epi32(pos4, 2);
+ pos_.i[3] = _mm_extract_epi32(pos4, 3);
+ frac_.i[0] = _mm_extract_epi32(frac4, 0);
+ frac_.i[1] = _mm_extract_epi32(frac4, 1);
+ frac_.i[2] = _mm_extract_epi32(frac4, 2);
+ frac_.i[3] = _mm_extract_epi32(frac4, 3);
+ }
+
+ pos = pos_.i[0];
+ frac = frac_.i[0];
+
+ for(;i < numsamples;i++)
+ {
+ dst[i] = resample_fir8(src[pos ], src[pos+1], src[pos+2], src[pos+3],
+ src[pos+4], src[pos+5], src[pos+6], src[pos+7], frac);
+
+ frac += increment;
+ pos += frac>>FRACTIONBITS;
+ frac &= FRACTIONMASK;
+ }
+ return dst;
+}
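
The SSE4.1 variant is identical to the SSE3 one except for the lane write-back: it moves each updated position and fraction straight into the scalar arrays with _mm_extract_epi32 (pextrd) rather than storing the whole vector back through the aligned unions. As a reference for both variants, here is a scalar loop computing the same four outputs as one vector iteration, assuming the same coefficient table (lane is a hypothetical index introduced for this sketch):

ALuint lane;
for(lane = 0;lane < 4;lane++)
{
    const ALfloat *k = ResampleCoeffs.FIR8[frac_.i[lane]];
    ALfloat sum = 0.0f;
    /* 8-tap dot product; src has already been rewound by 3 samples. */
    for(j = 0;j < 8;j++)
        sum += k[j] * src[pos_.i[lane]+j];
    dst[i+lane] = sum;
}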