aboutsummaryrefslogtreecommitdiffstats
path: root/utils/makemhr
diff options
context:
space:
mode:
Diffstat (limited to 'utils/makemhr')
-rw-r--r--utils/makemhr/loaddef.cpp163
-rw-r--r--utils/makemhr/loaddef.h4
-rw-r--r--utils/makemhr/loadsofa.cpp669
-rw-r--r--utils/makemhr/loadsofa.h4
-rw-r--r--utils/makemhr/makemhr.cpp1160
-rw-r--r--utils/makemhr/makemhr.h43
6 files changed, 841 insertions, 1202 deletions
diff --git a/utils/makemhr/loaddef.cpp b/utils/makemhr/loaddef.cpp
index 619f5104..e8092363 100644
--- a/utils/makemhr/loaddef.cpp
+++ b/utils/makemhr/loaddef.cpp
@@ -26,18 +26,22 @@
#include <algorithm>
#include <cctype>
#include <cmath>
-#include <cstring>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
+#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
+#include <cstdarg>
#include <vector>
#include "alfstream.h"
+#include "aloptional.h"
+#include "alspan.h"
#include "alstring.h"
#include "makemhr.h"
+#include "polyphase_resampler.h"
#include "mysofa.h"
@@ -1235,7 +1239,8 @@ static int ProcessMetrics(TokenReaderT *tr, const uint fftSize, const uint trunc
double distances[MAX_FD_COUNT];
uint fdCount = 0;
uint evCounts[MAX_FD_COUNT];
- std::vector<uint> azCounts(MAX_FD_COUNT * MAX_EV_COUNT);
+ auto azCounts = std::vector<std::array<uint,MAX_EV_COUNT>>(MAX_FD_COUNT);
+ for(auto &azs : azCounts) azs.fill(0u);
TrIndication(tr, &line, &col);
while(TrIsIdent(tr))
@@ -1384,7 +1389,7 @@ static int ProcessMetrics(TokenReaderT *tr, const uint fftSize, const uint trunc
{
if(!TrReadInt(tr, MIN_AZ_COUNT, MAX_AZ_COUNT, &intVal))
return 0;
- azCounts[(count * MAX_EV_COUNT) + evCounts[count]++] = static_cast<uint>(intVal);
+ azCounts[count][evCounts[count]++] = static_cast<uint>(intVal);
if(TrIsOperator(tr, ","))
{
if(evCounts[count] >= MAX_EV_COUNT)
@@ -1401,7 +1406,7 @@ static int ProcessMetrics(TokenReaderT *tr, const uint fftSize, const uint trunc
TrErrorAt(tr, line, col, "Did not reach the minimum of %d azimuth counts.\n", MIN_EV_COUNT);
return 0;
}
- if(azCounts[count * MAX_EV_COUNT] != 1 || azCounts[(count * MAX_EV_COUNT) + evCounts[count] - 1] != 1)
+ if(azCounts[count][0] != 1 || azCounts[count][evCounts[count] - 1] != 1)
{
TrError(tr, "Poles are not singular for field %d.\n", count - 1);
return 0;
@@ -1446,7 +1451,8 @@ static int ProcessMetrics(TokenReaderT *tr, const uint fftSize, const uint trunc
}
if(hData->mChannelType == CT_NONE)
hData->mChannelType = CT_MONO;
- if(!PrepareHrirData(fdCount, distances, evCounts, azCounts.data(), hData))
+ const auto azs = al::as_span(azCounts).first<MAX_FD_COUNT>();
+ if(!PrepareHrirData({distances, fdCount}, evCounts, azs, hData))
{
fprintf(stderr, "Error: Out of memory.\n");
exit(-1);
@@ -1459,9 +1465,9 @@ static int ReadIndexTriplet(TokenReaderT *tr, const HrirDataT *hData, uint *fi,
{
int intVal;
- if(hData->mFdCount > 1)
+ if(hData->mFds.size() > 1)
{
- if(!TrReadInt(tr, 0, static_cast<int>(hData->mFdCount) - 1, &intVal))
+ if(!TrReadInt(tr, 0, static_cast<int>(hData->mFds.size()-1), &intVal))
return 0;
*fi = static_cast<uint>(intVal);
if(!TrReadOperator(tr, ","))
@@ -1471,12 +1477,12 @@ static int ReadIndexTriplet(TokenReaderT *tr, const HrirDataT *hData, uint *fi,
{
*fi = 0;
}
- if(!TrReadInt(tr, 0, static_cast<int>(hData->mFds[*fi].mEvCount) - 1, &intVal))
+ if(!TrReadInt(tr, 0, static_cast<int>(hData->mFds[*fi].mEvs.size()-1), &intVal))
return 0;
*ei = static_cast<uint>(intVal);
if(!TrReadOperator(tr, ","))
return 0;
- if(!TrReadInt(tr, 0, static_cast<int>(hData->mFds[*fi].mEvs[*ei].mAzCount) - 1, &intVal))
+ if(!TrReadInt(tr, 0, static_cast<int>(hData->mFds[*fi].mEvs[*ei].mAzs.size()-1), &intVal))
return 0;
*ai = static_cast<uint>(intVal);
return 1;
@@ -1706,27 +1712,16 @@ static int MatchTargetEar(const char *ident)
// Calculate the onset time of an HRIR and average it with any existing
// timing for its field, elevation, azimuth, and ear.
-static double AverageHrirOnset(const uint rate, const uint n, const double *hrir, const double f, const double onset)
+static constexpr int OnsetRateMultiple{10};
+static double AverageHrirOnset(PPhaseResampler &rs, al::span<double> upsampled, const uint rate,
+ const uint n, const double *hrir, const double f, const double onset)
{
- std::vector<double> upsampled(10 * n);
- {
- ResamplerT rs;
- ResamplerSetup(&rs, rate, 10 * rate);
- ResamplerRun(&rs, n, hrir, 10 * n, upsampled.data());
- }
-
- double mag{0.0};
- for(uint i{0u};i < 10*n;i++)
- mag = std::max(std::abs(upsampled[i]), mag);
+ rs.process(n, hrir, static_cast<uint>(upsampled.size()), upsampled.data());
- mag *= 0.15;
- uint i{0u};
- for(;i < 10*n;i++)
- {
- if(std::abs(upsampled[i]) >= mag)
- break;
- }
- return Lerp(onset, static_cast<double>(i) / (10*rate), f);
+ auto abs_lt = [](const double &lhs, const double &rhs) -> bool
+ { return std::abs(lhs) < std::abs(rhs); };
+ auto iter = std::max_element(upsampled.cbegin(), upsampled.cend(), abs_lt);
+ return Lerp(onset, static_cast<double>(std::distance(upsampled.cbegin(), iter))/(10*rate), f);
}
// Calculate the magnitude response of an HRIR and average it with any
@@ -1738,9 +1733,9 @@ static void AverageHrirMagnitude(const uint points, const uint n, const double *
std::vector<double> r(n);
for(i = 0;i < points;i++)
- h[i] = complex_d{hrir[i], 0.0};
+ h[i] = hrir[i];
for(;i < n;i++)
- h[i] = complex_d{0.0, 0.0};
+ h[i] = 0.0;
FftForward(n, h.data());
MagnitudeResponse(n, h.data(), r.data());
for(i = 0;i < m;i++)
@@ -1748,18 +1743,29 @@ static void AverageHrirMagnitude(const uint points, const uint n, const double *
}
// Process the list of sources in the data set definition.
-static int ProcessSources(TokenReaderT *tr, HrirDataT *hData)
+static int ProcessSources(TokenReaderT *tr, HrirDataT *hData, const uint outRate)
{
- uint channels = (hData->mChannelType == CT_STEREO) ? 2 : 1;
+ const uint channels{(hData->mChannelType == CT_STEREO) ? 2u : 1u};
hData->mHrirsBase.resize(channels * hData->mIrCount * hData->mIrSize);
double *hrirs = hData->mHrirsBase.data();
- std::vector<double> hrir(hData->mIrPoints);
+ auto hrir = std::make_unique<double[]>(hData->mIrSize);
uint line, col, fi, ei, ai;
- int count;
+
+ std::vector<double> onsetSamples(OnsetRateMultiple * hData->mIrPoints);
+ PPhaseResampler onsetResampler;
+ onsetResampler.init(hData->mIrRate, OnsetRateMultiple*hData->mIrRate);
+
+ al::optional<PPhaseResampler> resampler;
+ if(outRate && outRate != hData->mIrRate)
+ resampler.emplace().init(hData->mIrRate, outRate);
+ const double rateScale{outRate ? static_cast<double>(outRate) / hData->mIrRate : 1.0};
+ const uint irPoints{outRate
+ ? std::min(static_cast<uint>(std::ceil(hData->mIrPoints*rateScale)), hData->mIrPoints)
+ : hData->mIrPoints};
printf("Loading sources...");
fflush(stdout);
- count = 0;
+ int count{0};
while(TrIsOperator(tr, "["))
{
double factor[2]{ 1.0, 1.0 };
@@ -1841,45 +1847,53 @@ static int ProcessSources(TokenReaderT *tr, HrirDataT *hData)
else
aer[0] = std::fmod(360.0f - aer[0], 360.0f);
- for(fi = 0;fi < hData->mFdCount;fi++)
- {
- double delta = aer[2] - hData->mFds[fi].mDistance;
- if(std::abs(delta) < 0.001) break;
- }
- if(fi >= hData->mFdCount)
+ auto field = std::find_if(hData->mFds.cbegin(), hData->mFds.cend(),
+ [&aer](const HrirFdT &fld) -> bool
+ { return (std::abs(aer[2] - fld.mDistance) < 0.001); });
+ if(field == hData->mFds.cend())
continue;
+ fi = static_cast<uint>(std::distance(hData->mFds.cbegin(), field));
- double ef{(90.0 + aer[1]) / 180.0 * (hData->mFds[fi].mEvCount - 1)};
+ const double evscale{180.0 / static_cast<double>(field->mEvs.size()-1)};
+ double ef{(90.0 + aer[1]) / evscale};
ei = static_cast<uint>(std::round(ef));
- ef = (ef - ei) * 180.0 / (hData->mFds[fi].mEvCount - 1);
+ ef = (ef - ei) * evscale;
if(std::abs(ef) >= 0.1)
continue;
- double af{aer[0] / 360.0 * hData->mFds[fi].mEvs[ei].mAzCount};
+ const double azscale{360.0 / static_cast<double>(field->mEvs[ei].mAzs.size())};
+ double af{aer[0] / azscale};
ai = static_cast<uint>(std::round(af));
- af = (af - ai) * 360.0 / hData->mFds[fi].mEvs[ei].mAzCount;
- ai = ai % hData->mFds[fi].mEvs[ei].mAzCount;
+ af = (af - ai) * azscale;
+ ai %= static_cast<uint>(field->mEvs[ei].mAzs.size());
if(std::abs(af) >= 0.1)
continue;
- HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
+ HrirAzT *azd = &field->mEvs[ei].mAzs[ai];
if(azd->mIrs[0] != nullptr)
{
TrErrorAt(tr, line, col, "Redefinition of source [ %d, %d, %d ].\n", fi, ei, ai);
return 0;
}
- ExtractSofaHrir(sofa, si, 0, src.mOffset, hData->mIrPoints, hrir.data());
+ ExtractSofaHrir(sofa, si, 0, src.mOffset, hData->mIrPoints, hrir.get());
azd->mIrs[0] = &hrirs[hData->mIrSize * azd->mIndex];
- azd->mDelays[0] = AverageHrirOnset(hData->mIrRate, hData->mIrPoints, hrir.data(), 1.0, azd->mDelays[0]);
- AverageHrirMagnitude(hData->mIrPoints, hData->mFftSize, hrir.data(), 1.0, azd->mIrs[0]);
+ azd->mDelays[0] = AverageHrirOnset(onsetResampler, onsetSamples, hData->mIrRate,
+ hData->mIrPoints, hrir.get(), 1.0, azd->mDelays[0]);
+ if(resampler)
+ resampler->process(hData->mIrPoints, hrir.get(), hData->mIrSize, hrir.get());
+ AverageHrirMagnitude(irPoints, hData->mFftSize, hrir.get(), 1.0, azd->mIrs[0]);
if(src.mChannel == 1)
{
- ExtractSofaHrir(sofa, si, 1, src.mOffset, hData->mIrPoints, hrir.data());
+ ExtractSofaHrir(sofa, si, 1, src.mOffset, hData->mIrPoints, hrir.get());
azd->mIrs[1] = &hrirs[hData->mIrSize * (hData->mIrCount + azd->mIndex)];
- azd->mDelays[1] = AverageHrirOnset(hData->mIrRate, hData->mIrPoints, hrir.data(), 1.0, azd->mDelays[1]);
- AverageHrirMagnitude(hData->mIrPoints, hData->mFftSize, hrir.data(), 1.0, azd->mIrs[1]);
+ azd->mDelays[1] = AverageHrirOnset(onsetResampler, onsetSamples,
+ hData->mIrRate, hData->mIrPoints, hrir.get(), 1.0, azd->mDelays[1]);
+ if(resampler)
+ resampler->process(hData->mIrPoints, hrir.get(), hData->mIrSize,
+ hrir.get());
+ AverageHrirMagnitude(irPoints, hData->mFftSize, hrir.get(), 1.0, azd->mIrs[1]);
}
// TODO: Since some SOFA files contain minimum phase HRIRs,
@@ -1918,7 +1932,7 @@ static int ProcessSources(TokenReaderT *tr, HrirDataT *hData)
printf("\rLoading sources... %d file%s", count, (count==1)?"":"s");
fflush(stdout);
- if(!LoadSource(&src, hData->mIrRate, hData->mIrPoints, hrir.data()))
+ if(!LoadSource(&src, hData->mIrRate, hData->mIrPoints, hrir.get()))
return 0;
uint ti{0};
@@ -1936,8 +1950,12 @@ static int ProcessSources(TokenReaderT *tr, HrirDataT *hData)
}
}
azd->mIrs[ti] = &hrirs[hData->mIrSize * (ti * hData->mIrCount + azd->mIndex)];
- azd->mDelays[ti] = AverageHrirOnset(hData->mIrRate, hData->mIrPoints, hrir.data(), 1.0 / factor[ti], azd->mDelays[ti]);
- AverageHrirMagnitude(hData->mIrPoints, hData->mFftSize, hrir.data(), 1.0 / factor[ti], azd->mIrs[ti]);
+ azd->mDelays[ti] = AverageHrirOnset(onsetResampler, onsetSamples, hData->mIrRate,
+ hData->mIrPoints, hrir.get(), 1.0 / factor[ti], azd->mDelays[ti]);
+ if(resampler)
+ resampler->process(hData->mIrPoints, hrir.get(), hData->mIrSize, hrir.get());
+ AverageHrirMagnitude(irPoints, hData->mFftSize, hrir.get(), 1.0 / factor[ti],
+ azd->mIrs[ti]);
factor[ti] += 1.0;
if(!TrIsOperator(tr, "+"))
break;
@@ -1958,28 +1976,35 @@ static int ProcessSources(TokenReaderT *tr, HrirDataT *hData)
}
}
printf("\n");
- for(fi = 0;fi < hData->mFdCount;fi++)
+ hrir = nullptr;
+ if(resampler)
+ {
+ hData->mIrRate = outRate;
+ hData->mIrPoints = irPoints;
+ resampler.reset();
+ }
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
- for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = 0;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzs.size();ai++)
{
HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
if(azd->mIrs[0] != nullptr)
break;
}
- if(ai < hData->mFds[fi].mEvs[ei].mAzCount)
+ if(ai < hData->mFds[fi].mEvs[ei].mAzs.size())
break;
}
- if(ei >= hData->mFds[fi].mEvCount)
+ if(ei >= hData->mFds[fi].mEvs.size())
{
TrError(tr, "Missing source references [ %d, *, * ].\n", fi);
return 0;
}
hData->mFds[fi].mEvStart = ei;
- for(;ei < hData->mFds[fi].mEvCount;ei++)
+ for(;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzs.size();ai++)
{
HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
@@ -1993,11 +2018,11 @@ static int ProcessSources(TokenReaderT *tr, HrirDataT *hData)
}
for(uint ti{0};ti < channels;ti++)
{
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
- for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = 0;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzs.size();ai++)
{
HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
@@ -2019,14 +2044,14 @@ static int ProcessSources(TokenReaderT *tr, HrirDataT *hData)
bool LoadDefInput(std::istream &istream, const char *startbytes, std::streamsize startbytecount,
- const char *filename, const uint fftSize, const uint truncSize, const ChannelModeT chanMode,
- HrirDataT *hData)
+ const char *filename, const uint fftSize, const uint truncSize, const uint outRate,
+ const ChannelModeT chanMode, HrirDataT *hData)
{
TokenReaderT tr{istream};
TrSetup(startbytes, startbytecount, filename, &tr);
if(!ProcessMetrics(&tr, fftSize, truncSize, chanMode, hData)
- || !ProcessSources(&tr, hData))
+ || !ProcessSources(&tr, hData, outRate))
return false;
return true;
diff --git a/utils/makemhr/loaddef.h b/utils/makemhr/loaddef.h
index 34fbb832..63600dcd 100644
--- a/utils/makemhr/loaddef.h
+++ b/utils/makemhr/loaddef.h
@@ -7,7 +7,7 @@
bool LoadDefInput(std::istream &istream, const char *startbytes, std::streamsize startbytecount,
- const char *filename, const uint fftSize, const uint truncSize, const ChannelModeT chanMode,
- HrirDataT *hData);
+ const char *filename, const uint fftSize, const uint truncSize, const uint outRate,
+ const ChannelModeT chanMode, HrirDataT *hData);
#endif /* LOADDEF_H */
diff --git a/utils/makemhr/loadsofa.cpp b/utils/makemhr/loadsofa.cpp
index c91613c8..dcb0a35e 100644
--- a/utils/makemhr/loadsofa.cpp
+++ b/utils/makemhr/loadsofa.cpp
@@ -24,146 +24,29 @@
#include "loadsofa.h"
#include <algorithm>
-#include <array>
+#include <atomic>
+#include <chrono>
#include <cmath>
#include <cstdio>
+#include <functional>
+#include <future>
#include <iterator>
#include <memory>
#include <numeric>
#include <string>
+#include <thread>
#include <vector>
+#include "aloptional.h"
+#include "alspan.h"
#include "makemhr.h"
+#include "polyphase_resampler.h"
+#include "sofa-support.h"
#include "mysofa.h"
-using double3 = std::array<double,3>;
-
-static const char *SofaErrorStr(int err)
-{
- switch(err)
- {
- case MYSOFA_OK: return "OK";
- case MYSOFA_INVALID_FORMAT: return "Invalid format";
- case MYSOFA_UNSUPPORTED_FORMAT: return "Unsupported format";
- case MYSOFA_INTERNAL_ERROR: return "Internal error";
- case MYSOFA_NO_MEMORY: return "Out of memory";
- case MYSOFA_READ_ERROR: return "Read error";
- }
- return "Unknown";
-}
-
-
-/* Produces a sorted array of unique elements from a particular axis of the
- * triplets array. The filters are used to focus on particular coordinates
- * of other axes as necessary. The epsilons are used to constrain the
- * equality of unique elements.
- */
-static uint GetUniquelySortedElems(const uint m, const double3 *aers, const uint axis,
- const double *const (&filters)[3], const double (&epsilons)[3], double *elems)
-{
- uint count{0u};
- for(uint i{0u};i < m;++i)
- {
- const double elem{aers[i][axis]};
-
- uint j;
- for(j = 0;j < 3;j++)
- {
- if(filters[j] && std::fabs(aers[i][j] - *filters[j]) > epsilons[j])
- break;
- }
- if(j < 3)
- continue;
-
- for(j = 0;j < count;j++)
- {
- const double delta{elem - elems[j]};
-
- if(delta > epsilons[axis])
- continue;
-
- if(delta >= -epsilons[axis])
- break;
-
- for(uint k{count};k > j;k--)
- elems[k] = elems[k - 1];
-
- elems[j] = elem;
- count++;
- break;
- }
-
- if(j >= count)
- elems[count++] = elem;
- }
-
- return count;
-}
-
-/* Given a list of elements, this will produce the smallest step size that
- * can uniformly cover a fair portion of the list. Ideally this will be over
- * half, but in degenerate cases this can fall to a minimum of 5 (the lower
- * limit on elevations necessary to build a layout).
- */
-static double GetUniformStepSize(const double epsilon, const uint m, const double *elems)
-{
- auto steps = std::vector<double>(m, 0.0);
- auto counts = std::vector<uint>(m, 0u);
- uint count{0u};
-
- for(uint stride{1u};stride < m/2;stride++)
- {
- for(uint i{0u};i < m-stride;i++)
- {
- const double step{elems[i + stride] - elems[i]};
-
- uint j;
- for(j = 0;j < count;j++)
- {
- if(std::fabs(step - steps[j]) < epsilon)
- {
- counts[j]++;
- break;
- }
- }
-
- if(j >= count)
- {
- steps[j] = step;
- counts[j] = 1;
- count++;
- }
- }
-
- for(uint i{1u};i < count;i++)
- {
- if(counts[i] > counts[0])
- {
- steps[0] = steps[i];
- counts[0] = counts[i];
- }
- }
-
- count = 1;
-
- if(counts[0] > m/2)
- break;
- }
-
- if(counts[0] > 255)
- {
- uint i{2u};
- while(counts[0]/i > 255 && (counts[0]%i) != 0)
- ++i;
- counts[0] /= i;
- steps[0] *= i;
- }
- if(counts[0] > 5)
- return steps[0];
- return 0.0;
-}
+using uint = unsigned int;
/* Attempts to produce a compatible layout. Most data sets tend to be
* uniform and have the same major axis as used by OpenAL Soft's HRTF model.
@@ -173,21 +56,10 @@ static double GetUniformStepSize(const double epsilon, const uint m, const doubl
*/
static bool PrepareLayout(const uint m, const float *xyzs, HrirDataT *hData)
{
- auto aers = std::vector<double3>(m, double3{});
- auto elems = std::vector<double>(m, 0.0);
+ fprintf(stdout, "Detecting compatible layout...\n");
- for(uint i{0u};i < m;++i)
- {
- float aer[3]{xyzs[i*3], xyzs[i*3 + 1], xyzs[i*3 + 2]};
- mysofa_c2s(&aer[0]);
- aers[i][0] = aer[0];
- aers[i][1] = aer[1];
- aers[i][2] = aer[2];
- }
-
- const uint fdCount{GetUniquelySortedElems(m, aers.data(), 2, { nullptr, nullptr, nullptr },
- { 0.1, 0.1, 0.001 }, elems.data())};
- if(fdCount > MAX_FD_COUNT)
+ auto fds = GetCompatibleLayout(m, xyzs);
+ if(fds.size() > MAX_FD_COUNT)
{
fprintf(stdout, "Incompatible layout (inumerable radii).\n");
return false;
@@ -195,107 +67,32 @@ static bool PrepareLayout(const uint m, const float *xyzs, HrirDataT *hData)
double distances[MAX_FD_COUNT]{};
uint evCounts[MAX_FD_COUNT]{};
- auto azCounts = std::vector<uint>(MAX_FD_COUNT*MAX_EV_COUNT, 0u);
- for(uint fi{0u};fi < fdCount;fi++)
- {
- distances[fi] = elems[fi];
- if(fi > 0 && distances[fi] <= distances[fi-1])
- {
- fprintf(stderr, "Distances must increase.\n");
- return 0;
- }
- }
- if(distances[0] < hData->mRadius)
- {
- fprintf(stderr, "Distance cannot start below head radius.\n");
- return 0;
- }
+ auto azCounts = std::vector<std::array<uint,MAX_EV_COUNT>>(MAX_FD_COUNT);
+ for(auto &azs : azCounts) azs.fill(0u);
- for(uint fi{0u};fi < fdCount;fi++)
+ uint fi{0u}, ir_total{0u};
+ for(const auto &field : fds)
{
- const double dist{distances[fi]};
- uint evCount{GetUniquelySortedElems(m, aers.data(), 1, { nullptr, nullptr, &dist },
- { 0.1, 0.1, 0.001 }, elems.data())};
-
- if(evCount > MAX_EV_COUNT)
- {
- fprintf(stderr, "Incompatible layout (innumerable elevations).\n");
- return false;
- }
-
- double step{GetUniformStepSize(0.1, evCount, elems.data())};
- if(step <= 0.0)
- {
- fprintf(stderr, "Incompatible layout (non-uniform elevations).\n");
- return false;
- }
+ distances[fi] = field.mDistance;
+ evCounts[fi] = field.mEvCount;
- uint evStart{0u};
- for(uint ei{0u};ei < evCount;ei++)
+ for(uint ei{0u};ei < field.mEvStart;ei++)
+ azCounts[fi][ei] = field.mAzCounts[field.mEvCount-ei-1];
+ for(uint ei{field.mEvStart};ei < field.mEvCount;ei++)
{
- double ev{90.0 + elems[ei]};
- double eif{std::round(ev / step)};
- const uint ei_start{static_cast<uint>(eif)};
-
- if(std::fabs(eif - static_cast<double>(ei_start)) < (0.1/step))
- {
- evStart = ei_start;
- break;
- }
- }
-
- evCount = static_cast<uint>(std::round(180.0 / step)) + 1;
- if(evCount < 5)
- {
- fprintf(stderr, "Incompatible layout (too few uniform elevations).\n");
- return false;
- }
-
- evCounts[fi] = evCount;
-
- for(uint ei{evStart};ei < evCount;ei++)
- {
- const double ev{-90.0 + ei*180.0/(evCount - 1)};
- const uint azCount{GetUniquelySortedElems(m, aers.data(), 0, { nullptr, &ev, &dist },
- { 0.1, 0.1, 0.001 }, elems.data())};
-
- if(ei > 0 && ei < (evCount - 1))
- {
- step = GetUniformStepSize(0.1, azCount, elems.data());
- if(step <= 0.0)
- {
- fprintf(stderr, "Incompatible layout (non-uniform azimuths).\n");
- return false;
- }
-
- azCounts[fi*MAX_EV_COUNT + ei] = static_cast<uint>(std::round(360.0 / step));
- if(azCounts[fi*MAX_EV_COUNT + ei] > MAX_AZ_COUNT)
- {
- fprintf(stderr,
- "Incompatible layout (too many azimuths on elev=%f, rad=%f, %u > %u).\n",
- ev, dist, azCounts[fi*MAX_EV_COUNT + ei], MAX_AZ_COUNT);
- return false;
- }
- }
- else if(azCount != 1)
- {
- fprintf(stderr, "Incompatible layout (non-singular poles).\n");
- return false;
- }
- else
- {
- azCounts[fi*MAX_EV_COUNT + ei] = 1;
- }
+ azCounts[fi][ei] = field.mAzCounts[ei];
+ ir_total += field.mAzCounts[ei];
}
- for(uint ei{0u};ei < evStart;ei++)
- azCounts[fi*MAX_EV_COUNT + ei] = azCounts[fi*MAX_EV_COUNT + evCount - ei - 1];
+ ++fi;
}
- return PrepareHrirData(fdCount, distances, evCounts, azCounts.data(), hData) != 0;
+ fprintf(stdout, "Using %u of %u IRs.\n", ir_total, m);
+ const auto azs = al::as_span(azCounts).first<MAX_FD_COUNT>();
+ return PrepareHrirData({distances, fi}, evCounts, azs, hData);
}
-bool PrepareSampleRate(MYSOFA_HRTF *sofaHrtf, HrirDataT *hData)
+float GetSampleRate(MYSOFA_HRTF *sofaHrtf)
{
const char *srate_dim{nullptr};
const char *srate_units{nullptr};
@@ -308,7 +105,7 @@ bool PrepareSampleRate(MYSOFA_HRTF *sofaHrtf, HrirDataT *hData)
if(srate_dim)
{
fprintf(stderr, "Duplicate SampleRate.DIMENSION_LIST\n");
- return false;
+ return 0.0f;
}
srate_dim = srate_attrs->value;
}
@@ -317,7 +114,7 @@ bool PrepareSampleRate(MYSOFA_HRTF *sofaHrtf, HrirDataT *hData)
if(srate_units)
{
fprintf(stderr, "Duplicate SampleRate.Units\n");
- return false;
+ return 0.0f;
}
srate_units = srate_attrs->value;
}
@@ -329,35 +126,40 @@ bool PrepareSampleRate(MYSOFA_HRTF *sofaHrtf, HrirDataT *hData)
if(!srate_dim)
{
fprintf(stderr, "Missing sample rate dimensions\n");
- return false;
+ return 0.0f;
}
if(srate_dim != std::string{"I"})
{
fprintf(stderr, "Unsupported sample rate dimensions: %s\n", srate_dim);
- return false;
+ return 0.0f;
}
if(!srate_units)
{
fprintf(stderr, "Missing sample rate unit type\n");
- return false;
+ return 0.0f;
}
if(srate_units != std::string{"hertz"})
{
fprintf(stderr, "Unsupported sample rate unit type: %s\n", srate_units);
- return false;
+ return 0.0f;
}
/* I dimensions guarantees 1 element, so just extract it. */
- hData->mIrRate = static_cast<uint>(srate_array->values[0] + 0.5f);
- if(hData->mIrRate < MIN_RATE || hData->mIrRate > MAX_RATE)
+ if(srate_array->values[0] < MIN_RATE || srate_array->values[0] > MAX_RATE)
{
- fprintf(stderr, "Sample rate out of range: %u (expected %u to %u)", hData->mIrRate,
+ fprintf(stderr, "Sample rate out of range: %f (expected %u to %u)", srate_array->values[0],
MIN_RATE, MAX_RATE);
- return false;
+ return 0.0f;
}
- return true;
+ return srate_array->values[0];
}
-bool PrepareDelay(MYSOFA_HRTF *sofaHrtf, HrirDataT *hData)
+enum class DelayType : uint8_t {
+ None,
+ I_R, /* [1][Channels] */
+ M_R, /* [HRIRs][Channels] */
+ Invalid,
+};
+DelayType PrepareDelay(MYSOFA_HRTF *sofaHrtf)
{
const char *delay_dim{nullptr};
MYSOFA_ARRAY *delay_array{&sofaHrtf->DataDelay};
@@ -369,38 +171,27 @@ bool PrepareDelay(MYSOFA_HRTF *sofaHrtf, HrirDataT *hData)
if(delay_dim)
{
fprintf(stderr, "Duplicate Delay.DIMENSION_LIST\n");
- return false;
+ return DelayType::Invalid;
}
delay_dim = delay_attrs->value;
}
else
fprintf(stderr, "Unexpected delay attribute: %s = %s\n", delay_attrs->name,
- delay_attrs->value);
+ delay_attrs->value ? delay_attrs->value : "<null>");
delay_attrs = delay_attrs->next;
}
if(!delay_dim)
{
fprintf(stderr, "Missing delay dimensions\n");
- /*return false;*/
- }
- else if(delay_dim != std::string{"I,R"})
- {
- fprintf(stderr, "Unsupported delay dimensions: %s\n", delay_dim);
- return false;
- }
- else if(hData->mChannelType == CT_STEREO)
- {
- /* I,R is 1xChannelCount. Makemhr currently removes any delay constant,
- * so we can ignore this as long as it's equal.
- */
- if(delay_array->values[0] != delay_array->values[1])
- {
- fprintf(stderr, "Mismatched delays not supported: %f, %f\n", delay_array->values[0],
- delay_array->values[1]);
- return false;
- }
+ return DelayType::None;
}
- return true;
+ if(delay_dim == std::string{"I,R"})
+ return DelayType::I_R;
+ else if(delay_dim == std::string{"M,R"})
+ return DelayType::M_R;
+
+ fprintf(stderr, "Unsupported delay dimensions: %s\n", delay_dim);
+ return DelayType::Invalid;
}
bool CheckIrData(MYSOFA_HRTF *sofaHrtf)
@@ -421,7 +212,7 @@ bool CheckIrData(MYSOFA_HRTF *sofaHrtf)
}
else
fprintf(stderr, "Unexpected IR attribute: %s = %s\n", ir_attrs->name,
- ir_attrs->value);
+ ir_attrs->value ? ir_attrs->value : "<null>");
ir_attrs = ir_attrs->next;
}
if(!ir_dim)
@@ -439,120 +230,183 @@ bool CheckIrData(MYSOFA_HRTF *sofaHrtf)
/* Calculate the onset time of a HRIR. */
-static double CalcHrirOnset(const uint rate, const uint n, std::vector<double> &upsampled,
- const double *hrir)
+static constexpr int OnsetRateMultiple{10};
+static double CalcHrirOnset(PPhaseResampler &rs, const uint rate, const uint n,
+ al::span<double> upsampled, const double *hrir)
{
- {
- ResamplerT rs;
- ResamplerSetup(&rs, rate, 10 * rate);
- ResamplerRun(&rs, n, hrir, 10 * n, upsampled.data());
- }
-
- double mag{std::accumulate(upsampled.cbegin(), upsampled.cend(), double{0.0},
- [](const double magnitude, const double sample) -> double
- { return std::max(magnitude, std::abs(sample)); })};
+ rs.process(n, hrir, static_cast<uint>(upsampled.size()), upsampled.data());
- mag *= 0.15;
- auto iter = std::find_if(upsampled.cbegin(), upsampled.cend(),
- [mag](const double sample) -> bool { return (std::abs(sample) >= mag); });
- return static_cast<double>(std::distance(upsampled.cbegin(), iter)) / (10.0*rate);
+ auto abs_lt = [](const double &lhs, const double &rhs) -> bool
+ { return std::abs(lhs) < std::abs(rhs); };
+ auto iter = std::max_element(upsampled.cbegin(), upsampled.cend(), abs_lt);
+ return static_cast<double>(std::distance(upsampled.cbegin(), iter)) /
+ (double{OnsetRateMultiple}*rate);
}
/* Calculate the magnitude response of a HRIR. */
-static void CalcHrirMagnitude(const uint points, const uint n, std::vector<complex_d> &h,
- const double *hrir, double *mag)
+static void CalcHrirMagnitude(const uint points, const uint n, al::span<complex_d> h, double *hrir)
{
auto iter = std::copy_n(hrir, points, h.begin());
std::fill(iter, h.end(), complex_d{0.0, 0.0});
FftForward(n, h.data());
- MagnitudeResponse(n, h.data(), mag);
+ MagnitudeResponse(n, h.data(), hrir);
}
-static bool LoadResponses(MYSOFA_HRTF *sofaHrtf, HrirDataT *hData)
+static bool LoadResponses(MYSOFA_HRTF *sofaHrtf, HrirDataT *hData, const DelayType delayType,
+ const uint outRate)
{
- const uint channels{(hData->mChannelType == CT_STEREO) ? 2u : 1u};
- hData->mHrirsBase.resize(channels * hData->mIrCount * hData->mIrSize);
- double *hrirs = hData->mHrirsBase.data();
-
- /* Temporary buffers used to calculate the IR's onset and frequency
- * magnitudes.
- */
- auto upsampled = std::vector<double>(10 * hData->mIrPoints);
- auto htemp = std::vector<complex_d>(hData->mFftSize);
- auto hrir = std::vector<double>(hData->mFftSize);
+ std::atomic<uint> loaded_count{0u};
- for(uint si{0u};si < sofaHrtf->M;si++)
+ auto load_proc = [sofaHrtf,hData,delayType,outRate,&loaded_count]() -> bool
{
- printf("\rLoading HRIRs... %d of %d", si+1, sofaHrtf->M);
- fflush(stdout);
+ const uint channels{(hData->mChannelType == CT_STEREO) ? 2u : 1u};
+ hData->mHrirsBase.resize(channels * hData->mIrCount * hData->mIrSize, 0.0);
+ double *hrirs = hData->mHrirsBase.data();
+
+ std::unique_ptr<double[]> restmp;
+ al::optional<PPhaseResampler> resampler;
+ if(outRate && outRate != hData->mIrRate)
+ {
+ resampler.emplace().init(hData->mIrRate, outRate);
+ restmp = std::make_unique<double[]>(sofaHrtf->N);
+ }
- float aer[3]{
- sofaHrtf->SourcePosition.values[3*si],
- sofaHrtf->SourcePosition.values[3*si + 1],
- sofaHrtf->SourcePosition.values[3*si + 2]
- };
- mysofa_c2s(aer);
+ for(uint si{0u};si < sofaHrtf->M;++si)
+ {
+ loaded_count.fetch_add(1u);
- if(std::abs(aer[1]) >= 89.999f)
- aer[0] = 0.0f;
- else
- aer[0] = std::fmod(360.0f - aer[0], 360.0f);
+ float aer[3]{
+ sofaHrtf->SourcePosition.values[3*si],
+ sofaHrtf->SourcePosition.values[3*si + 1],
+ sofaHrtf->SourcePosition.values[3*si + 2]
+ };
+ mysofa_c2s(aer);
- auto field = std::find_if(hData->mFds.cbegin(), hData->mFds.cend(),
- [&aer](const HrirFdT &fld) -> bool
+ if(std::abs(aer[1]) >= 89.999f)
+ aer[0] = 0.0f;
+ else
+ aer[0] = std::fmod(360.0f - aer[0], 360.0f);
+
+ auto field = std::find_if(hData->mFds.cbegin(), hData->mFds.cend(),
+ [&aer](const HrirFdT &fld) -> bool
+ { return (std::abs(aer[2] - fld.mDistance) < 0.001); });
+ if(field == hData->mFds.cend())
+ continue;
+
+ const double evscale{180.0 / static_cast<double>(field->mEvs.size()-1)};
+ double ef{(90.0 + aer[1]) / evscale};
+ auto ei = static_cast<uint>(std::round(ef));
+ ef = (ef - ei) * evscale;
+ if(std::abs(ef) >= 0.1) continue;
+
+ const double azscale{360.0 / static_cast<double>(field->mEvs[ei].mAzs.size())};
+ double af{aer[0] / azscale};
+ auto ai = static_cast<uint>(std::round(af));
+ af = (af-ai) * azscale;
+ ai %= static_cast<uint>(field->mEvs[ei].mAzs.size());
+ if(std::abs(af) >= 0.1) continue;
+
+ HrirAzT *azd = &field->mEvs[ei].mAzs[ai];
+ if(azd->mIrs[0] != nullptr)
{
- double delta = aer[2] - fld.mDistance;
- return (std::abs(delta) < 0.001);
- });
- if(field == hData->mFds.cend())
- continue;
-
- double ef{(90.0+aer[1]) / 180.0 * (field->mEvCount-1)};
- auto ei = static_cast<int>(std::round(ef));
- ef = (ef-ei) * 180.0 / (field->mEvCount-1);
- if(std::abs(ef) >= 0.1) continue;
-
- double af{aer[0] / 360.0 * field->mEvs[ei].mAzCount};
- auto ai = static_cast<int>(std::round(af));
- af = (af-ai) * 360.0 / field->mEvs[ei].mAzCount;
- ai %= field->mEvs[ei].mAzCount;
- if(std::abs(af) >= 0.1) continue;
-
- HrirAzT *azd = &field->mEvs[ei].mAzs[ai];
- if(azd->mIrs[0] != nullptr)
- {
- fprintf(stderr, "Multiple measurements near [ a=%f, e=%f, r=%f ].\n",
- aer[0], aer[1], aer[2]);
- return false;
+ fprintf(stderr, "\nMultiple measurements near [ a=%f, e=%f, r=%f ].\n",
+ aer[0], aer[1], aer[2]);
+ return false;
+ }
+
+ for(uint ti{0u};ti < channels;++ti)
+ {
+ azd->mIrs[ti] = &hrirs[hData->mIrSize * (hData->mIrCount*ti + azd->mIndex)];
+ if(!resampler)
+ std::copy_n(&sofaHrtf->DataIR.values[(si*sofaHrtf->R + ti)*sofaHrtf->N],
+ sofaHrtf->N, azd->mIrs[ti]);
+ else
+ {
+ std::copy_n(&sofaHrtf->DataIR.values[(si*sofaHrtf->R + ti)*sofaHrtf->N],
+ sofaHrtf->N, restmp.get());
+ resampler->process(sofaHrtf->N, restmp.get(), hData->mIrSize, azd->mIrs[ti]);
+ }
+ }
+
+ /* Include any per-channel or per-HRIR delays. */
+ if(delayType == DelayType::I_R)
+ {
+ const float *delayValues{sofaHrtf->DataDelay.values};
+ for(uint ti{0u};ti < channels;++ti)
+ azd->mDelays[ti] = delayValues[ti] / static_cast<float>(hData->mIrRate);
+ }
+ else if(delayType == DelayType::M_R)
+ {
+ const float *delayValues{sofaHrtf->DataDelay.values};
+ for(uint ti{0u};ti < channels;++ti)
+ azd->mDelays[ti] = delayValues[si*sofaHrtf->R + ti] /
+ static_cast<float>(hData->mIrRate);
+ }
}
- for(uint ti{0u};ti < channels;++ti)
+ if(outRate && outRate != hData->mIrRate)
{
- std::copy_n(&sofaHrtf->DataIR.values[(si*sofaHrtf->R + ti)*sofaHrtf->N],
- hData->mIrPoints, hrir.begin());
- azd->mIrs[ti] = &hrirs[hData->mIrSize * (hData->mIrCount*ti + azd->mIndex)];
- azd->mDelays[ti] = CalcHrirOnset(hData->mIrRate, hData->mIrPoints, upsampled,
- hrir.data());
- CalcHrirMagnitude(hData->mIrPoints, hData->mFftSize, htemp, hrir.data(),
- azd->mIrs[ti]);
+ const double scale{static_cast<double>(outRate) / hData->mIrRate};
+ hData->mIrRate = outRate;
+ hData->mIrPoints = std::min(static_cast<uint>(std::ceil(hData->mIrPoints*scale)),
+ hData->mIrSize);
}
-
- // TODO: Since some SOFA files contain minimum phase HRIRs,
- // it would be beneficial to check for per-measurement delays
- // (when available) to reconstruct the HRTDs.
- }
- printf("\n");
- return true;
+ return true;
+ };
+
+ std::future_status load_status{};
+ auto load_future = std::async(std::launch::async, load_proc);
+ do {
+ load_status = load_future.wait_for(std::chrono::milliseconds{50});
+ printf("\rLoading HRIRs... %u of %u", loaded_count.load(), sofaHrtf->M);
+ fflush(stdout);
+ } while(load_status != std::future_status::ready);
+ fputc('\n', stdout);
+ return load_future.get();
}
-struct MySofaHrtfDeleter {
- void operator()(MYSOFA_HRTF *ptr) { mysofa_free(ptr); }
+
+/* Calculates the frequency magnitudes of the HRIR set. Work is delegated to
+ * this struct, which runs asynchronously on one or more threads (sharing the
+ * same calculator object).
+ */
+struct MagCalculator {
+ const uint mFftSize{};
+ const uint mIrPoints{};
+ std::vector<double*> mIrs{};
+ std::atomic<size_t> mCurrent{};
+ std::atomic<size_t> mDone{};
+
+ void Worker()
+ {
+ auto htemp = std::vector<complex_d>(mFftSize);
+
+ while(1)
+ {
+ /* Load the current index to process. */
+ size_t idx{mCurrent.load()};
+ do {
+ /* If the index is at the end, we're done. */
+ if(idx >= mIrs.size())
+ return;
+ /* Otherwise, increment the current index atomically so other
+ * threads know to go to the next one. If this call fails, the
+ * current index was just changed by another thread and the new
+ * value is loaded into idx, which we'll recheck.
+ */
+ } while(!mCurrent.compare_exchange_weak(idx, idx+1, std::memory_order_relaxed));
+
+ CalcHrirMagnitude(mIrPoints, mFftSize, htemp, mIrs[idx]);
+
+ /* Increment the number of IRs done. */
+ mDone.fetch_add(1);
+ }
+ }
};
-using MySofaHrtfPtr = std::unique_ptr<MYSOFA_HRTF,MySofaHrtfDeleter>;
-bool LoadSofaFile(const char *filename, const uint fftSize, const uint truncSize,
- const ChannelModeT chanMode, HrirDataT *hData)
+bool LoadSofaFile(const char *filename, const uint numThreads, const uint fftSize,
+ const uint truncSize, const uint outRate, const ChannelModeT chanMode, HrirDataT *hData)
{
int err;
MySofaHrtfPtr sofaHrtf{mysofa_load(filename, &err)};
@@ -605,39 +459,45 @@ bool LoadSofaFile(const char *filename, const uint fftSize, const uint truncSize
/* Assume a default head radius of 9cm. */
hData->mRadius = 0.09;
- if(!PrepareSampleRate(sofaHrtf.get(), hData) || !PrepareDelay(sofaHrtf.get(), hData)
- || !CheckIrData(sofaHrtf.get()))
+ hData->mIrRate = static_cast<uint>(GetSampleRate(sofaHrtf.get()) + 0.5f);
+ if(!hData->mIrRate)
return false;
- if(!PrepareLayout(sofaHrtf->M, sofaHrtf->SourcePosition.values, hData))
+
+ DelayType delayType = PrepareDelay(sofaHrtf.get());
+ if(delayType == DelayType::Invalid)
return false;
- if(!LoadResponses(sofaHrtf.get(), hData))
+ if(!CheckIrData(sofaHrtf.get()))
+ return false;
+ if(!PrepareLayout(sofaHrtf->M, sofaHrtf->SourcePosition.values, hData))
+ return false;
+ if(!LoadResponses(sofaHrtf.get(), hData, delayType, outRate))
return false;
sofaHrtf = nullptr;
- for(uint fi{0u};fi < hData->mFdCount;fi++)
+ for(uint fi{0u};fi < hData->mFds.size();fi++)
{
uint ei{0u};
- for(;ei < hData->mFds[fi].mEvCount;ei++)
+ for(;ei < hData->mFds[fi].mEvs.size();ei++)
{
uint ai{0u};
- for(;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(;ai < hData->mFds[fi].mEvs[ei].mAzs.size();ai++)
{
HrirAzT &azd = hData->mFds[fi].mEvs[ei].mAzs[ai];
if(azd.mIrs[0] != nullptr) break;
}
- if(ai < hData->mFds[fi].mEvs[ei].mAzCount)
+ if(ai < hData->mFds[fi].mEvs[ei].mAzs.size())
break;
}
- if(ei >= hData->mFds[fi].mEvCount)
+ if(ei >= hData->mFds[fi].mEvs.size())
{
fprintf(stderr, "Missing source references [ %d, *, * ].\n", fi);
return false;
}
hData->mFds[fi].mEvStart = ei;
- for(;ei < hData->mFds[fi].mEvCount;ei++)
+ for(;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(uint ai{0u};ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(uint ai{0u};ai < hData->mFds[fi].mEvs[ei].mAzs.size();ai++)
{
HrirAzT &azd = hData->mFds[fi].mEvs[ei].mAzs[ai];
if(azd.mIrs[0] == nullptr)
@@ -649,20 +509,95 @@ bool LoadSofaFile(const char *filename, const uint fftSize, const uint truncSize
}
}
+
+ size_t hrir_total{0};
const uint channels{(hData->mChannelType == CT_STEREO) ? 2u : 1u};
double *hrirs = hData->mHrirsBase.data();
- for(uint fi{0u};fi < hData->mFdCount;fi++)
+ for(uint fi{0u};fi < hData->mFds.size();fi++)
{
- for(uint ei{0u};ei < hData->mFds[fi].mEvCount;ei++)
+ for(uint ei{0u};ei < hData->mFds[fi].mEvStart;ei++)
{
- for(uint ai{0u};ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(uint ai{0u};ai < hData->mFds[fi].mEvs[ei].mAzs.size();ai++)
{
HrirAzT &azd = hData->mFds[fi].mEvs[ei].mAzs[ai];
for(uint ti{0u};ti < channels;ti++)
azd.mIrs[ti] = &hrirs[hData->mIrSize * (hData->mIrCount*ti + azd.mIndex)];
}
}
+
+ for(uint ei{hData->mFds[fi].mEvStart};ei < hData->mFds[fi].mEvs.size();ei++)
+ hrir_total += hData->mFds[fi].mEvs[ei].mAzs.size() * channels;
}
+ std::atomic<size_t> hrir_done{0};
+ auto onset_proc = [hData,channels,&hrir_done]() -> bool
+ {
+ /* Temporary buffer used to calculate the IR's onset. */
+ auto upsampled = std::vector<double>(OnsetRateMultiple * hData->mIrPoints);
+ /* This resampler is used to help detect the response onset. */
+ PPhaseResampler rs;
+ rs.init(hData->mIrRate, OnsetRateMultiple*hData->mIrRate);
+
+ for(auto &field : hData->mFds)
+ {
+ for(auto &elev : field.mEvs.subspan(field.mEvStart))
+ {
+ for(auto &azd : elev.mAzs)
+ {
+ for(uint ti{0};ti < channels;ti++)
+ {
+ hrir_done.fetch_add(1u, std::memory_order_acq_rel);
+ azd.mDelays[ti] += CalcHrirOnset(rs, hData->mIrRate, hData->mIrPoints,
+ upsampled, azd.mIrs[ti]);
+ }
+ }
+ }
+ }
+ return true;
+ };
+
+ std::future_status load_status{};
+ auto load_future = std::async(std::launch::async, onset_proc);
+ do {
+ load_status = load_future.wait_for(std::chrono::milliseconds{50});
+ printf("\rCalculating HRIR onsets... %zu of %zu", hrir_done.load(), hrir_total);
+ fflush(stdout);
+ } while(load_status != std::future_status::ready);
+ fputc('\n', stdout);
+ if(!load_future.get())
+ return false;
+
+ MagCalculator calculator{hData->mFftSize, hData->mIrPoints};
+ for(auto &field : hData->mFds)
+ {
+ for(auto &elev : field.mEvs.subspan(field.mEvStart))
+ {
+ for(auto &azd : elev.mAzs)
+ {
+ for(uint ti{0};ti < channels;ti++)
+ calculator.mIrs.push_back(azd.mIrs[ti]);
+ }
+ }
+ }
+
+ std::vector<std::thread> thrds;
+ thrds.reserve(numThreads);
+ for(size_t i{0};i < numThreads;++i)
+ thrds.emplace_back(std::mem_fn(&MagCalculator::Worker), &calculator);
+ size_t count;
+ do {
+ std::this_thread::sleep_for(std::chrono::milliseconds{50});
+ count = calculator.mDone.load();
+
+ printf("\rCalculating HRIR magnitudes... %zu of %zu", count, calculator.mIrs.size());
+ fflush(stdout);
+ } while(count != calculator.mIrs.size());
+ fputc('\n', stdout);
+
+ for(auto &thrd : thrds)
+ {
+ if(thrd.joinable())
+ thrd.join();
+ }
return true;
}
diff --git a/utils/makemhr/loadsofa.h b/utils/makemhr/loadsofa.h
index 93bf1704..82dce85a 100644
--- a/utils/makemhr/loadsofa.h
+++ b/utils/makemhr/loadsofa.h
@@ -4,7 +4,7 @@
#include "makemhr.h"
-bool LoadSofaFile(const char *filename, const uint fftSize, const uint truncSize,
- const ChannelModeT chanMode, HrirDataT *hData);
+bool LoadSofaFile(const char *filename, const uint numThreads, const uint fftSize,
+ const uint truncSize, const uint outRate, const ChannelModeT chanMode, HrirDataT *hData);
#endif /* LOADSOFA_H */
diff --git a/utils/makemhr/makemhr.cpp b/utils/makemhr/makemhr.cpp
index 1e28ca4b..ae301dc3 100644
--- a/utils/makemhr/makemhr.cpp
+++ b/utils/makemhr/makemhr.cpp
@@ -88,7 +88,9 @@
#include "../getopt.h"
#endif
+#include "alcomplex.h"
#include "alfstream.h"
+#include "alspan.h"
#include "alstring.h"
#include "loaddef.h"
#include "loadsofa.h"
@@ -107,6 +109,8 @@ using namespace std::placeholders;
#endif
+HrirDataT::~HrirDataT() = default;
+
// Head model used for calculating the impulse delays.
enum HeadModelT {
HM_NONE,
@@ -128,22 +132,18 @@ enum HeadModelT {
// The limits to the truncation window size on the command line.
#define MIN_TRUNCSIZE (16)
-#define MAX_TRUNCSIZE (512)
+#define MAX_TRUNCSIZE (128)
// The limits to the custom head radius on the command line.
#define MIN_CUSTOM_RADIUS (0.05)
#define MAX_CUSTOM_RADIUS (0.15)
-// The truncation window size must be a multiple of the below value to allow
-// for vectorized convolution.
-#define MOD_TRUNCSIZE (8)
-
// The defaults for the command line options.
#define DEFAULT_FFTSIZE (65536)
#define DEFAULT_EQUALIZE (1)
#define DEFAULT_SURFACE (1)
#define DEFAULT_LIMIT (24.0)
-#define DEFAULT_TRUNCSIZE (32)
+#define DEFAULT_TRUNCSIZE (64)
#define DEFAULT_HEAD_MODEL (HM_DATASET)
#define DEFAULT_CUSTOM_RADIUS (0.0)
@@ -151,8 +151,8 @@ enum HeadModelT {
#define MAX_HRTD (63.0)
// The OpenAL Soft HRTF format marker. It stands for minimum-phase head
-// response protocol 02.
-#define MHR_FORMAT ("MinPHR02")
+// response protocol 03.
+#define MHR_FORMAT ("MinPHR03")
/* Channel index enums. Mono uses LeftChannel only. */
enum ChannelIndex : uint {
@@ -165,42 +165,31 @@ enum ChannelIndex : uint {
* pattern string are replaced with the replacement string. The result is
* truncated if necessary.
*/
-static int StrSubst(const char *in, const char *pat, const char *rep, const size_t maxLen, char *out)
+static std::string StrSubst(al::span<const char> in, const al::span<const char> pat,
+ const al::span<const char> rep)
{
- size_t inLen, patLen, repLen;
- size_t si, di;
- int truncated;
-
- inLen = strlen(in);
- patLen = strlen(pat);
- repLen = strlen(rep);
- si = 0;
- di = 0;
- truncated = 0;
- while(si < inLen && di < maxLen)
+ std::string ret;
+ ret.reserve(in.size() + pat.size());
+
+ while(in.size() >= pat.size())
{
- if(patLen <= inLen-si)
+ if(al::strncasecmp(in.data(), pat.data(), pat.size()) == 0)
{
- if(al::strncasecmp(&in[si], pat, patLen) == 0)
- {
- if(repLen > maxLen-di)
- {
- repLen = maxLen - di;
- truncated = 1;
- }
- strncpy(&out[di], rep, repLen);
- si += patLen;
- di += repLen;
- }
+ in = in.subspan(pat.size());
+ ret.append(rep.data(), rep.size());
+ }
+ else
+ {
+ size_t endpos{1};
+ while(endpos < in.size() && in[endpos] != pat.front())
+ ++endpos;
+ ret.append(in.data(), endpos);
+ in = in.subspan(endpos);
}
- out[di] = in[si];
- si++;
- di++;
}
- if(si < inLen)
- truncated = 1;
- out[di] = '\0';
- return !truncated;
+ ret.append(in.data(), in.size());
+
+ return ret;
}
@@ -236,94 +225,14 @@ static void TpdfDither(double *RESTRICT out, const double *RESTRICT in, const do
}
}
-/* Fast Fourier transform routines. The number of points must be a power of
- * two.
- */
-
-// Performs bit-reversal ordering.
-static void FftArrange(const uint n, complex_d *inout)
-{
- // Handle in-place arrangement.
- uint rk{0u};
- for(uint k{0u};k < n;k++)
- {
- if(rk > k)
- std::swap(inout[rk], inout[k]);
-
- uint m{n};
- while(rk&(m >>= 1))
- rk &= ~m;
- rk |= m;
- }
-}
-
-// Performs the summation.
-static void FftSummation(const uint n, const double s, complex_d *cplx)
-{
- double pi;
- uint m, m2;
- uint i, k, mk;
-
- pi = s * M_PI;
- for(m = 1, m2 = 2;m < n; m <<= 1, m2 <<= 1)
- {
- // v = Complex (-2.0 * sin (0.5 * pi / m) * sin (0.5 * pi / m), -sin (pi / m))
- double sm = std::sin(0.5 * pi / m);
- auto v = complex_d{-2.0*sm*sm, -std::sin(pi / m)};
- auto w = complex_d{1.0, 0.0};
- for(i = 0;i < m;i++)
- {
- for(k = i;k < n;k += m2)
- {
- mk = k + m;
- auto t = w * cplx[mk];
- cplx[mk] = cplx[k] - t;
- cplx[k] = cplx[k] + t;
- }
- w += v*w;
- }
- }
-}
-
-// Performs a forward FFT.
-void FftForward(const uint n, complex_d *inout)
-{
- FftArrange(n, inout);
- FftSummation(n, 1.0, inout);
-}
-
-// Performs an inverse FFT.
-void FftInverse(const uint n, complex_d *inout)
-{
- FftArrange(n, inout);
- FftSummation(n, -1.0, inout);
- double f{1.0 / n};
- for(uint i{0};i < n;i++)
- inout[i] *= f;
-}
/* Calculate the complex helical sequence (or discrete-time analytical signal)
* of the given input using the Hilbert transform. Given the natural logarithm
* of a signal's magnitude response, the imaginary components can be used as
* the angles for minimum-phase reconstruction.
*/
-static void Hilbert(const uint n, complex_d *inout)
-{
- uint i;
-
- // Handle in-place operation.
- for(i = 0;i < n;i++)
- inout[i].imag(0.0);
-
- FftInverse(n, inout);
- for(i = 1;i < (n+1)/2;i++)
- inout[i] *= 2.0;
- /* Increment i if n is even. */
- i += (n&1)^1;
- for(;i < n;i++)
- inout[i] = complex_d{0.0, 0.0};
- FftForward(n, inout);
-}
+inline static void Hilbert(const uint n, complex_d *inout)
+{ complex_hilbert({inout, n}); }
/* Calculate the magnitude response of the given input. This is used in
* place of phase decomposition, since the phase residuals are discarded for
@@ -372,17 +281,13 @@ static void LimitMagnitudeResponse(const uint n, const uint m, const double limi
* residuals (which were discarded). The mirrored half of the response is
* reconstructed.
*/
-static void MinimumPhase(const uint n, const double *in, complex_d *out)
+static void MinimumPhase(const uint n, double *mags, complex_d *out)
{
- const uint m = 1 + (n / 2);
- std::vector<double> mags(n);
+ const uint m{(n/2) + 1};
uint i;
for(i = 0;i < m;i++)
- {
- mags[i] = std::max(EPSILON, in[i]);
- out[i] = complex_d{std::log(mags[i]), 0.0};
- }
+ out[i] = std::log(mags[i]);
for(;i < n;i++)
{
mags[i] = mags[n - i];
@@ -394,238 +299,7 @@ static void MinimumPhase(const uint n, const double *in, complex_d *out)
for(i = 0;i < n;i++)
{
auto a = std::exp(complex_d{0.0, out[i].imag()});
- out[i] = complex_d{mags[i], 0.0} * a;
- }
-}
-
-
-/***************************
- *** Resampler functions ***
- ***************************/
-
-/* This is the normalized cardinal sine (sinc) function.
- *
- * sinc(x) = { 1, x = 0
- * { sin(pi x) / (pi x), otherwise.
- */
-static double Sinc(const double x)
-{
- if(std::abs(x) < EPSILON)
- return 1.0;
- return std::sin(M_PI * x) / (M_PI * x);
-}
-
-/* The zero-order modified Bessel function of the first kind, used for the
- * Kaiser window.
- *
- * I_0(x) = sum_{k=0}^inf (1 / k!)^2 (x / 2)^(2 k)
- * = sum_{k=0}^inf ((x / 2)^k / k!)^2
- */
-static double BesselI_0(const double x)
-{
- double term, sum, x2, y, last_sum;
- int k;
-
- // Start at k=1 since k=0 is trivial.
- term = 1.0;
- sum = 1.0;
- x2 = x/2.0;
- k = 1;
-
- // Let the integration converge until the term of the sum is no longer
- // significant.
- do {
- y = x2 / k;
- k++;
- last_sum = sum;
- term *= y * y;
- sum += term;
- } while(sum != last_sum);
- return sum;
-}
-
-/* Calculate a Kaiser window from the given beta value and a normalized k
- * [-1, 1].
- *
- * w(k) = { I_0(B sqrt(1 - k^2)) / I_0(B), -1 <= k <= 1
- * { 0, elsewhere.
- *
- * Where k can be calculated as:
- *
- * k = i / l, where -l <= i <= l.
- *
- * or:
- *
- * k = 2 i / M - 1, where 0 <= i <= M.
- */
-static double Kaiser(const double b, const double k)
-{
- if(!(k >= -1.0 && k <= 1.0))
- return 0.0;
- return BesselI_0(b * std::sqrt(1.0 - k*k)) / BesselI_0(b);
-}
-
-// Calculates the greatest common divisor of a and b.
-static uint Gcd(uint x, uint y)
-{
- while(y > 0)
- {
- uint z{y};
- y = x % y;
- x = z;
- }
- return x;
-}
-
-/* Calculates the size (order) of the Kaiser window. Rejection is in dB and
- * the transition width is normalized frequency (0.5 is nyquist).
- *
- * M = { ceil((r - 7.95) / (2.285 2 pi f_t)), r > 21
- * { ceil(5.79 / 2 pi f_t), r <= 21.
- *
- */
-static uint CalcKaiserOrder(const double rejection, const double transition)
-{
- double w_t = 2.0 * M_PI * transition;
- if(rejection > 21.0)
- return static_cast<uint>(std::ceil((rejection - 7.95) / (2.285 * w_t)));
- return static_cast<uint>(std::ceil(5.79 / w_t));
-}
-
-// Calculates the beta value of the Kaiser window. Rejection is in dB.
-static double CalcKaiserBeta(const double rejection)
-{
- if(rejection > 50.0)
- return 0.1102 * (rejection - 8.7);
- if(rejection >= 21.0)
- return (0.5842 * std::pow(rejection - 21.0, 0.4)) +
- (0.07886 * (rejection - 21.0));
- return 0.0;
-}
-
-/* Calculates a point on the Kaiser-windowed sinc filter for the given half-
- * width, beta, gain, and cutoff. The point is specified in non-normalized
- * samples, from 0 to M, where M = (2 l + 1).
- *
- * w(k) 2 p f_t sinc(2 f_t x)
- *
- * x -- centered sample index (i - l)
- * k -- normalized and centered window index (x / l)
- * w(k) -- window function (Kaiser)
- * p -- gain compensation factor when sampling
- * f_t -- normalized center frequency (or cutoff; 0.5 is nyquist)
- */
-static double SincFilter(const uint l, const double b, const double gain, const double cutoff, const uint i)
-{
- return Kaiser(b, static_cast<double>(i - l) / l) * 2.0 * gain * cutoff * Sinc(2.0 * cutoff * (i - l));
-}
-
-/* This is a polyphase sinc-filtered resampler.
- *
- * Upsample Downsample
- *
- * p/q = 3/2 p/q = 3/5
- *
- * M-+-+-+-> M-+-+-+->
- * -------------------+ ---------------------+
- * p s * f f f f|f| | p s * f f f f f |
- * | 0 * 0 0 0|0|0 | | 0 * 0 0 0 0|0| |
- * v 0 * 0 0|0|0 0 | v 0 * 0 0 0|0|0 |
- * s * f|f|f f f | s * f f|f|f f |
- * 0 * |0|0 0 0 0 | 0 * 0|0|0 0 0 |
- * --------+=+--------+ 0 * |0|0 0 0 0 |
- * d . d .|d|. d . d ----------+=+--------+
- * d . . . .|d|. . . .
- * q->
- * q-+-+-+->
- *
- * P_f(i,j) = q i mod p + pj
- * P_s(i,j) = floor(q i / p) - j
- * d[i=0..N-1] = sum_{j=0}^{floor((M - 1) / p)} {
- * { f[P_f(i,j)] s[P_s(i,j)], P_f(i,j) < M
- * { 0, P_f(i,j) >= M. }
- */
-
-// Calculate the resampling metrics and build the Kaiser-windowed sinc filter
-// that's used to cut frequencies above the destination nyquist.
-void ResamplerSetup(ResamplerT *rs, const uint srcRate, const uint dstRate)
-{
- const uint gcd{Gcd(srcRate, dstRate)};
- rs->mP = dstRate / gcd;
- rs->mQ = srcRate / gcd;
-
- /* The cutoff is adjusted by half the transition width, so the transition
- * ends before the nyquist (0.5). Both are scaled by the downsampling
- * factor.
- */
- double cutoff, width;
- if(rs->mP > rs->mQ)
- {
- cutoff = 0.475 / rs->mP;
- width = 0.05 / rs->mP;
- }
- else
- {
- cutoff = 0.475 / rs->mQ;
- width = 0.05 / rs->mQ;
- }
- // A rejection of -180 dB is used for the stop band. Round up when
- // calculating the left offset to avoid increasing the transition width.
- const uint l{(CalcKaiserOrder(180.0, width)+1) / 2};
- const double beta{CalcKaiserBeta(180.0)};
- rs->mM = l*2 + 1;
- rs->mL = l;
- rs->mF.resize(rs->mM);
- for(uint i{0};i < rs->mM;i++)
- rs->mF[i] = SincFilter(l, beta, rs->mP, cutoff, i);
-}
-
-// Perform the upsample-filter-downsample resampling operation using a
-// polyphase filter implementation.
-void ResamplerRun(ResamplerT *rs, const uint inN, const double *in, const uint outN, double *out)
-{
- const uint p = rs->mP, q = rs->mQ, m = rs->mM, l = rs->mL;
- std::vector<double> workspace;
- const double *f = rs->mF.data();
- uint j_f, j_s;
- double *work;
- uint i;
-
- if(outN == 0)
- return;
-
- // Handle in-place operation.
- if(in == out)
- {
- workspace.resize(outN);
- work = workspace.data();
- }
- else
- work = out;
- // Resample the input.
- for(i = 0;i < outN;i++)
- {
- double r = 0.0;
- // Input starts at l to compensate for the filter delay. This will
- // drop any build-up from the first half of the filter.
- j_f = (l + (q * i)) % p;
- j_s = (l + (q * i)) / p;
- while(j_f < m)
- {
- // Only take input when 0 <= j_s < inN. This single unsigned
- // comparison catches both cases.
- if(j_s < inN)
- r += f[j_f] * in[j_s];
- j_f += p;
- j_s--;
- }
- work[i] = r;
- }
- // Clean up after in-place operation.
- if(work != out)
- {
- for(i = 0;i < outN;i++)
- out[i] = work[i];
+ out[i] = a * mags[i];
}
}
@@ -670,11 +344,11 @@ static int WriteBin4(const uint bytes, const uint32_t in, FILE *fp, const char *
// Store the OpenAL Soft HRTF data set.
static int StoreMhr(const HrirDataT *hData, const char *filename)
{
- uint channels = (hData->mChannelType == CT_STEREO) ? 2 : 1;
- uint n = hData->mIrPoints;
- FILE *fp;
+ const uint channels{(hData->mChannelType == CT_STEREO) ? 2u : 1u};
+ const uint n{hData->mIrPoints};
+ uint dither_seed{22222};
uint fi, ei, ai, i;
- uint dither_seed = 22222;
+ FILE *fp;
if((fp=fopen(filename, "wb")) == nullptr)
{
@@ -685,38 +359,35 @@ static int StoreMhr(const HrirDataT *hData, const char *filename)
return 0;
if(!WriteBin4(4, hData->mIrRate, fp, filename))
return 0;
- if(!WriteBin4(1, static_cast<uint32_t>(hData->mSampleType), fp, filename))
- return 0;
if(!WriteBin4(1, static_cast<uint32_t>(hData->mChannelType), fp, filename))
return 0;
if(!WriteBin4(1, hData->mIrPoints, fp, filename))
return 0;
- if(!WriteBin4(1, hData->mFdCount, fp, filename))
+ if(!WriteBin4(1, static_cast<uint>(hData->mFds.size()), fp, filename))
return 0;
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = static_cast<uint>(hData->mFds.size()-1);fi < hData->mFds.size();fi--)
{
auto fdist = static_cast<uint32_t>(std::round(1000.0 * hData->mFds[fi].mDistance));
if(!WriteBin4(2, fdist, fp, filename))
return 0;
- if(!WriteBin4(1, hData->mFds[fi].mEvCount, fp, filename))
+ if(!WriteBin4(1, static_cast<uint32_t>(hData->mFds[fi].mEvs.size()), fp, filename))
return 0;
- for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = 0;ei < hData->mFds[fi].mEvs.size();ei++)
{
- if(!WriteBin4(1, hData->mFds[fi].mEvs[ei].mAzCount, fp, filename))
+ const auto &elev = hData->mFds[fi].mEvs[ei];
+ if(!WriteBin4(1, static_cast<uint32_t>(elev.mAzs.size()), fp, filename))
return 0;
}
}
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = static_cast<uint>(hData->mFds.size()-1);fi < hData->mFds.size();fi--)
{
- const double scale = (hData->mSampleType == ST_S16) ? 32767.0 :
- ((hData->mSampleType == ST_S24) ? 8388607.0 : 0.0);
- const uint bps = (hData->mSampleType == ST_S16) ? 2 :
- ((hData->mSampleType == ST_S24) ? 3 : 0);
+ constexpr double scale{8388607.0};
+ constexpr uint bps{3u};
- for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = 0;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzs.size();ai++)
{
HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
double out[2 * MAX_TRUNCSIZE];
@@ -726,30 +397,27 @@ static int StoreMhr(const HrirDataT *hData, const char *filename)
TpdfDither(out+1, azd->mIrs[1], scale, n, channels, &dither_seed);
for(i = 0;i < (channels * n);i++)
{
- int v = static_cast<int>(Clamp(out[i], -scale-1.0, scale));
+ const auto v = static_cast<int>(Clamp(out[i], -scale-1.0, scale));
if(!WriteBin4(bps, static_cast<uint32_t>(v), fp, filename))
return 0;
}
}
}
}
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = static_cast<uint>(hData->mFds.size()-1);fi < hData->mFds.size();fi--)
{
- for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
+ /* Delay storage has 2 bits of extra precision. */
+ constexpr double DelayPrecScale{4.0};
+ for(ei = 0;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(const auto &azd : hData->mFds[fi].mEvs[ei].mAzs)
{
- const HrirAzT &azd = hData->mFds[fi].mEvs[ei].mAzs[ai];
- int v = static_cast<int>(std::min(std::round(hData->mIrRate * azd.mDelays[0]), MAX_HRTD));
-
- if(!WriteBin4(1, static_cast<uint32_t>(v), fp, filename))
- return 0;
+ auto v = static_cast<uint>(std::round(azd.mDelays[0]*DelayPrecScale));
+ if(!WriteBin4(1, v, fp, filename)) return 0;
if(hData->mChannelType == CT_STEREO)
{
- v = static_cast<int>(std::min(std::round(hData->mIrRate * azd.mDelays[1]), MAX_HRTD));
-
- if(!WriteBin4(1, static_cast<uint32_t>(v), fp, filename))
- return 0;
+ v = static_cast<uint>(std::round(azd.mDelays[1]*DelayPrecScale));
+ if(!WriteBin4(1, v, fp, filename)) return 0;
}
}
}
@@ -770,22 +438,21 @@ static int StoreMhr(const HrirDataT *hData, const char *filename)
static void BalanceFieldMagnitudes(const HrirDataT *hData, const uint channels, const uint m)
{
double maxMags[MAX_FD_COUNT];
- uint fi, ei, ai, ti, i;
+ uint fi, ei, ti, i;
double maxMag{0.0};
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
maxMags[fi] = 0.0;
- for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(const auto &azd : hData->mFds[fi].mEvs[ei].mAzs)
{
- HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
for(ti = 0;ti < channels;ti++)
{
for(i = 0;i < m;i++)
- maxMags[fi] = std::max(azd->mIrs[ti][i], maxMags[fi]);
+ maxMags[fi] = std::max(azd.mIrs[ti][i], maxMags[fi]);
}
}
}
@@ -793,19 +460,18 @@ static void BalanceFieldMagnitudes(const HrirDataT *hData, const uint channels,
maxMag = std::max(maxMags[fi], maxMag);
}
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
const double magFactor{maxMag / maxMags[fi]};
- for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(const auto &azd : hData->mFds[fi].mEvs[ei].mAzs)
{
- HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
for(ti = 0;ti < channels;ti++)
{
for(i = 0;i < m;i++)
- azd->mIrs[ti][i] *= magFactor;
+ azd.mIrs[ti][i] *= magFactor;
}
}
}
@@ -825,30 +491,32 @@ static void CalculateDfWeights(const HrirDataT *hData, double *weights)
sum = 0.0;
// The head radius acts as the limit for the inner radius.
innerRa = hData->mRadius;
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
// Each volume ends half way between progressive field measurements.
- if((fi + 1) < hData->mFdCount)
+ if((fi + 1) < hData->mFds.size())
outerRa = 0.5f * (hData->mFds[fi].mDistance + hData->mFds[fi + 1].mDistance);
// The final volume has its limit extended to some practical value.
// This is done to emphasize the far-field responses in the average.
else
outerRa = 10.0f;
- evs = M_PI / 2.0 / (hData->mFds[fi].mEvCount - 1);
- for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++)
+ const double raPowDiff{std::pow(outerRa, 3.0) - std::pow(innerRa, 3.0)};
+ evs = M_PI / 2.0 / static_cast<double>(hData->mFds[fi].mEvs.size() - 1);
+ for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvs.size();ei++)
{
+ const auto &elev = hData->mFds[fi].mEvs[ei];
// For each elevation, calculate the upper and lower limits of
// the patch band.
- ev = hData->mFds[fi].mEvs[ei].mElevation;
+ ev = elev.mElevation;
lowerEv = std::max(-M_PI / 2.0, ev - evs);
upperEv = std::min(M_PI / 2.0, ev + evs);
// Calculate the surface area of the patch band.
solidAngle = 2.0 * M_PI * (std::sin(upperEv) - std::sin(lowerEv));
// Then the volume of the extruded patch band.
- solidVolume = solidAngle * (std::pow(outerRa, 3.0) - std::pow(innerRa, 3.0)) / 3.0;
+ solidVolume = solidAngle * raPowDiff / 3.0;
// Each weight is the volume of one extruded patch.
- weights[(fi * MAX_EV_COUNT) + ei] = solidVolume / hData->mFds[fi].mEvs[ei].mAzCount;
+ weights[(fi*MAX_EV_COUNT) + ei] = solidVolume / static_cast<double>(elev.mAzs.size());
// Sum the total coverage volume of the HRIRs for all fields.
sum += solidAngle;
}
@@ -856,11 +524,11 @@ static void CalculateDfWeights(const HrirDataT *hData, double *weights)
innerRa = outerRa;
}
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
// Normalize the weights given the total surface coverage for all
// fields.
- for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvs.size();ei++)
weights[(fi * MAX_EV_COUNT) + ei] /= sum;
}
}
@@ -870,9 +538,10 @@ static void CalculateDfWeights(const HrirDataT *hData, double *weights)
* coverage of each HRIR. The final average can then be limited by the
* specified magnitude range (in positive dB; 0.0 to skip).
*/
-static void CalculateDiffuseFieldAverage(const HrirDataT *hData, const uint channels, const uint m, const int weighted, const double limit, double *dfa)
+static void CalculateDiffuseFieldAverage(const HrirDataT *hData, const uint channels, const uint m,
+ const int weighted, const double limit, double *dfa)
{
- std::vector<double> weights(hData->mFdCount * MAX_EV_COUNT);
+ std::vector<double> weights(hData->mFds.size() * MAX_EV_COUNT);
uint count, ti, fi, ei, i, ai;
if(weighted)
@@ -887,16 +556,16 @@ static void CalculateDiffuseFieldAverage(const HrirDataT *hData, const uint chan
// If coverage weighting is not used, the weights still need to be
// averaged by the number of existing HRIRs.
count = hData->mIrCount;
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
for(ei = 0;ei < hData->mFds[fi].mEvStart;ei++)
- count -= hData->mFds[fi].mEvs[ei].mAzCount;
+ count -= static_cast<uint>(hData->mFds[fi].mEvs[ei].mAzs.size());
}
weight = 1.0 / count;
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
- for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvs.size();ei++)
weights[(fi * MAX_EV_COUNT) + ei] = weight;
}
}
@@ -904,11 +573,11 @@ static void CalculateDiffuseFieldAverage(const HrirDataT *hData, const uint chan
{
for(i = 0;i < m;i++)
dfa[(ti * m) + i] = 0.0;
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
- for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzs.size();ai++)
{
HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
// Get the weight for this HRIR's contribution.
@@ -934,167 +603,36 @@ static void CalculateDiffuseFieldAverage(const HrirDataT *hData, const uint chan
// set using the given average response.
static void DiffuseFieldEqualize(const uint channels, const uint m, const double *dfa, const HrirDataT *hData)
{
- uint ti, fi, ei, ai, i;
+ uint ti, fi, ei, i;
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(fi = 0;fi < hData->mFds.size();fi++)
{
- for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++)
+ for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvs.size();ei++)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(auto &azd : hData->mFds[fi].mEvs[ei].mAzs)
{
- HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
-
for(ti = 0;ti < channels;ti++)
{
for(i = 0;i < m;i++)
- azd->mIrs[ti][i] /= dfa[(ti * m) + i];
+ azd.mIrs[ti][i] /= dfa[(ti * m) + i];
}
}
}
}
}
-/* Perform minimum-phase reconstruction using the magnitude responses of the
- * HRIR set. Work is delegated to this struct, which runs asynchronously on one
- * or more threads (sharing the same reconstructor object).
- */
-struct HrirReconstructor {
- std::vector<double*> mIrs;
- std::atomic<size_t> mCurrent;
- std::atomic<size_t> mDone;
- uint mFftSize;
- uint mIrPoints;
-
- void Worker()
- {
- auto h = std::vector<complex_d>(mFftSize);
-
- while(1)
- {
- /* Load the current index to process. */
- size_t idx{mCurrent.load()};
- do {
- /* If the index is at the end, we're done. */
- if(idx >= mIrs.size())
- return;
- /* Otherwise, increment the current index atomically so other
- * threads know to go to the next one. If this call fails, the
- * current index was just changed by another thread and the new
- * value is loaded into idx, which we'll recheck.
- */
- } while(!mCurrent.compare_exchange_weak(idx, idx+1, std::memory_order_relaxed));
-
- /* Now do the reconstruction, and apply the inverse FFT to get the
- * time-domain response.
- */
- MinimumPhase(mFftSize, mIrs[idx], h.data());
- FftInverse(mFftSize, h.data());
- for(uint i{0u};i < mIrPoints;++i)
- mIrs[idx][i] = h[i].real();
-
- /* Increment the number of IRs done. */
- mDone.fetch_add(1);
- }
- }
-};
-
-static void ReconstructHrirs(const HrirDataT *hData)
-{
- const uint channels{(hData->mChannelType == CT_STEREO) ? 2u : 1u};
-
- /* Count the number of IRs to process (excluding elevations that will be
- * synthesized later).
- */
- size_t total{hData->mIrCount};
- for(uint fi{0u};fi < hData->mFdCount;fi++)
- {
- for(uint ei{0u};ei < hData->mFds[fi].mEvStart;ei++)
- total -= hData->mFds[fi].mEvs[ei].mAzCount;
- }
- total *= channels;
-
- /* Set up the reconstructor with the needed size info and pointers to the
- * IRs to process.
- */
- HrirReconstructor reconstructor;
- reconstructor.mIrs.reserve(total);
- reconstructor.mCurrent.store(0, std::memory_order_relaxed);
- reconstructor.mDone.store(0, std::memory_order_relaxed);
- reconstructor.mFftSize = hData->mFftSize;
- reconstructor.mIrPoints = hData->mIrPoints;
- for(uint fi{0u};fi < hData->mFdCount;fi++)
- {
- const HrirFdT &field = hData->mFds[fi];
- for(uint ei{field.mEvStart};ei < field.mEvCount;ei++)
- {
- const HrirEvT &elev = field.mEvs[ei];
- for(uint ai{0u};ai < elev.mAzCount;ai++)
- {
- const HrirAzT &azd = elev.mAzs[ai];
- for(uint ti{0u};ti < channels;ti++)
- reconstructor.mIrs.push_back(azd.mIrs[ti]);
- }
- }
- }
-
- /* Launch two threads to work on reconstruction. */
- std::thread thrd1{std::mem_fn(&HrirReconstructor::Worker), &reconstructor};
- std::thread thrd2{std::mem_fn(&HrirReconstructor::Worker), &reconstructor};
-
- /* Keep track of the number of IRs done, periodically reporting it. */
- size_t count;
- while((count=reconstructor.mDone.load()) != total)
- {
- size_t pcdone{count * 100 / total};
-
- printf("\r%3zu%% done (%zu of %zu)", pcdone, count, total);
- fflush(stdout);
-
- std::this_thread::sleep_for(std::chrono::milliseconds{50});
- }
- size_t pcdone{count * 100 / total};
- printf("\r%3zu%% done (%zu of %zu)\n", pcdone, count, total);
-
- if(thrd2.joinable()) thrd2.join();
- if(thrd1.joinable()) thrd1.join();
-}
-
-// Resamples the HRIRs for use at the given sampling rate.
-static void ResampleHrirs(const uint rate, HrirDataT *hData)
-{
- uint channels = (hData->mChannelType == CT_STEREO) ? 2 : 1;
- uint n = hData->mIrPoints;
- uint ti, fi, ei, ai;
- ResamplerT rs;
-
- ResamplerSetup(&rs, hData->mIrRate, rate);
- for(fi = 0;fi < hData->mFdCount;fi++)
- {
- for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++)
- {
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
- {
- HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
- for(ti = 0;ti < channels;ti++)
- ResamplerRun(&rs, n, azd->mIrs[ti], n, azd->mIrs[ti]);
- }
- }
- }
- hData->mIrRate = rate;
-}
-
/* Given field and elevation indices and an azimuth, calculate the indices of
* the two HRIRs that bound the coordinate along with a factor for
* calculating the continuous HRIR using interpolation.
*/
static void CalcAzIndices(const HrirFdT &field, const uint ei, const double az, uint *a0, uint *a1, double *af)
{
- double f{(2.0*M_PI + az) * field.mEvs[ei].mAzCount / (2.0*M_PI)};
- uint i{static_cast<uint>(f) % field.mEvs[ei].mAzCount};
+ double f{(2.0*M_PI + az) * static_cast<double>(field.mEvs[ei].mAzs.size()) / (2.0*M_PI)};
+ const uint i{static_cast<uint>(f) % static_cast<uint>(field.mEvs[ei].mAzs.size())};
f -= std::floor(f);
*a0 = i;
- *a1 = (i + 1) % field.mEvs[ei].mAzCount;
+ *a1 = (i + 1) % static_cast<uint>(field.mEvs[ei].mAzs.size());
*af = f;
}
@@ -1124,13 +662,13 @@ static void SynthesizeOnsets(HrirDataT *hData)
/* Take the polar opposite position of the desired measurement and
* swap the ears.
*/
- field.mEvs[0].mAzs[0].mDelays[0] = field.mEvs[field.mEvCount-1].mAzs[0].mDelays[1];
- field.mEvs[0].mAzs[0].mDelays[1] = field.mEvs[field.mEvCount-1].mAzs[0].mDelays[0];
+ field.mEvs[0].mAzs[0].mDelays[0] = field.mEvs[field.mEvs.size()-1].mAzs[0].mDelays[1];
+ field.mEvs[0].mAzs[0].mDelays[1] = field.mEvs[field.mEvs.size()-1].mAzs[0].mDelays[0];
for(ei = 1u;ei < (upperElevReal+1)/2;++ei)
{
- const uint topElev{field.mEvCount-ei-1};
+ const uint topElev{static_cast<uint>(field.mEvs.size()-ei-1)};
- for(uint ai{0u};ai < field.mEvs[ei].mAzCount;ai++)
+ for(uint ai{0u};ai < field.mEvs[ei].mAzs.size();ai++)
{
uint a0, a1;
double af;
@@ -1154,12 +692,12 @@ static void SynthesizeOnsets(HrirDataT *hData)
}
else
{
- field.mEvs[0].mAzs[0].mDelays[0] = field.mEvs[field.mEvCount-1].mAzs[0].mDelays[0];
+ field.mEvs[0].mAzs[0].mDelays[0] = field.mEvs[field.mEvs.size()-1].mAzs[0].mDelays[0];
for(ei = 1u;ei < (upperElevReal+1)/2;++ei)
{
- const uint topElev{field.mEvCount-ei-1};
+ const uint topElev{static_cast<uint>(field.mEvs.size()-ei-1)};
- for(uint ai{0u};ai < field.mEvs[ei].mAzCount;ai++)
+ for(uint ai{0u};ai < field.mEvs[ei].mAzs.size();ai++)
{
uint a0, a1;
double af;
@@ -1192,7 +730,7 @@ static void SynthesizeOnsets(HrirDataT *hData)
const double ef{(field.mEvs[upperElevReal].mElevation - field.mEvs[ei].mElevation) /
(field.mEvs[upperElevReal].mElevation - field.mEvs[lowerElevFake].mElevation)};
- for(uint ai{0u};ai < field.mEvs[ei].mAzCount;ai++)
+ for(uint ai{0u};ai < field.mEvs[ei].mAzs.size();ai++)
{
uint a0, a1, a2, a3;
double af0, af1;
@@ -1218,103 +756,222 @@ static void SynthesizeOnsets(HrirDataT *hData)
}
}
};
- std::for_each(hData->mFds.begin(), hData->mFds.begin()+hData->mFdCount, proc_field);
+ std::for_each(hData->mFds.begin(), hData->mFds.end(), proc_field);
}
/* Attempt to synthesize any missing HRIRs at the bottom elevations of each
* field. Right now this just blends the lowest elevation HRIRs together and
- * applies some attenuation and high frequency damping. It is a simple, if
+ * applies a low-pass filter to simulate body occlusion. It is a simple, if
* inaccurate model.
*/
static void SynthesizeHrirs(HrirDataT *hData)
{
const uint channels{(hData->mChannelType == CT_STEREO) ? 2u : 1u};
- const uint irSize{hData->mIrPoints};
+ auto htemp = std::vector<complex_d>(hData->mFftSize);
+ const uint m{hData->mFftSize/2u + 1u};
+ auto filter = std::vector<double>(m);
const double beta{3.5e-6 * hData->mIrRate};
- auto proc_field = [channels,irSize,beta](HrirFdT &field) -> void
+ auto proc_field = [channels,m,beta,&htemp,&filter](HrirFdT &field) -> void
{
const uint oi{field.mEvStart};
if(oi <= 0) return;
for(uint ti{0u};ti < channels;ti++)
{
- for(uint i{0u};i < irSize;i++)
- field.mEvs[0].mAzs[0].mIrs[ti][i] = 0.0;
- /* Blend the lowest defined elevation's responses for an average
- * -90 degree elevation response.
+ uint a0, a1;
+ double af;
+
+ /* Use the lowest immediate-left response for the left ear and
+ * lowest immediate-right response for the right ear. Given no comb
+ * effects as a result of the left response reaching the right ear
+ * and vice-versa, this produces a decent phantom-center response
+ * underneath the head.
*/
- double blend_count{0.0};
- for(uint ai{0u};ai < field.mEvs[oi].mAzCount;ai++)
+ CalcAzIndices(field, oi, ((ti==0) ? -M_PI : M_PI) / 2.0, &a0, &a1, &af);
+ for(uint i{0u};i < m;i++)
{
- /* Only include the left responses for the left ear, and the
- * right responses for the right ear. This removes the cross-
- * talk that shouldn't exist for the -90 degree elevation
- * response (and would be mistimed anyway). NOTE: Azimuth goes
- * from 0...2pi rather than -pi...+pi (0 in front, clockwise).
- */
- if(std::abs(field.mEvs[oi].mAzs[ai].mAzimuth) < EPSILON ||
- (ti == LeftChannel && field.mEvs[oi].mAzs[ai].mAzimuth > M_PI-EPSILON) ||
- (ti == RightChannel && field.mEvs[oi].mAzs[ai].mAzimuth < M_PI+EPSILON))
- {
- for(uint i{0u};i < irSize;i++)
- field.mEvs[0].mAzs[0].mIrs[ti][i] += field.mEvs[oi].mAzs[ai].mIrs[ti][i];
- blend_count += 1.0;
- }
+ field.mEvs[0].mAzs[0].mIrs[ti][i] = Lerp(field.mEvs[oi].mAzs[a0].mIrs[ti][i],
+ field.mEvs[oi].mAzs[a1].mIrs[ti][i], af);
}
- if(blend_count > 0.0)
+ }
+
+ for(uint ei{1u};ei < field.mEvStart;ei++)
+ {
+ const double of{static_cast<double>(ei) / field.mEvStart};
+ const double b{(1.0 - of) * beta};
+ double lp[4]{};
+
+ /* Calculate a low-pass filter to simulate body occlusion. */
+ lp[0] = Lerp(1.0, lp[0], b);
+ lp[1] = Lerp(lp[0], lp[1], b);
+ lp[2] = Lerp(lp[1], lp[2], b);
+ lp[3] = Lerp(lp[2], lp[3], b);
+ htemp[0] = lp[3];
+ for(size_t i{1u};i < htemp.size();i++)
{
- for(uint i{0u};i < irSize;i++)
- field.mEvs[0].mAzs[0].mIrs[ti][i] /= blend_count;
+ lp[0] = Lerp(0.0, lp[0], b);
+ lp[1] = Lerp(lp[0], lp[1], b);
+ lp[2] = Lerp(lp[1], lp[2], b);
+ lp[3] = Lerp(lp[2], lp[3], b);
+ htemp[i] = lp[3];
}
+ /* Get the filter's frequency-domain response and extract the
+             * frequency magnitudes (phase will be reconstructed later).
+ */
+ FftForward(static_cast<uint>(htemp.size()), htemp.data());
+ std::transform(htemp.cbegin(), htemp.cbegin()+m, filter.begin(),
+ [](const complex_d &c) -> double { return std::abs(c); });
- for(uint ei{1u};ei < field.mEvStart;ei++)
+ for(uint ai{0u};ai < field.mEvs[ei].mAzs.size();ai++)
{
- const double of{static_cast<double>(ei) / field.mEvStart};
- const double b{(1.0 - of) * beta};
- for(uint ai{0u};ai < field.mEvs[ei].mAzCount;ai++)
- {
- uint a0, a1;
- double af;
+ uint a0, a1;
+ double af;
- CalcAzIndices(field, oi, field.mEvs[ei].mAzs[ai].mAzimuth, &a0, &a1, &af);
- double lp[4]{};
- for(uint i{0u};i < irSize;i++)
+ CalcAzIndices(field, oi, field.mEvs[ei].mAzs[ai].mAzimuth, &a0, &a1, &af);
+ for(uint ti{0u};ti < channels;ti++)
+ {
+ for(uint i{0u};i < m;i++)
{
/* Blend the two defined HRIRs closest to this azimuth,
* then blend that with the synthesized -90 elevation.
*/
const double s1{Lerp(field.mEvs[oi].mAzs[a0].mIrs[ti][i],
field.mEvs[oi].mAzs[a1].mIrs[ti][i], af)};
- const double s0{Lerp(field.mEvs[0].mAzs[0].mIrs[ti][i], s1, of)};
- /* Apply a low-pass to simulate body occlusion. */
- lp[0] = Lerp(s0, lp[0], b);
- lp[1] = Lerp(lp[0], lp[1], b);
- lp[2] = Lerp(lp[1], lp[2], b);
- lp[3] = Lerp(lp[2], lp[3], b);
- field.mEvs[ei].mAzs[ai].mIrs[ti][i] = lp[3];
+ const double s{Lerp(field.mEvs[0].mAzs[0].mIrs[ti][i], s1, of)};
+ field.mEvs[ei].mAzs[ai].mIrs[ti][i] = s * filter[i];
}
}
}
- const double b{beta};
- double lp[4]{};
- for(uint i{0u};i < irSize;i++)
- {
- const double s0{field.mEvs[0].mAzs[0].mIrs[ti][i]};
- lp[0] = Lerp(s0, lp[0], b);
- lp[1] = Lerp(lp[0], lp[1], b);
- lp[2] = Lerp(lp[1], lp[2], b);
- lp[3] = Lerp(lp[2], lp[3], b);
- field.mEvs[0].mAzs[0].mIrs[ti][i] = lp[3];
- }
}
- field.mEvStart = 0;
+ const double b{beta};
+ double lp[4]{};
+ lp[0] = Lerp(1.0, lp[0], b);
+ lp[1] = Lerp(lp[0], lp[1], b);
+ lp[2] = Lerp(lp[1], lp[2], b);
+ lp[3] = Lerp(lp[2], lp[3], b);
+ htemp[0] = lp[3];
+ for(size_t i{1u};i < htemp.size();i++)
+ {
+ lp[0] = Lerp(0.0, lp[0], b);
+ lp[1] = Lerp(lp[0], lp[1], b);
+ lp[2] = Lerp(lp[1], lp[2], b);
+ lp[3] = Lerp(lp[2], lp[3], b);
+ htemp[i] = lp[3];
+ }
+ FftForward(static_cast<uint>(htemp.size()), htemp.data());
+ std::transform(htemp.cbegin(), htemp.cbegin()+m, filter.begin(),
+ [](const complex_d &c) -> double { return std::abs(c); });
+
+ for(uint ti{0u};ti < channels;ti++)
+ {
+ for(uint i{0u};i < m;i++)
+ field.mEvs[0].mAzs[0].mIrs[ti][i] *= filter[i];
+ }
};
- std::for_each(hData->mFds.begin(), hData->mFds.begin()+hData->mFdCount, proc_field);
+ std::for_each(hData->mFds.begin(), hData->mFds.end(), proc_field);
}
// The following routines assume a full set of HRIRs for all elevations.
+/* Perform minimum-phase reconstruction using the magnitude responses of the
+ * HRIR set. Work is delegated to this struct, which runs asynchronously on one
+ * or more threads (sharing the same reconstructor object).
+ */
+struct HrirReconstructor {
+ std::vector<double*> mIrs;
+ std::atomic<size_t> mCurrent;
+ std::atomic<size_t> mDone;
+ uint mFftSize;
+ uint mIrPoints;
+
+ void Worker()
+ {
+ auto h = std::vector<complex_d>(mFftSize);
+ auto mags = std::vector<double>(mFftSize);
+ size_t m{(mFftSize/2) + 1};
+
+ while(1)
+ {
+ /* Load the current index to process. */
+ size_t idx{mCurrent.load()};
+ do {
+ /* If the index is at the end, we're done. */
+ if(idx >= mIrs.size())
+ return;
+ /* Otherwise, increment the current index atomically so other
+ * threads know to go to the next one. If this call fails, the
+ * current index was just changed by another thread and the new
+ * value is loaded into idx, which we'll recheck.
+ */
+ } while(!mCurrent.compare_exchange_weak(idx, idx+1, std::memory_order_relaxed));
+
+ /* Now do the reconstruction, and apply the inverse FFT to get the
+ * time-domain response.
+ */
+ for(size_t i{0};i < m;++i)
+ mags[i] = std::max(mIrs[idx][i], EPSILON);
+ MinimumPhase(mFftSize, mags.data(), h.data());
+ FftInverse(mFftSize, h.data());
+ for(uint i{0u};i < mIrPoints;++i)
+ mIrs[idx][i] = h[i].real();
+
+ /* Increment the number of IRs done. */
+ mDone.fetch_add(1);
+ }
+ }
+};
+
+static void ReconstructHrirs(const HrirDataT *hData, const uint numThreads)
+{
+ const uint channels{(hData->mChannelType == CT_STEREO) ? 2u : 1u};
+
+ /* Set up the reconstructor with the needed size info and pointers to the
+ * IRs to process.
+ */
+ HrirReconstructor reconstructor;
+ reconstructor.mCurrent.store(0, std::memory_order_relaxed);
+ reconstructor.mDone.store(0, std::memory_order_relaxed);
+ reconstructor.mFftSize = hData->mFftSize;
+ reconstructor.mIrPoints = hData->mIrPoints;
+ for(const auto &field : hData->mFds)
+ {
+ for(auto &elev : field.mEvs)
+ {
+ for(const auto &azd : elev.mAzs)
+ {
+ for(uint ti{0u};ti < channels;ti++)
+ reconstructor.mIrs.push_back(azd.mIrs[ti]);
+ }
+ }
+ }
+
+ /* Launch threads to work on reconstruction. */
+ std::vector<std::thread> thrds;
+ thrds.reserve(numThreads);
+ for(size_t i{0};i < numThreads;++i)
+ thrds.emplace_back(std::mem_fn(&HrirReconstructor::Worker), &reconstructor);
+
+ /* Keep track of the number of IRs done, periodically reporting it. */
+ size_t count;
+ do {
+ std::this_thread::sleep_for(std::chrono::milliseconds{50});
+
+ count = reconstructor.mDone.load();
+ size_t pcdone{count * 100 / reconstructor.mIrs.size()};
+
+ printf("\r%3zu%% done (%zu of %zu)", pcdone, count, reconstructor.mIrs.size());
+ fflush(stdout);
+ } while(count < reconstructor.mIrs.size());
+ fputc('\n', stdout);
+
+ for(auto &thrd : thrds)
+ {
+ if(thrd.joinable())
+ thrd.join();
+ }
+}
+
// Normalize the HRIR set and slightly attenuate the result.
static void NormalizeHrirs(HrirDataT *hData)
{
@@ -1323,35 +980,28 @@ static void NormalizeHrirs(HrirDataT *hData)
/* Find the maximum amplitude and RMS out of all the IRs. */
struct LevelPair { double amp, rms; };
- auto proc0_field = [channels,irSize](const LevelPair levels0, const HrirFdT &field) -> LevelPair
+ auto mesasure_channel = [irSize](const LevelPair levels, const double *ir)
{
- auto proc_elev = [channels,irSize](const LevelPair levels1, const HrirEvT &elev) -> LevelPair
- {
- auto proc_azi = [channels,irSize](const LevelPair levels2, const HrirAzT &azi) -> LevelPair
+ /* Calculate the peak amplitude and RMS of this IR. */
+ auto current = std::accumulate(ir, ir+irSize, LevelPair{0.0, 0.0},
+ [](const LevelPair cur, const double impulse)
{
- auto proc_channel = [irSize](const LevelPair levels3, const double *ir) -> LevelPair
- {
- /* Calculate the peak amplitude and RMS of this IR. */
- auto current = std::accumulate(ir, ir+irSize, LevelPair{0.0, 0.0},
- [](const LevelPair cur, const double impulse) -> LevelPair
- {
- return {std::max(std::abs(impulse), cur.amp),
- cur.rms + impulse*impulse};
- });
- current.rms = std::sqrt(current.rms / irSize);
-
- /* Accumulate levels by taking the maximum amplitude and RMS. */
- return LevelPair{std::max(current.amp, levels3.amp),
- std::max(current.rms, levels3.rms)};
- };
- return std::accumulate(azi.mIrs, azi.mIrs+channels, levels2, proc_channel);
- };
- return std::accumulate(elev.mAzs, elev.mAzs+elev.mAzCount, levels1, proc_azi);
- };
- return std::accumulate(field.mEvs, field.mEvs+field.mEvCount, levels0, proc_elev);
+ return LevelPair{std::max(std::abs(impulse), cur.amp), cur.rms + impulse*impulse};
+ });
+ current.rms = std::sqrt(current.rms / irSize);
+
+ /* Accumulate levels by taking the maximum amplitude and RMS. */
+ return LevelPair{std::max(current.amp, levels.amp), std::max(current.rms, levels.rms)};
};
- const auto maxlev = std::accumulate(hData->mFds.begin(), hData->mFds.begin()+hData->mFdCount,
- LevelPair{0.0, 0.0}, proc0_field);
+ auto measure_azi = [channels,mesasure_channel](const LevelPair levels, const HrirAzT &azi)
+ { return std::accumulate(azi.mIrs, azi.mIrs+channels, levels, mesasure_channel); };
+ auto measure_elev = [measure_azi](const LevelPair levels, const HrirEvT &elev)
+ { return std::accumulate(elev.mAzs.cbegin(), elev.mAzs.cend(), levels, measure_azi); };
+ auto measure_field = [measure_elev](const LevelPair levels, const HrirFdT &field)
+ { return std::accumulate(field.mEvs.cbegin(), field.mEvs.cend(), levels, measure_elev); };
+
+ const auto maxlev = std::accumulate(hData->mFds.begin(), hData->mFds.end(),
+ LevelPair{0.0, 0.0}, measure_field);
/* Normalize using the maximum RMS of the HRIRs. The RMS measure for the
* non-filtered signal is of an impulse with equal length (to the filter):
@@ -1368,24 +1018,16 @@ static void NormalizeHrirs(HrirDataT *hData)
factor = std::min(factor, 0.99/maxlev.amp);
/* Now scale all IRs by the given factor. */
- auto proc1_field = [channels,irSize,factor](HrirFdT &field) -> void
- {
- auto proc_elev = [channels,irSize,factor](HrirEvT &elev) -> void
- {
- auto proc_azi = [channels,irSize,factor](HrirAzT &azi) -> void
- {
- auto proc_channel = [irSize,factor](double *ir) -> void
- {
- std::transform(ir, ir+irSize, ir,
- std::bind(std::multiplies<double>{}, _1, factor));
- };
- std::for_each(azi.mIrs, azi.mIrs+channels, proc_channel);
- };
- std::for_each(elev.mAzs, elev.mAzs+elev.mAzCount, proc_azi);
- };
- std::for_each(field.mEvs, field.mEvs+field.mEvCount, proc_elev);
- };
- std::for_each(hData->mFds.begin(), hData->mFds.begin()+hData->mFdCount, proc1_field);
+ auto proc_channel = [irSize,factor](double *ir)
+ { std::transform(ir, ir+irSize, ir, [factor](double s){ return s * factor; }); };
+ auto proc_azi = [channels,proc_channel](HrirAzT &azi)
+ { std::for_each(azi.mIrs, azi.mIrs+channels, proc_channel); };
+ auto proc_elev = [proc_azi](HrirEvT &elev)
+ { std::for_each(elev.mAzs.begin(), elev.mAzs.end(), proc_azi); };
+ auto proc1_field = [proc_elev](HrirFdT &field)
+ { std::for_each(field.mEvs.begin(), field.mEvs.end(), proc_elev); };
+
+ std::for_each(hData->mFds.begin(), hData->mFds.end(), proc1_field);
}
// Calculate the left-ear time delay using a spherical head model.
@@ -1408,111 +1050,115 @@ static void CalculateHrtds(const HeadModelT model, const double radius, HrirData
{
uint channels = (hData->mChannelType == CT_STEREO) ? 2 : 1;
double customRatio{radius / hData->mRadius};
- uint ti, fi, ei, ai;
+ uint ti;
if(model == HM_SPHERE)
{
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(auto &field : hData->mFds)
{
- for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
+ for(auto &elev : field.mEvs)
{
- HrirEvT *evd = &hData->mFds[fi].mEvs[ei];
-
- for(ai = 0;ai < evd->mAzCount;ai++)
+ for(auto &azd : elev.mAzs)
{
- HrirAzT *azd = &evd->mAzs[ai];
-
for(ti = 0;ti < channels;ti++)
- azd->mDelays[ti] = CalcLTD(evd->mElevation, azd->mAzimuth, radius, hData->mFds[fi].mDistance);
+ azd.mDelays[ti] = CalcLTD(elev.mElevation, azd.mAzimuth, radius, field.mDistance);
}
}
}
}
else if(customRatio != 1.0)
{
- for(fi = 0;fi < hData->mFdCount;fi++)
+ for(auto &field : hData->mFds)
{
- for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
+ for(auto &elev : field.mEvs)
{
- HrirEvT *evd = &hData->mFds[fi].mEvs[ei];
-
- for(ai = 0;ai < evd->mAzCount;ai++)
+ for(auto &azd : elev.mAzs)
{
- HrirAzT *azd = &evd->mAzs[ai];
for(ti = 0;ti < channels;ti++)
- azd->mDelays[ti] *= customRatio;
+ azd.mDelays[ti] *= customRatio;
}
}
}
}
- for(fi = 0;fi < hData->mFdCount;fi++)
+ double maxHrtd{0.0};
+ for(auto &field : hData->mFds)
{
double minHrtd{std::numeric_limits<double>::infinity()};
- for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
+ for(auto &elev : field.mEvs)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(auto &azd : elev.mAzs)
{
- HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
-
for(ti = 0;ti < channels;ti++)
- minHrtd = std::min(azd->mDelays[ti], minHrtd);
+ minHrtd = std::min(azd.mDelays[ti], minHrtd);
}
}
- for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
+ for(auto &elev : field.mEvs)
{
- for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
+ for(auto &azd : elev.mAzs)
{
- HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];
-
for(ti = 0;ti < channels;ti++)
- azd->mDelays[ti] -= minHrtd;
+ {
+ azd.mDelays[ti] = (azd.mDelays[ti]-minHrtd) * hData->mIrRate;
+ maxHrtd = std::max(maxHrtd, azd.mDelays[ti]);
+ }
+ }
+ }
+ }
+ if(maxHrtd > MAX_HRTD)
+ {
+ fprintf(stdout, " Scaling for max delay of %f samples to %f\n...\n", maxHrtd, MAX_HRTD);
+ const double scale{MAX_HRTD / maxHrtd};
+ for(auto &field : hData->mFds)
+ {
+ for(auto &elev : field.mEvs)
+ {
+ for(auto &azd : elev.mAzs)
+ {
+ for(ti = 0;ti < channels;ti++)
+ azd.mDelays[ti] *= scale;
+ }
}
}
}
}
// Allocate and configure dynamic HRIR structures.
-int PrepareHrirData(const uint fdCount, const double (&distances)[MAX_FD_COUNT],
- const uint (&evCounts)[MAX_FD_COUNT], const uint azCounts[MAX_FD_COUNT * MAX_EV_COUNT],
- HrirDataT *hData)
+bool PrepareHrirData(const al::span<const double> distances,
+ const al::span<const uint,MAX_FD_COUNT> evCounts,
+ const al::span<const std::array<uint,MAX_EV_COUNT>,MAX_FD_COUNT> azCounts, HrirDataT *hData)
{
- uint evTotal = 0, azTotal = 0, fi, ei, ai;
+ uint evTotal{0}, azTotal{0};
- for(fi = 0;fi < fdCount;fi++)
+ for(size_t fi{0};fi < distances.size();++fi)
{
evTotal += evCounts[fi];
- for(ei = 0;ei < evCounts[fi];ei++)
- azTotal += azCounts[(fi * MAX_EV_COUNT) + ei];
+ for(size_t ei{0};ei < evCounts[fi];++ei)
+ azTotal += azCounts[fi][ei];
}
- if(!fdCount || !evTotal || !azTotal)
- return 0;
+ if(!evTotal || !azTotal)
+ return false;
hData->mEvsBase.resize(evTotal);
hData->mAzsBase.resize(azTotal);
- hData->mFds.resize(fdCount);
+ hData->mFds.resize(distances.size());
hData->mIrCount = azTotal;
- hData->mFdCount = fdCount;
evTotal = 0;
azTotal = 0;
- for(fi = 0;fi < fdCount;fi++)
+ for(size_t fi{0};fi < distances.size();++fi)
{
hData->mFds[fi].mDistance = distances[fi];
- hData->mFds[fi].mEvCount = evCounts[fi];
hData->mFds[fi].mEvStart = 0;
- hData->mFds[fi].mEvs = &hData->mEvsBase[evTotal];
+ hData->mFds[fi].mEvs = {&hData->mEvsBase[evTotal], evCounts[fi]};
evTotal += evCounts[fi];
- for(ei = 0;ei < evCounts[fi];ei++)
+ for(uint ei{0};ei < evCounts[fi];++ei)
{
- uint azCount = azCounts[(fi * MAX_EV_COUNT) + ei];
+ uint azCount = azCounts[fi][ei];
- hData->mFds[fi].mIrCount += azCount;
hData->mFds[fi].mEvs[ei].mElevation = -M_PI / 2.0 + M_PI * ei / (evCounts[fi] - 1);
- hData->mFds[fi].mEvs[ei].mIrCount += azCount;
- hData->mFds[fi].mEvs[ei].mAzCount = azCount;
- hData->mFds[fi].mEvs[ei].mAzs = &hData->mAzsBase[azTotal];
- for(ai = 0;ai < azCount;ai++)
+ hData->mFds[fi].mEvs[ei].mAzs = {&hData->mAzsBase[azTotal], azCount};
+ for(uint ai{0};ai < azCount;ai++)
{
hData->mFds[fi].mEvs[ei].mAzs[ai].mAzimuth = 2.0 * M_PI * ai / azCount;
hData->mFds[fi].mEvs[ei].mAzs[ai].mIndex = azTotal + ai;
@@ -1524,7 +1170,7 @@ int PrepareHrirData(const uint fdCount, const double (&distances)[MAX_FD_COUNT],
azTotal += azCount;
}
}
- return 1;
+ return true;
}
@@ -1533,17 +1179,18 @@ int PrepareHrirData(const uint fdCount, const double (&distances)[MAX_FD_COUNT],
* from standard input.
*/
static int ProcessDefinition(const char *inName, const uint outRate, const ChannelModeT chanMode,
- const uint fftSize, const int equalize, const int surface, const double limit,
- const uint truncSize, const HeadModelT model, const double radius, const char *outName)
+ const bool farfield, const uint numThreads, const uint fftSize, const int equalize,
+ const int surface, const double limit, const uint truncSize, const HeadModelT model,
+ const double radius, const char *outName)
{
- char rateStr[8+1], expName[MAX_PATH_LEN];
HrirDataT hData;
+ fprintf(stdout, "Using %u thread%s.\n", numThreads, (numThreads==1)?"":"s");
if(!inName)
{
inName = "stdin";
fprintf(stdout, "Reading HRIR definition from %s...\n", inName);
- if(!LoadDefInput(std::cin, nullptr, 0, inName, fftSize, truncSize, chanMode, &hData))
+ if(!LoadDefInput(std::cin, nullptr, 0, inName, fftSize, truncSize, outRate, chanMode, &hData))
return 0;
}
else
@@ -1569,13 +1216,13 @@ static int ProcessDefinition(const char *inName, const uint outRate, const Chann
{
input = nullptr;
fprintf(stdout, "Reading HRTF data from %s...\n", inName);
- if(!LoadSofaFile(inName, fftSize, truncSize, chanMode, &hData))
+ if(!LoadSofaFile(inName, numThreads, fftSize, truncSize, outRate, chanMode, &hData))
return 0;
}
else
{
fprintf(stdout, "Reading HRIR definition from %s...\n", inName);
- if(!LoadDefInput(*input, startbytes, startbytecount, inName, fftSize, truncSize, chanMode, &hData))
+ if(!LoadDefInput(*input, startbytes, startbytecount, inName, fftSize, truncSize, outRate, chanMode, &hData))
return 0;
}
}
@@ -1586,7 +1233,7 @@ static int ProcessDefinition(const char *inName, const uint outRate, const Chann
uint m{hData.mFftSize/2u + 1u};
auto dfa = std::vector<double>(c * m);
- if(hData.mFdCount > 1)
+ if(hData.mFds.size() > 1)
{
fprintf(stdout, "Balancing field magnitudes...\n");
BalanceFieldMagnitudes(&hData, c, m);
@@ -1596,27 +1243,37 @@ static int ProcessDefinition(const char *inName, const uint outRate, const Chann
fprintf(stdout, "Performing diffuse-field equalization...\n");
DiffuseFieldEqualize(c, m, dfa.data(), &hData);
}
- fprintf(stdout, "Performing minimum phase reconstruction...\n");
- ReconstructHrirs(&hData);
- if(outRate != 0 && outRate != hData.mIrRate)
+ if(hData.mFds.size() > 1)
{
- fprintf(stdout, "Resampling HRIRs...\n");
- ResampleHrirs(outRate, &hData);
+ fprintf(stdout, "Sorting %zu fields...\n", hData.mFds.size());
+ std::sort(hData.mFds.begin(), hData.mFds.end(),
+ [](const HrirFdT &lhs, const HrirFdT &rhs) noexcept
+ { return lhs.mDistance < rhs.mDistance; });
+ if(farfield)
+ {
+ fprintf(stdout, "Clearing %zu near field%s...\n", hData.mFds.size()-1,
+ (hData.mFds.size()-1 != 1) ? "s" : "");
+ hData.mFds.erase(hData.mFds.cbegin(), hData.mFds.cend()-1);
+ }
}
- fprintf(stdout, "Truncating minimum-phase HRIRs...\n");
- hData.mIrPoints = truncSize;
fprintf(stdout, "Synthesizing missing elevations...\n");
if(model == HM_DATASET)
SynthesizeOnsets(&hData);
SynthesizeHrirs(&hData);
+ fprintf(stdout, "Performing minimum phase reconstruction...\n");
+ ReconstructHrirs(&hData, numThreads);
+ fprintf(stdout, "Truncating minimum-phase HRIRs...\n");
+ hData.mIrPoints = truncSize;
fprintf(stdout, "Normalizing final HRIRs...\n");
NormalizeHrirs(&hData);
fprintf(stdout, "Calculating impulse delays...\n");
CalculateHrtds(model, (radius > DEFAULT_CUSTOM_RADIUS) ? radius : hData.mRadius, &hData);
- snprintf(rateStr, sizeof(rateStr), "%u", hData.mIrRate);
- StrSubst(outName, "%r", rateStr, sizeof(expName), expName);
- fprintf(stdout, "Creating MHR data set %s...\n", expName);
- return StoreMhr(&hData, expName);
+
+ const auto rateStr = std::to_string(hData.mIrRate);
+ const auto expName = StrSubst({outName, strlen(outName)}, {"%r", 2},
+ {rateStr.data(), rateStr.size()});
+ fprintf(stdout, "Creating MHR data set %s...\n", expName.c_str());
+ return StoreMhr(&hData, expName.c_str());
}
static void PrintHelp(const char *argv0, FILE *ofile)
@@ -1627,6 +1284,8 @@ static void PrintHelp(const char *argv0, FILE *ofile)
fprintf(ofile, " resample the HRIRs accordingly.\n");
fprintf(ofile, " -m Change the data set to mono, mirroring the left ear for the\n");
fprintf(ofile, " right ear.\n");
+ fprintf(ofile, " -a Change the data set to single field, using the farthest field.\n");
+ fprintf(ofile, " -j <threads> Number of threads used to process HRIRs (default: 2).\n");
fprintf(ofile, " -f <points> Override the FFT window size (default: %u).\n", DEFAULT_FFTSIZE);
fprintf(ofile, " -e {on|off} Toggle diffuse-field equalization (default: %s).\n", (DEFAULT_EQUALIZE ? "on" : "off"));
fprintf(ofile, " -s {on|off} Toggle surface-weighted diffuse-field average (default: %s).\n", (DEFAULT_SURFACE ? "on" : "off"));
@@ -1651,13 +1310,13 @@ int main(int argc, char *argv[])
char *end = nullptr;
ChannelModeT chanMode;
HeadModelT model;
+ uint numThreads;
uint truncSize;
double radius;
+ bool farfield;
double limit;
int opt;
- GET_UNICODE_ARGS(&argc, &argv);
-
if(argc < 2)
{
fprintf(stdout, "HRTF Processing and Composition Utility\n\n");
@@ -1672,11 +1331,13 @@ int main(int argc, char *argv[])
equalize = DEFAULT_EQUALIZE;
surface = DEFAULT_SURFACE;
limit = DEFAULT_LIMIT;
+ numThreads = 2;
truncSize = DEFAULT_TRUNCSIZE;
model = DEFAULT_HEAD_MODEL;
radius = DEFAULT_CUSTOM_RADIUS;
+ farfield = false;
- while((opt=getopt(argc, argv, "r:mf:e:s:l:w:d:c:e:i:o:h")) != -1)
+ while((opt=getopt(argc, argv, "r:maj:f:e:s:l:w:d:c:e:i:o:h")) != -1)
{
switch(opt)
{
@@ -1693,6 +1354,21 @@ int main(int argc, char *argv[])
chanMode = CM_ForceMono;
break;
+ case 'a':
+ farfield = true;
+ break;
+
+ case 'j':
+ numThreads = static_cast<uint>(strtoul(optarg, &end, 10));
+ if(end[0] != '\0' || numThreads > 64)
+ {
+ fprintf(stderr, "\nError: Got unexpected value \"%s\" for option -%c, expected between %u to %u.\n", optarg, opt, 0, 64);
+ exit(EXIT_FAILURE);
+ }
+ if(numThreads == 0)
+ numThreads = std::thread::hardware_concurrency();
+ break;
+
case 'f':
fftSize = static_cast<uint>(strtoul(optarg, &end, 10));
if(end[0] != '\0' || (fftSize&(fftSize-1)) || fftSize < MIN_FFTSIZE || fftSize > MAX_FFTSIZE)
@@ -1742,9 +1418,9 @@ int main(int argc, char *argv[])
case 'w':
truncSize = static_cast<uint>(strtoul(optarg, &end, 10));
- if(end[0] != '\0' || truncSize < MIN_TRUNCSIZE || truncSize > MAX_TRUNCSIZE || (truncSize%MOD_TRUNCSIZE))
+ if(end[0] != '\0' || truncSize < MIN_TRUNCSIZE || truncSize > MAX_TRUNCSIZE)
{
- fprintf(stderr, "\nError: Got unexpected value \"%s\" for option -%c, expected multiple of %u between %u to %u.\n", optarg, opt, MOD_TRUNCSIZE, MIN_TRUNCSIZE, MAX_TRUNCSIZE);
+ fprintf(stderr, "\nError: Got unexpected value \"%s\" for option -%c, expected between %u to %u.\n", optarg, opt, MIN_TRUNCSIZE, MAX_TRUNCSIZE);
exit(EXIT_FAILURE);
}
break;
@@ -1788,8 +1464,8 @@ int main(int argc, char *argv[])
}
}
- int ret = ProcessDefinition(inName, outRate, chanMode, fftSize, equalize, surface, limit,
- truncSize, model, radius, outName);
+ int ret = ProcessDefinition(inName, outRate, chanMode, farfield, numThreads, fftSize, equalize,
+ surface, limit, truncSize, model, radius, outName);
if(!ret) return -1;
fprintf(stdout, "Operation completed.\n");
diff --git a/utils/makemhr/makemhr.h b/utils/makemhr/makemhr.h
index ba19efbe..13b5b2d1 100644
--- a/utils/makemhr/makemhr.h
+++ b/utils/makemhr/makemhr.h
@@ -4,6 +4,9 @@
#include <vector>
#include <complex>
+#include "alcomplex.h"
+#include "polyphase_resampler.h"
+
// The maximum path length used when processing filenames.
#define MAX_PATH_LEN (256)
@@ -71,17 +74,13 @@ struct HrirAzT {
struct HrirEvT {
double mElevation{0.0};
- uint mIrCount{0u};
- uint mAzCount{0u};
- HrirAzT *mAzs{nullptr};
+ al::span<HrirAzT> mAzs;
};
struct HrirFdT {
double mDistance{0.0};
- uint mIrCount{0u};
- uint mEvCount{0u};
uint mEvStart{0u};
- HrirEvT *mEvs{nullptr};
+ al::span<HrirEvT> mEvs;
};
// The HRIR metrics and data set used when loading, processing, and storing
@@ -95,31 +94,35 @@ struct HrirDataT {
uint mIrSize{0u};
double mRadius{0.0};
uint mIrCount{0u};
- uint mFdCount{0u};
std::vector<double> mHrirsBase;
std::vector<HrirEvT> mEvsBase;
std::vector<HrirAzT> mAzsBase;
std::vector<HrirFdT> mFds;
+
+ /* GCC warns when it tries to inline this. */
+ ~HrirDataT();
};
-int PrepareHrirData(const uint fdCount, const double (&distances)[MAX_FD_COUNT], const uint (&evCounts)[MAX_FD_COUNT], const uint azCounts[MAX_FD_COUNT * MAX_EV_COUNT], HrirDataT *hData);
+bool PrepareHrirData(const al::span<const double> distances,
+ const al::span<const uint,MAX_FD_COUNT> evCounts,
+ const al::span<const std::array<uint,MAX_EV_COUNT>,MAX_FD_COUNT> azCounts, HrirDataT *hData);
void MagnitudeResponse(const uint n, const complex_d *in, double *out);
-void FftForward(const uint n, complex_d *inout);
-void FftInverse(const uint n, complex_d *inout);
-
-
-// The resampler metrics and FIR filter.
-struct ResamplerT {
- uint mP, mQ, mM, mL;
- std::vector<double> mF;
-};
-
-void ResamplerSetup(ResamplerT *rs, const uint srcRate, const uint dstRate);
-void ResamplerRun(ResamplerT *rs, const uint inN, const double *in, const uint outN, double *out);
+// Performs a forward FFT.
+inline void FftForward(const uint n, complex_d *inout)
+{ forward_fft(al::as_span(inout, n)); }
+
+// Performs an inverse FFT.
+inline void FftInverse(const uint n, complex_d *inout)
+{
+ inverse_fft(al::as_span(inout, n));
+ double f{1.0 / n};
+ for(uint i{0};i < n;i++)
+ inout[i] *= f;
+}
// Performs linear interpolation.
inline double Lerp(const double a, const double b, const double f)