Diffstat (limited to 'utils/makehrtf.c')
-rw-r--r-- | utils/makehrtf.c | 1383
1 file changed, 914 insertions, 469 deletions
diff --git a/utils/makehrtf.c b/utils/makehrtf.c index ca810b84..461df597 100644 --- a/utils/makehrtf.c +++ b/utils/makehrtf.c @@ -129,6 +129,9 @@ typedef unsigned __int64 uint64_t; #define MIN_POINTS (16) #define MAX_POINTS (8192) +// The limit to the number of 'distances' listed in the data set definition. +#define MAX_FD_COUNT (16) + // The limits to the number of 'azimuths' listed in the data set definition. #define MIN_EV_COUNT (5) #define MAX_EV_COUNT (128) @@ -141,10 +144,10 @@ typedef unsigned __int64 uint64_t; #define MIN_RADIUS (0.05) #define MAX_RADIUS (0.15) -// The limits for the 'distance' from source to listener in the definition -// file. -#define MIN_DISTANCE (0.5) -#define MAX_DISTANCE (2.5) +// The limits for the 'distance' from source to listener for each field in +// the definition file. +#define MIN_DISTANCE (0.05) +#define MAX_DISTANCE (2.50) // The maximum number of channels that can be addressed for a WAVE file // source listed in the data set definition. @@ -212,20 +215,20 @@ typedef unsigned __int64 uint64_t; #define MAX_HRTD (63.0) // The OpenAL Soft HRTF format marker. It stands for minimum-phase head -// response protocol 01. -#define MHR_FORMAT ("MinPHR01") +// response protocol 02. +#define MHR_FORMAT ("MinPHR02") -#define MHR_FORMAT_EXPERIMENTAL ("MinPHRTEMPDONOTUSE") - -// Sample and channel type enum values +// Sample and channel type enum values. typedef enum SampleTypeT { ST_S16 = 0, ST_S24 = 1 } SampleTypeT; +// Certain iterations rely on these integer enum values. typedef enum ChannelTypeT { - CT_LEFTONLY = 0, - CT_LEFTRIGHT = 1 + CT_NONE = -1, + CT_MONO = 0, + CT_STEREO = 1 } ChannelTypeT; // Byte order for the serialization routines. @@ -290,25 +293,42 @@ typedef struct SourceRefT { char mPath[MAX_PATH_LEN+1]; } SourceRefT; +// Structured HRIR storage for stereo azimuth pairs, elevations, and fields. +typedef struct HrirAzT { + double mAzimuth; + uint mIndex; + double mDelays[2]; + double *mIrs[2]; +} HrirAzT; + +typedef struct HrirEvT { + double mElevation; + uint mIrCount; + uint mAzCount; + HrirAzT *mAzs; +} HrirEvT; + +typedef struct HrirFdT { + double mDistance; + uint mIrCount; + uint mEvCount; + uint mEvStart; + HrirEvT *mEvs; +} HrirFdT; + // The HRIR metrics and data set used when loading, processing, and storing // the resulting HRTF. typedef struct HrirDataT { uint mIrRate; SampleTypeT mSampleType; ChannelTypeT mChannelType; - uint mIrCount; - uint mIrSize; uint mIrPoints; uint mFftSize; - uint mEvCount; - uint mEvStart; - uint mAzCount[MAX_EV_COUNT]; - uint mEvOffset[MAX_EV_COUNT]; + uint mIrSize; double mRadius; - double mDistance; - double *mHrirs; - double *mHrtds; - double mMaxHrtd; + uint mIrCount; + uint mFdCount; + HrirFdT *mFds; } HrirDataT; // The resampler metrics and FIR filter. @@ -533,6 +553,19 @@ static void TrIndication(TokenReaderT *tr, uint *line, uint *column) if(column) *column = tr->mColumn; } +// Checks to see if a token is (likely to be) an identifier. It does not +// display any errors and will not proceed to the next token. +static int TrIsIdent(TokenReaderT *tr) +{ + char ch; + + if(!TrSkipWhitespace(tr)) + return 0; + ch = tr->mRing[tr->mOut&TR_RING_MASK]; + return ch == '_' || isalpha(ch); +} + + // Checks to see if a token is the given operator. It does not display any // errors and will not proceed to the next token. 
static int TrIsOperator(TokenReaderT *tr, const char *op) @@ -641,9 +674,9 @@ static int TrReadInt(TokenReaderT *tr, const int loBound, const int hiBound, int if(*value < loBound || *value > hiBound) { TrErrorAt(tr, tr->mLine, col, "Expected a value from %d to %d.\n", loBound, hiBound); - return (0); + return 0; } - return (1); + return 1; } } TrErrorAt(tr, tr->mLine, col, "Expected an integer.\n"); @@ -736,7 +769,7 @@ static int TrReadFloat(TokenReaderT *tr, const double loBound, const double hiBo *value = strtod(temp, NULL); if(*value < loBound || *value > hiBound) { - TrErrorAt (tr, tr->mLine, col, "Expected a value from %f to %f.\n", loBound, hiBound); + TrErrorAt(tr, tr->mLine, col, "Expected a value from %f to %f.\n", loBound, hiBound); return 0; } return 1; @@ -772,7 +805,7 @@ static int TrReadString(TokenReaderT *tr, const uint maxLen, char *text) break; if(ch == '\n') { - TrErrorAt (tr, tr->mLine, col, "Unterminated string at end of line.\n"); + TrErrorAt(tr, tr->mLine, col, "Unterminated string at end of line.\n"); return 0; } if(len < maxLen) @@ -788,7 +821,7 @@ static int TrReadString(TokenReaderT *tr, const uint maxLen, char *text) tr->mColumn += 2 + len; if(len > maxLen) { - TrErrorAt (tr, tr->mLine, col, "String is too long.\n"); + TrErrorAt(tr, tr->mLine, col, "String is too long.\n"); return 0; } text[len] = '\0'; @@ -901,34 +934,40 @@ static double Clamp(const double val, const double lower, const double upper) // Performs linear interpolation. static double Lerp(const double a, const double b, const double f) { - return a + (f * (b - a)); + return a + f * (b - a); } static inline uint dither_rng(uint *seed) { - *seed = (*seed * 96314165) + 907633515; + *seed = *seed * 96314165 + 907633515; return *seed; } -// Performs a triangular probability density function dither. It assumes the -// input sample is already scaled. -static inline double TpdfDither(const double in, uint *seed) +// Performs a triangular probability density function dither. The input samples +// should be normalized (-1 to +1). +static void TpdfDither(double *restrict out, const double *restrict in, const double scale, + const int count, const int step, uint *seed) { static const double PRNG_SCALE = 1.0 / UINT_MAX; uint prn0, prn1; + int i; - prn0 = dither_rng(seed); - prn1 = dither_rng(seed); - return round(in + (prn0*PRNG_SCALE - prn1*PRNG_SCALE)); + for(i = 0;i < count;i++) + { + prn0 = dither_rng(seed); + prn1 = dither_rng(seed); + out[i*step] = round(in[i]*scale + (prn0*PRNG_SCALE - prn1*PRNG_SCALE)); + } } // Allocates an array of doubles. -static double *CreateArray(size_t n) +static double *CreateDoubles(size_t n) { double *a; - if(n == 0) n = 1; - a = calloc(n, sizeof(double)); + if(n == 0) + n = 1; + a = calloc(n, sizeof(*a)); if(a == NULL) { fprintf(stderr, "Error: Out of memory.\n"); @@ -937,9 +976,21 @@ static double *CreateArray(size_t n) return a; } -// Frees an array of doubles. -static void DestroyArray(double *a) -{ free(a); } +// Allocates an array of complex numbers. +static Complex *CreateComplexes(size_t n) +{ + Complex *a; + + if(n == 0) + n = 1; + a = calloc(n, sizeof(*a)); + if(a == NULL) + { + fprintf(stderr, "Error: Out of memory.\n"); + exit(-1); + } + return a; +} /* Fast Fourier transform routines. The number of points must be a power of * two. 
In-place operation is possible only if both the real and imaginary @@ -1081,9 +1132,8 @@ static void MagnitudeResponse(const uint n, const Complex *in, double *out) * to adjust the effects of the diffuse-field average on the equalization * process. */ -static void LimitMagnitudeResponse(const uint n, const double limit, const double *in, double *out) +static void LimitMagnitudeResponse(const uint n, const uint m, const double limit, const double *in, double *out) { - const uint m = 1 + (n / 2); double halfLim; uint i, lower, upper; double ave; @@ -1118,7 +1168,7 @@ static void MinimumPhase(const uint n, const double *in, Complex *out) double *mags; uint i; - mags = CreateArray(n); + mags = CreateDoubles(n); for(i = 0;i < m;i++) { mags[i] = fmax(EPSILON, in[i]); @@ -1137,7 +1187,7 @@ static void MinimumPhase(const uint n, const double *in, Complex *out) Complex a = c_exp(MakeComplex(0.0, out[i].Imag)); out[i] = c_mul(MakeComplex(mags[i], 0.0), a); } - DestroyArray(mags); + free(mags); } @@ -1319,7 +1369,7 @@ static void ResamplerSetup(ResamplerT *rs, const uint srcRate, const uint dstRat beta = CalcKaiserBeta(180.0); rs->mM = l*2 + 1; rs->mL = l; - rs->mF = CreateArray(rs->mM); + rs->mF = CreateDoubles(rs->mM); for(i = 0;i < ((int)rs->mM);i++) rs->mF[i] = SincFilter((int)l, beta, rs->mP, cutoff, i); } @@ -1327,7 +1377,7 @@ static void ResamplerSetup(ResamplerT *rs, const uint srcRate, const uint dstRat // Clean up after the resampler. static void ResamplerClear(ResamplerT *rs) { - DestroyArray(rs->mF); + free(rs->mF); rs->mF = NULL; } @@ -1346,7 +1396,7 @@ static void ResamplerRun(ResamplerT *rs, const uint inN, const double *in, const // Handle in-place operation. if(in == out) - work = CreateArray(outN); + work = CreateDoubles(outN); else work = out; // Resample the input. 
@@ -1373,7 +1423,7 @@ static void ResamplerRun(ResamplerT *rs, const uint inN, const double *in, const { for(i = 0;i < outN;i++) out[i] = work[i]; - DestroyArray(work); + free(work); } } @@ -1536,18 +1586,18 @@ static int ReadWaveFormat(FILE *fp, const ByteOrderT order, const uint hrirRate, chunkSize = 0; do { - if (chunkSize > 0) - fseek (fp, (long) chunkSize, SEEK_CUR); + if(chunkSize > 0) + fseek (fp, (long) chunkSize, SEEK_CUR); if(!ReadBin4(fp, src->mPath, BO_LITTLE, 4, &fourCC) || !ReadBin4(fp, src->mPath, order, 4, &chunkSize)) - return 0; + return 0; } while(fourCC != FOURCC_FMT); - if(!ReadBin4(fp, src->mPath, order, 2, & format) || - !ReadBin4(fp, src->mPath, order, 2, & channels) || - !ReadBin4(fp, src->mPath, order, 4, & rate) || - !ReadBin4(fp, src->mPath, order, 4, & dummy) || - !ReadBin4(fp, src->mPath, order, 2, & block)) - return (0); + if(!ReadBin4(fp, src->mPath, order, 2, &format) || + !ReadBin4(fp, src->mPath, order, 2, &channels) || + !ReadBin4(fp, src->mPath, order, 4, &rate) || + !ReadBin4(fp, src->mPath, order, 4, &dummy) || + !ReadBin4(fp, src->mPath, order, 2, &block)) + return 0; block /= channels; if(chunkSize > 14) { @@ -1654,10 +1704,11 @@ static int ReadWaveList(FILE *fp, const SourceRefT *src, const ByteOrderT order, uint block, skip, offset, i; double lastSample; - for (;;) { - if(!ReadBin4(fp, src->mPath, BO_LITTLE, 4, & fourCC) || - !ReadBin4(fp, src->mPath, order, 4, & chunkSize)) - return (0); + for(;;) + { + if(!ReadBin4(fp, src->mPath, BO_LITTLE, 4, &fourCC) || + !ReadBin4(fp, src->mPath, order, 4, &chunkSize)) + return 0; if(fourCC == FOURCC_DATA) { @@ -1811,7 +1862,7 @@ static int LoadAsciiSource(FILE *fp, const SourceRefT *src, const uint n, double for(i = 0;i < src->mOffset;i++) { if(!ReadAsciiAsDouble(&tr, src->mPath, src->mType, (uint)src->mBits, &dummy)) - return (0); + return 0; } for(i = 0;i < n;i++) { @@ -1832,7 +1883,7 @@ static int LoadSource(SourceRefT *src, const uint hrirRate, const uint n, double int result; FILE *fp; - if (src->mFormat == SF_ASCII) + if(src->mFormat == SF_ASCII) fp = fopen(src->mPath, "r"); else fp = fopen(src->mPath, "rb"); @@ -1902,63 +1953,90 @@ static int WriteBin4(const ByteOrderT order, const uint bytes, const uint32 in, } // Store the OpenAL Soft HRTF data set. -static int StoreMhr(const HrirDataT *hData, const int experimental, const char *filename) +static int StoreMhr(const HrirDataT *hData, const char *filename) { - uint e, step, end, n, j, i; - uint dither_seed; + uint channels = (hData->mChannelType == CT_STEREO) ? 2 : 1; + uint n = hData->mIrPoints; FILE *fp; - int v; + uint fi, ei, ai, i; + uint dither_seed = 22222; if((fp=fopen(filename, "wb")) == NULL) { fprintf(stderr, "Error: Could not open MHR file '%s'.\n", filename); return 0; } - if(!WriteAscii(experimental ? 
MHR_FORMAT_EXPERIMENTAL : MHR_FORMAT, fp, filename)) + if(!WriteAscii(MHR_FORMAT, fp, filename)) return 0; if(!WriteBin4(BO_LITTLE, 4, (uint32)hData->mIrRate, fp, filename)) return 0; - if(experimental) - { - if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mSampleType, fp, filename)) - return 0; - if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mChannelType, fp, filename)) - return 0; - } + if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mSampleType, fp, filename)) + return 0; + if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mChannelType, fp, filename)) + return 0; if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mIrPoints, fp, filename)) return 0; - if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mEvCount, fp, filename)) + if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mFdCount, fp, filename)) return 0; - for(e = 0;e < hData->mEvCount;e++) + for(fi = 0;fi < hData->mFdCount;fi++) { - if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mAzCount[e], fp, filename)) + if(!WriteBin4(BO_LITTLE, 2, (uint32)(1000.0 * hData->mFds[fi].mDistance), fp, filename)) + return 0; + if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mFds[fi].mEvCount, fp, filename)) return 0; + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) + { + if(!WriteBin4(BO_LITTLE, 1, (uint32)hData->mFds[fi].mEvs[ei].mAzCount, fp, filename)) + return 0; + } } - step = hData->mIrSize; - end = hData->mIrCount * step; - n = hData->mIrPoints; - dither_seed = 22222; - for(j = 0;j < end;j += step) + + for(fi = 0;fi < hData->mFdCount;fi++) { - const double scale = (!experimental || hData->mSampleType == ST_S16) ? 32767.0 : + const double scale = (hData->mSampleType == ST_S16) ? 32767.0 : ((hData->mSampleType == ST_S24) ? 8388607.0 : 0.0); - const int bps = (!experimental || hData->mSampleType == ST_S16) ? 2 : + const int bps = (hData->mSampleType == ST_S16) ? 2 : ((hData->mSampleType == ST_S24) ? 
3 : 0); - double out[MAX_TRUNCSIZE]; - for(i = 0;i < n;i++) - out[i] = TpdfDither(scale * hData->mHrirs[j+i], &dither_seed); - for(i = 0;i < n;i++) + + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) { - v = (int)Clamp(out[i], -scale-1.0, scale); - if(!WriteBin4(BO_LITTLE, bps, (uint32)v, fp, filename)) - return 0; + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + double out[2 * MAX_TRUNCSIZE]; + + TpdfDither(out, azd->mIrs[0], scale, n, channels, &dither_seed); + if(hData->mChannelType == CT_STEREO) + TpdfDither(out+1, azd->mIrs[1], scale, n, channels, &dither_seed); + for(i = 0;i < (channels * n);i++) + { + int v = (int)Clamp(out[i], -scale-1.0, scale); + if(!WriteBin4(BO_LITTLE, bps, (uint32)v, fp, filename)) + return 0; + } + } } } - for(j = 0;j < hData->mIrCount;j++) + for(fi = 0;fi < hData->mFdCount;fi++) { - v = (int)fmin(round(hData->mIrRate * hData->mHrtds[j]), MAX_HRTD); - if(!WriteBin4(BO_LITTLE, 1, (uint32)v, fp, filename)) - return 0; + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + int v = (int)fmin(round(hData->mIrRate * azd->mDelays[0]), MAX_HRTD); + + if(!WriteBin4(BO_LITTLE, 1, (uint32)v, fp, filename)) + return 0; + if(hData->mChannelType == CT_STEREO) + { + v = (int)fmin(round(hData->mIrRate * azd->mDelays[1]), MAX_HRTD); + + if(!WriteBin4(BO_LITTLE, 1, (uint32)v, fp, filename)) + return 0; + } + } + } } fclose(fp); return 1; @@ -1970,14 +2048,12 @@ static int StoreMhr(const HrirDataT *hData, const int experimental, const char * ***********************/ // Calculate the onset time of an HRIR and average it with any existing -// timing for its elevation and azimuth. -static void AverageHrirOnset(const double *hrir, const double f, const uint ei, const uint ai, const HrirDataT *hData) +// timing for its field, elevation, azimuth, and ear. +static double AverageHrirOnset(const uint rate, const uint n, const double *hrir, const double f, const double onset) { - double mag; - uint n, i, j; + double mag = 0.0; + uint i; - mag = 0.0; - n = hData->mIrPoints; for(i = 0;i < n;i++) mag = fmax(fabs(hrir[i]), mag); mag *= 0.15; @@ -1986,33 +2062,27 @@ static void AverageHrirOnset(const double *hrir, const double f, const uint ei, if(fabs(hrir[i]) >= mag) break; } - j = hData->mEvOffset[ei] + ai; - hData->mHrtds[j] = Lerp(hData->mHrtds[j], ((double)i) / hData->mIrRate, f); + return Lerp(onset, (double)i / rate, f); } // Calculate the magnitude response of an HRIR and average it with any -// existing responses for its elevation and azimuth. -static void AverageHrirMagnitude(const double *hrir, const double f, const uint ei, const uint ai, const HrirDataT *hData) +// existing responses for its field, elevation, azimuth, and ear. 
+static void AverageHrirMagnitude(const uint points, const uint n, const double *hrir, const double f, double *mag) { - uint n, m, i, j; - Complex *cplx; - double *mags; + uint m = 1 + (n / 2), i; + Complex *h = CreateComplexes(n); + double *r = CreateDoubles(n); - n = hData->mFftSize; - cplx = calloc(sizeof(*cplx), n); - mags = calloc(sizeof(*mags), n); - for(i = 0;i < hData->mIrPoints;i++) - cplx[i] = MakeComplex(hrir[i], 0.0); + for(i = 0;i < points;i++) + h[i] = MakeComplex(hrir[i], 0.0); for(;i < n;i++) - cplx[i] = MakeComplex(0.0, 0.0); - FftForward(n, cplx, cplx); - MagnitudeResponse(n, cplx, mags); - m = 1 + (n / 2); - j = (hData->mEvOffset[ei] + ai) * hData->mIrSize; + h[i] = MakeComplex(0.0, 0.0); + FftForward(n, h, h); + MagnitudeResponse(n, h, r); for(i = 0;i < m;i++) - hData->mHrirs[j+i] = Lerp(hData->mHrirs[j+i], mags[i], f); - free(mags); - free(cplx); + mag[i] = Lerp(mag[i], r[i], f); + free(r); + free(h); } /* Calculate the contribution of each HRIR to the diffuse-field average based @@ -2021,34 +2091,39 @@ static void AverageHrirMagnitude(const double *hrir, const double f, const uint */ static void CalculateDfWeights(const HrirDataT *hData, double *weights) { - double evs, sum, ev, up_ev, down_ev, solidAngle; - uint ei; + double sum, evs, ev, upperEv, lowerEv, solidAngle; + uint fi, ei; - evs = 90.0 / (hData->mEvCount - 1); sum = 0.0; - for(ei = hData->mEvStart;ei < hData->mEvCount;ei++) + for(fi = 0;fi < hData->mFdCount;fi++) { - // For each elevation, calculate the upper and lower limits of the - // patch band. - ev = -90.0 + (ei * 2.0 * evs); - if(ei < (hData->mEvCount - 1)) - up_ev = (ev + evs) * M_PI / 180.0; - else - up_ev = M_PI / 2.0; - if(ei > 0) - down_ev = (ev - evs) * M_PI / 180.0; - else - down_ev = -M_PI / 2.0; - // Calculate the area of the patch band. - solidAngle = 2.0 * M_PI * (sin(up_ev) - sin(down_ev)); - // Each weight is the area of one patch. - weights[ei] = solidAngle / hData->mAzCount [ei]; - // Sum the total surface area covered by the HRIRs. - sum += solidAngle; + evs = M_PI / 2.0 / (hData->mFds[fi].mEvCount - 1); + for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++) + { + // For each elevation, calculate the upper and lower limits of + // the patch band. + ev = hData->mFds[fi].mEvs[ei].mElevation; + lowerEv = fmax(-M_PI / 2.0, ev - evs); + upperEv = fmin(M_PI / 2.0, ev + evs); + // Calculate the area of the patch band. + solidAngle = 2.0 * M_PI * (sin(upperEv) - sin(lowerEv)); + // Each weight is the area of one patch. + weights[(fi * MAX_EV_COUNT) + ei] = solidAngle / hData->mFds[fi].mEvs[ei].mAzCount; + // Sum the total surface area covered by the HRIRs of all fields. + sum += solidAngle; + } + } + /* TODO: It may be interesting to experiment with how a volume-based + weighting performs compared to the existing distance-indepenent + surface patches. + */ + for(fi = 0;fi < hData->mFdCount;fi++) + { + // Normalize the weights given the total surface coverage for all + // fields. + for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++) + weights[(fi * MAX_EV_COUNT) + ei] /= sum; } - // Normalize the weights given the total surface coverage. - for(ei = hData->mEvStart;ei < hData->mEvCount;ei++) - weights[ei] /= sum; } /* Calculate the diffuse-field average from the given magnitude responses of @@ -2056,12 +2131,11 @@ static void CalculateDfWeights(const HrirDataT *hData, double *weights) * surface area covered by each HRIR. 
The final average can then be limited * by the specified magnitude range (in positive dB; 0.0 to skip). */ -static void CalculateDiffuseFieldAverage(const HrirDataT *hData, const int weighted, const double limit, double *dfa) +static void CalculateDiffuseFieldAverage(const HrirDataT *hData, const uint channels, const uint m, const int weighted, const double limit, double *dfa) { - uint ei, ai, count, step, start, end, m, j, i; - double *weights; + double *weights = CreateDoubles(hData->mFdCount * MAX_EV_COUNT); + uint count, ti, fi, ei, i, ai; - weights = CreateArray(hData->mEvCount); if(weighted) { // Use coverage weighting to calculate the average. @@ -2069,61 +2143,76 @@ static void CalculateDiffuseFieldAverage(const HrirDataT *hData, const int weigh } else { + double weight; + // If coverage weighting is not used, the weights still need to be - // averaged by the number of HRIRs. - count = 0; - for(ei = hData->mEvStart;ei < hData->mEvCount;ei++) - count += hData->mAzCount [ei]; - for(ei = hData->mEvStart;ei < hData->mEvCount;ei++) - weights[ei] = 1.0 / count; - } - ei = hData->mEvStart; - ai = 0; - step = hData->mIrSize; - start = hData->mEvOffset[ei] * step; - end = hData->mIrCount * step; - m = 1 + (hData->mFftSize / 2); - for(i = 0;i < m;i++) - dfa[i] = 0.0; - for(j = start;j < end;j += step) + // averaged by the number of existing HRIRs. + count = hData->mIrCount; + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = 0;ei < hData->mFds[fi].mEvStart;ei++) + count -= hData->mFds[fi].mEvs[ei].mAzCount; + } + weight = 1.0 / count; + + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++) + weights[(fi * MAX_EV_COUNT) + ei] = weight; + } + } + for(ti = 0;ti < channels;ti++) { - // Get the weight for this HRIR's contribution. - double weight = weights[ei]; - // Add this HRIR's weighted power average to the total. for(i = 0;i < m;i++) - dfa[i] += weight * hData->mHrirs[j+i] * hData->mHrirs[j+i]; - // Determine the next weight to use. - ai++; - if(ai >= hData->mAzCount[ei]) + dfa[(ti * m) + i] = 0.0; + for(fi = 0;fi < hData->mFdCount;fi++) { - ei++; - ai = 0; + for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + // Get the weight for this HRIR's contribution. + double weight = weights[(fi * MAX_EV_COUNT) + ei]; + + // Add this HRIR's weighted power average to the total. + for(i = 0;i < m;i++) + dfa[(ti * m) + i] += weight * azd->mIrs[ti][i] * azd->mIrs[ti][i]; + } + } } + // Finish the average calculation and keep it from being too small. + for(i = 0;i < m;i++) + dfa[(ti * m) + i] = fmax(sqrt(dfa[(ti * m) + i]), EPSILON); + // Apply a limit to the magnitude range of the diffuse-field average + // if desired. + if(limit > 0.0) + LimitMagnitudeResponse(hData->mFftSize, m, limit, &dfa[ti * m], &dfa[ti * m]); } - // Finish the average calculation and keep it from being too small. - for(i = 0;i < m;i++) - dfa[i] = fmax(sqrt(dfa[i]), EPSILON); - // Apply a limit to the magnitude range of the diffuse-field average if - // desired. - if(limit > 0.0) - LimitMagnitudeResponse(hData->mFftSize, limit, dfa, dfa); - DestroyArray(weights); + free(weights); } // Perform diffuse-field equalization on the magnitude responses of the HRIR // set using the given average response. 
-static void DiffuseFieldEqualize(const double *dfa, const HrirDataT *hData) +static void DiffuseFieldEqualize(const uint channels, const uint m, const double *dfa, const HrirDataT *hData) { - uint step, start, end, m, j, i; + uint ti, fi, ei, ai, i; - step = hData->mIrSize; - start = hData->mEvOffset[hData->mEvStart] * step; - end = hData->mIrCount * step; - m = 1 + (hData->mFftSize / 2); - for(j = start;j < end;j += step) + for(ti = 0;ti < channels;ti++) { - for(i = 0;i < m;i++) - hData->mHrirs[j+i] /= dfa[i]; + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + + for(i = 0;i < m;i++) + azd->mIrs[ti][i] /= dfa[(ti * m) + i]; + } + } + } } } @@ -2131,187 +2220,241 @@ static void DiffuseFieldEqualize(const double *dfa, const HrirDataT *hData) // HRIR set. static void ReconstructHrirs(const HrirDataT *hData) { - uint step, start, end, n, j, i; - uint pcdone, lastpc; - Complex *cplx; + uint channels = (hData->mChannelType == CT_STEREO) ? 2 : 1; + uint n = hData->mFftSize; + uint ti, fi, ei, ai, i; + Complex *h = CreateComplexes(n); + uint total, count, pcdone, lastpc; - pcdone = lastpc = 0; + total = hData->mIrCount; + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = 0;ei < hData->mFds[fi].mEvStart;ei++) + total -= hData->mFds[fi].mEvs[ei].mAzCount; + } + total *= channels; + count = pcdone = lastpc = 0; printf("%3d%% done.", pcdone); fflush(stdout); - - step = hData->mIrSize; - start = hData->mEvOffset[hData->mEvStart] * step; - end = hData->mIrCount * step; - n = hData->mFftSize; - cplx = calloc(sizeof(*cplx), n); - for(j = start;j < end;j += step) - { - MinimumPhase(n, &hData->mHrirs[j], cplx); - FftInverse(n, cplx, cplx); - for(i = 0;i < hData->mIrPoints;i++) - hData->mHrirs[j+i] = cplx[i].Real; - pcdone = (j+step-start) * 100 / (end-start); - if(pcdone != lastpc) - { - lastpc = pcdone; - printf("\r%3d%% done.", pcdone); - fflush(stdout); + for(ti = 0;ti < channels;ti++) + { + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + + MinimumPhase(n, azd->mIrs[ti], h); + FftInverse(n, h, h); + for(i = 0;i < hData->mIrPoints;i++) + azd->mIrs[ti][i] = h[i].Real; + pcdone = ++count * 100 / total; + if(pcdone != lastpc) + { + lastpc = pcdone; + printf("\r%3d%% done.", pcdone); + fflush(stdout); + } + } + } } } - free(cplx); printf("\n"); + free(h); } // Resamples the HRIRs for use at the given sampling rate. static void ResampleHrirs(const uint rate, HrirDataT *hData) { - uint n, step, start, end, j; + uint channels = (hData->mChannelType == CT_STEREO) ? 
2 : 1; + uint n = hData->mIrPoints; + uint ti, fi, ei, ai; ResamplerT rs; ResamplerSetup(&rs, hData->mIrRate, rate); - n = hData->mIrPoints; - step = hData->mIrSize; - start = hData->mEvOffset[hData->mEvStart] * step; - end = hData->mIrCount * step; - for(j = start;j < end;j += step) - ResamplerRun(&rs, n, &hData->mHrirs[j], n, &hData->mHrirs[j]); - ResamplerClear(&rs); + for(ti = 0;ti < channels;ti++) + { + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = hData->mFds[fi].mEvStart;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + + ResamplerRun(&rs, n, azd->mIrs[ti], n, azd->mIrs[ti]); + } + } + } + } hData->mIrRate = rate; + ResamplerClear(&rs); } -/* Given an elevation index and an azimuth, calculate the indices of the two - * HRIRs that bound the coordinate along with a factor for calculating the - * continous HRIR using interpolation. +/* Given field and elevation indices and an azimuth, calculate the indices of + * the two HRIRs that bound the coordinate along with a factor for + * calculating the continuous HRIR using interpolation. */ -static void CalcAzIndices(const HrirDataT *hData, const uint ei, const double az, uint *j0, uint *j1, double *jf) +static void CalcAzIndices(const HrirDataT *hData, const uint fi, const uint ei, const double az, uint *a0, uint *a1, double *af) { - double af; - uint ai; - - af = ((2.0*M_PI) + az) * hData->mAzCount[ei] / (2.0*M_PI); - ai = ((uint)af) % hData->mAzCount[ei]; - af -= floor(af); + double f = (2.0*M_PI + az) * hData->mFds[fi].mEvs[ei].mAzCount / (2.0*M_PI); + uint i = (uint)f % hData->mFds[fi].mEvs[ei].mAzCount; - *j0 = hData->mEvOffset[ei] + ai; - *j1 = hData->mEvOffset[ei] + ((ai+1) % hData->mAzCount [ei]); - *jf = af; + f -= floor(f); + *a0 = i; + *a1 = (i + 1) % hData->mFds[fi].mEvs[ei].mAzCount; + *af = f; } -// Synthesize any missing onset timings at the bottom elevations. This just -// blends between slightly exaggerated known onsets. Not an accurate model. +// Synthesize any missing onset timings at the bottom elevations of each +// field. This just blends between slightly exaggerated known onsets (not +// an accurate model). static void SynthesizeOnsets(HrirDataT *hData) { - uint oi, e, a, j0, j1; - double t, of, jf; + uint channels = (hData->mChannelType == CT_STEREO) ? 
2 : 1; + uint ti, fi, oi, ai, ei, a0, a1; + double t, of, af; - oi = hData->mEvStart; - t = 0.0; - for(a = 0;a < hData->mAzCount[oi];a++) - t += hData->mHrtds[hData->mEvOffset[oi] + a]; - hData->mHrtds[0] = 1.32e-4 + (t / hData->mAzCount[oi]); - for(e = 1;e < hData->mEvStart;e++) + for(ti = 0;ti < channels;ti++) { - of = ((double)e) / hData->mEvStart; - for(a = 0;a < hData->mAzCount[e];a++) + for(fi = 0;fi < hData->mFdCount;fi++) { - CalcAzIndices(hData, oi, a * 2.0 * M_PI / hData->mAzCount[e], &j0, &j1, &jf); - hData->mHrtds[hData->mEvOffset[e] + a] = Lerp(hData->mHrtds[0], Lerp(hData->mHrtds[j0], hData->mHrtds[j1], jf), of); + if(hData->mFds[fi].mEvStart <= 0) + continue; + oi = hData->mFds[fi].mEvStart; + t = 0.0; + for(ai = 0;ai < hData->mFds[fi].mEvs[oi].mAzCount;ai++) + t += hData->mFds[fi].mEvs[oi].mAzs[ai].mDelays[ti]; + hData->mFds[fi].mEvs[0].mAzs[0].mDelays[ti] = 1.32e-4 + (t / hData->mFds[fi].mEvs[oi].mAzCount); + for(ei = 1;ei < hData->mFds[fi].mEvStart;ei++) + { + of = (double)ei / hData->mFds[fi].mEvStart; + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + CalcAzIndices(hData, fi, oi, hData->mFds[fi].mEvs[ei].mAzs[ai].mAzimuth, &a0, &a1, &af); + hData->mFds[fi].mEvs[ei].mAzs[ai].mDelays[ti] = Lerp(hData->mFds[fi].mEvs[0].mAzs[0].mDelays[ti], Lerp(hData->mFds[fi].mEvs[oi].mAzs[a0].mDelays[ti], hData->mFds[fi].mEvs[oi].mAzs[a1].mDelays[ti], af), of); + } + } } } } -/* Attempt to synthesize any missing HRIRs at the bottom elevations. Right - * now this just blends the lowest elevation HRIRs together and applies some - * attenuation and high frequency damping. It is a simple, if inaccurate - * model. +/* Attempt to synthesize any missing HRIRs at the bottom elevations of each + * field. Right now this just blends the lowest elevation HRIRs together and + * applies some attenuation and high frequency damping. It is a simple, if + * inaccurate model. */ -static void SynthesizeHrirs (HrirDataT *hData) +static void SynthesizeHrirs(HrirDataT *hData) { - uint oi, a, e, step, n, i, j; + uint channels = (hData->mChannelType == CT_STEREO) ? 
2 : 1; + uint n = hData->mIrPoints; + uint ti, fi, oi, ai, ei, i; double lp[4], s0, s1; double of, b; - uint j0, j1; - double jf; + uint a0, a1; + double af; - if(hData->mEvStart <= 0) - return; - step = hData->mIrSize; - oi = hData->mEvStart; - n = hData->mIrPoints; - for(i = 0;i < n;i++) - hData->mHrirs[i] = 0.0; - for(a = 0;a < hData->mAzCount[oi];a++) + for(ti = 0;ti < channels;ti++) { - j = (hData->mEvOffset[oi] + a) * step; - for(i = 0;i < n;i++) - hData->mHrirs[i] += hData->mHrirs[j+i] / hData->mAzCount[oi]; - } - for(e = 1;e < hData->mEvStart;e++) - { - of = ((double)e) / hData->mEvStart; - b = (1.0 - of) * (3.5e-6 * hData->mIrRate); - for(a = 0;a < hData->mAzCount[e];a++) + for(fi = 0;fi < hData->mFdCount;fi++) { - j = (hData->mEvOffset[e] + a) * step; - CalcAzIndices(hData, oi, a * 2.0 * M_PI / hData->mAzCount[e], &j0, &j1, &jf); - j0 *= step; - j1 *= step; + if(hData->mFds[fi].mEvStart <= 0) + continue; + oi = hData->mFds[fi].mEvStart; + for(i = 0;i < n;i++) + hData->mFds[fi].mEvs[0].mAzs[0].mIrs[ti][i] = 0.0; + for(ai = 0;ai < hData->mFds[fi].mEvs[oi].mAzCount;ai++) + { + for(i = 0;i < n;i++) + hData->mFds[fi].mEvs[0].mAzs[0].mIrs[ti][i] += hData->mFds[fi].mEvs[oi].mAzs[ai].mIrs[ti][i] / hData->mFds[fi].mEvs[oi].mAzCount; + } + for(ei = 1;ei < hData->mFds[fi].mEvStart;ei++) + { + of = (double)ei / hData->mFds[fi].mEvStart; + b = (1.0 - of) * (3.5e-6 * hData->mIrRate); + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + CalcAzIndices(hData, fi, oi, hData->mFds[fi].mEvs[ei].mAzs[ai].mAzimuth, &a0, &a1, &af); + lp[0] = 0.0; + lp[1] = 0.0; + lp[2] = 0.0; + lp[3] = 0.0; + for(i = 0;i < n;i++) + { + s0 = hData->mFds[fi].mEvs[0].mAzs[0].mIrs[ti][i]; + s1 = Lerp(hData->mFds[fi].mEvs[oi].mAzs[a0].mIrs[ti][i], hData->mFds[fi].mEvs[oi].mAzs[a1].mIrs[ti][i], af); + s0 = Lerp(s0, s1, of); + lp[0] = Lerp(s0, lp[0], b); + lp[1] = Lerp(lp[0], lp[1], b); + lp[2] = Lerp(lp[1], lp[2], b); + lp[3] = Lerp(lp[2], lp[3], b); + hData->mFds[fi].mEvs[ei].mAzs[ai].mIrs[ti][i] = lp[3]; + } + } + } + b = 3.5e-6 * hData->mIrRate; lp[0] = 0.0; lp[1] = 0.0; lp[2] = 0.0; lp[3] = 0.0; for(i = 0;i < n;i++) { - s0 = hData->mHrirs[i]; - s1 = Lerp(hData->mHrirs[j0+i], hData->mHrirs[j1+i], jf); - s0 = Lerp(s0, s1, of); + s0 = hData->mFds[fi].mEvs[0].mAzs[0].mIrs[ti][i]; lp[0] = Lerp(s0, lp[0], b); lp[1] = Lerp(lp[0], lp[1], b); lp[2] = Lerp(lp[1], lp[2], b); lp[3] = Lerp(lp[2], lp[3], b); - hData->mHrirs[j+i] = lp[3]; + hData->mFds[fi].mEvs[0].mAzs[0].mIrs[ti][i] = lp[3]; } + hData->mFds[fi].mEvStart = 0; } } - b = 3.5e-6 * hData->mIrRate; - lp[0] = 0.0; - lp[1] = 0.0; - lp[2] = 0.0; - lp[3] = 0.0; - for(i = 0;i < n;i++) - { - s0 = hData->mHrirs[i]; - lp[0] = Lerp(s0, lp[0], b); - lp[1] = Lerp(lp[0], lp[1], b); - lp[2] = Lerp(lp[1], lp[2], b); - lp[3] = Lerp(lp[2], lp[3], b); - hData->mHrirs[i] = lp[3]; - } - hData->mEvStart = 0; } // The following routines assume a full set of HRIRs for all elevations. // Normalize the HRIR set and slightly attenuate the result. -static void NormalizeHrirs (const HrirDataT *hData) +static void NormalizeHrirs(const HrirDataT *hData) { - uint step, end, n, j, i; - double maxLevel; + uint channels = (hData->mChannelType == CT_STEREO) ? 
2 : 1; + uint n = hData->mIrPoints; + uint ti, fi, ei, ai, i; + double maxLevel = 0.0; - step = hData->mIrSize; - end = hData->mIrCount * step; - n = hData->mIrPoints; - maxLevel = 0.0; - for(j = 0;j < end;j += step) + for(ti = 0;ti < channels;ti++) { - for(i = 0;i < n;i++) - maxLevel = fmax(fabs(hData->mHrirs[j+i]), maxLevel); + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + + for(i = 0;i < n;i++) + maxLevel = fmax(fabs(azd->mIrs[ti][i]), maxLevel); + } + } + } } maxLevel = 1.01 * maxLevel; - for(j = 0;j < end;j += step) + for(ti = 0;ti < channels;ti++) { - for(i = 0;i < n;i++) - hData->mHrirs[j+i] /= maxLevel; + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + + for(i = 0;i < n;i++) + azd->mIrs[ti][i] /= maxLevel; + } + } + } } } @@ -2326,93 +2469,259 @@ static double CalcLTD(const double ev, const double az, const double rad, const al = (0.5 * M_PI) + azp; if(dlp > l) dlp = l + (rad * (al - acos(rad / dist))); - return (dlp / 343.3); + return dlp / 343.3; } // Calculate the effective head-related time delays for each minimum-phase // HRIR. -static void CalculateHrtds (const HeadModelT model, const double radius, HrirDataT *hData) +static void CalculateHrtds(const HeadModelT model, const double radius, HrirDataT *hData) { - double minHrtd, maxHrtd; - uint e, a, j; + uint channels = (hData->mChannelType == CT_STEREO) ? 2 : 1; + double minHrtd = INFINITY, maxHrtd = -INFINITY; + uint ti, fi, ei, ai; double t; - minHrtd = 1000.0; - maxHrtd = -1000.0; - for(e = 0;e < hData->mEvCount;e++) + if(model == HM_DATASET) { - for(a = 0;a < hData->mAzCount[e];a++) + for(ti = 0;ti < channels;ti++) { - j = hData->mEvOffset[e] + a; - if(model == HM_DATASET) - t = hData->mHrtds[j] * radius / hData->mRadius; - else - t = CalcLTD((-90.0 + (e * 180.0 / (hData->mEvCount - 1))) * M_PI / 180.0, - (a * 360.0 / hData->mAzCount [e]) * M_PI / 180.0, - radius, hData->mDistance); - hData->mHrtds[j] = t; - maxHrtd = fmax(t, maxHrtd); - minHrtd = fmin(t, minHrtd); + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + + t = azd->mDelays[ti] * radius / hData->mRadius; + azd->mDelays[ti] = t; + maxHrtd = fmax(t, maxHrtd); + minHrtd = fmin(t, minHrtd); + } + } + } + } + } + else + { + for(ti = 0;ti < channels;ti++) + { + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) + { + HrirEvT *evd = &hData->mFds[fi].mEvs[ei]; + + for(ai = 0;ai < evd->mAzCount;ai++) + { + HrirAzT *azd = &evd->mAzs[ai]; + + t = CalcLTD(evd->mElevation, azd->mAzimuth, radius, hData->mFds[fi].mDistance); + azd->mDelays[ti] = t; + maxHrtd = fmax(t, maxHrtd); + minHrtd = fmin(t, minHrtd); + } + } + } + } + } + for(ti = 0;ti < channels;ti++) + { + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + hData->mFds[fi].mEvs[ei].mAzs[ai].mDelays[ti] -= minHrtd; + } + } + } +} + +// Clear the initial HRIR data state. 
+static void ResetHrirData(HrirDataT *hData) +{ + hData->mIrRate = 0; + hData->mSampleType = ST_S24; + hData->mChannelType = CT_NONE; + hData->mIrPoints = 0; + hData->mFftSize = 0; + hData->mIrSize = 0; + hData->mRadius = 0.0; + hData->mIrCount = 0; + hData->mFdCount = 0; + hData->mFds = NULL; +} + +// Allocate and configure dynamic HRIR structures. +static int PrepareHrirData(const uint fdCount, const double distances[MAX_FD_COUNT], const uint evCounts[MAX_FD_COUNT], const uint azCounts[MAX_FD_COUNT * MAX_EV_COUNT], HrirDataT *hData) +{ + uint evTotal = 0, azTotal = 0, fi, ei, ai; + + for(fi = 0;fi < fdCount;fi++) + { + evTotal += evCounts[fi]; + for(ei = 0;ei < evCounts[fi];ei++) + azTotal += azCounts[(fi * MAX_EV_COUNT) + ei]; + } + hData->mFds = calloc(fdCount, sizeof(*hData->mFds)); + if(hData->mFds == NULL) + return 0; + hData->mFds[0].mEvs = calloc(evTotal, sizeof(*hData->mFds[0].mEvs)); + if(hData->mFds[0].mEvs == NULL) + return 0; + hData->mFds[0].mEvs[0].mAzs = calloc(azTotal, sizeof(*hData->mFds[0].mEvs[0].mAzs)); + if(hData->mFds[0].mEvs[0].mAzs == NULL) + return 0; + hData->mIrCount = azTotal; + hData->mFdCount = fdCount; + evTotal = 0; + azTotal = 0; + for(fi = 0;fi < fdCount;fi++) + { + hData->mFds[fi].mDistance = distances[fi]; + hData->mFds[fi].mEvCount = evCounts[fi]; + hData->mFds[fi].mEvStart = 0; + hData->mFds[fi].mEvs = &hData->mFds[0].mEvs[evTotal]; + evTotal += evCounts[fi]; + for(ei = 0;ei < evCounts[fi];ei++) + { + uint azCount = azCounts[(fi * MAX_EV_COUNT) + ei]; + + hData->mFds[fi].mIrCount += azCount; + hData->mFds[fi].mEvs[ei].mElevation = -M_PI / 2.0 + M_PI * ei / (evCounts[fi] - 1); + hData->mFds[fi].mEvs[ei].mIrCount += azCount; + hData->mFds[fi].mEvs[ei].mAzCount = azCount; + hData->mFds[fi].mEvs[ei].mAzs = &hData->mFds[0].mEvs[0].mAzs[azTotal]; + for(ai = 0;ai < azCount;ai++) + { + hData->mFds[fi].mEvs[ei].mAzs[ai].mAzimuth = 2.0 * M_PI * ai / azCount; + hData->mFds[fi].mEvs[ei].mAzs[ai].mIndex = azTotal + ai; + hData->mFds[fi].mEvs[ei].mAzs[ai].mDelays[0] = 0.0; + hData->mFds[fi].mEvs[ei].mAzs[ai].mDelays[1] = 0.0; + hData->mFds[fi].mEvs[ei].mAzs[ai].mIrs[0] = NULL; + hData->mFds[fi].mEvs[ei].mAzs[ai].mIrs[1] = NULL; + } + azTotal += azCount; } } - maxHrtd -= minHrtd; - for(j = 0;j < hData->mIrCount;j++) - hData->mHrtds[j] -= minHrtd; - hData->mMaxHrtd = maxHrtd; + return 1; } +// Clean up HRIR data. +static void FreeHrirData(HrirDataT *hData) +{ + if(hData->mFds != NULL) + { + if(hData->mFds[0].mEvs != NULL) + { + if(hData->mFds[0].mEvs[0].mAzs) + { + if(hData->mFds[0].mEvs[0].mAzs[0].mIrs[0] != NULL) + free(hData->mFds[0].mEvs[0].mAzs[0].mIrs[0]); + free(hData->mFds[0].mEvs[0].mAzs); + } + free(hData->mFds[0].mEvs); + } + free(hData->mFds); + hData->mFds = NULL; + } +} + +// Match the channel type from a given identifier. +static ChannelTypeT MatchChannelType(const char *ident) +{ + if(strcasecmp(ident, "mono") == 0) + return CT_MONO; + if(strcasecmp(ident, "stereo") == 0) + return CT_STEREO; + return CT_NONE; +} // Process the data set definition to read and validate the data set metrics. 
static int ProcessMetrics(TokenReaderT *tr, const uint fftSize, const uint truncSize, HrirDataT *hData) { - int hasRate = 0, hasPoints = 0, hasAzimuths = 0; - int hasRadius = 0, hasDistance = 0; + int hasRate = 0, hasType = 0, hasPoints = 0, hasRadius = 0; + int hasDistance = 0, hasAzimuths = 0; char ident[MAX_IDENT_LEN+1]; uint line, col; double fpVal; uint points; int intVal; + double distances[MAX_FD_COUNT]; + uint fdCount = 0; + uint evCounts[MAX_FD_COUNT]; + uint *azCounts = calloc(MAX_FD_COUNT * MAX_EV_COUNT, sizeof(*azCounts)); - while(!(hasRate && hasPoints && hasAzimuths && hasRadius && hasDistance)) + if(azCounts == NULL) { - TrIndication(tr, & line, & col); + fprintf(stderr, "Error: Out of memory.\n"); + exit(-1); + } + TrIndication(tr, &line, &col); + while(TrIsIdent(tr)) + { + TrIndication(tr, &line, &col); if(!TrReadIdent(tr, MAX_IDENT_LEN, ident)) - return 0; + goto error; if(strcasecmp(ident, "rate") == 0) { if(hasRate) { TrErrorAt(tr, line, col, "Redefinition of 'rate'.\n"); - return 0; + goto error; } if(!TrReadOperator(tr, "=")) - return 0; + goto error; if(!TrReadInt(tr, MIN_RATE, MAX_RATE, &intVal)) - return 0; + goto error; hData->mIrRate = (uint)intVal; hasRate = 1; } + else if(strcasecmp(ident, "type") == 0) + { + char type[MAX_IDENT_LEN+1]; + + if(hasType) + { + TrErrorAt(tr, line, col, "Redefinition of 'type'.\n"); + goto error; + } + if(!TrReadOperator(tr, "=")) + goto error; + + if(!TrReadIdent(tr, MAX_IDENT_LEN, type)) + goto error; + hData->mChannelType = MatchChannelType(type); + if(hData->mChannelType == CT_NONE) + { + TrErrorAt(tr, line, col, "Expected a channel type.\n"); + goto error; + } + hasType = 1; + } else if(strcasecmp(ident, "points") == 0) { - if (hasPoints) { + if(hasPoints) + { TrErrorAt(tr, line, col, "Redefinition of 'points'.\n"); - return 0; + goto error; } if(!TrReadOperator(tr, "=")) - return 0; + goto error; TrIndication(tr, &line, &col); if(!TrReadInt(tr, MIN_POINTS, MAX_POINTS, &intVal)) - return 0; + goto error; points = (uint)intVal; if(fftSize > 0 && points > fftSize) { TrErrorAt(tr, line, col, "Value exceeds the overridden FFT size.\n"); - return 0; + goto error; } if(points < truncSize) { TrErrorAt(tr, line, col, "Value is below the truncation size.\n"); - return 0; + goto error; } hData->mIrPoints = points; if(fftSize <= 0) @@ -2429,90 +2738,178 @@ static int ProcessMetrics(TokenReaderT *tr, const uint fftSize, const uint trunc } hasPoints = 1; } - else if(strcasecmp(ident, "azimuths") == 0) + else if(strcasecmp(ident, "radius") == 0) { - if(hasAzimuths) + if(hasRadius) { - TrErrorAt(tr, line, col, "Redefinition of 'azimuths'.\n"); - return 0; + TrErrorAt(tr, line, col, "Redefinition of 'radius'.\n"); + goto error; } if(!TrReadOperator(tr, "=")) - return 0; - hData->mIrCount = 0; - hData->mEvCount = 0; - hData->mEvOffset[0] = 0; + goto error; + if(!TrReadFloat(tr, MIN_RADIUS, MAX_RADIUS, &fpVal)) + goto error; + hData->mRadius = fpVal; + hasRadius = 1; + } + else if(strcasecmp(ident, "distance") == 0) + { + uint count = 0; + + if(hasDistance) + { + TrErrorAt(tr, line, col, "Redefinition of 'distance'.\n"); + goto error; + } + if(!TrReadOperator(tr, "=")) + goto error; + for(;;) { - if(!TrReadInt(tr, MIN_AZ_COUNT, MAX_AZ_COUNT, &intVal)) - return 0; - hData->mAzCount[hData->mEvCount] = (uint)intVal; - hData->mIrCount += (uint)intVal; - hData->mEvCount ++; + if(!TrReadFloat(tr, MIN_DISTANCE, MAX_DISTANCE, &fpVal)) + goto error; + if(count > 0 && fpVal <= distances[count - 1]) + { + TrError(tr, "Distances are not ascending.\n"); + 
goto error; + } + distances[count++] = fpVal; if(!TrIsOperator(tr, ",")) break; - if(hData->mEvCount >= MAX_EV_COUNT) + if(count >= MAX_FD_COUNT) { - TrError(tr, "Exceeded the maximum of %d elevations.\n", MAX_EV_COUNT); - return 0; + TrError(tr, "Exceeded the maximum of %d fields.\n", MAX_FD_COUNT); + goto error; } - hData->mEvOffset[hData->mEvCount] = hData->mEvOffset[hData->mEvCount - 1] + ((uint)intVal); TrReadOperator(tr, ","); } - if(hData->mEvCount < MIN_EV_COUNT) + if(fdCount != 0 && count != fdCount) { - TrErrorAt(tr, line, col, "Did not reach the minimum of %d azimuth counts.\n", MIN_EV_COUNT); - return 0; + TrError(tr, "Did not match the specified number of %d fields.\n", fdCount); + goto error; } - hasAzimuths = 1; + fdCount = count; + hasDistance = 1; } - else if(strcasecmp(ident, "radius") == 0) + else if(strcasecmp(ident, "azimuths") == 0) { - if(hasRadius) + uint count = 0; + + if(hasAzimuths) { - TrErrorAt(tr, line, col, "Redefinition of 'radius'.\n"); - return 0; + TrErrorAt(tr, line, col, "Redefinition of 'azimuths'.\n"); + goto error; } if(!TrReadOperator(tr, "=")) - return 0; - if(!TrReadFloat(tr, MIN_RADIUS, MAX_RADIUS, &fpVal)) - return 0; - hData->mRadius = fpVal; - hasRadius = 1; - } - else if(strcasecmp(ident, "distance") == 0) - { - if(hasDistance) + goto error; + + evCounts[0] = 0; + for(;;) { - TrErrorAt(tr, line, col, "Redefinition of 'distance'.\n"); - return 0; + if(!TrReadInt(tr, MIN_AZ_COUNT, MAX_AZ_COUNT, &intVal)) + goto error; + azCounts[(count * MAX_EV_COUNT) + evCounts[count]++] = (uint)intVal; + if(TrIsOperator(tr, ",")) + { + if(evCounts[count] >= MAX_EV_COUNT) + { + TrError(tr, "Exceeded the maximum of %d elevations.\n", MAX_EV_COUNT); + goto error; + } + TrReadOperator(tr, ","); + } + else + { + if(evCounts[count] < MIN_EV_COUNT) + { + TrErrorAt(tr, line, col, "Did not reach the minimum of %d azimuth counts.\n", MIN_EV_COUNT); + goto error; + } + if(azCounts[count * MAX_EV_COUNT] != 1 || azCounts[(count * MAX_EV_COUNT) + evCounts[count] - 1] != 1) + { + TrError(tr, "Poles are not singular for field %d.\n", count - 1); + goto error; + } + count++; + if(TrIsOperator(tr, ";")) + { + if(count >= MAX_FD_COUNT) + { + TrError(tr, "Exceeded the maximum number of %d fields.\n", MAX_FD_COUNT); + goto error; + } + evCounts[count] = 0; + TrReadOperator(tr, ";"); + } + else + { + break; + } + } } - if(!TrReadOperator(tr, "=")) - return 0; - if(!TrReadFloat(tr, MIN_DISTANCE, MAX_DISTANCE, & fpVal)) - return 0; - hData->mDistance = fpVal; - hasDistance = 1; + if(fdCount != 0 && count != fdCount) + { + TrError(tr, "Did not match the specified number of %d fields.\n", fdCount); + goto error; + } + fdCount = count; + hasAzimuths = 1; } else { TrErrorAt(tr, line, col, "Expected a metric name.\n"); - return 0; + goto error; } - TrSkipWhitespace (tr); + TrSkipWhitespace(tr); + } + if(!(hasRate && hasPoints && hasRadius && hasDistance && hasAzimuths)) + { + TrErrorAt(tr, line, col, "Expected a metric name.\n"); + goto error; } + if(distances[0] < hData->mRadius) + { + TrError(tr, "Distance cannot start below head radius.\n"); + goto error; + } + if(hData->mChannelType == CT_NONE) + hData->mChannelType = CT_MONO; + if(!PrepareHrirData(fdCount, distances, evCounts, azCounts, hData)) + { + fprintf(stderr, "Error: Out of memory.\n"); + exit(-1); + } + free(azCounts); return 1; + +error: + free(azCounts); + return 0; } -// Parse an index pair from the data set definition. 
-static int ReadIndexPair(TokenReaderT *tr, const HrirDataT *hData, uint *ei, uint *ai) +// Parse an index triplet from the data set definition. +static int ReadIndexTriplet(TokenReaderT *tr, const HrirDataT *hData, uint *fi, uint *ei, uint *ai) { int intVal; - if(!TrReadInt(tr, 0, (int)hData->mEvCount, &intVal)) + + if(hData->mFdCount > 1) + { + if(!TrReadInt(tr, 0, (int)hData->mFdCount - 1, &intVal)) + return 0; + *fi = (uint)intVal; + if(!TrReadOperator(tr, ",")) + return 0; + } + else + { + *fi = 0; + } + if(!TrReadInt(tr, 0, (int)hData->mFds[*fi].mEvCount - 1, &intVal)) return 0; *ei = (uint)intVal; if(!TrReadOperator(tr, ",")) return 0; - if(!TrReadInt(tr, 0, (int)hData->mAzCount[*ei], &intVal)) + if(!TrReadInt(tr, 0, (int)hData->mFds[*fi].mEvs[*ei].mAzCount - 1, &intVal)) return 0; *ai = (uint)intVal; return 1; @@ -2598,7 +2995,7 @@ static int ReadSourceRef(TokenReaderT *tr, SourceRefT *src) TrIndication(tr, &line, &col); if(!TrReadInt(tr, -2147483647-1, 2147483647, &intVal)) return 0; - if(abs(intVal) < MIN_BIN_BITS || ((uint)abs(intVal)) > (8*src->mSize)) + if(abs(intVal) < MIN_BIN_BITS || (uint)abs(intVal) > (8*src->mSize)) { TrErrorAt(tr, line, col, "Expected a value of (+/-) %d to %d.\n", MIN_BIN_BITS, 8*src->mSize); return 0; @@ -2640,7 +3037,7 @@ static int ReadSourceRef(TokenReaderT *tr, SourceRefT *src) else { TrReadOperator(tr, ";"); - if(!TrReadInt (tr, 0, 0x7FFFFFFF, &intVal)) + if(!TrReadInt(tr, 0, 0x7FFFFFFF, &intVal)) return 0; src->mSkip = (uint)intVal; } @@ -2663,32 +3060,41 @@ static int ReadSourceRef(TokenReaderT *tr, SourceRefT *src) return 1; } +// Match the target ear (index) from a given identifier. +static int MatchTargetEar(const char *ident) +{ + if(strcasecmp(ident, "left") == 0) + return 0; + if(strcasecmp(ident, "right") == 0) + return 1; + return -1; +} + // Process the list of sources in the data set definition. static int ProcessSources(const HeadModelT model, TokenReaderT *tr, HrirDataT *hData) { - uint *setCount, *setFlag; - uint line, col, ei, ai; - SourceRefT src; - double factor; - double *hrir; + uint channels = (hData->mChannelType == CT_STEREO) ? 
2 : 1; + double *hrirs = CreateDoubles(channels * hData->mIrCount * hData->mIrSize); + double *hrir = CreateDoubles(hData->mIrPoints); + uint line, col, fi, ei, ai, ti; int count; printf("Loading sources..."); fflush(stdout); - count = 0; - setCount = (uint*)calloc(hData->mEvCount, sizeof(uint)); - setFlag = (uint*)calloc(hData->mIrCount, sizeof(uint)); - hrir = CreateArray(hData->mIrPoints); while(TrIsOperator(tr, "[")) { - TrIndication(tr, & line, & col); + double factor[2] = { 1.0, 1.0 }; + + TrIndication(tr, &line, &col); TrReadOperator(tr, "["); - if(!ReadIndexPair(tr, hData, &ei, &ai)) + if(!ReadIndexTriplet(tr, hData, &fi, &ei, &ai)) goto error; if(!TrReadOperator(tr, "]")) goto error; - if(setFlag[hData->mEvOffset[ei] + ai]) + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + + if(azd->mIrs[0] != NULL) { TrErrorAt(tr, line, col, "Redefinition of source.\n"); goto error; @@ -2696,9 +3102,11 @@ static int ProcessSources(const HeadModelT model, TokenReaderT *tr, HrirDataT *h if(!TrReadOperator(tr, "=")) goto error; - factor = 1.0; for(;;) { + SourceRefT src; + uint ti = 0; + if(!ReadSourceRef(tr, &src)) goto error; @@ -2712,48 +3120,101 @@ static int ProcessSources(const HeadModelT model, TokenReaderT *tr, HrirDataT *h if(!LoadSource(&src, hData->mIrRate, hData->mIrPoints, hrir)) goto error; + if(hData->mChannelType == CT_STEREO) + { + char ident[MAX_IDENT_LEN+1]; + + if(!TrReadIdent(tr, MAX_IDENT_LEN, ident)) + goto error; + ti = MatchTargetEar(ident); + if((int)ti < 0) + { + TrErrorAt(tr, line, col, "Expected a target ear.\n"); + goto error; + } + } + azd->mIrs[ti] = &hrirs[hData->mIrSize * (ti * hData->mIrCount + azd->mIndex)]; if(model == HM_DATASET) - AverageHrirOnset(hrir, 1.0 / factor, ei, ai, hData); - AverageHrirMagnitude(hrir, 1.0 / factor, ei, ai, hData); - factor += 1.0; + azd->mDelays[ti] = AverageHrirOnset(hData->mIrRate, hData->mIrPoints, hrir, 1.0 / factor[ti], azd->mDelays[ti]); + AverageHrirMagnitude(hData->mIrPoints, hData->mFftSize, hrir, 1.0 / factor[ti], azd->mIrs[ti]); + factor[ti] += 1.0; if(!TrIsOperator(tr, "+")) break; TrReadOperator(tr, "+"); } - setFlag[hData->mEvOffset[ei] + ai] = 1; - setCount[ei]++; + if(hData->mChannelType == CT_STEREO) + { + if(azd->mIrs[0] == NULL) + { + TrErrorAt(tr, line, col, "Missing left ear source reference(s).\n"); + goto error; + } + else if(azd->mIrs[1] == NULL) + { + TrErrorAt(tr, line, col, "Missing right ear source reference(s).\n"); + goto error; + } + } } printf("\n"); + for(fi = 0;fi < hData->mFdCount;fi++) + { + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; - ei = 0; - while(ei < hData->mEvCount && setCount[ei] < 1) - ei++; - if(ei < hData->mEvCount) + if(azd->mIrs[0] != NULL) + break; + } + if(ai < hData->mFds[fi].mEvs[ei].mAzCount) + break; + } + if(ei >= hData->mFds[fi].mEvCount) + { + TrError(tr, "Missing source references [ %d, *, * ].\n", fi); + goto error; + } + hData->mFds[fi].mEvStart = ei; + for(;ei < hData->mFds[fi].mEvCount;ei++) + { + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + + if(azd->mIrs[0] == NULL) + { + TrError(tr, "Missing source reference [ %d, %d, %d ].\n", fi, ei, ai); + goto error; + } + } + } + } + for(ti = 0;ti < channels;ti++) { - hData->mEvStart = ei; - while(ei < hData->mEvCount && setCount[ei] == hData->mAzCount[ei]) - ei++; - if(ei >= hData->mEvCount) + for(fi = 0;fi < hData->mFdCount;fi++) { - 
if(!TrLoad(tr)) + for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++) { - DestroyArray(hrir); - free(setFlag); - free(setCount); - return 1; + for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++) + { + HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai]; + + azd->mIrs[ti] = &hrirs[hData->mIrSize * (ti * hData->mIrCount + azd->mIndex)]; + } } - TrError(tr, "Errant data at end of source list.\n"); } - else - TrError(tr, "Missing sources for elevation index %d.\n", ei); } - else - TrError(tr, "Missing source references.\n"); + if(!TrLoad(tr)) + { + free(hrir); + return 1; + } + TrError(tr, "Errant data at end of source list.\n"); error: - DestroyArray(hrir); - free(setFlag); - free(setCount); + free(hrir); return 0; } @@ -2761,7 +3222,7 @@ error: * resulting data set as desired. If the input name is NULL it will read * from standard input. */ -static int ProcessDefinition(const char *inName, const uint outRate, const uint fftSize, const int equalize, const int surface, const double limit, const uint truncSize, const HeadModelT model, const double radius, const int experimental, const char *outName) +static int ProcessDefinition(const char *inName, const uint outRate, const uint fftSize, const int equalize, const int surface, const double limit, const uint truncSize, const HeadModelT model, const double radius, const char *outName) { char rateStr[8+1], expName[MAX_PATH_LEN]; TokenReaderT tr; @@ -2769,16 +3230,7 @@ static int ProcessDefinition(const char *inName, const uint outRate, const uint FILE *fp; int ret; - hData.mIrRate = 0; - hData.mSampleType = ST_S24; - hData.mChannelType = CT_LEFTONLY; - hData.mIrPoints = 0; - hData.mFftSize = 0; - hData.mIrSize = 0; - hData.mIrCount = 0; - hData.mEvCount = 0; - hData.mRadius = 0; - hData.mDistance = 0; + ResetHrirData(&hData); fprintf(stdout, "Reading HRIR definition from %s...\n", inName?inName:"stdin"); if(inName != NULL) { @@ -2801,12 +3253,9 @@ static int ProcessDefinition(const char *inName, const uint outRate, const uint fclose(fp); return 0; } - hData.mHrirs = CreateArray(hData.mIrCount * hData.mIrSize); - hData.mHrtds = CreateArray(hData.mIrCount); if(!ProcessSources(model, &tr, &hData)) { - DestroyArray(hData.mHrtds); - DestroyArray(hData.mHrirs); + FreeHrirData(&hData); if(inName != NULL) fclose(fp); return 0; @@ -2815,12 +3264,15 @@ static int ProcessDefinition(const char *inName, const uint outRate, const uint fclose(fp); if(equalize) { - double *dfa = CreateArray(1 + (hData.mFftSize/2)); + uint c = (hData.mChannelType == CT_STEREO) ? 
2 : 1; + uint m = 1 + hData.mFftSize / 2; + double *dfa = CreateDoubles(c * m); + fprintf(stdout, "Calculating diffuse-field average...\n"); - CalculateDiffuseFieldAverage(&hData, surface, limit, dfa); + CalculateDiffuseFieldAverage(&hData, c, m, surface, limit, dfa); fprintf(stdout, "Performing diffuse-field equalization...\n"); - DiffuseFieldEqualize(dfa, &hData); - DestroyArray(dfa); + DiffuseFieldEqualize(c, m, dfa, &hData); + free(dfa); } fprintf(stdout, "Performing minimum phase reconstruction...\n"); ReconstructHrirs(&hData); @@ -2842,16 +3294,15 @@ static int ProcessDefinition(const char *inName, const uint outRate, const uint snprintf(rateStr, 8, "%u", hData.mIrRate); StrSubst(outName, "%r", rateStr, MAX_PATH_LEN, expName); fprintf(stdout, "Creating MHR data set %s...\n", expName); - ret = StoreMhr(&hData, experimental, expName); + ret = StoreMhr(&hData, expName); - DestroyArray(hData.mHrtds); - DestroyArray(hData.mHrirs); + FreeHrirData(&hData); return ret; } static void PrintHelp(const char *argv0, FILE *ofile) { - fprintf(ofile, "Usage: %s <command> [<option>...]\n\n", argv0); + fprintf(ofile, "Usage: %s [<option>...]\n\n", argv0); fprintf(ofile, "Options:\n"); fprintf(ofile, " -m Ignored for compatibility.\n"); fprintf(ofile, " -r <rate> Change the data set sample rate to the specified value and\n"); @@ -2867,8 +3318,8 @@ static void PrintHelp(const char *argv0, FILE *ofile) fprintf(ofile, " sphere} values (default: %s).\n", ((DEFAULT_HEAD_MODEL == HM_DATASET) ? "dataset" : "sphere")); fprintf(ofile, " -c <size> Use a customized head radius measured ear-to-ear in meters.\n"); fprintf(ofile, " -i <filename> Specify an HRIR definition file to use (defaults to stdin).\n"); - fprintf(ofile, " -o <filename> Specify an output file. Overrides command-selected default.\n"); - fprintf(ofile, " Use of '%%r' will be substituted with the data set sample rate.\n"); + fprintf(ofile, " -o <filename> Specify an output file. Use of '%%r' will be substituted with\n"); + fprintf(ofile, " the data set sample rate.\n"); } // Standard command line dispatch. @@ -2877,7 +3328,6 @@ int main(int argc, char *argv[]) const char *inName = NULL, *outName = NULL; uint outRate, fftSize; int equalize, surface; - int experimental; char *end = NULL; HeadModelT model; uint truncSize; @@ -2903,9 +3353,8 @@ int main(int argc, char *argv[]) truncSize = DEFAULT_TRUNCSIZE; model = DEFAULT_HEAD_MODEL; radius = DEFAULT_CUSTOM_RADIUS; - experimental = 0; - while((opt=getopt(argc, argv, "mr:f:e:s:l:w:d:c:e:i:o:xh")) != -1) + while((opt=getopt(argc, argv, "mr:f:e:s:l:w:d:c:e:i:o:h")) != -1) { switch(opt) { @@ -3007,10 +3456,6 @@ int main(int argc, char *argv[]) outName = optarg; break; - case 'x': - experimental = 1; - break; - case 'h': PrintHelp(argv[0], stdout); exit(EXIT_SUCCESS); @@ -3022,7 +3467,7 @@ int main(int argc, char *argv[]) } if(!ProcessDefinition(inName, outRate, fftSize, equalize, surface, limit, - truncSize, model, radius, experimental, outName)) + truncSize, model, radius, outName)) return -1; fprintf(stdout, "Operation completed.\n"); |
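
Note: with the metric changes above, a definition file can now describe several measurement distances ('fields'). 'distance' takes an ascending, comma-separated list (each value between 0.05 and 2.50, starting at or above the head radius), and 'azimuths' takes one per-elevation count list per field, with fields separated by ';' and the poles restricted to a single azimuth. The metrics block below is only a hypothetical example of input the new ProcessMetrics() should accept; the values are made up:

    rate     = 44100
    type     = stereo
    points   = 1024
    radius   = 0.09
    distance = 0.50, 1.25
    azimuths = 1, 12, 24, 36, 45, 48, 45, 36, 24, 12, 1 ;
               1, 12, 24, 36, 45, 48, 45, 36, 24, 12, 1

The source list follows as before, except that the bracketed index becomes a [ field, elevation, azimuth ] triplet when more than one field is defined, and with 'type = stereo' each source reference names its target ear ('left' or 'right').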
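
Note: for reference, the new "MinPHR02" container written by StoreMhr() starts with a fixed header followed by one descriptor per field. The structs below are only an illustration of that layout under the write order shown in the diff; the names are made up for this note, and the file itself is emitted field by field with WriteBin4() and is packed with no alignment padding:

    #include <stdint.h>

    /* Logical layout of the MinPHR02 header as serialized by StoreMhr().
     * Multi-byte values are little-endian; the on-disk data is packed, so
     * these structs are descriptive only, not meant to be fread() directly. */
    typedef struct Mhr02FieldDesc {
        uint16_t distance;    /* (uint16)(1000.0 * mDistance), millimeters */
        uint8_t  evCount;     /* elevations in this field */
        uint8_t  azCounts[];  /* one azimuth count per elevation */
    } Mhr02FieldDesc;

    typedef struct Mhr02Header {
        char     magic[8];    /* "MinPHR02" (no terminator) */
        uint32_t irRate;      /* sample rate */
        uint8_t  sampleType;  /* 0 = 16-bit, 1 = 24-bit */
        uint8_t  channelType; /* 0 = mono, 1 = stereo */
        uint8_t  irPoints;    /* samples per HRIR */
        uint8_t  fdCount;     /* number of fields (distances) */
        /* fdCount Mhr02FieldDesc records follow, then the dithered HRIR
         * coefficients (16- or 24-bit, interleaved per ear for stereo),
         * then one 8-bit onset delay per ear per HRIR. */
    } Mhr02Header;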
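
Note: TpdfDither() now dithers a whole HRIR at once and takes a scale and an output step, which is how StoreMhr() interleaves the two ears of a stereo HRIR into one buffer. A minimal usage sketch follows; the function name and values are hypothetical, and it assumes it sits alongside the static helpers in makehrtf.c so that TpdfDither(), MAX_TRUNCSIZE, and the uint typedef are in scope:

    /* Hypothetical example: dither both ears of one stereo HRIR into an
     * interleaved buffer, as StoreMhr() does for the ST_S16 case. */
    static void ExampleStereoDither(const double *left, const double *right,
                                    const uint n /* <= MAX_TRUNCSIZE */)
    {
        double out[2 * MAX_TRUNCSIZE]; /* L/R interleaved */
        uint seed = 22222;

        /* step = 2 writes the left ear to the even slots... */
        TpdfDither(out, left, 32767.0, (int)n, 2, &seed);
        /* ...and out+1 with the same step writes the right ear to the odd
         * slots. */
        TpdfDither(out + 1, right, 32767.0, (int)n, 2, &seed);

        /* StoreMhr() then clamps each value to the target range and writes
         * it with WriteBin4(); omitted here. */
        (void)out;
    }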
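
Note: the flat mHrirs/mHrtds arrays and the mEvOffset/mAzCount tables are gone; HRIR data now hangs off nested field, elevation, and azimuth records, and every processing pass walks them with the same nested loops. A minimal sketch of that traversal follows; the helper name is made up, and it assumes the HrirDataT, HrirFdT, HrirEvT, and HrirAzT typedefs added by this patch (plus math.h, already included by makehrtf.c):

    /* Hypothetical helper: find the peak sample magnitude across the whole
     * set by walking ears -> fields -> elevations -> azimuths, the same
     * iteration order used by NormalizeHrirs() and friends above. */
    static double PeakHrirLevel(const HrirDataT *hData)
    {
        uint channels = (hData->mChannelType == CT_STEREO) ? 2 : 1;
        double maxLevel = 0.0;
        uint ti, fi, ei, ai, i;

        for(ti = 0;ti < channels;ti++)
        {
            for(fi = 0;fi < hData->mFdCount;fi++)
            {
                for(ei = 0;ei < hData->mFds[fi].mEvCount;ei++)
                {
                    for(ai = 0;ai < hData->mFds[fi].mEvs[ei].mAzCount;ai++)
                    {
                        const HrirAzT *azd = &hData->mFds[fi].mEvs[ei].mAzs[ai];

                        for(i = 0;i < hData->mIrPoints;i++)
                            maxLevel = fmax(fabs(azd->mIrs[ti][i]), maxLevel);
                    }
                }
            }
        }
        return maxLevel;
    }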