From e28a3b39e1e8caf3f6cf3bfe82efdaae818a6c7b Mon Sep 17 00:00:00 2001
From: Sven Gothel
+     * <p>
+     * Time -> Byte Count
+     * </p>
+     *
+     * <p>
+     * Byte Count -> Time
+     * </p>
+     *
+     * <p>
+     * Sample Count -> Time
+     * </p>
      *  ( 1000f * sampleCount ) / sampleRate
      *
+     * Note: <code>frameDuration</code> can be derived by sample count per frame and channel
+     * via {@link #getSamplesDuration(int)}.
+     *
+     * <p>
+     * Frame Time -> Frame Count
+     * </p>
+     *
+     * @param millisecs time in milliseconds
+     * @param frameDuration duration per frame in milliseconds.
+     */
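
Concrete numbers make these conversions easier to check. A minimal sketch, assuming the nested `AudioSink.AudioFormat` class and constructor introduced by this patch; the class name `FormatMathSketch`, the sample values, and the helper name `getFrameCount` (its name is elided above, so it is an assumption here) are illustrative only:

```java
import com.jogamp.opengl.util.av.AudioSink.AudioFormat;

public class FormatMathSketch {
    public static void main(final String[] args) {
        // 44100 Hz, 16-bit signed fixed-point PCM, stereo, packed, little-endian (the DefaultFormat values)
        final AudioFormat fmt = new AudioFormat(44100, 16, 2,
                true /* signed */, true /* fixed point */, false /* planar */, true /* littleEndian */);
        // Sample Count -> Time: ( 1000f * 1024 ) / 44100 = ~23.2 ms per frame and channel
        final float frameDuration = fmt.getSamplesDuration(1024);
        // Frame Time -> Frame Count: frames needed to cover 300 ms of queueing, rounds to ~13
        final int frameCount = fmt.getFrameCount(300, frameDuration);
        System.out.println("frameDuration "+frameDuration+" ms -> frameCount "+frameCount);
    }
}
```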
@@ -130,21 +162,44 @@ public interface AudioSink {
      *  sampleCount * ( sampleSize / 8 )
      * </pre>
      * <p>
-     * Note: To retrieve the byte size for all channels, you need to pre-multiply <code>sampleCount</code>
-     * with {@link #channelCount}.
+     * Note: To retrieve the byte size for all channels,
+     * you need to pre-multiply <code>sampleCount</code>
+     * with {@link #channelCount}.
      * </p>
+     * <p>
+     * Sample Count -> Byte Count
+     * </p>
+     *
      * @param sampleCount sample count
      */
-    public final int getSamplesByteSize(int sampleCount) {
+    public final int getSamplesByteCount(int sampleCount) {
         return sampleCount * ( sampleSize >>> 3 );
     }
+
+    /**
+     * Returns the sample count of given byte count
+     * according to the {@link #sampleSize}, i.e.:
+     * <pre>
+     *  ( byteCount * 8 ) / sampleSize
+     * </pre>
+     * <p>
+     * Note: If <code>byteCount</code> covers all channels and you request the sample count per channel,
+     * you need to divide the result by {@link #channelCount}.
+     * </p>
+     * <p>
+     * Byte Count -> Sample Count
+     * </p>
+     *
+     * @param byteCount byte count
+     */
+    public final int getBytesSampleCount(int byteCount) {
+        return ( byteCount << 3 ) / sampleSize;
+    }
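
A short round-trip of the two helpers above with concrete numbers, honoring the per-channel notes. This is a minimal sketch; the class name and values are illustrative, while the `AudioFormat` API is the one introduced by this patch:

```java
import com.jogamp.opengl.util.av.AudioSink.AudioFormat;

public class SampleByteMath {
    public static void main(final String[] args) {
        // 16-bit stereo, as in DefaultFormat
        final AudioFormat fmt = new AudioFormat(44100, 16, 2,
                true /* signed */, true /* fixed point */, false /* planar */, true /* littleEndian */);
        // Sample Count -> Byte Count: 1024 * ( 16 / 8 ) = 2048 bytes, one channel only
        final int bytesOneChannel  = fmt.getSamplesByteCount(1024);
        // pre-multiply with channelCount to cover all channels: 4096 bytes
        final int bytesAllChannels = fmt.getSamplesByteCount(1024 * fmt.channelCount);
        // Byte Count -> Sample Count: ( 4096 * 8 ) / 16 = 2048 samples across both channels;
        // divide by channelCount for the per-channel count: 1024
        final int samplesPerChannel = fmt.getBytesSampleCount(bytesAllChannels) / fmt.channelCount;
        System.out.println(bytesOneChannel+" / "+bytesAllChannels+" / "+samplesPerChannel);
    }
}
```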
+
         public String toString() {
-            return "AudioDataFormat[type "+dataType+", sampleRate "+sampleRate+", sampleSize "+sampleSize+", channelCount "+channelCount+
-                   ", signed "+signed+", fixedP "+fixedP+", "+(littleEndian?"little":"big")+"endian]"; }
+            return "AudioDataFormat[sampleRate "+sampleRate+", sampleSize "+sampleSize+", channelCount "+channelCount+
+                   ", signed "+signed+", fixedP "+fixedP+", "+(planar?"planar":"packed")+", "+(littleEndian?"little":"big")+"-endian]"; }
     }

-    /** Default {@link AudioDataFormat}, [type PCM, sampleRate 44100, sampleSize 16, channelCount 2, signed, fixedP, littleEndian]. */
-    public static final AudioDataFormat DefaultFormat = new AudioDataFormat(AudioDataType.PCM, 44100, 16, 2, true /* signed */, true /* fixed point */, true /* littleEndian */);
+    /** Default {@link AudioFormat}, [type PCM, sampleRate 44100, sampleSize 16, channelCount 2, signed, fixedP, !planar, littleEndian]. */
+    public static final AudioFormat DefaultFormat = new AudioFormat(44100, 16, 2, true /* signed */,
+                                                                    true /* fixed point */, false /* planar */, true /* littleEndian */);

     public static abstract class AudioFrame extends TimeFrameI {
         protected int byteSize;
@@ -227,38 +282,54 @@ public interface AudioSink {
     public boolean setVolume(float v);

     /**
-     * Returns the preferred {@link AudioDataFormat} by this sink.
+     * Returns the preferred {@link AudioFormat} by this sink.
      * <p>
-     * The preferred format shall reflect this sinks most native format,
+     * The preferred format is guaranteed to be supported
+     * and shall reflect this sinks most native format,
      * i.e. best performance w/o data conversion.
      * </p>
-     * @see #initSink(AudioDataFormat)
+     * <p>
+     * Known {@link #AudioFormat} attributes considered by implementations:
+     * </p>
-     * Implementation shall try to match the given <code>requestedFormat</code> {@link AudioDataFormat}
-     * as close as possible, regarding it's capabilities.
+     * Implementation must match the given <code>requestedFormat</code> {@link AudioFormat}.
      *
-     * A user may consider {@link #getPreferredFormat()} and pass this value
-     * to utilize best performance and behavior.
-     *
-     * The {@link #DefaultFormat} should be supported by all implementations.
+     * Caller shall validate <code>requestedFormat</code> via {@link #isSupported(AudioFormat)}
+     * beforehand and try to find a suitable supported one.
+     * {@link #getPreferredFormat()} and {@link #getMaxSupportedChannels()} may help.
*
- * @param requestedFormat the requested {@link AudioDataFormat}.
+ * @param requestedFormat the requested {@link AudioFormat}.
* @param frameDuration average or fixed frame duration in milliseconds
* helping a caching {@link AudioFrame} based implementation to determine the frame count in the queue.
* See {@link #DefaultFrameDuration}.
* @param initialQueueSize initial time in milliseconds to queue in this sink, see {@link #DefaultInitialQueueSize}.
* @param queueGrowAmount time in milliseconds to grow queue if full, see {@link #DefaultQueueGrowAmount}.
* @param queueLimit maximum time in milliseconds the queue can hold (and grow), see {@link #DefaultQueueLimitWithVideo} and {@link #DefaultQueueLimitAudioOnly}.
-     * @return if successful the chosen AudioDataFormat based on the <code>requestedFormat</code>
-     *         and this sinks capabilities, otherwise <code>null</code>.
+     * @return <code>true</code> if successful, otherwise <code>false</code>
*/
- public AudioDataFormat init(AudioDataFormat requestedFormat, float frameDuration,
- int initialQueueSize, int queueGrowAmount, int queueLimit);
+ public boolean init(AudioFormat requestedFormat, float frameDuration,
+ int initialQueueSize, int queueGrowAmount, int queueLimit);
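
Taken together, the new contract replaces best-effort format negotiation with an explicit validate-then-init flow. A hypothetical caller-side sketch; the helper name `initSink` and the fallback policy are assumptions, while the `Default*` constants are the ones referenced in the javadoc above:

```java
import com.jogamp.opengl.util.av.AudioSink;
import com.jogamp.opengl.util.av.AudioSink.AudioFormat;

public final class SinkSetup {
    /** Validate-then-init per the new contract; returns false if the sink cannot be used. */
    public static boolean initSink(final AudioSink sink, AudioFormat requested) {
        if( !sink.isSupported(requested) ) {
            // Fall back to the sink's native format, which is guaranteed to be supported.
            requested = sink.getPreferredFormat();
            if( null == requested ) {
                return false; // sink not available at all
            }
        }
        return sink.init(requested, AudioSink.DefaultFrameDuration,
                         AudioSink.DefaultInitialQueueSize, AudioSink.DefaultQueueGrowAmount,
                         AudioSink.DefaultQueueLimitAudioOnly);
    }
}
```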
/**
* Returns true, if {@link #play()} has been requested and the sink is still playing,
@@ -285,7 +356,7 @@ public interface AudioSink {
/**
* Flush all queued buffers, implies {@link #pause()}.
     * <p>
-     * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     * @see #play()
     * @see #pause()
@@ -298,17 +369,17 @@ public interface AudioSink {

    /**
     * Returns the number of allocated buffers as requested by
-    * {@link #init(AudioDataFormat, float, int, int, int)}.
+    * {@link #init(AudioFormat, float, int, int, int)}.
     */
    public int getFrameCount();

-   /** @return the current enqueued frames count since {@link #init(AudioDataFormat, float, int, int, int)}. */
+   /** @return the current enqueued frames count since {@link #init(AudioFormat, float, int, int, int)}. */
    public int getEnqueuedFrameCount();

    /**
     * Returns the current number of frames queued for playing.
     * <p>
-    * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+    * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getQueuedFrameCount();

@@ -316,7 +387,7 @@ public interface AudioSink {
    /**
     * Returns the current number of bytes queued for playing.
     * <p>
-    * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+    * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getQueuedByteCount();

@@ -324,7 +395,7 @@ public interface AudioSink {
    /**
     * Returns the current queued frame time in milliseconds for playing.
     * <p>
-    * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+    * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getQueuedTime();

@@ -337,7 +408,7 @@ public interface AudioSink {
    /**
     * Returns the current number of frames in the sink available for writing.
     * <p>
-    * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+    * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getFreeFrameCount();

@@ -345,10 +416,10 @@ public interface AudioSink {
    /**
     * Enqueue the remaining bytes of the given {@link AudioDataFrame}'s direct ByteBuffer to this sink.
     * <p>
-    * The data must comply with the chosen {@link AudioDataFormat} as returned by {@link #initSink(AudioDataFormat)}.
+    * The data must comply with the chosen {@link AudioFormat} as returned by {@link #initSink(AudioFormat)}.
     * </p>
     * <p>
-    * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+    * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     * @returns the enqueued internal {@link AudioFrame}, which may differ from the input <code>audioDataFrame</code>.
* @deprecated User shall use {@link #enqueueData(int, ByteBuffer, int)}, which allows implementation
@@ -359,10 +430,10 @@ public interface AudioSink {
/**
     * Enqueue <code>byteCount</code> bytes of the remaining bytes of the given NIO {@link ByteBuffer} to this sink.
     * <p>
-    * The data must comply with the chosen {@link AudioDataFormat} as returned by {@link #initSink(AudioDataFormat)}.
+    * The data must comply with the chosen {@link AudioFormat} as returned by {@link #initSink(AudioFormat)}.
     * </p>
     * <p>
-    * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+    * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     * @returns the enqueued internal {@link AudioFrame}.
     */
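
Before the ALAudioSink changes below, a minimal feeding loop for the non-deprecated `enqueueData(int, ByteBuffer, int)` variant may help. The decoder source and pts bookkeeping are assumed, and the query name `isPlaying()` is inferred from the javadoc fragment earlier in this interface, so treat it as an assumption:

```java
import java.nio.ByteBuffer;
import com.jogamp.opengl.util.av.AudioSink;

public final class SinkFeeder {
    /** Enqueue one decoded PCM chunk; data must match the AudioFormat passed to init(..). */
    public static void feed(final AudioSink sink, final int pts, final ByteBuffer pcm) {
        sink.enqueueData(pts, pcm, pcm.remaining()); // queue may grow/limit per init(..) parameters
        if( !sink.isPlaying() ) {
            sink.play(); // start playback once data is queued
        }
    }
}
```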
diff --git a/src/jogl/classes/jogamp/opengl/openal/av/ALAudioSink.java b/src/jogl/classes/jogamp/opengl/openal/av/ALAudioSink.java
index d3964a2cf..3daedd52e 100644
--- a/src/jogl/classes/jogamp/opengl/openal/av/ALAudioSink.java
+++ b/src/jogl/classes/jogamp/opengl/openal/av/ALAudioSink.java
@@ -39,7 +39,9 @@ import com.jogamp.openal.AL;
 import com.jogamp.openal.ALC;
 import com.jogamp.openal.ALCcontext;
 import com.jogamp.openal.ALCdevice;
+import com.jogamp.openal.ALExt;
 import com.jogamp.openal.ALFactory;
+import com.jogamp.openal.util.ALHelpers;
 import com.jogamp.opengl.util.av.AudioSink;

 /***
@@ -47,12 +49,16 @@ import com.jogamp.opengl.util.av.AudioSink;
  */
 public class ALAudioSink implements AudioSink {

+    private static final String AL_SOFT_buffer_samples = "AL_SOFT_buffer_samples";
     private static final ALC alc;
     private static final AL al;
+    private static final ALExt alExt;
     private static final boolean staticAvailable;

     private String deviceSpecifier;
     private ALCdevice device;
+    private boolean hasSOFTBufferSamples;
+    private AudioFormat preferredAudioFormat;
     private ALCcontext context;
     private final RecursiveLock lock = LockFactory.createRecursiveLock();
@@ -91,8 +97,10 @@ public class ALAudioSink implements AudioSink {
     private volatile int enqueuedFrameCount;

     private int[] alSource = null;
-    private AudioDataFormat chosenFormat;
-    private int alFormat;
+    private AudioFormat chosenFormat;
+    private int alChannelLayout;
+    private int alSampleType;
+    private int alFormat;
     private boolean initialized;

     private volatile boolean playRequested = false;
@@ -100,9 +108,11 @@ public class ALAudioSink implements AudioSink {
     static {
         ALC _alc = null;
         AL _al = null;
+        ALExt _alExt = null;
         try {
             _alc = ALFactory.getALC();
             _al = ALFactory.getAL();
+            _alExt = ALFactory.getALExt();
         } catch(Throwable t) {
             if( DEBUG ) {
                 System.err.println("ALAudioSink: Catched "+t.getClass().getName()+": "+t.getMessage());
@@ -111,7 +121,8 @@ public class ALAudioSink implements AudioSink {
         }
         alc = _alc;
         al = _al;
-        staticAvailable = null != alc && null != al;
+        alExt = _alExt;
+        staticAvailable = null != alc && null != al && null != alExt;
     }

     public ALAudioSink() {
@@ -138,7 +149,7 @@ public class ALAudioSink implements AudioSink {
             // Create audio context.
             context = alc.alcCreateContext(device, null);
             if (context == null) {
-                throw new RuntimeException("ALAudioSink: Error creating OpenAL context");
+                throw new RuntimeException("ALAudioSink: Error creating OpenAL context for "+deviceSpecifier);
             }

             lockContext();
@@ -148,6 +159,16 @@ public class ALAudioSink implements AudioSink {
                 throw new RuntimeException("ALAudioSink: Error making OpenAL context current");
             }

+            hasSOFTBufferSamples = al.alIsExtensionPresent(AL_SOFT_buffer_samples);
+            preferredAudioFormat = queryPreferredAudioFormat();
+            if( DEBUG | true ) {
+                System.out.println("ALAudioSink: OpenAL Extensions:"+al.alGetString(AL.AL_EXTENSIONS));
+                System.out.println("ALAudioSink: Null device OpenAL Extensions:"+alc.alcGetString(null, ALC.ALC_EXTENSIONS));
+                System.out.println("ALAudioSink: Device "+deviceSpecifier+" OpenAL Extensions:"+alc.alcGetString(device, ALC.ALC_EXTENSIONS));
+                System.out.println("ALAudioSink: hasSOFTBufferSamples "+hasSOFTBufferSamples);
+                System.out.println("ALAudioSink: preferredAudioFormat "+preferredAudioFormat);
+            }
+
             // Create source
             {
                 alSource = new int[1];
@@ -175,6 +196,16 @@ public class ALAudioSink implements AudioSink {
         }
     }

+    private final AudioFormat queryPreferredAudioFormat() {
+        int sampleRate = DefaultFormat.sampleRate;
+        final int[] value = new int[1];
+        alc.alcGetIntegerv(device, ALC.ALC_FREQUENCY, 1, value, 0);
+        if ( alc.alcGetError(device) == ALC.ALC_NO_ERROR ) {
+            sampleRate = value[0];
+        }
+        return new AudioFormat(sampleRate, DefaultFormat.sampleSize, DefaultFormat.channelCount, DefaultFormat.signed, DefaultFormat.fixedP, DefaultFormat.planar, DefaultFormat.littleEndian);
+    }
+
     private final void lockContext() {
         lock.lock();
         alc.alcMakeContextCurrent(context);
@@ -212,53 +243,78 @@ public class ALAudioSink implements AudioSink {
         final int alBuffersLen = null != alBufferNames ? alBufferNames.length : 0;
         final int ctxHash = context != null ? context.hashCode() : 0;
         return "ALAudioSink[init "+initialized+", playRequested "+playRequested+", device "+deviceSpecifier+", ctx "+toHexString(ctxHash)+", alSource "+alSrcName+
-               ", chosen "+chosenFormat+", alFormat "+toHexString(alFormat)+
-               ", playSpeed "+playSpeed+", buffers[total "+alBuffersLen+", avail "+alFramesAvail.size()+", "+
+               ", chosen "+chosenFormat+
+               ", al[chan "+ALHelpers.alChannelLayoutName(alChannelLayout)+", type "+ALHelpers.alSampleTypeName(alSampleType)+
+               ", fmt "+toHexString(alFormat)+", soft "+hasSOFTBufferSamples+
+               "], playSpeed "+playSpeed+", buffers[total "+alBuffersLen+", avail "+alFramesAvail.size()+", "+
                "queued["+alFramesPlaying.size()+", apts "+getPTS()+", "+getQueuedTime() + " ms, " + alBufferBytesQueued+" bytes], "+
                "queue[g "+frameGrowAmount+", l "+frameLimit+"]";
     }
+
     public final String getPerfString() {
         final int alBuffersLen = null != alBufferNames ? alBufferNames.length : 0;
         return "Play [buffer "+alFramesPlaying.size()+"/"+alBuffersLen+", apts "+getPTS()+", "+getQueuedTime() + " ms, " + alBufferBytesQueued+" bytes]";
     }

     @Override
-    public final AudioDataFormat getPreferredFormat() {
-        return DefaultFormat;
+    public final AudioFormat getPreferredFormat() {
+        if( !staticAvailable ) {
+            return null;
+        }
+        return preferredAudioFormat;
+    }
+
+    @Override
+    public final int getMaxSupportedChannels() {
+        if( !staticAvailable ) {
+            return 0;
+        }
+        return hasSOFTBufferSamples ? 8 : 2;
     }

     @Override
-    public final AudioDataFormat init(AudioDataFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
+    public final boolean isSupported(AudioFormat format) {
         if( !staticAvailable ) {
-            return null;
+            return false;
         }
-        if( !requestedFormat.fixedP ||
-            !requestedFormat.littleEndian ||
-            ( 1 != requestedFormat.channelCount && requestedFormat.channelCount != 2 ) ||
-            ( 8 != requestedFormat.sampleSize && requestedFormat.sampleSize != 16 )
-          ) {
-            return null; // not supported w/ OpenAL
-        }
-        // final float samplePeriod = 1.0f / requestedFormat.sampleRate;
-        switch( requestedFormat.channelCount ) {
-            case 1: {
-                switch ( requestedFormat.sampleSize ) {
-                    case 8:
-                        alFormat = AL.AL_FORMAT_MONO8; break;
-                    case 16:
-                        alFormat = AL.AL_FORMAT_MONO16; break;
-                }
-            } break;
-            case 2:
-                switch ( requestedFormat.sampleSize ) {
-                    case 8:
-                        alFormat = AL.AL_FORMAT_STEREO8; break;
-                    case 16:
-                        alFormat = AL.AL_FORMAT_STEREO16; break;
+        if( format.planar || !format.littleEndian ) {
+            // FIXME big-endian supported w/ SOFT where it's native format!
+            return false;
+        }
+        final int alChannelLayout = ALHelpers.getDefaultALChannelLayout(format.channelCount);
+        if( AL.AL_NONE != alChannelLayout ) {
+            final int alSampleType = ALHelpers.getALSampleType(format.sampleSize, format.signed, format.fixedP);
+            if( AL.AL_NONE != alSampleType ) {
+                lockContext();
+                try {
+                    final int alFormat = ALHelpers.getALFormat(alChannelLayout, alSampleType, hasSOFTBufferSamples, al, alExt);
+                    return AL.AL_NONE != alFormat;
+                } finally {
+                    unlockContext();
                 }
+            }
         }
+        return false;
+    }
+
+    @Override
+    public final boolean init(AudioFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
+        if( !staticAvailable ) {
+            return false;
+        }
+        alChannelLayout = ALHelpers.getDefaultALChannelLayout(requestedFormat.channelCount);
+        alSampleType = ALHelpers.getALSampleType(requestedFormat.sampleSize, requestedFormat.signed, requestedFormat.fixedP);
         lockContext();
         try {
+            if( AL.AL_NONE != alChannelLayout && AL.AL_NONE != alSampleType ) {
+                alFormat = ALHelpers.getALFormat(alChannelLayout, alSampleType, hasSOFTBufferSamples, al, alExt);
+            } else {
+                alFormat = AL.AL_NONE;
+            }
+            if( AL.AL_NONE == alFormat ) {
+                // not supported
+                return false;
+            }
             // Allocate buffers
             destroyBuffers();
             {
@@ -290,7 +346,7 @@ public class ALAudioSink implements AudioSink {
         }

         chosenFormat = requestedFormat;
-        return chosenFormat;
+        return true;
     }

     private static int[] concat(int[] first, int[] second) {
@@ -548,7 +604,15 @@ public class ALAudioSink implements AudioSink {
             if( !alFramesPlaying.put( alFrame ) ) {
                 throw new InternalError("Internal Error: "+this);
             }
-            al.alBufferData(alFrame.alBuffer, alFormat, bytes, byteCount, chosenFormat.sampleRate);
+            if( hasSOFTBufferSamples ) {
+                final int samplesPerChannel = chosenFormat.getBytesSampleCount(byteCount) / chosenFormat.channelCount;
+                // final int samplesPerChannel = ALHelpers.bytesToSampleCount(byteCount, alChannelLayout, alSampleType);
+                alExt.alBufferSamplesSOFT(alFrame.alBuffer, chosenFormat.sampleRate, alFormat,
+                                          samplesPerChannel, alChannelLayout, alSampleType, bytes);
+            } else {
+                al.alBufferData(alFrame.alBuffer, alFormat, bytes, byteCount, chosenFormat.sampleRate);
+            }
+
             final int[] alBufferNames = new int[] { alFrame.alBuffer };
             al.alSourceQueueBuffers(alSource[0], 1, alBufferNames, 0);
             alErr = al.alGetError();
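
The SOFT path above hands OpenAL a per-channel sample count instead of a byte count. Spelled out with concrete numbers, mirroring `chosenFormat.getBytesSampleCount(byteCount) / chosenFormat.channelCount`; the values and class name are illustrative only:

```java
public class SoftSampleMath {
    public static void main(final String[] args) {
        final int byteCount    = 4096; // bytes handed to one AL buffer
        final int sampleSize   = 16;   // bits per sample
        final int channelCount = 2;    // stereo
        // Byte Count -> Sample Count, then per channel:
        final int samplesAll        = ( byteCount << 3 ) / sampleSize; // (4096*8)/16 = 2048
        final int samplesPerChannel = samplesAll / channelCount;       // 1024
        System.out.println("samplesPerChannel "+samplesPerChannel);
    }
}
```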
diff --git a/src/jogl/classes/jogamp/opengl/util/av/JavaSoundAudioSink.java b/src/jogl/classes/jogamp/opengl/util/av/JavaSoundAudioSink.java
index b1b9c1b6c..85fab96a4 100644
--- a/src/jogl/classes/jogamp/opengl/util/av/JavaSoundAudioSink.java
+++ b/src/jogl/classes/jogamp/opengl/util/av/JavaSoundAudioSink.java
@@ -33,7 +33,7 @@ public class JavaSoundAudioSink implements AudioSink {
     private int bufferCount;
     private byte [] sampleData = new byte[BUFFER_SIZE];
     private boolean initialized = false;
-    private AudioDataFormat chosenFormat = null;
+    private AudioSink.AudioFormat chosenFormat = null;

     private volatile boolean playRequested = false;
     private float volume = 1.0f;
@@ -77,14 +77,24 @@ public class JavaSoundAudioSink implements AudioSink {
     }

     @Override
-    public AudioDataFormat getPreferredFormat() {
+    public AudioSink.AudioFormat getPreferredFormat() {
         return DefaultFormat;
     }

     @Override
-    public AudioDataFormat init(AudioDataFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
+    public final int getMaxSupportedChannels() {
+        return 2;
+    }
+
+    @Override
+    public final boolean isSupported(AudioSink.AudioFormat format) {
+        return true;
+    }
+
+    @Override
+    public boolean init(AudioSink.AudioFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
         if( !staticAvailable ) {
-            return null;
+            return false;
         }
         // Create the audio format we wish to use
         format = new javax.sound.sampled.AudioFormat(requestedFormat.sampleRate, requestedFormat.sampleSize, requestedFormat.channelCount, requestedFormat.signed, !requestedFormat.littleEndian);
@@ -105,7 +115,7 @@ public class JavaSoundAudioSink implements AudioSink {
         } catch (Exception e) {
             initialized=false;
         }
-        return chosenFormat;
+        return true;
     }

     @Override
diff --git a/src/jogl/classes/jogamp/opengl/util/av/NullAudioSink.java b/src/jogl/classes/jogamp/opengl/util/av/NullAudioSink.java
index 83799b074..723bb9dd1 100644
--- a/src/jogl/classes/jogamp/opengl/util/av/NullAudioSink.java
+++ b/src/jogl/classes/jogamp/opengl/util/av/NullAudioSink.java
@@ -42,13 +42,23 @@ public class NullAudioSink implements AudioSink {
     }

     @Override
-    public AudioDataFormat getPreferredFormat() {
+    public AudioFormat getPreferredFormat() {
         return DefaultFormat;
     }

     @Override
-    public AudioDataFormat init(AudioDataFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
-        return requestedFormat;
+    public final int getMaxSupportedChannels() {
+        return 8;
+    }
+
+    @Override
+    public final boolean isSupported(AudioFormat format) {
+        return true;
+    }
+
+    @Override
+    public boolean init(AudioFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
+        return true;
     }

     @Override
--
cgit v1.2.3