author    Sven Gothel <[email protected]>  2013-08-26 09:59:47 +0200
committer Sven Gothel <[email protected]>  2013-08-26 09:59:47 +0200
commit    e28a3b39e1e8caf3f6cf3bfe82efdaae818a6c7b (patch)
tree      92166bb7d9df0829ed45db383181b6f999c95d6d /src
parent    871c7cac1939e6c7fbcd33aa031b7861f63da6ae (diff)
AudioSink: Fix type names; Enhance AudioFormat negotiation; ALAudioSink adds AL_SOFT_buffer_samples support w/ full AL caps
- Fix type names:
  - Remove AudioDataType, we only support PCM here anyways
  - AudioDataFormat -> AudioFormat / Add 'planar' attribute to distinguish packed/planar data types
  - Validate float types

- Enhance AudioFormat negotiation:
  - Add 'isSupported(AudioFormat format)', which _shall_ be used before 'init(..)' to test/negotiate the format
  - Add getMaxSupportedChannels(), which may be used w/ getPreferredFormat() if the originally requested format fails 'isSupported(..)'
  - 'init(..)' returns boolean only

- ALAudioSink adds AL_SOFT_buffer_samples support w/ full AL caps:
  - Determine whether AL_SOFT_buffer_samples is supported
  - Use the new JOAL ALHelpers to convert AudioFormat -> AL types, which also answers the 'isSupported(..)' query
  - Now allows multiple channel layouts, sample types, etc.
Diffstat (limited to 'src')
-rw-r--r--  src/jogl/classes/com/jogamp/opengl/util/av/AudioSink.java       | 169
-rw-r--r--  src/jogl/classes/jogamp/opengl/openal/av/ALAudioSink.java       | 134
-rw-r--r--  src/jogl/classes/jogamp/opengl/util/av/JavaSoundAudioSink.java  |  20
-rw-r--r--  src/jogl/classes/jogamp/opengl/util/av/NullAudioSink.java       |  16
4 files changed, 247 insertions, 92 deletions
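
The negotiation flow this commit introduces is meant to be driven roughly as in the following minimal sketch: probe isSupported(..) before init(..), clamp the channel count via getMaxSupportedChannels(), and fall back to getPreferredFormat(). AudioSink, AudioFormat and the Default* queue constants are taken from the diff below; the negotiate() driver itself is hypothetical.

    import com.jogamp.opengl.util.av.AudioSink;
    import com.jogamp.opengl.util.av.AudioSink.AudioFormat;

    public class NegotiationSketch {
        /** Negotiate a sink format: try the decoded layout first, then degrade. */
        static boolean negotiate(final AudioSink sink, final AudioFormat decoded) {
            AudioFormat fmt = decoded;
            if( !sink.isSupported(fmt) ) {
                // Clamp channels to the sink's capability, keep everything else.
                final int ch = Math.min(decoded.channelCount, sink.getMaxSupportedChannels());
                fmt = new AudioFormat(decoded.sampleRate, decoded.sampleSize, ch,
                                      decoded.signed, decoded.fixedP, decoded.planar,
                                      decoded.littleEndian);
            }
            if( !sink.isSupported(fmt) ) {
                // Last resort: the preferred format is guaranteed to be supported.
                fmt = sink.getPreferredFormat();
            }
            // init(..) now returns a plain boolean instead of the chosen format.
            return sink.init(fmt, AudioSink.DefaultFrameDuration,
                             AudioSink.DefaultInitialQueueSize,
                             AudioSink.DefaultQueueGrowAmount,
                             AudioSink.DefaultQueueLimitWithVideo);
        }
    }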
diff --git a/src/jogl/classes/com/jogamp/opengl/util/av/AudioSink.java b/src/jogl/classes/com/jogamp/opengl/util/av/AudioSink.java
index 7f477a57d..8751fc816 100644
--- a/src/jogl/classes/com/jogamp/opengl/util/av/AudioSink.java
+++ b/src/jogl/classes/com/jogamp/opengl/util/av/AudioSink.java
@@ -39,33 +39,46 @@ public interface AudioSink {
/** Default frame duration in millisecond, i.e. 1 frame per {@value} ms. */
public static final int DefaultFrameDuration = 32;
- /** Initial audio queue size in milliseconds. {@value} ms, i.e. 16 frames per 32 ms. See {@link #init(AudioDataFormat, float, int, int, int)}.*/
+ /** Initial audio queue size in milliseconds. {@value} ms, i.e. 16 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}.*/
public static final int DefaultInitialQueueSize = 16 * 32; // 512 ms
- /** Audio queue grow size in milliseconds. {@value} ms, i.e. 16 frames per 32 ms. See {@link #init(AudioDataFormat, float, int, int, int)}.*/
+ /** Audio queue grow size in milliseconds. {@value} ms, i.e. 16 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}.*/
public static final int DefaultQueueGrowAmount = 16 * 32; // 512 ms
- /** Audio queue limit w/ video in milliseconds. {@value} ms, i.e. 96 frames per 32 ms. See {@link #init(AudioDataFormat, float, int, int, int)}.*/
+ /** Audio queue limit w/ video in milliseconds. {@value} ms, i.e. 96 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}.*/
public static final int DefaultQueueLimitWithVideo = 96 * 32; // 3072 ms
- /** Audio queue limit w/o video in milliseconds. {@value} ms, i.e. 32 frames per 32 ms. See {@link #init(AudioDataFormat, float, int, int, int)}.*/
+ /** Audio queue limit w/o video in milliseconds. {@value} ms, i.e. 32 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}.*/
public static final int DefaultQueueLimitAudioOnly = 32 * 32; // 1024 ms
- /** Specifies the audio data type. Currently only PCM is supported. */
- public static enum AudioDataType { PCM };
-
/**
- * Specifies the audio data format.
+ * Specifies the linear audio PCM format.
*/
- public static class AudioDataFormat {
- public AudioDataFormat(AudioDataType dataType, int sampleRate, int sampleSize, int channelCount, boolean signed, boolean fixedP, boolean littleEndian) {
- this.dataType = dataType;
+ public static class AudioFormat {
+ /**
+ * @param sampleRate sample rate in Hz (1/s)
+ * @param sampleSize sample size in bits
+ * @param channelCount number of channels
+ * @param signed true if signed number, false for unsigned
+ * @param fixedP true for fixed point value, false for signed floating point value with a sampleSize of 32 (float) or 64 (double)
+ * @param planar true for planar data layout (each channel in its own data buffer), false for packed data with channels interleaved in one buffer.
+ * @param littleEndian true for little-endian, false for big endian
+ */
+ public AudioFormat(int sampleRate, int sampleSize, int channelCount, boolean signed, boolean fixedP, boolean planar, boolean littleEndian) {
this.sampleRate = sampleRate;
this.sampleSize = sampleSize;
this.channelCount = channelCount;
this.signed = signed;
this.fixedP = fixedP;
+ this.planar = planar;
this.littleEndian = littleEndian;
+ if( !fixedP ) {
+ if( sampleSize != 32 && sampleSize != 64 ) {
+ throw new IllegalArgumentException("Floating point: sampleSize "+sampleSize+" bits");
+ }
+ if( !signed ) {
+ throw new IllegalArgumentException("Floating point: unsigned");
+ }
+ }
}
- /** Audio data type. */
- public final AudioDataType dataType;
+
/** Sample rate in Hz (1/s). */
public final int sampleRate;
/** Sample size in bits. */
@@ -73,15 +86,25 @@ public interface AudioSink {
/** Number of channels. */
public final int channelCount;
public final boolean signed;
- /** Fixed or floating point values. Floating point 'float' has {@link #sampleSize} 32, 'double' has {@link #sampleSize} 64, */
+ /** Fixed or floating point values. Floating point 'float' has {@link #sampleSize} 32, 'double' has {@link #sampleSize} 64. */
public final boolean fixedP;
+ /** Planar or packed samples. If planar, each channel has its own data buffer. If packed, channel data is interleaved in one buffer. */
+ public final boolean planar;
public final boolean littleEndian;
+
+ //
+ // Time <-> Bytes
+ //
+
/**
* Returns the byte size of the given milliseconds
- * according to {@link #sampleSize}, {@link #channelCount} and {@link #sampleRate}.
+ * according to {@link #sampleSize}, {@link #channelCount} and {@link #sampleRate}.
+ * <p>
+ * Time -> Byte Count
+ * </p>
*/
- public final int getByteSize(int millisecs) {
+ public final int getDurationsByteSize(int millisecs) {
final int bytesPerSample = sampleSize >>> 3; // /8
return millisecs * ( channelCount * bytesPerSample * ( sampleRate / 1000 ) );
}
@@ -89,6 +112,9 @@ public interface AudioSink {
/**
* Returns the duration in milliseconds of the given byte count
* according to {@link #sampleSize}, {@link #channelCount} and {@link #sampleRate}.
+ * <p>
+ * Byte Count -> Time
+ * </p>
*/
public final int getBytesDuration(int byteCount) {
final int bytesPerSample = sampleSize >>> 3; // /8
@@ -96,11 +122,14 @@ public interface AudioSink {
}
/**
- * Returns the duration in milliseconds of the given and sample count per frame and channel
+ * Returns the duration in milliseconds of the given sample count per frame and channel
* according to the {@link #sampleRate}, i.e.
* <pre>
* ( 1000f * sampleCount ) / sampleRate
* </pre>
+ * <p>
+ * Sample Count -> Time
+ * </p>
* @param sampleCount sample count per frame and channel
*/
public final float getSamplesDuration(int sampleCount) {
@@ -116,6 +145,9 @@ public interface AudioSink {
* Note: <code>frameDuration</code> can be derived by <i>sample count per frame and channel</i>
* via {@link #getSamplesDuration(int)}.
* </p>
+ * <p>
+ * Frame Time -> Frame Count
+ * </p>
* @param millisecs time in milliseconds
* @param frameDuration duration per frame in milliseconds.
*/
@@ -130,21 +162,44 @@ public interface AudioSink {
* sampleCount * ( sampleSize / 8 )
* </pre>
* <p>
- * Note: To retrieve the byte size for all channels, you need to pre-multiply <code>sampleCount</code>
- * with {@link #channelCount}.
+ * Note: To retrieve the byte size for all channels,
+ * you need to pre-multiply <code>sampleCount</code> with {@link #channelCount}.
* </p>
+ * <p>
+ * Sample Count -> Byte Count
+ * </p>
* @param sampleCount sample count
*/
- public final int getSamplesByteSize(int sampleCount) {
+ public final int getSamplesByteCount(int sampleCount) {
return sampleCount * ( sampleSize >>> 3 );
}
+ /**
+ * Returns the sample count of the given byte count
+ * according to the {@link #sampleSize}, i.e.:
+ * <pre>
+ * ( byteCount * 8 ) / sampleSize
+ * </pre>
+ * <p>
+ * Note: If <code>byteCount</code> covers all channels and you request the sample count per channel,
+ * you need to divide the resulting <code>sampleCount</code> by {@link #channelCount}.
+ * </p>
+ * <p>
+ * Byte Count -> Sample Count
+ * </p>
+ * @param byteCount byte count
+ */
+ public final int getBytesSampleCount(int byteCount) {
+ return ( byteCount << 3 ) / sampleSize;
+ }
+
public String toString() {
- return "AudioDataFormat[type "+dataType+", sampleRate "+sampleRate+", sampleSize "+sampleSize+", channelCount "+channelCount+
- ", signed "+signed+", fixedP "+fixedP+", "+(littleEndian?"little":"big")+"endian]"; }
+ return "AudioDataFormat[sampleRate "+sampleRate+", sampleSize "+sampleSize+", channelCount "+channelCount+
+ ", signed "+signed+", fixedP "+fixedP+", "+(planar?"planar":"packed")+", "+(littleEndian?"little":"big")+"-endian]"; }
}
- /** Default {@link AudioDataFormat}, [type PCM, sampleRate 44100, sampleSize 16, channelCount 2, signed, fixedP, littleEndian]. */
- public static final AudioDataFormat DefaultFormat = new AudioDataFormat(AudioDataType.PCM, 44100, 16, 2, true /* signed */, true /* fixed point */, true /* littleEndian */);
+ /** Default {@link AudioFormat}, [sampleRate 44100, sampleSize 16, channelCount 2, signed, fixedP, !planar, littleEndian]. */
+ public static final AudioFormat DefaultFormat = new AudioFormat(44100, 16, 2, true /* signed */,
+ true /* fixed point */, false /* planar */, true /* littleEndian */);
public static abstract class AudioFrame extends TimeFrameI {
protected int byteSize;
@@ -227,38 +282,54 @@ public interface AudioSink {
public boolean setVolume(float v);
/**
- * Returns the preferred {@link AudioDataFormat} by this sink.
+ * Returns the preferred {@link AudioFormat} by this sink.
* <p>
- * The preferred format shall reflect this sinks most native format,
+ * The preferred format is guaranteed to be supported
+ * and shall reflect this sink's most native format,
* i.e. best performance w/o data conversion.
* </p>
- * @see #initSink(AudioDataFormat)
+ * <p>
+ * Known {@link AudioFormat} attributes considered by implementations:
+ * <ul>
+ * <li>ALAudioSink: {@link AudioFormat#sampleRate}.
+ * </ul>
+ * </p>
+ * @see #initSink(AudioFormat)
+ * @see #isSupported(AudioFormat)
+ */
+ public AudioFormat getPreferredFormat();
+
+ /** Return the maximum number of supported channels. */
+ public int getMaxSupportedChannels();
+
+ /**
+ * Returns true if the given format is supported by the sink, otherwise false.
+ * @see #initSink(AudioFormat)
+ * @see #getPreferredFormat()
*/
- public AudioDataFormat getPreferredFormat();
+ public boolean isSupported(AudioFormat format);
/**
* Initializes the sink.
* <p>
- * Implementation shall try to match the given <code>requestedFormat</code> {@link AudioDataFormat}
- * as close as possible, regarding it's capabilities.
+ * Implementation must match the given <code>requestedFormat</code> {@link AudioFormat}.
* </p>
* <p>
- * A user may consider {@link #getPreferredFormat()} and pass this value
- * to utilize best performance and <i>behavior</i>.
- * </p>
- * The {@link #DefaultFormat} <i>should be</i> supported by all implementations.
+ * Caller shall validate <code>requestedFormat</code> via {@link #isSupported(AudioFormat)}
+ * beforehand and try to find a suitable supported one.
+ * {@link #getPreferredFormat()} and {@link #getMaxSupportedChannels()} may help.
* </p>
- * @param requestedFormat the requested {@link AudioDataFormat}.
+ * @param requestedFormat the requested {@link AudioFormat}.
* @param frameDuration average or fixed frame duration in milliseconds
* helping a caching {@link AudioFrame} based implementation to determine the frame count in the queue.
* See {@link #DefaultFrameDuration}.
* @param initialQueueSize initial time in milliseconds to queue in this sink, see {@link #DefaultInitialQueueSize}.
* @param queueGrowAmount time in milliseconds to grow queue if full, see {@link #DefaultQueueGrowAmount}.
* @param queueLimit maximum time in milliseconds the queue can hold (and grow), see {@link #DefaultQueueLimitWithVideo} and {@link #DefaultQueueLimitAudioOnly}.
- * @return if successful the chosen AudioDataFormat based on the <code>requestedFormat</code> and this sinks capabilities, otherwise <code>null</code>.
+ * @return true if successful, otherwise false
*/
- public AudioDataFormat init(AudioDataFormat requestedFormat, float frameDuration,
- int initialQueueSize, int queueGrowAmount, int queueLimit);
+ public boolean init(AudioFormat requestedFormat, float frameDuration,
+ int initialQueueSize, int queueGrowAmount, int queueLimit);
/**
* Returns true, if {@link #play()} has been requested <i>and</i> the sink is still playing,
@@ -285,7 +356,7 @@ public interface AudioSink {
/**
* Flush all queued buffers, implies {@link #pause()}.
* <p>
- * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+ * {@link #init(AudioFormat, float, int, int, int)} must be called first.
* </p>
* @see #play()
* @see #pause()
@@ -298,17 +369,17 @@ public interface AudioSink {
/**
* Returns the number of allocated buffers as requested by
- * {@link #init(AudioDataFormat, float, int, int, int)}.
+ * {@link #init(AudioFormat, float, int, int, int)}.
*/
public int getFrameCount();
- /** @return the current enqueued frames count since {@link #init(AudioDataFormat, float, int, int, int)}. */
+ /** @return the current enqueued frames count since {@link #init(AudioFormat, float, int, int, int)}. */
public int getEnqueuedFrameCount();
/**
* Returns the current number of frames queued for playing.
* <p>
- * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+ * {@link #init(AudioFormat, float, int, int, int)} must be called first.
* </p>
*/
public int getQueuedFrameCount();
@@ -316,7 +387,7 @@ public interface AudioSink {
/**
* Returns the current number of bytes queued for playing.
* <p>
- * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+ * {@link #init(AudioFormat, float, int, int, int)} must be called first.
* </p>
*/
public int getQueuedByteCount();
@@ -324,7 +395,7 @@ public interface AudioSink {
/**
* Returns the current queued frame time in milliseconds for playing.
* <p>
- * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+ * {@link #init(AudioFormat, float, int, int, int)} must be called first.
* </p>
*/
public int getQueuedTime();
@@ -337,7 +408,7 @@ public interface AudioSink {
/**
* Returns the current number of frames in the sink available for writing.
* <p>
- * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+ * {@link #init(AudioFormat, float, int, int, int)} must be called first.
* </p>
*/
public int getFreeFrameCount();
@@ -345,10 +416,10 @@ public interface AudioSink {
/**
* Enqueue the remaining bytes of the given {@link AudioDataFrame}'s direct ByteBuffer to this sink.
* <p>
- * The data must comply with the chosen {@link AudioDataFormat} as returned by {@link #initSink(AudioDataFormat)}.
+ * The data must comply with the chosen {@link AudioFormat} as returned by {@link #initSink(AudioFormat)}.
* </p>
* <p>
- * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+ * {@link #init(AudioFormat, float, int, int, int)} must be called first.
* </p>
* @returns the enqueued internal {@link AudioFrame}, which may differ from the input <code>audioDataFrame</code>.
* @deprecated User shall use {@link #enqueueData(int, ByteBuffer, int)}, which allows implementation
@@ -359,10 +430,10 @@ public interface AudioSink {
/**
* Enqueue <code>byteCount</code> bytes of the remaining bytes of the given NIO {@link ByteBuffer} to this sink.
* <p>
- * The data must comply with the chosen {@link AudioDataFormat} as returned by {@link #initSink(AudioDataFormat)}.
+ * The data must comply with the chosen {@link AudioFormat} as returned by {@link #initSink(AudioFormat)}.
* </p>
* <p>
- * {@link #init(AudioDataFormat, float, int, int, int)} must be called first.
+ * {@link #init(AudioFormat, float, int, int, int)} must be called first.
* </p>
* @returns the enqueued internal {@link AudioFrame}.
*/
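
The Time <-> Bytes helpers renamed above form closed round-trips. Below, a small standalone sketch for 16-bit signed stereo PCM at 44.1 kHz; the commented values follow the integer math of the formulas, including the sampleRate/1000 truncation (44100/1000 == 44) in the millisecond conversions:

    import com.jogamp.opengl.util.av.AudioSink.AudioFormat;

    public class ConversionSketch {
        public static void main(final String[] args) {
            // 44.1 kHz, 16-bit signed fixed-point PCM, stereo, packed, little-endian.
            final AudioFormat fmt = new AudioFormat(44100, 16, 2, true, true, false, true);

            final int bytes = fmt.getDurationsByteSize(1000);  // 1000 * (2 ch * 2 B * 44) = 176000
            final int ms    = fmt.getBytesDuration(bytes);     // inverse: back to 1000 ms

            final float frameMs = fmt.getSamplesDuration(1024); // (1000f * 1024) / 44100 ~= 23.2 ms

            final int sBytes = fmt.getSamplesByteCount(1024);   // 1024 * (16/8) = 2048 bytes
            final int sCount = fmt.getBytesSampleCount(sBytes); // (2048 * 8) / 16 = 1024 samples

            System.out.println(bytes+" B, "+ms+" ms, "+frameMs+" ms, "+sBytes+" B, "+sCount+" samples");
        }
    }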
diff --git a/src/jogl/classes/jogamp/opengl/openal/av/ALAudioSink.java b/src/jogl/classes/jogamp/opengl/openal/av/ALAudioSink.java
index d3964a2cf..3daedd52e 100644
--- a/src/jogl/classes/jogamp/opengl/openal/av/ALAudioSink.java
+++ b/src/jogl/classes/jogamp/opengl/openal/av/ALAudioSink.java
@@ -39,7 +39,9 @@ import com.jogamp.openal.AL;
import com.jogamp.openal.ALC;
import com.jogamp.openal.ALCcontext;
import com.jogamp.openal.ALCdevice;
+import com.jogamp.openal.ALExt;
import com.jogamp.openal.ALFactory;
+import com.jogamp.openal.util.ALHelpers;
import com.jogamp.opengl.util.av.AudioSink;
/***
@@ -47,12 +49,16 @@ import com.jogamp.opengl.util.av.AudioSink;
*/
public class ALAudioSink implements AudioSink {
+ private static final String AL_SOFT_buffer_samples = "AL_SOFT_buffer_samples";
private static final ALC alc;
private static final AL al;
+ private static final ALExt alExt;
private static final boolean staticAvailable;
private String deviceSpecifier;
private ALCdevice device;
+ private boolean hasSOFTBufferSamples;
+ private AudioFormat preferredAudioFormat;
private ALCcontext context;
private final RecursiveLock lock = LockFactory.createRecursiveLock();
@@ -91,8 +97,10 @@ public class ALAudioSink implements AudioSink {
private volatile int enqueuedFrameCount;
private int[] alSource = null;
- private AudioDataFormat chosenFormat;
- private int alFormat;
+ private AudioFormat chosenFormat;
+ private int alChannelLayout;
+ private int alSampleType;
+ private int alFormat;
private boolean initialized;
private volatile boolean playRequested = false;
@@ -100,9 +108,11 @@ public class ALAudioSink implements AudioSink {
static {
ALC _alc = null;
AL _al = null;
+ ALExt _alExt = null;
try {
_alc = ALFactory.getALC();
_al = ALFactory.getAL();
+ _alExt = ALFactory.getALExt();
} catch(Throwable t) {
if( DEBUG ) {
System.err.println("ALAudioSink: Catched "+t.getClass().getName()+": "+t.getMessage());
@@ -111,7 +121,8 @@ public class ALAudioSink implements AudioSink {
}
alc = _alc;
al = _al;
- staticAvailable = null != alc && null != al;
+ alExt = _alExt;
+ staticAvailable = null != alc && null != al && null != alExt;
}
public ALAudioSink() {
@@ -138,7 +149,7 @@ public class ALAudioSink implements AudioSink {
// Create audio context.
context = alc.alcCreateContext(device, null);
if (context == null) {
- throw new RuntimeException("ALAudioSink: Error creating OpenAL context");
+ throw new RuntimeException("ALAudioSink: Error creating OpenAL context for "+deviceSpecifier);
}
lockContext();
@@ -148,6 +159,16 @@ public class ALAudioSink implements AudioSink {
throw new RuntimeException("ALAudioSink: Error making OpenAL context current");
}
+ hasSOFTBufferSamples = al.alIsExtensionPresent(AL_SOFT_buffer_samples);
+ preferredAudioFormat = queryPreferredAudioFormat();
+ if( DEBUG | true ) {
+ System.out.println("ALAudioSink: OpenAL Extensions:"+al.alGetString(AL.AL_EXTENSIONS));
+ System.out.println("ALAudioSink: Null device OpenAL Extensions:"+alc.alcGetString(null, ALC.ALC_EXTENSIONS));
+ System.out.println("ALAudioSink: Device "+deviceSpecifier+" OpenAL Extensions:"+alc.alcGetString(device, ALC.ALC_EXTENSIONS));
+ System.out.println("ALAudioSink: hasSOFTBufferSamples "+hasSOFTBufferSamples);
+ System.out.println("ALAudioSink: preferredAudioFormat "+preferredAudioFormat);
+ }
+
// Create source
{
alSource = new int[1];
@@ -175,6 +196,16 @@ public class ALAudioSink implements AudioSink {
}
}
+ private final AudioFormat queryPreferredAudioFormat() {
+ int sampleRate = DefaultFormat.sampleRate;
+ final int[] value = new int[1];
+ alc.alcGetIntegerv(device, ALC.ALC_FREQUENCY, 1, value, 0);
+ if ( alc.alcGetError(device) == ALC.ALC_NO_ERROR ) {
+ sampleRate = value[0];
+ }
+ return new AudioFormat(sampleRate, DefaultFormat.sampleSize, DefaultFormat.channelCount, DefaultFormat.signed, DefaultFormat.fixedP, DefaultFormat.planar, DefaultFormat.littleEndian);
+ }
+
private final void lockContext() {
lock.lock();
alc.alcMakeContextCurrent(context);
@@ -212,53 +243,78 @@ public class ALAudioSink implements AudioSink {
final int alBuffersLen = null != alBufferNames ? alBufferNames.length : 0;
final int ctxHash = context != null ? context.hashCode() : 0;
return "ALAudioSink[init "+initialized+", playRequested "+playRequested+", device "+deviceSpecifier+", ctx "+toHexString(ctxHash)+", alSource "+alSrcName+
- ", chosen "+chosenFormat+", alFormat "+toHexString(alFormat)+
- ", playSpeed "+playSpeed+", buffers[total "+alBuffersLen+", avail "+alFramesAvail.size()+", "+
+ ", chosen "+chosenFormat+
+ ", al[chan "+ALHelpers.alChannelLayoutName(alChannelLayout)+", type "+ALHelpers.alSampleTypeName(alSampleType)+
+ ", fmt "+toHexString(alFormat)+", soft "+hasSOFTBufferSamples+
+ "], playSpeed "+playSpeed+", buffers[total "+alBuffersLen+", avail "+alFramesAvail.size()+", "+
"queued["+alFramesPlaying.size()+", apts "+getPTS()+", "+getQueuedTime() + " ms, " + alBufferBytesQueued+" bytes], "+
"queue[g "+frameGrowAmount+", l "+frameLimit+"]";
}
+
public final String getPerfString() {
final int alBuffersLen = null != alBufferNames ? alBufferNames.length : 0;
return "Play [buffer "+alFramesPlaying.size()+"/"+alBuffersLen+", apts "+getPTS()+", "+getQueuedTime() + " ms, " + alBufferBytesQueued+" bytes]";
}
@Override
- public final AudioDataFormat getPreferredFormat() {
- return DefaultFormat;
+ public final AudioFormat getPreferredFormat() {
+ if( !staticAvailable ) {
+ return null;
+ }
+ return preferredAudioFormat;
+ }
+
+ @Override
+ public final int getMaxSupportedChannels() {
+ if( !staticAvailable ) {
+ return 0;
+ }
+ return hasSOFTBufferSamples ? 8 : 2;
}
@Override
- public final AudioDataFormat init(AudioDataFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
+ public final boolean isSupported(AudioFormat format) {
if( !staticAvailable ) {
- return null;
+ return false;
}
- if( !requestedFormat.fixedP ||
- !requestedFormat.littleEndian ||
- ( 1 != requestedFormat.channelCount && requestedFormat.channelCount != 2 ) ||
- ( 8 != requestedFormat.sampleSize && requestedFormat.sampleSize != 16 )
- ) {
- return null; // not supported w/ OpenAL
- }
- // final float samplePeriod = 1.0f / requestedFormat.sampleRate;
- switch( requestedFormat.channelCount ) {
- case 1: {
- switch ( requestedFormat.sampleSize ) {
- case 8:
- alFormat = AL.AL_FORMAT_MONO8; break;
- case 16:
- alFormat = AL.AL_FORMAT_MONO16; break;
- }
- } break;
- case 2:
- switch ( requestedFormat.sampleSize ) {
- case 8:
- alFormat = AL.AL_FORMAT_STEREO8; break;
- case 16:
- alFormat = AL.AL_FORMAT_STEREO16; break;
+ if( format.planar || !format.littleEndian ) {
+ // FIXME: big-endian is supported w/ SOFT where it's the native format!
+ return false;
+ }
+ final int alChannelLayout = ALHelpers.getDefaultALChannelLayout(format.channelCount);
+ if( AL.AL_NONE != alChannelLayout ) {
+ final int alSampleType = ALHelpers.getALSampleType(format.sampleSize, format.signed, format.fixedP);
+ if( AL.AL_NONE != alSampleType ) {
+ lockContext();
+ try {
+ final int alFormat = ALHelpers.getALFormat(alChannelLayout, alSampleType, hasSOFTBufferSamples, al, alExt);
+ return AL.AL_NONE != alFormat;
+ } finally {
+ unlockContext();
}
+ }
}
+ return false;
+ }
+
+ @Override
+ public final boolean init(AudioFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
+ if( !staticAvailable ) {
+ return false;
+ }
+ alChannelLayout = ALHelpers.getDefaultALChannelLayout(requestedFormat.channelCount);
+ alSampleType = ALHelpers.getALSampleType(requestedFormat.sampleSize, requestedFormat.signed, requestedFormat.fixedP);
lockContext();
try {
+ if( AL.AL_NONE != alChannelLayout && AL.AL_NONE != alSampleType ) {
+ alFormat = ALHelpers.getALFormat(alChannelLayout, alSampleType, hasSOFTBufferSamples, al, alExt);
+ } else {
+ alFormat = AL.AL_NONE;
+ }
+ if( AL.AL_NONE == alFormat ) {
+ // not supported
+ return false;
+ }
// Allocate buffers
destroyBuffers();
{
@@ -290,7 +346,7 @@ public class ALAudioSink implements AudioSink {
}
chosenFormat = requestedFormat;
- return chosenFormat;
+ return true;
}
private static int[] concat(int[] first, int[] second) {
@@ -548,7 +604,15 @@ public class ALAudioSink implements AudioSink {
if( !alFramesPlaying.put( alFrame ) ) {
throw new InternalError("Internal Error: "+this);
}
- al.alBufferData(alFrame.alBuffer, alFormat, bytes, byteCount, chosenFormat.sampleRate);
+ if( hasSOFTBufferSamples ) {
+ final int samplesPerChannel = chosenFormat.getBytesSampleCount(byteCount) / chosenFormat.channelCount;
+ // final int samplesPerChannel = ALHelpers.bytesToSampleCount(byteCount, alChannelLayout, alSampleType);
+ alExt.alBufferSamplesSOFT(alFrame.alBuffer, chosenFormat.sampleRate, alFormat,
+ samplesPerChannel, alChannelLayout, alSampleType, bytes);
+ } else {
+ al.alBufferData(alFrame.alBuffer, alFormat, bytes, byteCount, chosenFormat.sampleRate);
+ }
+
final int[] alBufferNames = new int[] { alFrame.alBuffer };
al.alSourceQueueBuffers(alSource[0], 1, alBufferNames, 0);
alErr = al.alGetError();
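
In the AL_SOFT_buffer_samples path above, alBufferSamplesSOFT takes samples per channel rather than a byte count. A hedged sketch of that conversion, reusing the AudioFormat helpers from this commit (the standalone wrapper class is hypothetical):

    import com.jogamp.opengl.util.av.AudioSink.AudioFormat;

    public class SampleCountSketch {
        /**
         * Bytes -> samples per channel, as in ALAudioSink's enqueue path:
         * ( byteCount * 8 / sampleSize ) / channelCount.
         */
        static int samplesPerChannel(final AudioFormat fmt, final int byteCount) {
            return fmt.getBytesSampleCount(byteCount) / fmt.channelCount;
        }

        public static void main(final String[] args) {
            final AudioFormat fmt = new AudioFormat(44100, 16, 2, true, true, false, true);
            // 4096 bytes of 16-bit stereo -> 2048 samples total -> 1024 per channel.
            System.out.println(samplesPerChannel(fmt, 4096));
        }
    }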
diff --git a/src/jogl/classes/jogamp/opengl/util/av/JavaSoundAudioSink.java b/src/jogl/classes/jogamp/opengl/util/av/JavaSoundAudioSink.java
index b1b9c1b6c..85fab96a4 100644
--- a/src/jogl/classes/jogamp/opengl/util/av/JavaSoundAudioSink.java
+++ b/src/jogl/classes/jogamp/opengl/util/av/JavaSoundAudioSink.java
@@ -33,7 +33,7 @@ public class JavaSoundAudioSink implements AudioSink {
private int bufferCount;
private byte [] sampleData = new byte[BUFFER_SIZE];
private boolean initialized = false;
- private AudioDataFormat chosenFormat = null;
+ private AudioSink.AudioFormat chosenFormat = null;
private volatile boolean playRequested = false;
private float volume = 1.0f;
@@ -77,14 +77,24 @@ public class JavaSoundAudioSink implements AudioSink {
}
@Override
- public AudioDataFormat getPreferredFormat() {
+ public AudioSink.AudioFormat getPreferredFormat() {
return DefaultFormat;
}
@Override
- public AudioDataFormat init(AudioDataFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
+ public final int getMaxSupportedChannels() {
+ return 2;
+ }
+
+ @Override
+ public final boolean isSupported(AudioSink.AudioFormat format) {
+ return true;
+ }
+
+ @Override
+ public boolean init(AudioSink.AudioFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
if( !staticAvailable ) {
- return null;
+ return false;
}
// Create the audio format we wish to use
format = new javax.sound.sampled.AudioFormat(requestedFormat.sampleRate, requestedFormat.sampleSize, requestedFormat.channelCount, requestedFormat.signed, !requestedFormat.littleEndian);
@@ -105,7 +115,7 @@ public class JavaSoundAudioSink implements AudioSink {
} catch (Exception e) {
initialized=false;
}
- return chosenFormat;
+ return true;
}
@Override
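
The JavaSound mapping above flips the endianness flag, since javax.sound.sampled.AudioFormat takes a bigEndian argument where AudioSink.AudioFormat stores littleEndian. A minimal standalone sketch of the same mapping (the toJavaSound helper name is hypothetical):

    import com.jogamp.opengl.util.av.AudioSink;

    public class JavaSoundMappingSketch {
        /** Map AudioSink.AudioFormat to its JavaSound equivalent. */
        static javax.sound.sampled.AudioFormat toJavaSound(final AudioSink.AudioFormat fmt) {
            // Note the inverted flag: bigEndian == !littleEndian.
            return new javax.sound.sampled.AudioFormat(fmt.sampleRate, fmt.sampleSize,
                                                       fmt.channelCount, fmt.signed,
                                                       !fmt.littleEndian);
        }
    }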
diff --git a/src/jogl/classes/jogamp/opengl/util/av/NullAudioSink.java b/src/jogl/classes/jogamp/opengl/util/av/NullAudioSink.java
index 83799b074..723bb9dd1 100644
--- a/src/jogl/classes/jogamp/opengl/util/av/NullAudioSink.java
+++ b/src/jogl/classes/jogamp/opengl/util/av/NullAudioSink.java
@@ -42,13 +42,23 @@ public class NullAudioSink implements AudioSink {
}
@Override
- public AudioDataFormat getPreferredFormat() {
+ public AudioFormat getPreferredFormat() {
return DefaultFormat;
}
@Override
- public AudioDataFormat init(AudioDataFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
- return requestedFormat;
+ public final int getMaxSupportedChannels() {
+ return 8;
+ }
+
+ @Override
+ public final boolean isSupported(AudioFormat format) {
+ return true;
+ }
+
+ @Override
+ public boolean init(AudioFormat requestedFormat, float frameDuration, int initialQueueSize, int queueGrowAmount, int queueLimit) {
+ return true;
}
@Override