/**
 * Copyright 2013 JogAmp Community. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY JogAmp Community ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JogAmp Community OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those of the
 * authors and should not be interpreted as representing official policies, either expressed
 * or implied, of JogAmp Community.
 */
package com.jogamp.opengl.util.av;

import java.nio.ByteBuffer;

import com.jogamp.opengl.util.TimeFrameI;

import jogamp.opengl.Debug;

public interface AudioSink {
    public static final boolean DEBUG = Debug.debug("AudioSink");

    /** Default frame duration in milliseconds, i.e. 1 frame per {@value} ms. */
    public static final int DefaultFrameDuration = 32;

    /** Initial audio queue size in milliseconds. {@value} ms, i.e. 16 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}. */
    public static final int DefaultInitialQueueSize = 16 * 32; // 512 ms

    /** Audio queue grow size in milliseconds. {@value} ms, i.e. 16 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}. */
    public static final int DefaultQueueGrowAmount = 16 * 32; // 512 ms

    /** Audio queue limit w/ video in milliseconds. {@value} ms, i.e. 96 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}. */
    public static final int DefaultQueueLimitWithVideo = 96 * 32; // 3072 ms

    /** Audio queue limit w/o video in milliseconds. {@value} ms, i.e. 32 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}. */
    public static final int DefaultQueueLimitAudioOnly = 32 * 32; // 1024 ms

    /**
     * Specifies the linear audio PCM format.
     */
    public static class AudioFormat {
        /**
         * @param sampleRate sample rate in Hz (1/s)
         * @param sampleSize sample size in bits
         * @param channelCount number of channels
         * @param signed true for signed, false for unsigned values
         * @param fixedP true for fixed point values, false for floating point values with a sampleSize of 32 (float) or 64 (double)
         * @param planar true for planar data packaging (each channel in its own data buffer), false for packed data (channels interleaved in one buffer)
         * @param littleEndian true for little-endian, false for big-endian
         */
        public AudioFormat(int sampleRate, int sampleSize, int channelCount,
                           boolean signed, boolean fixedP, boolean planar, boolean littleEndian) {
            this.sampleRate = sampleRate;
            this.sampleSize = sampleSize;
            this.channelCount = channelCount;
            this.signed = signed;
            this.fixedP = fixedP;
            this.planar = planar;
            this.littleEndian = littleEndian;
            if( !fixedP ) {
                if( sampleSize != 32 && sampleSize != 64 ) {
                    throw new IllegalArgumentException("Floating point: sampleSize "+sampleSize+" bits");
                }
                if( !signed ) {
                    throw new IllegalArgumentException("Floating point: unsigned");
                }
            }
        }

        /** Sample rate in Hz (1/s). */
        public final int sampleRate;
        /** Sample size in bits. */
        public final int sampleSize;
        /** Number of channels. */
        public final int channelCount;
        /** Signed or unsigned values. */
        public final boolean signed;
        /** Fixed or floating point values. Floating point 'float' has {@link #sampleSize} 32, 'double' has {@link #sampleSize} 64. */
        public final boolean fixedP;
        /** Planar or packed samples. If planar, each channel has its own data buffer. If packed, channel data is interleaved in one buffer. */
        public final boolean planar;
        /** Little-endian or big-endian byte order. */
        public final boolean littleEndian;

        //
        // Time <-> Bytes
        //

        /**
         * Returns the byte count of the given duration in milliseconds
         * according to {@link #sampleSize}, {@link #channelCount} and {@link #sampleRate}.
         * <p>
         * Time -> Byte Count
         * </p>
         * @param millisecs duration in milliseconds
         */
        public final int getDurationsByteSize(int millisecs) {
            final int bytesPerSample = sampleSize >>> 3; // /8
            return millisecs * ( channelCount * bytesPerSample * ( sampleRate / 1000 ) );
        }

        /**
         * Returns the duration in milliseconds of the given byte count
         * according to {@link #sampleSize}, {@link #channelCount} and {@link #sampleRate}.
         * <p>
         * Byte Count -> Time
         * </p>
         * @param byteCount byte count
         */
        public final int getBytesDuration(int byteCount) {
            final int bytesPerSample = sampleSize >>> 3; // /8
            return byteCount / ( channelCount * bytesPerSample * ( sampleRate / 1000 ) );
        }
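        // Example sketch: round-tripping time and byte counts with the
        // 44100 Hz, 16-bit, stereo DefaultFormat. Note that the integer
        // division ( sampleRate / 1000 ) truncates, i.e. 44100 Hz yields
        // the factor 44, hence 1000 ms maps to 176000 bytes here:
        //
        //   final AudioFormat fmt = AudioSink.DefaultFormat;
        //   final int bytes  = fmt.getDurationsByteSize(1000); // 1000 * ( 2 * 2 * 44 ) = 176000
        //   final int millis = fmt.getBytesDuration(bytes);    // 176000 / ( 2 * 2 * 44 ) = 1000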
        /**
         * Returns the duration in milliseconds of the given sample count per frame and channel
         * according to the {@link #sampleRate}, i.e.
         * <pre>
         *    ( 1000f * sampleCount ) / sampleRate
         * </pre>
         * <p>
         * Sample Count -> Time
         * </p>
         * @param sampleCount sample count per frame and channel
         */
        public final float getSamplesDuration(int sampleCount) {
            return ( 1000f * (float) sampleCount ) / (float)sampleRate;
        }
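        // Example sketch: a typical decoder frame of 1024 samples per channel
        // at 44100 Hz lasts ( 1000f * 1024 ) / 44100 ~= 23.22 ms:
        //
        //   final float frameDur = AudioSink.DefaultFormat.getSamplesDuration(1024); // ~23.22f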
        /**
         * Returns the rounded frame count of the given duration in milliseconds and the given frame duration, i.e.
         * <pre>
         *    Math.max( 1, millisecs / frameDuration + 0.5f )
         * </pre>
         * <p>
         * Note: <code>frameDuration</code> can be derived from the sample count per frame and channel
         * via {@link #getSamplesDuration(int)}.
         * </p>
         * <p>
         * Frame Time -> Frame Count
         * </p>
         * @param millisecs time in milliseconds
         * @param frameDuration duration per frame in milliseconds
         */
        public final int getFrameCount(int millisecs, float frameDuration) {
            return Math.max(1, (int) ( (float)millisecs / frameDuration + 0.5f ));
        }
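        // Example sketch: combining getSamplesDuration(..) and getFrameCount(..)
        // to size a queue, e.g. 512 ms worth of ~23.22 ms frames round to 22 frames:
        //
        //   final AudioFormat fmt = AudioSink.DefaultFormat;
        //   final float frameDur  = fmt.getSamplesDuration(1024);     // ~23.22 ms
        //   final int frames      = fmt.getFrameCount(512, frameDur); // (int)( 512 / 23.22f + 0.5f ) = 22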
        /**
         * Returns the byte size of the given sample count
         * according to the {@link #sampleSize}, i.e.:
         * <pre>
         *    sampleCount * ( sampleSize / 8 )
         * </pre>
         * <p>
         * Note: To retrieve the byte size for all channels,
         * you need to pre-multiply <code>sampleCount</code> with {@link #channelCount}.
         * </p>
         * <p>
         * Sample Count -> Byte Count
         * </p>
         * @param sampleCount sample count
         */
        public final int getSamplesByteCount(int sampleCount) {
            return sampleCount * ( sampleSize >>> 3 );
        }
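        // Example sketch: for 16-bit samples, 1024 samples per channel across
        // 2 channels require the pre-multiplication noted above:
        //
        //   final AudioFormat fmt = AudioSink.DefaultFormat; // 16 bit, 2 channels
        //   final int bytes = fmt.getSamplesByteCount(1024 * fmt.channelCount); // 2048 * 2 = 4096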
        /**
         * Returns the sample count of the given byte count
         * according to the {@link #sampleSize}, i.e.:
         * <pre>
         *    ( byteCount * 8 ) / sampleSize
         * </pre>
         * <p>
         * Note: If <code>byteCount</code> covers all channels and you request the sample count per channel,
         * you need to divide the result by {@link #channelCount}.
         * </p>
         * <p>
         * Byte Count -> Sample Count
         * </p>
         * @param byteCount byte count
         */
        public final int getBytesSampleCount(int byteCount) {
            return ( byteCount << 3 ) / sampleSize;
        }

        @Override
        public String toString() {
            return "AudioDataFormat[sampleRate "+sampleRate+", sampleSize "+sampleSize+", channelCount "+channelCount+
                   ", signed "+signed+", fixedP "+fixedP+", "+(planar?"planar":"packed")+", "+(littleEndian?"little":"big")+"-endian]";
        }
    }

    /** Default {@link AudioFormat}, [type PCM, sampleRate 44100, sampleSize 16, channelCount 2, signed, fixedP, !planar, littleEndian]. */
    public static final AudioFormat DefaultFormat = new AudioFormat(44100, 16, 2, true /* signed */,
                                                                    true /* fixed point */, false /* planar */, true /* littleEndian */);

    public static abstract class AudioFrame extends TimeFrameI {
        protected int byteSize;

        public AudioFrame() {
            this.byteSize = 0;
        }
        public AudioFrame(int pts, int duration, int byteCount) {
            super(pts, duration);
            this.byteSize = byteCount;
        }

        /** Get this frame's size in bytes. */
        public final int getByteSize() { return byteSize; }
        /** Set this frame's size in bytes. */
        public final void setByteSize(int size) { this.byteSize = size; }

        @Override
        public String toString() {
            return "AudioFrame[pts "+pts+" ms, l "+duration+" ms, "+byteSize+" bytes]";
        }
    }

    public static class AudioDataFrame extends AudioFrame {
        protected final ByteBuffer data;

        public AudioDataFrame(int pts, int duration, ByteBuffer bytes, int byteCount) {
            super(pts, duration, byteCount);
            if( byteCount > bytes.remaining() ) {
                throw new IllegalArgumentException("Given byteCount "+byteCount+" exceeds remaining bytes in buffer "+bytes+". "+this);
            }
            this.data = bytes;
        }

        /** Get this frame's data. */
        public final ByteBuffer getData() { return data; }

        @Override
        public String toString() {
            return "AudioDataFrame[pts "+pts+" ms, l "+duration+" ms, "+byteSize+" bytes, "+data+"]";
        }
    }
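    // Example sketch: wrapping one decoded PCM chunk in an AudioDataFrame,
    // deriving its duration from the byte count; 'decodedPcm' and 'pts' are
    // assumed to come from an audio decoder:
    //
    //   final int bytes    = decodedPcm.remaining(); // direct NIO ByteBuffer
    //   final int duration = AudioSink.DefaultFormat.getBytesDuration(bytes);
    //   final AudioDataFrame frame = new AudioDataFrame(pts, duration, decodedPcm, bytes);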
    /**
     * Returns the <code>initialized state</code> of this instance.
     * <p>
     * The <code>initialized state</code> is affected by this instance's
     * overall availability, i.e. after instantiation,
     * as well as by {@link #destroy()}.
     * </p>
     */
    public boolean isInitialized();
    /**
     * Sets the play speed.
     * <p>
     * To simplify testing, the play speed is <i>normalized</i>, i.e.
     * <ul>
     *   <li><code>1.0f</code>: if <code> Math.abs(1.0f - rate) < 0.01f </code></li>
     * </ul>
     * </p>
     * @return true if successful, otherwise false, e.g. due to an unsupported value range of the implementation.
     */
    public boolean setPlaySpeed(float rate);

    /**
     * Sets the volume [0f..1f].
     * <p>
     * To simplify testing, the volume is <i>normalized</i>, i.e.
     * <ul>
     *   <li><code>0.0f</code>: if <code> Math.abs(v) < 0.01f </code></li>
     *   <li><code>1.0f</code>: if <code> Math.abs(1.0f - v) < 0.01f </code></li>
     * </ul>
     * </p>
     * @return true if successful, otherwise false, e.g. due to an unsupported value range of the implementation.
     */
    public boolean setVolume(float v);
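    // Example sketch: due to the normalization above, values within 1% of the
    // bounds snap exactly; 'sink' is an assumed AudioSink instance:
    //
    //   sink.setVolume(0.995f); // applied as 1.0f, since Math.abs(1.0f - 0.995f) < 0.01f
    //   sink.setVolume(0.005f); // applied as 0.0f, since Math.abs(0.005f) < 0.01f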
    /**
     * Returns the preferred {@link AudioFormat} of this sink.
     * <p>
     * The preferred format is guaranteed to be supported
     * and shall reflect this sink's most native format,
     * i.e. best performance w/o data conversion.
     * </p>
     * <p>
     * Known {@link AudioFormat} attributes considered by implementations:
     * <ul>
     *   <li>ALAudioSink: {@link AudioFormat#sampleRate}.</li>
     * </ul>
     * </p>
     * @see #isSupported(AudioFormat)
     */
    public AudioFormat getPreferredFormat();

    /** Returns the maximum number of supported channels. */
    public int getMaxSupportedChannels();

    /**
     * Returns true if the given format is supported by the sink, otherwise false.
     * @see #getPreferredFormat()
     */
    public boolean isSupported(AudioFormat format);
    /**
     * Initializes the sink.
     * <p>
     * Implementation must match the given <code>requestedFormat</code> {@link AudioFormat}.
     * </p>
     * <p>
     * Caller shall validate <code>requestedFormat</code> via {@link #isSupported(AudioFormat)}
     * beforehand and try to find a suitable supported one.
     * {@link #getPreferredFormat()} and {@link #getMaxSupportedChannels()} may help.
     * </p>
     * @param requestedFormat the requested {@link AudioFormat}
     * @param frameDuration average or fixed frame duration in milliseconds,
     *                      helping a caching {@link AudioFrame} based implementation to determine the frame count in the queue,
     *                      see {@link #DefaultFrameDuration}
     * @param initialQueueSize initial time in milliseconds to queue in this sink, see {@link #DefaultInitialQueueSize}
     * @param queueGrowAmount time in milliseconds to grow the queue if full, see {@link #DefaultQueueGrowAmount}
     * @param queueLimit maximum time in milliseconds the queue can hold (and grow), see {@link #DefaultQueueLimitWithVideo} and {@link #DefaultQueueLimitAudioOnly}
     * @return true if successful, otherwise false
     */
    public boolean init(AudioFormat requestedFormat, float frameDuration,
                        int initialQueueSize, int queueGrowAmount, int queueLimit);
    /**
     * Plays queued buffers from the current internal position.
     * If no buffers are queued yet or the queue runs empty, playback continues once buffers are enqueued later on.
     * @see #pause()
     * @see #enqueueData(int, ByteBuffer, int)
     */
    public void play();

    /**
     * Pauses playing buffers while keeping all enqueued data, including the internal position.
     * @see #play()
     * @see #flush()
     */
    public void pause();

    /**
     * Flushes all queued buffers, implies {@link #pause()}.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     * @see #play()
     * @see #pause()
     * @see #enqueueData(int, ByteBuffer, int)
     */
    public void flush();

    /** Destroys this instance, i.e. closes all streams and devices allocated. */
    public void destroy();

    /**
     * Returns the number of allocated buffers as requested by
     * {@link #init(AudioFormat, float, int, int, int)}.
     */
    public int getFrameCount();

    /** @return the current enqueued frame count since {@link #init(AudioFormat, float, int, int, int)}. */
    public int getEnqueuedFrameCount();
    /**
     * Returns the current number of frames queued for playing.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getQueuedFrameCount();

    /**
     * Returns the current number of bytes queued for playing.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getQueuedByteCount();

    /**
     * Returns the current queued frame time in milliseconds for playing.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getQueuedTime();

    /**
     * Returns the current audio presentation timestamp (PTS) in milliseconds.
     */
    public int getPTS();

    /**
     * Returns the current number of frames in the sink available for writing.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getFreeFrameCount();
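    // Example sketch: monitoring the queue, e.g. for A/V synchronization
    // against the sink's audio clock; 'sink' is an assumed AudioSink instance:
    //
    //   final int audioPTS = sink.getPTS();        // audio time currently playing, in ms
    //   final int backlog  = sink.getQueuedTime(); // ms of audio still queued
    //   final boolean full = 0 == sink.getFreeFrameCount();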
    /**
     * Enqueues the remaining bytes of the given {@link AudioDataFrame}'s direct ByteBuffer to this sink.
     * <p>
     * The data must comply with the chosen {@link AudioFormat} as returned by {@link #init(AudioFormat, float, int, int, int)}.
     * </p>
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     * @return the enqueued internal {@link AudioFrame}, which may differ from the input <code>audioDataFrame</code>.
     * @deprecated Use {@link #enqueueData(int, ByteBuffer, int)}, which allows the implementation
     *             to reuse specialized {@link AudioFrame} instances.
     */
    public AudioFrame enqueueData(AudioDataFrame audioDataFrame);
    /**
     * Enqueues <code>byteCount</code> bytes of the remaining bytes of the given NIO {@link ByteBuffer} to this sink.
     * <p>
     * The data must comply with the chosen {@link AudioFormat} as returned by {@link #init(AudioFormat, float, int, int, int)}.
     * </p>
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     * @return the enqueued internal {@link AudioFrame}.
     */
    public AudioFrame enqueueData(int pts, ByteBuffer bytes, int byteCount);
}
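// Example sketch: a minimal decode-and-play loop using the preferred
// ByteBuffer variant of enqueueData(..); 'sink' and 'decoder' (with
// hasNext()/nextPTS()/nextPCM()) are assumed placeholders:
//
//   sink.play(); // playback continues automatically as frames are enqueued
//   while( decoder.hasNext() ) {
//       final ByteBuffer pcm = decoder.nextPCM(); // direct NIO buffer
//       sink.enqueueData(decoder.nextPTS(), pcm, pcm.remaining());
//   }
//   sink.flush();   // implies pause(), drops all queued data
//   sink.destroy();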