/**
* Copyright 2013-2023 JogAmp Community. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY JogAmp Community ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JogAmp Community OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are those of the
* authors and should not be interpreted as representing official policies, either expressed
* or implied, of JogAmp Community.
*/
package com.jogamp.common.av;
import java.nio.ByteBuffer;
import jogamp.common.Debug;
/**
 * A sink for PCM audio data, allowing enqueued frames to be played back.
 * <p>
 * Implementations normalize play speed and volume for comparison
 * (per the original documentation):
 * play speed is considered {@code 1.0f} if {@code Math.abs(1.0f - rate) < 0.01f};
 * volume is considered {@code 0.0f} if {@code Math.abs(v) < 0.01f}
 * and {@code 1.0f} if {@code Math.abs(1.0f - v) < 0.01f}.
 * </p>
 * <p>
 * The sink's availability is determined at instantiation and
 * is revoked by {@link #destroy()}.
 * </p>
 */
public interface AudioSink {
    /** Debug flag, enabled via the {@code AudioSink} debug property. */
    public static final boolean DEBUG = Debug.debug("AudioSink");

    /** Default frame duration in millisecond, i.e. 1 frame per {@value} ms. */
    public static final int DefaultFrameDuration = 32;

    /** Initial audio queue size in milliseconds. {@value} ms, i.e. 16 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}. */
    public static final int DefaultInitialQueueSize = 16 * 32; // 512 ms

    /** Audio queue grow size in milliseconds. {@value} ms, i.e. 16 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}. */
    public static final int DefaultQueueGrowAmount = 16 * 32; // 512 ms

    /** Audio queue limit w/ video in milliseconds. {@value} ms, i.e. 96 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}. */
    public static final int DefaultQueueLimitWithVideo = 96 * 32; // 3072 ms

    /** Audio queue limit w/o video in milliseconds. {@value} ms, i.e. 32 frames per 32 ms. See {@link #init(AudioFormat, float, int, int, int)}. */
    public static final int DefaultQueueLimitAudioOnly = 32 * 32; // 1024 ms

    /** Default {@link AudioFormat}, [type PCM, sampleRate 44100, sampleSize 16, channelCount 2, signed, fixedP, !planar, littleEndian]. */
    public static final AudioFormat DefaultFormat = new AudioFormat(44100, 16, 2, true /* signed */,
                                                                    true /* fixed point */, false /* planar */, true /* littleEndian */);

    /**
     * Abstract audio frame carrying a presentation timestamp (PTS),
     * a duration and the byte size of its audio data.
     */
    public static abstract class AudioFrame extends TimeFrameI {
        /** Size of this frame's audio data in bytes. */
        protected int byteSize;

        /** Constructs an instance w/ zero byte size. */
        public AudioFrame() {
            this.byteSize = 0;
        }

        /**
         * Constructs a new frame.
         * @param pts presentation timestamp in milliseconds
         * @param duration frame duration in milliseconds
         * @param byteCount size of this frame's audio data in bytes
         */
        public AudioFrame(final int pts, final int duration, final int byteCount) {
            super(pts, duration);
            this.byteSize=byteCount;
        }

        /** Get this frame's size in bytes. */
        public final int getByteSize() { return byteSize; }

        /** Set this frame's size in bytes. */
        public final void setByteSize(final int size) { this.byteSize=size; }

        @Override
        public String toString() {
            return "AudioFrame[pts " + pts + " ms, l " + duration + " ms, "+byteSize + " bytes]";
        }
    }

    /**
     * {@link AudioFrame} implementation holding its audio data
     * within a NIO {@link ByteBuffer}.
     */
    public static class AudioDataFrame extends AudioFrame {
        /** This frame's audio data; {@link #getByteSize()} of its remaining bytes are valid. */
        protected final ByteBuffer data;

        /**
         * Constructs a new frame w/ given data.
         * @param pts presentation timestamp in milliseconds
         * @param duration frame duration in milliseconds
         * @param bytes the audio data
         * @param byteCount size of this frame's audio data in bytes
         * @throws IllegalArgumentException if {@code byteCount} exceeds {@code bytes.remaining()}
         */
        public AudioDataFrame(final int pts, final int duration, final ByteBuffer bytes, final int byteCount) {
            super(pts, duration, byteCount);
            // Validate before publishing the reference: the declared size must fit the buffer.
            if( byteCount > bytes.remaining() ) {
                throw new IllegalArgumentException("Given size "+byteCount+" exceeds remaining bytes in "+bytes+". "+this);
            }
            this.data=bytes;
        }

        /** Get this frame's data. */
        public final ByteBuffer getData() { return data; }

        @Override
        public String toString() {
            return "AudioDataFrame[pts " + pts + " ms, l " + duration + " ms, "+byteSize + " bytes, " + data + "]";
        }
    }

    /**
     * Exclusively locks this instance for the calling thread, if implementation utilizes locking.
     * @see #unlockExclusive()
     */
    public void lockExclusive();

    /**
     * Releases the exclusive lock for the calling thread, if implementation utilizes locking.
     * @see #lockExclusive()
     */
    public void unlockExclusive();

    /**
     * Returns the preferred sample-rate by this sink.
     * <p>
     * The preferred sample-rate is guaranteed to be supported
     * and shall reflect this sink's most native format,
     * i.e. best performance w/o data conversion.
     * </p>
     */
    public int getPreferredSampleRate();

    /**
     * Flushes all queued data.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     * @see #enqueueData(int, ByteBuffer, int)
     */
    public void flush();

    /** Destroys this instance, i.e. closes all streams and devices allocated. */
    public void destroy();

    /**
     * Returns the number of allocated buffers as requested by
     * {@link #init(AudioFormat, float, int, int, int)}.
     */
    public int getFrameCount();

    /** @return the current enqueued frames count since {@link #init(AudioFormat, float, int, int, int)}. */
    public int getEnqueuedFrameCount();

    /**
     * Returns the current number of frames queued for playing.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getQueuedFrameCount();

    /**
     * Returns the current number of bytes queued for playing.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getQueuedByteCount();

    /**
     * Returns the current queued frame time in milliseconds for playing.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getQueuedTime();

    /** Return the current audio presentation timestamp (PTS) in milliseconds. */
    public int getPTS();

    /**
     * Returns the current number of frames in the sink available for writing.
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     */
    public int getFreeFrameCount();

    /**
     * Enqueue {@code byteCount} bytes of the remaining bytes of the given NIO {@link ByteBuffer} to this sink.
     * <p>
     * The data must comply with the chosen {@link AudioFormat} as passed to
     * {@link #init(AudioFormat, float, int, int, int)}.
     * </p>
     * <p>
     * {@link #init(AudioFormat, float, int, int, int)} must be called first.
     * </p>
     * @param pts presentation timestamp of the data in milliseconds
     * @param bytes the audio data
     * @param byteCount number of bytes to enqueue from {@code bytes}
     * @return the enqueued internal {@link AudioFrame}.
     */
    public AudioFrame enqueueData(int pts, ByteBuffer bytes, int byteCount);
}