aboutsummaryrefslogtreecommitdiffstats
path: root/examples
diff options
context:
space:
mode:
authorSven Gothel <[email protected]>2019-04-07 23:39:04 +0200
committerSven Gothel <[email protected]>2019-04-07 23:39:04 +0200
commit73233ce69919fc19c53ce8663c5b8cc05227f07e (patch)
treef2b6ccc1a14d7c387f33398a44ea4511d7ecb212 /examples
parent8efa4c7ba5ee8eb399d31a9884e45f743d4625ad (diff)
parent99a55c445211fea77af6ab61cbc6a6ec4fbdc9b9 (diff)
Merge branch 'v1.19' of git://repo.or.cz/openal-soft into v1.19v1.19
Diffstat (limited to 'examples')
-rw-r--r--examples/alffplay.c1533
-rw-r--r--examples/alffplay.cpp1915
-rw-r--r--examples/alhrtf.c154
-rw-r--r--examples/allatency.c100
-rw-r--r--examples/alloopback.c40
-rw-r--r--examples/almultireverb.c696
-rw-r--r--examples/alplay.c177
-rw-r--r--examples/alrecord.c394
-rw-r--r--examples/alreverb.c104
-rw-r--r--examples/alstream.c134
-rw-r--r--examples/altonegen.c99
-rw-r--r--examples/common/alhelpers.c273
-rw-r--r--examples/common/alhelpers.h36
-rw-r--r--examples/common/sdl_sound.c164
-rw-r--r--examples/common/sdl_sound.h43
15 files changed, 3614 insertions, 2248 deletions
diff --git a/examples/alffplay.c b/examples/alffplay.c
deleted file mode 100644
index 17f6d3bc..00000000
--- a/examples/alffplay.c
+++ /dev/null
@@ -1,1533 +0,0 @@
-/*
- * alffplay.c
- *
- * A pedagogical video player that really works! Now with seeking features.
- *
- * Code based on FFplay, Copyright (c) 2003 Fabrice Bellard, and a tutorial by
- * Martin Bohme <[email protected]>.
- *
- * Requires C99.
- */
-
-#include <stdio.h>
-#include <math.h>
-
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libavformat/avio.h>
-#include <libavutil/time.h>
-#include <libavutil/avstring.h>
-#include <libavutil/channel_layout.h>
-#include <libswscale/swscale.h>
-#include <libswresample/swresample.h>
-
-#include <SDL.h>
-#include <SDL_thread.h>
-#include <SDL_video.h>
-
-#include "threads.h"
-#include "bool.h"
-
-#include "AL/al.h"
-#include "AL/alc.h"
-#include "AL/alext.h"
-
-
-static bool has_latency_check = false;
-static LPALGETSOURCEDVSOFT alGetSourcedvSOFT;
-
-#define AUDIO_BUFFER_TIME 100 /* In milliseconds, per-buffer */
-#define AUDIO_BUFFER_QUEUE_SIZE 8 /* Number of buffers to queue */
-#define MAX_AUDIOQ_SIZE (5 * 16 * 1024) /* Bytes of compressed audio data to keep queued */
-#define MAX_VIDEOQ_SIZE (5 * 256 * 1024) /* Bytes of compressed video data to keep queued */
-#define AV_SYNC_THRESHOLD 0.01
-#define AV_NOSYNC_THRESHOLD 10.0
-#define SAMPLE_CORRECTION_MAX_DIFF 0.1
-#define AUDIO_DIFF_AVG_NB 20
-#define VIDEO_PICTURE_QUEUE_SIZE 16
-
-enum {
- FF_UPDATE_EVENT = SDL_USEREVENT,
- FF_REFRESH_EVENT,
- FF_QUIT_EVENT
-};
-
-
-typedef struct PacketQueue {
- AVPacketList *first_pkt, *last_pkt;
- volatile int nb_packets;
- volatile int size;
- volatile bool flushing;
- almtx_t mutex;
- alcnd_t cond;
-} PacketQueue;
-
-typedef struct VideoPicture {
- SDL_Texture *bmp;
- int width, height; /* Logical image size (actual size may be larger) */
- volatile bool updated;
- double pts;
-} VideoPicture;
-
-typedef struct AudioState {
- AVStream *st;
-
- PacketQueue q;
- AVPacket pkt;
-
- /* Used for clock difference average computation */
- double diff_accum;
- double diff_avg_coef;
- double diff_threshold;
-
- /* Time (in seconds) of the next sample to be buffered */
- double current_pts;
-
- /* Decompressed sample frame, and swresample context for conversion */
- AVFrame *decoded_aframe;
- struct SwrContext *swres_ctx;
-
- /* Conversion format, for what gets fed to OpenAL */
- int dst_ch_layout;
- enum AVSampleFormat dst_sample_fmt;
-
- /* Storage of converted samples */
- uint8_t *samples;
- ssize_t samples_len; /* In samples */
- ssize_t samples_pos;
- int samples_max;
-
- /* OpenAL format */
- ALenum format;
- ALint frame_size;
-
- ALuint source;
- ALuint buffer[AUDIO_BUFFER_QUEUE_SIZE];
- ALuint buffer_idx;
- almtx_t src_mutex;
-
- althrd_t thread;
-} AudioState;
-
-typedef struct VideoState {
- AVStream *st;
-
- PacketQueue q;
-
- double clock;
- double frame_timer;
- double frame_last_pts;
- double frame_last_delay;
- double current_pts;
- /* time (av_gettime) at which we updated current_pts - used to have running video pts */
- int64_t current_pts_time;
-
- /* Decompressed video frame, and swscale context for conversion */
- AVFrame *decoded_vframe;
- struct SwsContext *swscale_ctx;
-
- VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
- int pictq_size, pictq_rindex, pictq_windex;
- almtx_t pictq_mutex;
- alcnd_t pictq_cond;
-
- althrd_t thread;
-} VideoState;
-
-typedef struct MovieState {
- AVFormatContext *pFormatCtx;
- int videoStream, audioStream;
-
- volatile bool seek_req;
- int64_t seek_pos;
-
- int av_sync_type;
-
- int64_t external_clock_base;
-
- AudioState audio;
- VideoState video;
-
- althrd_t parse_thread;
-
- char filename[1024];
-
- volatile bool quit;
-} MovieState;
-
-enum {
- AV_SYNC_AUDIO_MASTER,
- AV_SYNC_VIDEO_MASTER,
- AV_SYNC_EXTERNAL_MASTER,
-
- DEFAULT_AV_SYNC_TYPE = AV_SYNC_EXTERNAL_MASTER
-};
-
-static AVPacket flush_pkt = { .data = (uint8_t*)"FLUSH" };
-
-static void packet_queue_init(PacketQueue *q)
-{
- memset(q, 0, sizeof(PacketQueue));
- almtx_init(&q->mutex, almtx_plain);
- alcnd_init(&q->cond);
-}
-static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
-{
- AVPacketList *pkt1;
- if(pkt != &flush_pkt && !pkt->buf && av_dup_packet(pkt) < 0)
- return -1;
-
- pkt1 = av_malloc(sizeof(AVPacketList));
- if(!pkt1) return -1;
- pkt1->pkt = *pkt;
- pkt1->next = NULL;
-
- almtx_lock(&q->mutex);
- if(!q->last_pkt)
- q->first_pkt = pkt1;
- else
- q->last_pkt->next = pkt1;
- q->last_pkt = pkt1;
- q->nb_packets++;
- q->size += pkt1->pkt.size;
- almtx_unlock(&q->mutex);
-
- alcnd_signal(&q->cond);
- return 0;
-}
-static int packet_queue_get(PacketQueue *q, AVPacket *pkt, MovieState *state)
-{
- AVPacketList *pkt1;
- int ret = -1;
-
- almtx_lock(&q->mutex);
- while(!state->quit)
- {
- pkt1 = q->first_pkt;
- if(pkt1)
- {
- q->first_pkt = pkt1->next;
- if(!q->first_pkt)
- q->last_pkt = NULL;
- q->nb_packets--;
- q->size -= pkt1->pkt.size;
- *pkt = pkt1->pkt;
- av_free(pkt1);
- ret = 1;
- break;
- }
-
- if(q->flushing)
- {
- ret = 0;
- break;
- }
- alcnd_wait(&q->cond, &q->mutex);
- }
- almtx_unlock(&q->mutex);
- return ret;
-}
-static void packet_queue_clear(PacketQueue *q)
-{
- AVPacketList *pkt, *pkt1;
-
- almtx_lock(&q->mutex);
- for(pkt = q->first_pkt;pkt != NULL;pkt = pkt1)
- {
- pkt1 = pkt->next;
- if(pkt->pkt.data != flush_pkt.data)
- av_free_packet(&pkt->pkt);
- av_freep(&pkt);
- }
- q->last_pkt = NULL;
- q->first_pkt = NULL;
- q->nb_packets = 0;
- q->size = 0;
- almtx_unlock(&q->mutex);
-}
-static void packet_queue_flush(PacketQueue *q)
-{
- almtx_lock(&q->mutex);
- q->flushing = true;
- almtx_unlock(&q->mutex);
- alcnd_signal(&q->cond);
-}
-static void packet_queue_deinit(PacketQueue *q)
-{
- packet_queue_clear(q);
- alcnd_destroy(&q->cond);
- almtx_destroy(&q->mutex);
-}
-
-
-static double get_audio_clock(AudioState *state)
-{
- double pts;
-
- almtx_lock(&state->src_mutex);
- /* The audio clock is the timestamp of the sample currently being heard.
- * It's based on 4 components:
- * 1 - The timestamp of the next sample to buffer (state->current_pts)
- * 2 - The length of the source's buffer queue (AL_SEC_LENGTH_SOFT)
- * 3 - The offset OpenAL is currently at in the source (the first value
- * from AL_SEC_OFFSET_LATENCY_SOFT)
- * 4 - The latency between OpenAL and the DAC (the second value from
- * AL_SEC_OFFSET_LATENCY_SOFT)
- *
- * Subtracting the length of the source queue from the next sample's
- * timestamp gives the timestamp of the sample at start of the source
- * queue. Adding the source offset to that results in the timestamp for
- * OpenAL's current position, and subtracting the source latency from that
- * gives the timestamp of the sample currently at the DAC.
- */
- pts = state->current_pts;
- if(state->source)
- {
- ALdouble offset[2] = { 0.0, 0.0 };
- ALdouble queue_len = 0.0;
- ALint status;
-
- /* NOTE: The source state must be checked last, in case an underrun
- * occurs and the source stops between retrieving the offset+latency
- * and getting the state. */
- if(has_latency_check)
- {
- alGetSourcedvSOFT(state->source, AL_SEC_OFFSET_LATENCY_SOFT, offset);
- alGetSourcedvSOFT(state->source, AL_SEC_LENGTH_SOFT, &queue_len);
- }
- else
- {
- ALint ioffset, ilen;
- alGetSourcei(state->source, AL_SAMPLE_OFFSET, &ioffset);
- alGetSourcei(state->source, AL_SAMPLE_LENGTH_SOFT, &ilen);
- offset[0] = (double)ioffset / state->st->codec->sample_rate;
- queue_len = (double)ilen / state->st->codec->sample_rate;
- }
- alGetSourcei(state->source, AL_SOURCE_STATE, &status);
-
- /* If the source is AL_STOPPED, then there was an underrun and all
- * buffers are processed, so ignore the source queue. The audio thread
- * will put the source into an AL_INITIAL state and clear the queue
- * when it starts recovery. */
- if(status != AL_STOPPED)
- pts = pts - queue_len + offset[0];
- if(status == AL_PLAYING)
- pts = pts - offset[1];
- }
- almtx_unlock(&state->src_mutex);
-
- return (pts >= 0.0) ? pts : 0.0;
-}
-static double get_video_clock(VideoState *state)
-{
- double delta = (av_gettime() - state->current_pts_time) / 1000000.0;
- return state->current_pts + delta;
-}
-static double get_external_clock(MovieState *movState)
-{
- return (av_gettime()-movState->external_clock_base) / 1000000.0;
-}
-
-double get_master_clock(MovieState *movState)
-{
- if(movState->av_sync_type == AV_SYNC_VIDEO_MASTER)
- return get_video_clock(&movState->video);
- if(movState->av_sync_type == AV_SYNC_AUDIO_MASTER)
- return get_audio_clock(&movState->audio);
- return get_external_clock(movState);
-}
-
-/* Return how many samples to skip to maintain sync (negative means to
- * duplicate samples). */
-static int synchronize_audio(MovieState *movState)
-{
- double diff, avg_diff;
- double ref_clock;
-
- if(movState->av_sync_type == AV_SYNC_AUDIO_MASTER)
- return 0;
-
- ref_clock = get_master_clock(movState);
- diff = ref_clock - get_audio_clock(&movState->audio);
-
- if(!(diff < AV_NOSYNC_THRESHOLD))
- {
- /* Difference is TOO big; reset diff stuff */
- movState->audio.diff_accum = 0.0;
- return 0;
- }
-
- /* Accumulate the diffs */
- movState->audio.diff_accum = movState->audio.diff_accum*movState->audio.diff_avg_coef + diff;
- avg_diff = movState->audio.diff_accum*(1.0 - movState->audio.diff_avg_coef);
- if(fabs(avg_diff) < movState->audio.diff_threshold)
- return 0;
-
- /* Constrain the per-update difference to avoid exceedingly large skips */
- if(!(diff <= SAMPLE_CORRECTION_MAX_DIFF))
- diff = SAMPLE_CORRECTION_MAX_DIFF;
- else if(!(diff >= -SAMPLE_CORRECTION_MAX_DIFF))
- diff = -SAMPLE_CORRECTION_MAX_DIFF;
- return (int)(diff*movState->audio.st->codec->sample_rate);
-}
-
-static int audio_decode_frame(MovieState *movState)
-{
- AVPacket *pkt = &movState->audio.pkt;
-
- while(!movState->quit)
- {
- while(!movState->quit && pkt->size == 0)
- {
- av_free_packet(pkt);
-
- /* Get the next packet */
- int err;
- if((err=packet_queue_get(&movState->audio.q, pkt, movState)) <= 0)
- {
- if(err == 0)
- break;
- return err;
- }
- if(pkt->data == flush_pkt.data)
- {
- avcodec_flush_buffers(movState->audio.st->codec);
- movState->audio.diff_accum = 0.0;
- movState->audio.current_pts = av_q2d(movState->audio.st->time_base)*pkt->pts;
-
- alSourceRewind(movState->audio.source);
- alSourcei(movState->audio.source, AL_BUFFER, 0);
-
- av_new_packet(pkt, 0);
-
- return -1;
- }
-
- /* If provided, update w/ pts */
- if(pkt->pts != AV_NOPTS_VALUE)
- movState->audio.current_pts = av_q2d(movState->audio.st->time_base)*pkt->pts;
- }
-
- AVFrame *frame = movState->audio.decoded_aframe;
- int got_frame = 0;
- int len1 = avcodec_decode_audio4(movState->audio.st->codec, frame,
- &got_frame, pkt);
- if(len1 < 0) break;
-
- if(len1 <= pkt->size)
- {
- /* Move the unread data to the front and clear the end bits */
- int remaining = pkt->size - len1;
- memmove(pkt->data, &pkt->data[len1], remaining);
- av_shrink_packet(pkt, remaining);
- }
-
- if(!got_frame || frame->nb_samples <= 0)
- {
- av_frame_unref(frame);
- continue;
- }
-
- if(frame->nb_samples > movState->audio.samples_max)
- {
- av_freep(&movState->audio.samples);
- av_samples_alloc(
- &movState->audio.samples, NULL, movState->audio.st->codec->channels,
- frame->nb_samples, movState->audio.dst_sample_fmt, 0
- );
- movState->audio.samples_max = frame->nb_samples;
- }
- /* Return the amount of sample frames converted */
- int data_size = swr_convert(movState->audio.swres_ctx,
- &movState->audio.samples, frame->nb_samples,
- (const uint8_t**)frame->data, frame->nb_samples
- );
-
- av_frame_unref(frame);
- return data_size;
- }
-
- return -1;
-}
-
-static int read_audio(MovieState *movState, uint8_t *samples, int length)
-{
- int sample_skip = synchronize_audio(movState);
- int audio_size = 0;
-
- /* Read the next chunk of data, refill the buffer, and queue it
- * on the source */
- length /= movState->audio.frame_size;
- while(audio_size < length)
- {
- if(movState->audio.samples_len <= 0 || movState->audio.samples_pos >= movState->audio.samples_len)
- {
- int frame_len = audio_decode_frame(movState);
- if(frame_len < 0) return -1;
-
- movState->audio.samples_len = frame_len;
- if(movState->audio.samples_len == 0)
- break;
-
- movState->audio.samples_pos = (movState->audio.samples_len < sample_skip) ?
- movState->audio.samples_len : sample_skip;
- sample_skip -= movState->audio.samples_pos;
-
- movState->audio.current_pts += (double)movState->audio.samples_pos /
- (double)movState->audio.st->codec->sample_rate;
- continue;
- }
-
- int rem = length - audio_size;
- if(movState->audio.samples_pos >= 0)
- {
- int n = movState->audio.frame_size;
- int len = movState->audio.samples_len - movState->audio.samples_pos;
- if(rem > len) rem = len;
- memcpy(samples + audio_size*n,
- movState->audio.samples + movState->audio.samples_pos*n,
- rem*n);
- }
- else
- {
- int n = movState->audio.frame_size;
- int len = -movState->audio.samples_pos;
- if(rem > len) rem = len;
-
- /* Add samples by copying the first sample */
- if(n == 1)
- {
- uint8_t sample = ((uint8_t*)movState->audio.samples)[0];
- uint8_t *q = (uint8_t*)samples + audio_size;
- for(int i = 0;i < rem;i++)
- *(q++) = sample;
- }
- else if(n == 2)
- {
- uint16_t sample = ((uint16_t*)movState->audio.samples)[0];
- uint16_t *q = (uint16_t*)samples + audio_size;
- for(int i = 0;i < rem;i++)
- *(q++) = sample;
- }
- else if(n == 4)
- {
- uint32_t sample = ((uint32_t*)movState->audio.samples)[0];
- uint32_t *q = (uint32_t*)samples + audio_size;
- for(int i = 0;i < rem;i++)
- *(q++) = sample;
- }
- else if(n == 8)
- {
- uint64_t sample = ((uint64_t*)movState->audio.samples)[0];
- uint64_t *q = (uint64_t*)samples + audio_size;
- for(int i = 0;i < rem;i++)
- *(q++) = sample;
- }
- else
- {
- uint8_t *sample = movState->audio.samples;
- uint8_t *q = samples + audio_size*n;
- for(int i = 0;i < rem;i++)
- {
- memcpy(q, sample, n);
- q += n;
- }
- }
- }
-
- movState->audio.samples_pos += rem;
- movState->audio.current_pts += (double)rem / movState->audio.st->codec->sample_rate;
- audio_size += rem;
- }
-
- return audio_size * movState->audio.frame_size;
-}
-
-static int audio_thread(void *userdata)
-{
- MovieState *movState = (MovieState*)userdata;
- uint8_t *samples = NULL;
- ALsizei buffer_len;
- ALenum fmt;
-
- alGenBuffers(AUDIO_BUFFER_QUEUE_SIZE, movState->audio.buffer);
- alGenSources(1, &movState->audio.source);
-
- alSourcei(movState->audio.source, AL_SOURCE_RELATIVE, AL_TRUE);
- alSourcei(movState->audio.source, AL_ROLLOFF_FACTOR, 0);
-
- av_new_packet(&movState->audio.pkt, 0);
-
- /* Find a suitable format for OpenAL. */
- movState->audio.format = AL_NONE;
- if(movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_U8 ||
- movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_U8P)
- {
- movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_U8;
- movState->audio.frame_size = 1;
- if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_7POINT1 &&
- alIsExtensionPresent("AL_EXT_MCFORMATS") &&
- (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
- {
- movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
- movState->audio.frame_size *= 8;
- movState->audio.format = fmt;
- }
- if((movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1 ||
- movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
- alIsExtensionPresent("AL_EXT_MCFORMATS") &&
- (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
- {
- movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
- movState->audio.frame_size *= 6;
- movState->audio.format = fmt;
- }
- if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
- {
- movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
- movState->audio.frame_size *= 1;
- movState->audio.format = AL_FORMAT_MONO8;
- }
- if(movState->audio.format == AL_NONE)
- {
- movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
- movState->audio.frame_size *= 2;
- movState->audio.format = AL_FORMAT_STEREO8;
- }
- }
- if((movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_FLT ||
- movState->audio.st->codec->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
- alIsExtensionPresent("AL_EXT_FLOAT32"))
- {
- movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_FLT;
- movState->audio.frame_size = 4;
- if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_7POINT1 &&
- alIsExtensionPresent("AL_EXT_MCFORMATS") &&
- (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
- {
- movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
- movState->audio.frame_size *= 8;
- movState->audio.format = fmt;
- }
- if((movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1 ||
- movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
- alIsExtensionPresent("AL_EXT_MCFORMATS") &&
- (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
- {
- movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
- movState->audio.frame_size *= 6;
- movState->audio.format = fmt;
- }
- if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
- {
- movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
- movState->audio.frame_size *= 1;
- movState->audio.format = AL_FORMAT_MONO_FLOAT32;
- }
- if(movState->audio.format == AL_NONE)
- {
- movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
- movState->audio.frame_size *= 2;
- movState->audio.format = AL_FORMAT_STEREO_FLOAT32;
- }
- }
- if(movState->audio.format == AL_NONE)
- {
- movState->audio.dst_sample_fmt = AV_SAMPLE_FMT_S16;
- movState->audio.frame_size = 2;
- if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_7POINT1 &&
- alIsExtensionPresent("AL_EXT_MCFORMATS") &&
- (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
- {
- movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
- movState->audio.frame_size *= 8;
- movState->audio.format = fmt;
- }
- if((movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1 ||
- movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
- alIsExtensionPresent("AL_EXT_MCFORMATS") &&
- (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
- {
- movState->audio.dst_ch_layout = movState->audio.st->codec->channel_layout;
- movState->audio.frame_size *= 6;
- movState->audio.format = fmt;
- }
- if(movState->audio.st->codec->channel_layout == AV_CH_LAYOUT_MONO)
- {
- movState->audio.dst_ch_layout = AV_CH_LAYOUT_MONO;
- movState->audio.frame_size *= 1;
- movState->audio.format = AL_FORMAT_MONO16;
- }
- if(movState->audio.format == AL_NONE)
- {
- movState->audio.dst_ch_layout = AV_CH_LAYOUT_STEREO;
- movState->audio.frame_size *= 2;
- movState->audio.format = AL_FORMAT_STEREO16;
- }
- }
- buffer_len = AUDIO_BUFFER_TIME * movState->audio.st->codec->sample_rate / 1000 *
- movState->audio.frame_size;
- samples = av_malloc(buffer_len);
-
- movState->audio.samples = NULL;
- movState->audio.samples_max = 0;
- movState->audio.samples_pos = 0;
- movState->audio.samples_len = 0;
-
- if(!(movState->audio.decoded_aframe=av_frame_alloc()))
- {
- fprintf(stderr, "Failed to allocate audio frame\n");
- goto finish;
- }
-
- movState->audio.swres_ctx = swr_alloc_set_opts(NULL,
- movState->audio.dst_ch_layout,
- movState->audio.dst_sample_fmt,
- movState->audio.st->codec->sample_rate,
- movState->audio.st->codec->channel_layout ?
- movState->audio.st->codec->channel_layout :
- av_get_default_channel_layout(movState->audio.st->codec->channels),
- movState->audio.st->codec->sample_fmt,
- movState->audio.st->codec->sample_rate,
- 0, NULL
- );
- if(!movState->audio.swres_ctx || swr_init(movState->audio.swres_ctx) != 0)
- {
- fprintf(stderr, "Failed to initialize audio converter\n");
- goto finish;
- }
-
- almtx_lock(&movState->audio.src_mutex);
- while(alGetError() == AL_NO_ERROR && !movState->quit)
- {
- /* First remove any processed buffers. */
- ALint processed;
- alGetSourcei(movState->audio.source, AL_BUFFERS_PROCESSED, &processed);
- alSourceUnqueueBuffers(movState->audio.source, processed, (ALuint[AUDIO_BUFFER_QUEUE_SIZE]){});
-
- /* Refill the buffer queue. */
- ALint queued;
- alGetSourcei(movState->audio.source, AL_BUFFERS_QUEUED, &queued);
- while(queued < AUDIO_BUFFER_QUEUE_SIZE)
- {
- int audio_size;
-
- /* Read the next chunk of data, fill the buffer, and queue it on
- * the source */
- audio_size = read_audio(movState, samples, buffer_len);
- if(audio_size < 0) break;
-
- ALuint bufid = movState->audio.buffer[movState->audio.buffer_idx++];
- movState->audio.buffer_idx %= AUDIO_BUFFER_QUEUE_SIZE;
-
- alBufferData(bufid, movState->audio.format, samples, audio_size,
- movState->audio.st->codec->sample_rate);
- alSourceQueueBuffers(movState->audio.source, 1, &bufid);
- queued++;
- }
-
- /* Check that the source is playing. */
- ALint state;
- alGetSourcei(movState->audio.source, AL_SOURCE_STATE, &state);
- if(state == AL_STOPPED)
- {
- /* AL_STOPPED means there was an underrun. Double-check that all
- * processed buffers are removed, then rewind the source to get it
- * back into an AL_INITIAL state. */
- alGetSourcei(movState->audio.source, AL_BUFFERS_PROCESSED, &processed);
- alSourceUnqueueBuffers(movState->audio.source, processed, (ALuint[AUDIO_BUFFER_QUEUE_SIZE]){});
- alSourceRewind(movState->audio.source);
- continue;
- }
-
- almtx_unlock(&movState->audio.src_mutex);
-
- /* (re)start the source if needed, and wait for a buffer to finish */
- if(state != AL_PLAYING && state != AL_PAUSED)
- {
- alGetSourcei(movState->audio.source, AL_BUFFERS_QUEUED, &queued);
- if(queued > 0) alSourcePlay(movState->audio.source);
- }
- SDL_Delay(AUDIO_BUFFER_TIME);
-
- almtx_lock(&movState->audio.src_mutex);
- }
- almtx_unlock(&movState->audio.src_mutex);
-
-finish:
- av_frame_free(&movState->audio.decoded_aframe);
- swr_free(&movState->audio.swres_ctx);
-
- av_freep(&samples);
- av_freep(&movState->audio.samples);
-
- alDeleteSources(1, &movState->audio.source);
- alDeleteBuffers(AUDIO_BUFFER_QUEUE_SIZE, movState->audio.buffer);
-
- return 0;
-}
-
-
-static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
-{
- (void)interval;
-
- SDL_PushEvent(&(SDL_Event){ .user={.type=FF_REFRESH_EVENT, .data1=opaque} });
- return 0; /* 0 means stop timer */
-}
-
-/* Schedule a video refresh in 'delay' ms */
-static void schedule_refresh(MovieState *movState, int delay)
-{
- SDL_AddTimer(delay, sdl_refresh_timer_cb, movState);
-}
-
-static void video_display(MovieState *movState, SDL_Window *screen, SDL_Renderer *renderer)
-{
- VideoPicture *vp = &movState->video.pictq[movState->video.pictq_rindex];
-
- if(!vp->bmp)
- return;
-
- float aspect_ratio;
- int win_w, win_h;
- int w, h, x, y;
-
- if(movState->video.st->codec->sample_aspect_ratio.num == 0)
- aspect_ratio = 0.0f;
- else
- {
- aspect_ratio = av_q2d(movState->video.st->codec->sample_aspect_ratio) *
- movState->video.st->codec->width /
- movState->video.st->codec->height;
- }
- if(aspect_ratio <= 0.0f)
- {
- aspect_ratio = (float)movState->video.st->codec->width /
- (float)movState->video.st->codec->height;
- }
-
- SDL_GetWindowSize(screen, &win_w, &win_h);
- h = win_h;
- w = ((int)rint(h * aspect_ratio) + 3) & ~3;
- if(w > win_w)
- {
- w = win_w;
- h = ((int)rint(w / aspect_ratio) + 3) & ~3;
- }
- x = (win_w - w) / 2;
- y = (win_h - h) / 2;
-
- SDL_RenderCopy(renderer, vp->bmp,
- &(SDL_Rect){ .x=0, .y=0, .w=vp->width, .h=vp->height },
- &(SDL_Rect){ .x=x, .y=y, .w=w, .h=h }
- );
- SDL_RenderPresent(renderer);
-}
-
-static void video_refresh_timer(MovieState *movState, SDL_Window *screen, SDL_Renderer *renderer)
-{
- if(!movState->video.st)
- {
- schedule_refresh(movState, 100);
- return;
- }
-
- almtx_lock(&movState->video.pictq_mutex);
-retry:
- if(movState->video.pictq_size == 0)
- schedule_refresh(movState, 1);
- else
- {
- VideoPicture *vp = &movState->video.pictq[movState->video.pictq_rindex];
- double actual_delay, delay, sync_threshold, ref_clock, diff;
-
- movState->video.current_pts = vp->pts;
- movState->video.current_pts_time = av_gettime();
-
- delay = vp->pts - movState->video.frame_last_pts; /* the pts from last time */
- if(delay <= 0 || delay >= 1.0)
- {
- /* if incorrect delay, use previous one */
- delay = movState->video.frame_last_delay;
- }
- /* save for next time */
- movState->video.frame_last_delay = delay;
- movState->video.frame_last_pts = vp->pts;
-
- /* Update delay to sync to clock if not master source. */
- if(movState->av_sync_type != AV_SYNC_VIDEO_MASTER)
- {
- ref_clock = get_master_clock(movState);
- diff = vp->pts - ref_clock;
-
- /* Skip or repeat the frame. Take delay into account. */
- sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
- if(fabs(diff) < AV_NOSYNC_THRESHOLD)
- {
- if(diff <= -sync_threshold)
- delay = 0;
- else if(diff >= sync_threshold)
- delay = 2 * delay;
- }
- }
-
- movState->video.frame_timer += delay;
- /* Compute the REAL delay. */
- actual_delay = movState->video.frame_timer - (av_gettime() / 1000000.0);
- if(!(actual_delay >= 0.010))
- {
- /* We don't have time to handle this picture, just skip to the next one. */
- movState->video.pictq_rindex = (movState->video.pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE;
- movState->video.pictq_size--;
- alcnd_signal(&movState->video.pictq_cond);
- goto retry;
- }
- schedule_refresh(movState, (int)(actual_delay*1000.0 + 0.5));
-
- /* Show the picture! */
- video_display(movState, screen, renderer);
-
- /* Update queue for next picture. */
- movState->video.pictq_rindex = (movState->video.pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE;
- movState->video.pictq_size--;
- alcnd_signal(&movState->video.pictq_cond);
- }
- almtx_unlock(&movState->video.pictq_mutex);
-}
-
-
-static void update_picture(MovieState *movState, bool *first_update, SDL_Window *screen, SDL_Renderer *renderer)
-{
- VideoPicture *vp = &movState->video.pictq[movState->video.pictq_windex];
-
- /* allocate or resize the buffer! */
- if(!vp->bmp || vp->width != movState->video.st->codec->width ||
- vp->height != movState->video.st->codec->height)
- {
- if(vp->bmp)
- SDL_DestroyTexture(vp->bmp);
- vp->bmp = SDL_CreateTexture(
- renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
- movState->video.st->codec->coded_width, movState->video.st->codec->coded_height
- );
- if(!vp->bmp)
- fprintf(stderr, "Failed to create YV12 texture!\n");
- vp->width = movState->video.st->codec->width;
- vp->height = movState->video.st->codec->height;
-
- if(*first_update && vp->width > 0 && vp->height > 0)
- {
- /* For the first update, set the window size to the video size. */
- *first_update = false;
-
- int w = vp->width;
- int h = vp->height;
- if(movState->video.st->codec->sample_aspect_ratio.num != 0 &&
- movState->video.st->codec->sample_aspect_ratio.den != 0)
- {
- double aspect_ratio = av_q2d(movState->video.st->codec->sample_aspect_ratio);
- if(aspect_ratio >= 1.0)
- w = (int)(w*aspect_ratio + 0.5);
- else if(aspect_ratio > 0.0)
- h = (int)(h/aspect_ratio + 0.5);
- }
- SDL_SetWindowSize(screen, w, h);
- }
- }
-
- if(vp->bmp)
- {
- AVFrame *frame = movState->video.decoded_vframe;
- void *pixels = NULL;
- int pitch = 0;
-
- if(movState->video.st->codec->pix_fmt == PIX_FMT_YUV420P)
- SDL_UpdateYUVTexture(vp->bmp, NULL,
- frame->data[0], frame->linesize[0],
- frame->data[1], frame->linesize[1],
- frame->data[2], frame->linesize[2]
- );
- else if(SDL_LockTexture(vp->bmp, NULL, &pixels, &pitch) != 0)
- fprintf(stderr, "Failed to lock texture\n");
- else
- {
- // Convert the image into YUV format that SDL uses
- int coded_w = movState->video.st->codec->coded_width;
- int coded_h = movState->video.st->codec->coded_height;
- int w = movState->video.st->codec->width;
- int h = movState->video.st->codec->height;
- if(!movState->video.swscale_ctx)
- movState->video.swscale_ctx = sws_getContext(
- w, h, movState->video.st->codec->pix_fmt,
- w, h, PIX_FMT_YUV420P, SWS_X, NULL, NULL, NULL
- );
-
- /* point pict at the queue */
- AVPicture pict;
- pict.data[0] = pixels;
- pict.data[2] = pict.data[0] + coded_w*coded_h;
- pict.data[1] = pict.data[2] + coded_w*coded_h/4;
-
- pict.linesize[0] = pitch;
- pict.linesize[2] = pitch / 2;
- pict.linesize[1] = pitch / 2;
-
- sws_scale(movState->video.swscale_ctx, (const uint8_t**)frame->data,
- frame->linesize, 0, h, pict.data, pict.linesize);
- SDL_UnlockTexture(vp->bmp);
- }
- }
-
- almtx_lock(&movState->video.pictq_mutex);
- vp->updated = true;
- almtx_unlock(&movState->video.pictq_mutex);
- alcnd_signal(&movState->video.pictq_cond);
-}
-
-static int queue_picture(MovieState *movState, double pts)
-{
- /* Wait until we have space for a new pic */
- almtx_lock(&movState->video.pictq_mutex);
- while(movState->video.pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !movState->quit)
- alcnd_wait(&movState->video.pictq_cond, &movState->video.pictq_mutex);
- almtx_unlock(&movState->video.pictq_mutex);
-
- if(movState->quit)
- return -1;
-
- VideoPicture *vp = &movState->video.pictq[movState->video.pictq_windex];
-
- /* We have to create/update the picture in the main thread */
- vp->updated = false;
- SDL_PushEvent(&(SDL_Event){ .user={.type=FF_UPDATE_EVENT, .data1=movState} });
-
- /* Wait until the picture is updated. */
- almtx_lock(&movState->video.pictq_mutex);
- while(!vp->updated && !movState->quit)
- alcnd_wait(&movState->video.pictq_cond, &movState->video.pictq_mutex);
- almtx_unlock(&movState->video.pictq_mutex);
- if(movState->quit)
- return -1;
- vp->pts = pts;
-
- movState->video.pictq_windex = (movState->video.pictq_windex+1)%VIDEO_PICTURE_QUEUE_SIZE;
- almtx_lock(&movState->video.pictq_mutex);
- movState->video.pictq_size++;
- almtx_unlock(&movState->video.pictq_mutex);
-
- return 0;
-}
-
-static double synchronize_video(MovieState *movState, double pts)
-{
- double frame_delay;
-
- if(pts == 0.0) /* if we aren't given a pts, set it to the clock */
- pts = movState->video.clock;
- else /* if we have pts, set video clock to it */
- movState->video.clock = pts;
-
- /* update the video clock */
- frame_delay = av_q2d(movState->video.st->codec->time_base);
- /* if we are repeating a frame, adjust clock accordingly */
- frame_delay += movState->video.decoded_vframe->repeat_pict * (frame_delay * 0.5);
- movState->video.clock += frame_delay;
- return pts;
-}
-
-int video_thread(void *arg)
-{
- MovieState *movState = (MovieState*)arg;
- AVPacket *packet = (AVPacket[1]){};
- int64_t saved_pts, pkt_pts;
- int frameFinished;
-
- movState->video.decoded_vframe = av_frame_alloc();
- while(packet_queue_get(&movState->video.q, packet, movState) >= 0)
- {
- if(packet->data == flush_pkt.data)
- {
- avcodec_flush_buffers(movState->video.st->codec);
-
- almtx_lock(&movState->video.pictq_mutex);
- movState->video.pictq_size = 0;
- movState->video.pictq_rindex = 0;
- movState->video.pictq_windex = 0;
- almtx_unlock(&movState->video.pictq_mutex);
-
- movState->video.clock = av_q2d(movState->video.st->time_base)*packet->pts;
- movState->video.current_pts = movState->video.clock;
- movState->video.current_pts_time = av_gettime();
- continue;
- }
-
- pkt_pts = packet->pts;
-
- /* Decode video frame */
- avcodec_decode_video2(movState->video.st->codec, movState->video.decoded_vframe,
- &frameFinished, packet);
- if(pkt_pts != AV_NOPTS_VALUE && !movState->video.decoded_vframe->opaque)
- {
- /* Store the packet's original pts in the frame, in case the frame
- * is not finished decoding yet. */
- saved_pts = pkt_pts;
- movState->video.decoded_vframe->opaque = &saved_pts;
- }
-
- av_free_packet(packet);
-
- if(frameFinished)
- {
- double pts = av_q2d(movState->video.st->time_base);
- if(packet->dts != AV_NOPTS_VALUE)
- pts *= packet->dts;
- else if(movState->video.decoded_vframe->opaque)
- pts *= *(int64_t*)movState->video.decoded_vframe->opaque;
- else
- pts *= 0.0;
- movState->video.decoded_vframe->opaque = NULL;
-
- pts = synchronize_video(movState, pts);
- if(queue_picture(movState, pts) < 0)
- break;
- }
- }
-
- sws_freeContext(movState->video.swscale_ctx);
- movState->video.swscale_ctx = NULL;
- av_frame_free(&movState->video.decoded_vframe);
- return 0;
-}
-
-
-static int stream_component_open(MovieState *movState, int stream_index)
-{
- AVFormatContext *pFormatCtx = movState->pFormatCtx;
- AVCodecContext *codecCtx;
- AVCodec *codec;
-
- if(stream_index < 0 || (unsigned int)stream_index >= pFormatCtx->nb_streams)
- return -1;
-
- /* Get a pointer to the codec context for the video stream, and open the
- * associated codec */
- codecCtx = pFormatCtx->streams[stream_index]->codec;
-
- codec = avcodec_find_decoder(codecCtx->codec_id);
- if(!codec || avcodec_open2(codecCtx, codec, NULL) < 0)
- {
- fprintf(stderr, "Unsupported codec!\n");
- return -1;
- }
-
- /* Initialize and start the media type handler */
- switch(codecCtx->codec_type)
- {
- case AVMEDIA_TYPE_AUDIO:
- movState->audioStream = stream_index;
- movState->audio.st = pFormatCtx->streams[stream_index];
-
- /* Averaging filter for audio sync */
- movState->audio.diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
- /* Correct audio only if larger error than this */
- movState->audio.diff_threshold = 2.0 * 0.050/* 50 ms */;
-
- memset(&movState->audio.pkt, 0, sizeof(movState->audio.pkt));
- if(althrd_create(&movState->audio.thread, audio_thread, movState) != althrd_success)
- {
- movState->audioStream = -1;
- movState->audio.st = NULL;
- }
- break;
-
- case AVMEDIA_TYPE_VIDEO:
- movState->videoStream = stream_index;
- movState->video.st = pFormatCtx->streams[stream_index];
-
- movState->video.current_pts_time = av_gettime();
- movState->video.frame_timer = (double)movState->video.current_pts_time /
- 1000000.0;
- movState->video.frame_last_delay = 40e-3;
-
- if(althrd_create(&movState->video.thread, video_thread, movState) != althrd_success)
- {
- movState->videoStream = -1;
- movState->video.st = NULL;
- }
- break;
-
- default:
- break;
- }
-
- return 0;
-}
-
-static int decode_interrupt_cb(void *ctx)
-{
- return ((MovieState*)ctx)->quit;
-}
-
-int decode_thread(void *arg)
-{
- MovieState *movState = (MovieState *)arg;
- AVFormatContext *fmtCtx = movState->pFormatCtx;
- AVPacket *packet = (AVPacket[1]){};
- int video_index = -1;
- int audio_index = -1;
-
- movState->videoStream = -1;
- movState->audioStream = -1;
-
- /* Dump information about file onto standard error */
- av_dump_format(fmtCtx, 0, movState->filename, 0);
-
- /* Find the first video and audio streams */
- for(unsigned int i = 0;i < fmtCtx->nb_streams;i++)
- {
- if(fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
- video_index = i;
- else if(fmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
- audio_index = i;
- }
- movState->external_clock_base = av_gettime();
- if(audio_index >= 0)
- stream_component_open(movState, audio_index);
- if(video_index >= 0)
- stream_component_open(movState, video_index);
-
- if(movState->videoStream < 0 && movState->audioStream < 0)
- {
- fprintf(stderr, "%s: could not open codecs\n", movState->filename);
- goto fail;
- }
-
- /* Main packet handling loop */
- while(!movState->quit)
- {
- if(movState->seek_req)
- {
- int64_t seek_target = movState->seek_pos;
- int stream_index= -1;
-
- /* Prefer seeking on the video stream. */
- if(movState->videoStream >= 0)
- stream_index = movState->videoStream;
- else if(movState->audioStream >= 0)
- stream_index = movState->audioStream;
-
- /* Get a seek timestamp for the appropriate stream. */
- int64_t timestamp = seek_target;
- if(stream_index >= 0)
- timestamp = av_rescale_q(seek_target, AV_TIME_BASE_Q, fmtCtx->streams[stream_index]->time_base);
-
- if(av_seek_frame(movState->pFormatCtx, stream_index, timestamp, 0) < 0)
- fprintf(stderr, "%s: error while seeking\n", movState->pFormatCtx->filename);
- else
- {
- /* Seek successful, clear the packet queues and send a special
- * 'flush' packet with the new stream clock time. */
- if(movState->audioStream >= 0)
- {
- packet_queue_clear(&movState->audio.q);
- flush_pkt.pts = av_rescale_q(seek_target, AV_TIME_BASE_Q,
- fmtCtx->streams[movState->audioStream]->time_base
- );
- packet_queue_put(&movState->audio.q, &flush_pkt);
- }
- if(movState->videoStream >= 0)
- {
- packet_queue_clear(&movState->video.q);
- flush_pkt.pts = av_rescale_q(seek_target, AV_TIME_BASE_Q,
- fmtCtx->streams[movState->videoStream]->time_base
- );
- packet_queue_put(&movState->video.q, &flush_pkt);
- }
- movState->external_clock_base = av_gettime() - seek_target;
- }
- movState->seek_req = false;
- }
-
- if(movState->audio.q.size >= MAX_AUDIOQ_SIZE ||
- movState->video.q.size >= MAX_VIDEOQ_SIZE)
- {
- SDL_Delay(10);
- continue;
- }
-
- if(av_read_frame(movState->pFormatCtx, packet) < 0)
- {
- packet_queue_flush(&movState->video.q);
- packet_queue_flush(&movState->audio.q);
- break;
- }
-
- /* Place the packet in the queue it's meant for, or discard it. */
- if(packet->stream_index == movState->videoStream)
- packet_queue_put(&movState->video.q, packet);
- else if(packet->stream_index == movState->audioStream)
- packet_queue_put(&movState->audio.q, packet);
- else
- av_free_packet(packet);
- }
-
- /* all done - wait for it */
- while(!movState->quit)
- {
- if(movState->audio.q.nb_packets == 0 && movState->video.q.nb_packets == 0)
- break;
- SDL_Delay(100);
- }
-
-fail:
- movState->quit = true;
- packet_queue_flush(&movState->video.q);
- packet_queue_flush(&movState->audio.q);
-
- if(movState->videoStream >= 0)
- althrd_join(movState->video.thread, NULL);
- if(movState->audioStream >= 0)
- althrd_join(movState->audio.thread, NULL);
-
- SDL_PushEvent(&(SDL_Event){ .user={.type=FF_QUIT_EVENT, .data1=movState} });
-
- return 0;
-}
-
-
-static void stream_seek(MovieState *movState, double incr)
-{
- if(!movState->seek_req)
- {
- double newtime = get_master_clock(movState)+incr;
- if(newtime <= 0.0) movState->seek_pos = 0;
- else movState->seek_pos = (int64_t)(newtime * AV_TIME_BASE);
- movState->seek_req = true;
- }
-}
-
-int main(int argc, char *argv[])
-{
- SDL_Event event;
- MovieState *movState;
- bool first_update = true;
- SDL_Window *screen;
- SDL_Renderer *renderer;
- ALCdevice *device;
- ALCcontext *context;
-
- if(argc < 2)
- {
- fprintf(stderr, "Usage: %s <file>\n", argv[0]);
- return 1;
- }
- /* Register all formats and codecs */
- av_register_all();
- /* Initialize networking protocols */
- avformat_network_init();
-
- if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
- {
- fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
- return 1;
- }
-
- /* Make a window to put our video */
- screen = SDL_CreateWindow("alffplay", 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
- if(!screen)
- {
- fprintf(stderr, "SDL: could not set video mode - exiting\n");
- return 1;
- }
- /* Make a renderer to handle the texture image surface and rendering. */
- renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED);
- if(renderer)
- {
- SDL_RendererInfo rinf;
- bool ok = false;
-
- /* Make sure the renderer supports YV12 textures. If not, fallback to a
- * software renderer. */
- if(SDL_GetRendererInfo(renderer, &rinf) == 0)
- {
- for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
- ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_YV12);
- }
- if(!ok)
- {
- fprintf(stderr, "YV12 pixelformat textures not supported on renderer %s\n", rinf.name);
- SDL_DestroyRenderer(renderer);
- renderer = NULL;
- }
- }
- if(!renderer)
- renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_SOFTWARE);
- if(!renderer)
- {
- fprintf(stderr, "SDL: could not create renderer - exiting\n");
- return 1;
- }
- SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
- SDL_RenderFillRect(renderer, NULL);
- SDL_RenderPresent(renderer);
-
- /* Open an audio device */
- device = alcOpenDevice(NULL);
- if(!device)
- {
- fprintf(stderr, "OpenAL: could not open device - exiting\n");
- return 1;
- }
- context = alcCreateContext(device, NULL);
- if(!context)
- {
- fprintf(stderr, "OpenAL: could not create context - exiting\n");
- return 1;
- }
- if(alcMakeContextCurrent(context) == ALC_FALSE)
- {
- fprintf(stderr, "OpenAL: could not make context current - exiting\n");
- return 1;
- }
-
- if(!alIsExtensionPresent("AL_SOFT_source_length"))
- {
- fprintf(stderr, "Required AL_SOFT_source_length not supported - exiting\n");
- return 1;
- }
-
- if(!alIsExtensionPresent("AL_SOFT_source_latency"))
- fprintf(stderr, "AL_SOFT_source_latency not supported, audio may be a bit laggy.\n");
- else
- {
- alGetSourcedvSOFT = alGetProcAddress("alGetSourcedvSOFT");
- has_latency_check = true;
- }
-
-
- movState = av_mallocz(sizeof(MovieState));
-
- av_strlcpy(movState->filename, argv[1], sizeof(movState->filename));
-
- packet_queue_init(&movState->audio.q);
- packet_queue_init(&movState->video.q);
-
- almtx_init(&movState->video.pictq_mutex, almtx_plain);
- alcnd_init(&movState->video.pictq_cond);
- almtx_init(&movState->audio.src_mutex, almtx_recursive);
-
- movState->av_sync_type = DEFAULT_AV_SYNC_TYPE;
-
- movState->pFormatCtx = avformat_alloc_context();
- movState->pFormatCtx->interrupt_callback = (AVIOInterruptCB){.callback=decode_interrupt_cb, .opaque=movState};
-
- if(avio_open2(&movState->pFormatCtx->pb, movState->filename, AVIO_FLAG_READ,
- &movState->pFormatCtx->interrupt_callback, NULL))
- {
- fprintf(stderr, "Failed to open %s\n", movState->filename);
- return 1;
- }
-
- /* Open movie file */
- if(avformat_open_input(&movState->pFormatCtx, movState->filename, NULL, NULL) != 0)
- {
- fprintf(stderr, "Failed to open %s\n", movState->filename);
- return 1;
- }
-
- /* Retrieve stream information */
- if(avformat_find_stream_info(movState->pFormatCtx, NULL) < 0)
- {
- fprintf(stderr, "%s: failed to find stream info\n", movState->filename);
- return 1;
- }
-
- schedule_refresh(movState, 40);
-
-
- if(althrd_create(&movState->parse_thread, decode_thread, movState) != althrd_success)
- {
- fprintf(stderr, "Failed to create parse thread!\n");
- return 1;
- }
- while(SDL_WaitEvent(&event) == 1)
- {
- switch(event.type)
- {
- case SDL_KEYDOWN:
- switch(event.key.keysym.sym)
- {
- case SDLK_ESCAPE:
- movState->quit = true;
- break;
-
- case SDLK_LEFT:
- stream_seek(movState, -10.0);
- break;
- case SDLK_RIGHT:
- stream_seek(movState, 10.0);
- break;
- case SDLK_UP:
- stream_seek(movState, 30.0);
- break;
- case SDLK_DOWN:
- stream_seek(movState, -30.0);
- break;
-
- default:
- break;
- }
- break;
-
- case SDL_WINDOWEVENT:
- switch(event.window.event)
- {
- case SDL_WINDOWEVENT_RESIZED:
- SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
- SDL_RenderFillRect(renderer, NULL);
- break;
-
- default:
- break;
- }
- break;
-
- case SDL_QUIT:
- movState->quit = true;
- break;
-
- case FF_UPDATE_EVENT:
- update_picture(event.user.data1, &first_update, screen, renderer);
- break;
-
- case FF_REFRESH_EVENT:
- video_refresh_timer(event.user.data1, screen, renderer);
- break;
-
- case FF_QUIT_EVENT:
- althrd_join(movState->parse_thread, NULL);
-
- avformat_close_input(&movState->pFormatCtx);
-
- almtx_destroy(&movState->audio.src_mutex);
- almtx_destroy(&movState->video.pictq_mutex);
- alcnd_destroy(&movState->video.pictq_cond);
- packet_queue_deinit(&movState->video.q);
- packet_queue_deinit(&movState->audio.q);
-
- alcMakeContextCurrent(NULL);
- alcDestroyContext(context);
- alcCloseDevice(device);
-
- SDL_Quit();
- exit(0);
-
- default:
- break;
- }
- }
-
- fprintf(stderr, "SDL_WaitEvent error - %s\n", SDL_GetError());
- return 1;
-}
diff --git a/examples/alffplay.cpp b/examples/alffplay.cpp
new file mode 100644
index 00000000..27520a6d
--- /dev/null
+++ b/examples/alffplay.cpp
@@ -0,0 +1,1915 @@
+/*
+ * An example showing how to play a stream sync'd to video, using ffmpeg.
+ *
+ * Requires C++11.
+ */
+
+#include <condition_variable>
+#include <functional>
+#include <algorithm>
+#include <iostream>
+#include <iomanip>
+#include <cstring>
+#include <limits>
+#include <thread>
+#include <chrono>
+#include <atomic>
+#include <vector>
+#include <mutex>
+#include <deque>
+#include <array>
+#include <cmath>
+#include <string>
+
+extern "C" {
+#include "libavcodec/avcodec.h"
+#include "libavformat/avformat.h"
+#include "libavformat/avio.h"
+#include "libavutil/time.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libswscale/swscale.h"
+#include "libswresample/swresample.h"
+}
+
+#include "SDL.h"
+
+#include "AL/alc.h"
+#include "AL/al.h"
+#include "AL/alext.h"
+
+#include "common/alhelpers.h"
+
+extern "C" {
+/* Undefine this to disable use of experimental extensions. Don't use for
+ * production code! Interfaces and behavior may change prior to being
+ * finalized.
+ */
+#define ALLOW_EXPERIMENTAL_EXTS
+
+#ifdef ALLOW_EXPERIMENTAL_EXTS
+#ifndef AL_SOFT_map_buffer
+#define AL_SOFT_map_buffer 1
+typedef unsigned int ALbitfieldSOFT;
+#define AL_MAP_READ_BIT_SOFT 0x00000001
+#define AL_MAP_WRITE_BIT_SOFT 0x00000002
+#define AL_MAP_PERSISTENT_BIT_SOFT 0x00000004
+#define AL_PRESERVE_DATA_BIT_SOFT 0x00000008
+typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
+typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
+typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
+typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length);
+#endif
+
+#ifndef AL_SOFT_events
+#define AL_SOFT_events 1
+#define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x1220
+#define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x1221
+#define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x1222
+#define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x1223
+#define AL_EVENT_TYPE_ERROR_SOFT 0x1224
+#define AL_EVENT_TYPE_PERFORMANCE_SOFT 0x1225
+#define AL_EVENT_TYPE_DEPRECATED_SOFT 0x1226
+#define AL_EVENT_TYPE_DISCONNECTED_SOFT 0x1227
+typedef void (AL_APIENTRY*ALEVENTPROCSOFT)(ALenum eventType, ALuint object, ALuint param,
+ ALsizei length, const ALchar *message,
+ void *userParam);
+typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count, const ALenum *types, ALboolean enable);
+typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback, void *userParam);
+typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname);
+typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
+#endif
+#endif /* ALLOW_EXPERIMENTAL_EXTS */
+}
+
+namespace {
+
+#ifndef M_PI
+#define M_PI (3.14159265358979323846)
+#endif
+
+using nanoseconds = std::chrono::nanoseconds;
+using microseconds = std::chrono::microseconds;
+using milliseconds = std::chrono::milliseconds;
+using seconds = std::chrono::seconds;
+using seconds_d64 = std::chrono::duration<double>;
+
+const std::string AppName("alffplay");
+
+bool EnableDirectOut = false;
+bool EnableWideStereo = false;
+LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
+LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
+
+#ifdef AL_SOFT_map_buffer
+LPALBUFFERSTORAGESOFT alBufferStorageSOFT;
+LPALMAPBUFFERSOFT alMapBufferSOFT;
+LPALUNMAPBUFFERSOFT alUnmapBufferSOFT;
+#endif
+
+#ifdef AL_SOFT_events
+LPALEVENTCONTROLSOFT alEventControlSOFT;
+LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
+#endif
+
+const seconds AVNoSyncThreshold(10);
+
+const milliseconds VideoSyncThreshold(10);
+#define VIDEO_PICTURE_QUEUE_SIZE 16
+
+const seconds_d64 AudioSyncThreshold(0.03);
+const milliseconds AudioSampleCorrectionMax(50);
+/* Averaging filter coefficient for audio sync. */
+#define AUDIO_DIFF_AVG_NB 20
+const double AudioAvgFilterCoeff = std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB);
+/* Per-buffer size, in time */
+const milliseconds AudioBufferTime(20);
+/* Buffer total size, in time (should be divisible by the buffer time) */
+const milliseconds AudioBufferTotalTime(800);
+
+#define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
+
+enum {
+ FF_UPDATE_EVENT = SDL_USEREVENT,
+ FF_REFRESH_EVENT,
+ FF_MOVIE_DONE_EVENT
+};
+
+enum class SyncMaster {
+ Audio,
+ Video,
+ External,
+
+ Default = External
+};
+
+
+inline microseconds get_avtime()
+{ return microseconds(av_gettime()); }
+
+/* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
+struct AVIOContextDeleter {
+ void operator()(AVIOContext *ptr) { avio_closep(&ptr); }
+};
+using AVIOContextPtr = std::unique_ptr<AVIOContext,AVIOContextDeleter>;
+
+struct AVFormatCtxDeleter {
+ void operator()(AVFormatContext *ptr) { avformat_close_input(&ptr); }
+};
+using AVFormatCtxPtr = std::unique_ptr<AVFormatContext,AVFormatCtxDeleter>;
+
+struct AVCodecCtxDeleter {
+ void operator()(AVCodecContext *ptr) { avcodec_free_context(&ptr); }
+};
+using AVCodecCtxPtr = std::unique_ptr<AVCodecContext,AVCodecCtxDeleter>;
+
+struct AVFrameDeleter {
+ void operator()(AVFrame *ptr) { av_frame_free(&ptr); }
+};
+using AVFramePtr = std::unique_ptr<AVFrame,AVFrameDeleter>;
+
+struct SwrContextDeleter {
+ void operator()(SwrContext *ptr) { swr_free(&ptr); }
+};
+using SwrContextPtr = std::unique_ptr<SwrContext,SwrContextDeleter>;
+
+struct SwsContextDeleter {
+ void operator()(SwsContext *ptr) { sws_freeContext(ptr); }
+};
+using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
+
+
+class PacketQueue {
+ std::deque<AVPacket> mPackets;
+ size_t mTotalSize{0};
+
+public:
+ ~PacketQueue() { clear(); }
+
+ bool empty() const noexcept { return mPackets.empty(); }
+ size_t totalSize() const noexcept { return mTotalSize; }
+
+ void put(const AVPacket *pkt)
+ {
+ mPackets.push_back(AVPacket{});
+ if(av_packet_ref(&mPackets.back(), pkt) != 0)
+ mPackets.pop_back();
+ else
+ mTotalSize += mPackets.back().size;
+ }
+
+ AVPacket *front() noexcept
+ { return &mPackets.front(); }
+
+ void pop()
+ {
+ AVPacket *pkt = &mPackets.front();
+ mTotalSize -= pkt->size;
+ av_packet_unref(pkt);
+ mPackets.pop_front();
+ }
+
+ void clear()
+ {
+ for(AVPacket &pkt : mPackets)
+ av_packet_unref(&pkt);
+ mPackets.clear();
+ mTotalSize = 0;
+ }
+};
+
+
+struct MovieState;
+
+struct AudioState {
+ MovieState &mMovie;
+
+ AVStream *mStream{nullptr};
+ AVCodecCtxPtr mCodecCtx;
+
+ std::mutex mQueueMtx;
+ std::condition_variable mQueueCond;
+
+ /* Used for clock difference average computation */
+ seconds_d64 mClockDiffAvg{0};
+
+ /* Time of the next sample to be buffered */
+ nanoseconds mCurrentPts{0};
+
+ /* Device clock time that the stream started at. */
+ nanoseconds mDeviceStartTime{nanoseconds::min()};
+
+ /* Decompressed sample frame, and swresample context for conversion */
+ AVFramePtr mDecodedFrame;
+ SwrContextPtr mSwresCtx;
+
+ /* Conversion format, for what gets fed to OpenAL */
+ int mDstChanLayout{0};
+ AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};
+
+ /* Storage of converted samples */
+ uint8_t *mSamples{nullptr};
+ int mSamplesLen{0}; /* In samples */
+ int mSamplesPos{0};
+ int mSamplesMax{0};
+
+ /* OpenAL format */
+ ALenum mFormat{AL_NONE};
+ ALsizei mFrameSize{0};
+
+ std::mutex mSrcMutex;
+ std::condition_variable mSrcCond;
+ std::atomic_flag mConnected;
+ ALuint mSource{0};
+ std::vector<ALuint> mBuffers;
+ ALsizei mBufferIdx{0};
+
+ AudioState(MovieState &movie) : mMovie(movie)
+ { mConnected.test_and_set(std::memory_order_relaxed); }
+ ~AudioState()
+ {
+ if(mSource)
+ alDeleteSources(1, &mSource);
+ if(!mBuffers.empty())
+ alDeleteBuffers(mBuffers.size(), mBuffers.data());
+
+ av_freep(&mSamples);
+ }
+
+#ifdef AL_SOFT_events
+ static void AL_APIENTRY EventCallback(ALenum eventType, ALuint object, ALuint param,
+ ALsizei length, const ALchar *message,
+ void *userParam);
+#endif
+
+ nanoseconds getClockNoLock();
+ nanoseconds getClock()
+ {
+ std::lock_guard<std::mutex> lock(mSrcMutex);
+ return getClockNoLock();
+ }
+
+ bool isBufferFilled();
+ void startPlayback();
+
+ int getSync();
+ int decodeFrame();
+ bool readAudio(uint8_t *samples, int length);
+
+ int handler();
+};
+
+struct VideoState {
+ MovieState &mMovie;
+
+ AVStream *mStream{nullptr};
+ AVCodecCtxPtr mCodecCtx;
+
+ std::mutex mQueueMtx;
+ std::condition_variable mQueueCond;
+
+ nanoseconds mClock{0};
+ nanoseconds mFrameTimer{0};
+ nanoseconds mFrameLastPts{0};
+ nanoseconds mFrameLastDelay{0};
+ nanoseconds mCurrentPts{0};
+ /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
+ microseconds mCurrentPtsTime{0};
+
+ /* Decompressed video frame, and swscale context for conversion */
+ AVFramePtr mDecodedFrame;
+ SwsContextPtr mSwscaleCtx;
+
+ struct Picture {
+ SDL_Texture *mImage{nullptr};
+ int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
+ std::atomic<bool> mUpdated{false};
+ nanoseconds mPts{0};
+
+ ~Picture()
+ {
+ if(mImage)
+ SDL_DestroyTexture(mImage);
+ mImage = nullptr;
+ }
+ };
+ std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
+ size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0};
+ std::mutex mPictQMutex;
+ std::condition_variable mPictQCond;
+ bool mFirstUpdate{true};
+ std::atomic<bool> mEOS{false};
+ std::atomic<bool> mFinalUpdate{false};
+
+ VideoState(MovieState &movie) : mMovie(movie) { }
+
+ nanoseconds getClock();
+ bool isBufferFilled();
+
+ static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
+ void schedRefresh(milliseconds delay);
+ void display(SDL_Window *screen, SDL_Renderer *renderer);
+ void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
+ void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
+ int queuePicture(nanoseconds pts);
+ int handler();
+};
+
+struct MovieState {
+ AVIOContextPtr mIOContext;
+ AVFormatCtxPtr mFormatCtx;
+
+ SyncMaster mAVSyncType{SyncMaster::Default};
+
+ microseconds mClockBase{0};
+ std::atomic<bool> mPlaying{false};
+
+ std::mutex mSendMtx;
+ std::condition_variable mSendCond;
+ /* NOTE: false/clear = need data, true/set = no data needed */
+ std::atomic_flag mSendDataGood;
+
+ std::atomic<bool> mQuit{false};
+
+ AudioState mAudio;
+ VideoState mVideo;
+
+ std::thread mParseThread;
+ std::thread mAudioThread;
+ std::thread mVideoThread;
+
+ std::string mFilename;
+
+ MovieState(std::string fname)
+ : mAudio(*this), mVideo(*this), mFilename(std::move(fname))
+ { }
+ ~MovieState()
+ {
+ mQuit = true;
+ if(mParseThread.joinable())
+ mParseThread.join();
+ }
+
+ static int decode_interrupt_cb(void *ctx);
+ bool prepare();
+ void setTitle(SDL_Window *window);
+
+ nanoseconds getClock();
+
+ nanoseconds getMasterClock();
+
+ nanoseconds getDuration();
+
+ int streamComponentOpen(int stream_index);
+ int parse_handler();
+};
+
+
+nanoseconds AudioState::getClockNoLock()
+{
+ // The audio clock is the timestamp of the sample currently being heard.
+ if(alcGetInteger64vSOFT)
+ {
+ // If device start time = min, we aren't playing yet.
+ if(mDeviceStartTime == nanoseconds::min())
+ return nanoseconds::zero();
+
+ // Get the current device clock time and latency.
+ auto device = alcGetContextsDevice(alcGetCurrentContext());
+ ALCint64SOFT devtimes[2] = {0,0};
+ alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
+ auto latency = nanoseconds(devtimes[1]);
+ auto device_time = nanoseconds(devtimes[0]);
+
+ // The clock is simply the current device time relative to the recorded
+ // start time. We can also subtract the latency to get more a accurate
+ // position of where the audio device actually is in the output stream.
+ return device_time - mDeviceStartTime - latency;
+ }
+
+ /* The source-based clock is based on 4 components:
+ * 1 - The timestamp of the next sample to buffer (mCurrentPts)
+ * 2 - The length of the source's buffer queue
+ * (AudioBufferTime*AL_BUFFERS_QUEUED)
+ * 3 - The offset OpenAL is currently at in the source (the first value
+ * from AL_SAMPLE_OFFSET_LATENCY_SOFT)
+ * 4 - The latency between OpenAL and the DAC (the second value from
+ * AL_SAMPLE_OFFSET_LATENCY_SOFT)
+ *
+ * Subtracting the length of the source queue from the next sample's
+ * timestamp gives the timestamp of the sample at the start of the source
+ * queue. Adding the source offset to that results in the timestamp for the
+ * sample at OpenAL's current position, and subtracting the source latency
+ * from that gives the timestamp of the sample currently at the DAC.
+ */
+ nanoseconds pts = mCurrentPts;
+ if(mSource)
+ {
+ ALint64SOFT offset[2];
+ ALint queued;
+ ALint status;
+
+ /* NOTE: The source state must be checked last, in case an underrun
+ * occurs and the source stops between retrieving the offset+latency
+ * and getting the state. */
+ if(alGetSourcei64vSOFT)
+ alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_LATENCY_SOFT, offset);
+ else
+ {
+ ALint ioffset;
+ alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
+ offset[0] = (ALint64SOFT)ioffset << 32;
+ offset[1] = 0;
+ }
+ alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
+ alGetSourcei(mSource, AL_SOURCE_STATE, &status);
+
+ /* If the source is AL_STOPPED, then there was an underrun and all
+ * buffers are processed, so ignore the source queue. The audio thread
+ * will put the source into an AL_INITIAL state and clear the queue
+ * when it starts recovery. */
+ if(status != AL_STOPPED)
+ {
+ using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
+
+ pts -= AudioBufferTime*queued;
+ pts += std::chrono::duration_cast<nanoseconds>(
+ fixed32(offset[0] / mCodecCtx->sample_rate)
+ );
+ }
+ /* Don't offset by the latency if the source isn't playing. */
+ if(status == AL_PLAYING)
+ pts -= nanoseconds(offset[1]);
+ }
+
+ return std::max(pts, nanoseconds::zero());
+}
+
+bool AudioState::isBufferFilled()
+{
+ /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as
+ * does the source gen. So when we're able to grab the lock and the source
+ * is valid, the queue must be full.
+ */
+ std::lock_guard<std::mutex> lock(mSrcMutex);
+ return mSource != 0;
+}
+
+void AudioState::startPlayback()
+{
+ alSourcePlay(mSource);
+ if(alcGetInteger64vSOFT)
+ {
+ using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
+
+ // Subtract the total buffer queue time from the current pts to get the
+ // pts of the start of the queue.
+ nanoseconds startpts = mCurrentPts - AudioBufferTotalTime;
+ int64_t srctimes[2]={0,0};
+ alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
+ auto device_time = nanoseconds(srctimes[1]);
+ auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32(srctimes[0])) /
+ mCodecCtx->sample_rate;
+
+ // The mixer may have ticked and incremented the device time and sample
+ // offset, so subtract the source offset from the device time to get
+ // the device time the source started at. Also subtract startpts to get
+ // the device time the stream would have started at to reach where it
+ // is now.
+ mDeviceStartTime = device_time - src_offset - startpts;
+ }
+}
+
+int AudioState::getSync()
+{
+ if(mMovie.mAVSyncType == SyncMaster::Audio)
+ return 0;
+
+ auto ref_clock = mMovie.getMasterClock();
+ auto diff = ref_clock - getClockNoLock();
+
+ if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
+ {
+ /* Difference is TOO big; reset accumulated average */
+ mClockDiffAvg = seconds_d64::zero();
+ return 0;
+ }
+
+ /* Accumulate the diffs */
+ mClockDiffAvg = mClockDiffAvg*AudioAvgFilterCoeff + diff;
+ auto avg_diff = mClockDiffAvg*(1.0 - AudioAvgFilterCoeff);
+ if(avg_diff < AudioSyncThreshold/2.0 && avg_diff > -AudioSyncThreshold)
+ return 0;
+
+ /* Constrain the per-update difference to avoid exceedingly large skips */
+ diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax),
+ AudioSampleCorrectionMax);
+ return (int)std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count();
+}
+
+int AudioState::decodeFrame()
+{
+ while(!mMovie.mQuit.load(std::memory_order_relaxed))
+ {
+ std::unique_lock<std::mutex> lock(mQueueMtx);
+ int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
+ if(ret == AVERROR(EAGAIN))
+ {
+ mMovie.mSendDataGood.clear(std::memory_order_relaxed);
+ std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
+ mMovie.mSendCond.notify_one();
+ do {
+ mQueueCond.wait(lock);
+ ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
+ } while(ret == AVERROR(EAGAIN));
+ }
+ lock.unlock();
+ if(ret == AVERROR_EOF) break;
+ mMovie.mSendDataGood.clear(std::memory_order_relaxed);
+ mMovie.mSendCond.notify_one();
+ if(ret < 0)
+ {
+ std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
+ return 0;
+ }
+
+ if(mDecodedFrame->nb_samples <= 0)
+ {
+ av_frame_unref(mDecodedFrame.get());
+ continue;
+ }
+
+ /* If provided, update w/ pts */
+ if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
+ mCurrentPts = std::chrono::duration_cast<nanoseconds>(
+ seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
+ );
+
+ if(mDecodedFrame->nb_samples > mSamplesMax)
+ {
+ av_freep(&mSamples);
+ av_samples_alloc(
+ &mSamples, nullptr, mCodecCtx->channels,
+ mDecodedFrame->nb_samples, mDstSampleFmt, 0
+ );
+ mSamplesMax = mDecodedFrame->nb_samples;
+ }
+ /* Return the amount of sample frames converted */
+ int data_size = swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
+ (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
+ );
+
+ av_frame_unref(mDecodedFrame.get());
+ return data_size;
+ }
+
+ return 0;
+}
+
+/* Duplicates the sample at in to out, count times. The frame size is a
+ * multiple of the template type size.
+ */
+template<typename T>
+static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
+{
+ const T *sample = reinterpret_cast<const T*>(in);
+ T *dst = reinterpret_cast<T*>(out);
+ if(frame_size == sizeof(T))
+ std::fill_n(dst, count, *sample);
+ else
+ {
+ /* NOTE: frame_size is a multiple of sizeof(T). */
+ int type_mult = frame_size / sizeof(T);
+ int i = 0;
+ std::generate_n(dst, count*type_mult,
+ [sample,type_mult,&i]() -> T
+ {
+ T ret = sample[i];
+ i = (i+1)%type_mult;
+ return ret;
+ }
+ );
+ }
+}
+
+
+bool AudioState::readAudio(uint8_t *samples, int length)
+{
+ int sample_skip = getSync();
+ int audio_size = 0;
+
+ /* Read the next chunk of data, refill the buffer, and queue it
+ * on the source */
+ length /= mFrameSize;
+ while(audio_size < length)
+ {
+ if(mSamplesLen <= 0 || mSamplesPos >= mSamplesLen)
+ {
+ int frame_len = decodeFrame();
+ if(frame_len <= 0) break;
+
+ mSamplesLen = frame_len;
+ mSamplesPos = std::min(mSamplesLen, sample_skip);
+ sample_skip -= mSamplesPos;
+
+ // Adjust the device start time and current pts by the amount we're
+ // skipping/duplicating, so that the clock remains correct for the
+ // current stream position.
+ auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate;
+ mDeviceStartTime -= skip;
+ mCurrentPts += skip;
+ continue;
+ }
+
+ int rem = length - audio_size;
+ if(mSamplesPos >= 0)
+ {
+ int len = mSamplesLen - mSamplesPos;
+ if(rem > len) rem = len;
+ memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
+ }
+ else
+ {
+ rem = std::min(rem, -mSamplesPos);
+
+ /* Add samples by copying the first sample */
+ if((mFrameSize&7) == 0)
+ sample_dup<uint64_t>(samples, mSamples, rem, mFrameSize);
+ else if((mFrameSize&3) == 0)
+ sample_dup<uint32_t>(samples, mSamples, rem, mFrameSize);
+ else if((mFrameSize&1) == 0)
+ sample_dup<uint16_t>(samples, mSamples, rem, mFrameSize);
+ else
+ sample_dup<uint8_t>(samples, mSamples, rem, mFrameSize);
+ }
+
+ mSamplesPos += rem;
+ mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
+ samples += rem*mFrameSize;
+ audio_size += rem;
+ }
+ if(audio_size <= 0)
+ return false;
+
+ if(audio_size < length)
+ {
+ int rem = length - audio_size;
+ std::fill_n(samples, rem*mFrameSize,
+ (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
+ mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
+ audio_size += rem;
+ }
+ return true;
+}
+
+
+#ifdef AL_SOFT_events
+void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALuint param,
+ ALsizei length, const ALchar *message,
+ void *userParam)
+{
+ AudioState *self = reinterpret_cast<AudioState*>(userParam);
+
+ if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
+ {
+ /* Temporarily lock the source mutex to ensure it's not between
+ * checking the processed count and going to sleep.
+ */
+ std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
+ self->mSrcCond.notify_one();
+ return;
+ }
+
+ std::cout<< "\n---- AL Event on AudioState "<<self<<" ----\nEvent: ";
+ switch(eventType)
+ {
+ case AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT: std::cout<< "Buffer completed"; break;
+ case AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT: std::cout<< "Source state changed"; break;
+ case AL_EVENT_TYPE_ERROR_SOFT: std::cout<< "API error"; break;
+ case AL_EVENT_TYPE_PERFORMANCE_SOFT: std::cout<< "Performance"; break;
+ case AL_EVENT_TYPE_DEPRECATED_SOFT: std::cout<< "Deprecated"; break;
+ case AL_EVENT_TYPE_DISCONNECTED_SOFT: std::cout<< "Disconnected"; break;
+ default: std::cout<< "0x"<<std::hex<<std::setw(4)<<std::setfill('0')<<eventType<<
+ std::dec<<std::setw(0)<<std::setfill(' '); break;
+ }
+ std::cout<< "\n"
+ "Object ID: "<<object<<"\n"
+ "Parameter: "<<param<<"\n"
+ "Message: "<<std::string(message, length)<<"\n----"<<
+ std::endl;
+
+ if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
+ {
+ { std::lock_guard<std::mutex> lock(self->mSrcMutex);
+ self->mConnected.clear(std::memory_order_release);
+ }
+ std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
+ self->mSrcCond.notify_one();
+ }
+}
+#endif
+
+int AudioState::handler()
+{
+ std::unique_lock<std::mutex> lock(mSrcMutex);
+ milliseconds sleep_time = AudioBufferTime / 3;
+ ALenum fmt;
+
+#ifdef AL_SOFT_events
+ const std::array<ALenum,6> evt_types{{
+ AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT, AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
+ AL_EVENT_TYPE_ERROR_SOFT, AL_EVENT_TYPE_PERFORMANCE_SOFT, AL_EVENT_TYPE_DEPRECATED_SOFT,
+ AL_EVENT_TYPE_DISCONNECTED_SOFT
+ }};
+ if(alEventControlSOFT)
+ {
+ alEventControlSOFT(evt_types.size(), evt_types.data(), AL_TRUE);
+ alEventCallbackSOFT(EventCallback, this);
+ sleep_time = AudioBufferTotalTime;
+ }
+#endif
+
+ /* Find a suitable format for OpenAL. */
+ mDstChanLayout = 0;
+ if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
+ {
+ mDstSampleFmt = AV_SAMPLE_FMT_U8;
+ mFrameSize = 1;
+ if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
+ alIsExtensionPresent("AL_EXT_MCFORMATS") &&
+ (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
+ {
+ mDstChanLayout = mCodecCtx->channel_layout;
+ mFrameSize *= 8;
+ mFormat = fmt;
+ }
+ if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
+ mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
+ alIsExtensionPresent("AL_EXT_MCFORMATS") &&
+ (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
+ {
+ mDstChanLayout = mCodecCtx->channel_layout;
+ mFrameSize *= 6;
+ mFormat = fmt;
+ }
+ if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
+ {
+ mDstChanLayout = mCodecCtx->channel_layout;
+ mFrameSize *= 1;
+ mFormat = AL_FORMAT_MONO8;
+ }
+ if(!mDstChanLayout)
+ {
+ mDstChanLayout = AV_CH_LAYOUT_STEREO;
+ mFrameSize *= 2;
+ mFormat = AL_FORMAT_STEREO8;
+ }
+ }
+ if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
+ alIsExtensionPresent("AL_EXT_FLOAT32"))
+ {
+ mDstSampleFmt = AV_SAMPLE_FMT_FLT;
+ mFrameSize = 4;
+ if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
+ alIsExtensionPresent("AL_EXT_MCFORMATS") &&
+ (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
+ {
+ mDstChanLayout = mCodecCtx->channel_layout;
+ mFrameSize *= 8;
+ mFormat = fmt;
+ }
+ if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
+ mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
+ alIsExtensionPresent("AL_EXT_MCFORMATS") &&
+ (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
+ {
+ mDstChanLayout = mCodecCtx->channel_layout;
+ mFrameSize *= 6;
+ mFormat = fmt;
+ }
+ if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
+ {
+ mDstChanLayout = mCodecCtx->channel_layout;
+ mFrameSize *= 1;
+ mFormat = AL_FORMAT_MONO_FLOAT32;
+ }
+ if(!mDstChanLayout)
+ {
+ mDstChanLayout = AV_CH_LAYOUT_STEREO;
+ mFrameSize *= 2;
+ mFormat = AL_FORMAT_STEREO_FLOAT32;
+ }
+ }
+ if(!mDstChanLayout)
+ {
+ mDstSampleFmt = AV_SAMPLE_FMT_S16;
+ mFrameSize = 2;
+ if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
+ alIsExtensionPresent("AL_EXT_MCFORMATS") &&
+ (fmt=alGetEnumValue("AL_FORMAT_71CHN16")) != AL_NONE && fmt != -1)
+ {
+ mDstChanLayout = mCodecCtx->channel_layout;
+ mFrameSize *= 8;
+ mFormat = fmt;
+ }
+ if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
+ mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
+ alIsExtensionPresent("AL_EXT_MCFORMATS") &&
+ (fmt=alGetEnumValue("AL_FORMAT_51CHN16")) != AL_NONE && fmt != -1)
+ {
+ mDstChanLayout = mCodecCtx->channel_layout;
+ mFrameSize *= 6;
+ mFormat = fmt;
+ }
+ if(mCodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
+ {
+ mDstChanLayout = mCodecCtx->channel_layout;
+ mFrameSize *= 1;
+ mFormat = AL_FORMAT_MONO16;
+ }
+ if(!mDstChanLayout)
+ {
+ mDstChanLayout = AV_CH_LAYOUT_STEREO;
+ mFrameSize *= 2;
+ mFormat = AL_FORMAT_STEREO16;
+ }
+ }
+ void *samples = nullptr;
+ ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>(
+ mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize;
+
+ mSamples = NULL;
+ mSamplesMax = 0;
+ mSamplesPos = 0;
+ mSamplesLen = 0;
+
+ mDecodedFrame.reset(av_frame_alloc());
+ if(!mDecodedFrame)
+ {
+ std::cerr<< "Failed to allocate audio frame" <<std::endl;
+ goto finish;
+ }
+
+ mSwresCtx.reset(swr_alloc_set_opts(nullptr,
+ mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
+ mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
+ (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
+ mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
+ 0, nullptr
+ ));
+ if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
+ {
+ std::cerr<< "Failed to initialize audio converter" <<std::endl;
+ goto finish;
+ }
+
+ mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0);
+ alGenBuffers(mBuffers.size(), mBuffers.data());
+ alGenSources(1, &mSource);
+
+ if(EnableDirectOut)
+ alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
+ if(EnableWideStereo)
+ {
+ ALfloat angles[2] = { (ALfloat)(M_PI/3.0), (ALfloat)(-M_PI/3.0) };
+ alSourcefv(mSource, AL_STEREO_ANGLES, angles);
+ }
+
+ if(alGetError() != AL_NO_ERROR)
+ goto finish;
+
+#ifdef AL_SOFT_map_buffer
+ if(alBufferStorageSOFT)
+ {
+ for(ALuint bufid : mBuffers)
+ alBufferStorageSOFT(bufid, mFormat, nullptr, buffer_len, mCodecCtx->sample_rate,
+ AL_MAP_WRITE_BIT_SOFT);
+ if(alGetError() != AL_NO_ERROR)
+ {
+ fprintf(stderr, "Failed to use mapped buffers\n");
+ samples = av_malloc(buffer_len);
+ }
+ }
+ else
+#endif
+ samples = av_malloc(buffer_len);
+
+ while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed) &&
+ mConnected.test_and_set(std::memory_order_relaxed))
+ {
+ /* First remove any processed buffers. */
+ ALint processed;
+ alGetSourcei(mSource, AL_BUFFERS_PROCESSED, &processed);
+ while(processed > 0)
+ {
+ std::array<ALuint,4> bids;
+ alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed),
+ bids.data());
+ processed -= std::min<ALsizei>(bids.size(), processed);
+ }
+
+ /* Refill the buffer queue. */
+ ALint queued;
+ alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
+ while((ALuint)queued < mBuffers.size())
+ {
+ ALuint bufid = mBuffers[mBufferIdx];
+
+ uint8_t *ptr = reinterpret_cast<uint8_t*>(samples
+#ifdef AL_SOFT_map_buffer
+ ? samples : alMapBufferSOFT(bufid, 0, buffer_len, AL_MAP_WRITE_BIT_SOFT)
+#endif
+ );
+ if(!ptr) break;
+
+ /* Read the next chunk of data, filling the buffer, and queue it on
+ * the source */
+ bool got_audio = readAudio(ptr, buffer_len);
+#ifdef AL_SOFT_map_buffer
+ if(!samples) alUnmapBufferSOFT(bufid);
+#endif
+ if(!got_audio) break;
+
+ if(samples)
+ alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);
+
+ alSourceQueueBuffers(mSource, 1, &bufid);
+ mBufferIdx = (mBufferIdx+1) % mBuffers.size();
+ ++queued;
+ }
+ if(queued == 0)
+ break;
+
+ /* Check that the source is playing. */
+ ALint state;
+ alGetSourcei(mSource, AL_SOURCE_STATE, &state);
+ if(state == AL_STOPPED)
+ {
+ /* AL_STOPPED means there was an underrun. Clear the buffer queue
+ * since this likely means we're late, and rewind the source to get
+ * it back into an AL_INITIAL state.
+ */
+ alSourceRewind(mSource);
+ alSourcei(mSource, AL_BUFFER, 0);
+ continue;
+ }
+
+ /* (re)start the source if needed, and wait for a buffer to finish */
+ if(state != AL_PLAYING && state != AL_PAUSED &&
+ mMovie.mPlaying.load(std::memory_order_relaxed))
+ startPlayback();
+
+ mSrcCond.wait_for(lock, sleep_time);
+ }
+
+ alSourceRewind(mSource);
+ alSourcei(mSource, AL_BUFFER, 0);
+
+finish:
+ av_freep(&samples);
+
+#ifdef AL_SOFT_events
+ if(alEventControlSOFT)
+ {
+ alEventControlSOFT(evt_types.size(), evt_types.data(), AL_FALSE);
+ alEventCallbackSOFT(nullptr, nullptr);
+ }
+#endif
+
+ return 0;
+}
+
+
+nanoseconds VideoState::getClock()
+{
+ /* NOTE: This returns incorrect times while not playing. */
+ auto delta = get_avtime() - mCurrentPtsTime;
+ return mCurrentPts + delta;
+}
+
+bool VideoState::isBufferFilled()
+{
+ std::unique_lock<std::mutex> lock(mPictQMutex);
+ return mPictQSize >= mPictQ.size();
+}
+
+Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
+{
+ SDL_Event evt{};
+ evt.user.type = FF_REFRESH_EVENT;
+ evt.user.data1 = opaque;
+ SDL_PushEvent(&evt);
+ return 0; /* 0 means stop timer */
+}
+
+/* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
+void VideoState::schedRefresh(milliseconds delay)
+{
+ SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this);
+}
+
+/* Called by VideoState::refreshTimer to display the next video frame. */
+void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
+{
+ Picture *vp = &mPictQ[mPictQRead];
+
+ if(!vp->mImage)
+ return;
+
+ float aspect_ratio;
+ int win_w, win_h;
+ int w, h, x, y;
+
+ if(mCodecCtx->sample_aspect_ratio.num == 0)
+ aspect_ratio = 0.0f;
+ else
+ {
+ aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
+ mCodecCtx->height;
+ }
+ if(aspect_ratio <= 0.0f)
+ aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
+
+ SDL_GetWindowSize(screen, &win_w, &win_h);
+ h = win_h;
+ w = ((int)rint(h * aspect_ratio) + 3) & ~3;
+ if(w > win_w)
+ {
+ w = win_w;
+ h = ((int)rint(w / aspect_ratio) + 3) & ~3;
+ }
+ x = (win_w - w) / 2;
+ y = (win_h - h) / 2;
+
+ SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
+ SDL_Rect dst_rect{ x, y, w, h };
+ SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
+ SDL_RenderPresent(renderer);
+}
+
+/* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
+ * was created. It handles the display of the next decoded video frame (if not
+ * falling behind), and sets up the timer for the following video frame.
+ */
+void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
+{
+ if(!mStream)
+ {
+ if(mEOS)
+ {
+ mFinalUpdate = true;
+ std::unique_lock<std::mutex>(mPictQMutex).unlock();
+ mPictQCond.notify_all();
+ return;
+ }
+ schedRefresh(milliseconds(100));
+ return;
+ }
+ if(!mMovie.mPlaying.load(std::memory_order_relaxed))
+ {
+ schedRefresh(milliseconds(1));
+ return;
+ }
+
+ std::unique_lock<std::mutex> lock(mPictQMutex);
+retry:
+ if(mPictQSize == 0)
+ {
+ if(mEOS)
+ mFinalUpdate = true;
+ else
+ schedRefresh(milliseconds(1));
+ lock.unlock();
+ mPictQCond.notify_all();
+ return;
+ }
+
+ Picture *vp = &mPictQ[mPictQRead];
+ mCurrentPts = vp->mPts;
+ mCurrentPtsTime = get_avtime();
+
+ /* Get delay using the frame pts and the pts from last frame. */
+ auto delay = vp->mPts - mFrameLastPts;
+ if(delay <= seconds::zero() || delay >= seconds(1))
+ {
+ /* If incorrect delay, use previous one. */
+ delay = mFrameLastDelay;
+ }
+ /* Save for next frame. */
+ mFrameLastDelay = delay;
+ mFrameLastPts = vp->mPts;
+
+ /* Update delay to sync to clock if not master source. */
+ if(mMovie.mAVSyncType != SyncMaster::Video)
+ {
+ auto ref_clock = mMovie.getMasterClock();
+ auto diff = vp->mPts - ref_clock;
+
+ /* Skip or repeat the frame. Take delay into account. */
+ auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold);
+ if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
+ {
+ if(diff <= -sync_threshold)
+ delay = nanoseconds::zero();
+ else if(diff >= sync_threshold)
+ delay *= 2;
+ }
+ }
+
+ mFrameTimer += delay;
+ /* Compute the REAL delay. */
+ auto actual_delay = mFrameTimer - get_avtime();
+ if(!(actual_delay >= VideoSyncThreshold))
+ {
+ /* We don't have time to handle this picture, just skip to the next one. */
+ mPictQRead = (mPictQRead+1)%mPictQ.size();
+ mPictQSize--;
+ goto retry;
+ }
+ schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay));
+
+ /* Show the picture! */
+ display(screen, renderer);
+
+ /* Update queue for next picture. */
+ mPictQRead = (mPictQRead+1)%mPictQ.size();
+ mPictQSize--;
+ lock.unlock();
+ mPictQCond.notify_all();
+}
+
+/* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
+ * main thread where the renderer was created.
+ */
+void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
+{
+ Picture *vp = &mPictQ[mPictQWrite];
+ bool fmt_updated = false;
+
+ /* allocate or resize the buffer! */
+ if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
+ {
+ fmt_updated = true;
+ if(vp->mImage)
+ SDL_DestroyTexture(vp->mImage);
+ vp->mImage = SDL_CreateTexture(
+ renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
+ mCodecCtx->coded_width, mCodecCtx->coded_height
+ );
+ if(!vp->mImage)
+ std::cerr<< "Failed to create YV12 texture!" <<std::endl;
+ vp->mWidth = mCodecCtx->width;
+ vp->mHeight = mCodecCtx->height;
+
+ if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
+ {
+ /* For the first update, set the window size to the video size. */
+ mFirstUpdate = false;
+
+ int w = vp->mWidth;
+ int h = vp->mHeight;
+ if(mCodecCtx->sample_aspect_ratio.den != 0)
+ {
+ double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
+ if(aspect_ratio >= 1.0)
+ w = (int)(w*aspect_ratio + 0.5);
+ else if(aspect_ratio > 0.0)
+ h = (int)(h/aspect_ratio + 0.5);
+ }
+ SDL_SetWindowSize(screen, w, h);
+ }
+ }
+
+ if(vp->mImage)
+ {
+ AVFrame *frame = mDecodedFrame.get();
+ void *pixels = nullptr;
+ int pitch = 0;
+
+ if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
+ SDL_UpdateYUVTexture(vp->mImage, nullptr,
+ frame->data[0], frame->linesize[0],
+ frame->data[1], frame->linesize[1],
+ frame->data[2], frame->linesize[2]
+ );
+ else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
+ std::cerr<< "Failed to lock texture" <<std::endl;
+ else
+ {
+ // Convert the image into YUV format that SDL uses
+ int coded_w = mCodecCtx->coded_width;
+ int coded_h = mCodecCtx->coded_height;
+ int w = mCodecCtx->width;
+ int h = mCodecCtx->height;
+ if(!mSwscaleCtx || fmt_updated)
+ {
+ mSwscaleCtx.reset(sws_getContext(
+ w, h, mCodecCtx->pix_fmt,
+ w, h, AV_PIX_FMT_YUV420P, 0,
+ nullptr, nullptr, nullptr
+ ));
+ }
+
+ /* point pict at the queue */
+ uint8_t *pict_data[3];
+ pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
+ pict_data[1] = pict_data[0] + coded_w*coded_h;
+ pict_data[2] = pict_data[1] + coded_w*coded_h/4;
+
+ int pict_linesize[3];
+ pict_linesize[0] = pitch;
+ pict_linesize[1] = pitch / 2;
+ pict_linesize[2] = pitch / 2;
+
+ sws_scale(mSwscaleCtx.get(), (const uint8_t**)frame->data,
+ frame->linesize, 0, h, pict_data, pict_linesize);
+ SDL_UnlockTexture(vp->mImage);
+ }
+ }
+
+ vp->mUpdated.store(true, std::memory_order_release);
+ std::unique_lock<std::mutex>(mPictQMutex).unlock();
+ mPictQCond.notify_one();
+}
+
+int VideoState::queuePicture(nanoseconds pts)
+{
+ /* Wait until we have space for a new pic */
+ std::unique_lock<std::mutex> lock(mPictQMutex);
+ while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed))
+ mPictQCond.wait(lock);
+ lock.unlock();
+
+ if(mMovie.mQuit.load(std::memory_order_relaxed))
+ return -1;
+
+ Picture *vp = &mPictQ[mPictQWrite];
+
+ /* We have to create/update the picture in the main thread */
+ vp->mUpdated.store(false, std::memory_order_relaxed);
+ SDL_Event evt{};
+ evt.user.type = FF_UPDATE_EVENT;
+ evt.user.data1 = this;
+ SDL_PushEvent(&evt);
+
+ /* Wait until the picture is updated. */
+ lock.lock();
+ while(!vp->mUpdated.load(std::memory_order_relaxed))
+ {
+ if(mMovie.mQuit.load(std::memory_order_relaxed))
+ return -1;
+ mPictQCond.wait(lock);
+ }
+ if(mMovie.mQuit.load(std::memory_order_relaxed))
+ return -1;
+ vp->mPts = pts;
+
+ mPictQWrite = (mPictQWrite+1)%mPictQ.size();
+ mPictQSize++;
+ lock.unlock();
+
+ return 0;
+}
+
+int VideoState::handler()
+{
+ mDecodedFrame.reset(av_frame_alloc());
+ while(!mMovie.mQuit.load(std::memory_order_relaxed))
+ {
+ std::unique_lock<std::mutex> lock(mQueueMtx);
+ /* Decode video frame */
+ int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
+ if(ret == AVERROR(EAGAIN))
+ {
+ mMovie.mSendDataGood.clear(std::memory_order_relaxed);
+ std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
+ mMovie.mSendCond.notify_one();
+ do {
+ mQueueCond.wait(lock);
+ ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
+ } while(ret == AVERROR(EAGAIN));
+ }
+ lock.unlock();
+ if(ret == AVERROR_EOF) break;
+ mMovie.mSendDataGood.clear(std::memory_order_relaxed);
+ mMovie.mSendCond.notify_one();
+ if(ret < 0)
+ {
+ std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
+ continue;
+ }
+
+ /* Get the PTS for this frame. */
+ nanoseconds pts;
+ if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
+ mClock = std::chrono::duration_cast<nanoseconds>(
+ seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
+ );
+ pts = mClock;
+
+ /* Update the video clock to the next expected PTS. */
+ auto frame_delay = av_q2d(mCodecCtx->time_base);
+ frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
+ mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay));
+
+ if(queuePicture(pts) < 0)
+ break;
+ av_frame_unref(mDecodedFrame.get());
+ }
+ mEOS = true;
+
+ std::unique_lock<std::mutex> lock(mPictQMutex);
+ if(mMovie.mQuit.load(std::memory_order_relaxed))
+ {
+ mPictQRead = 0;
+ mPictQWrite = 0;
+ mPictQSize = 0;
+ }
+ while(!mFinalUpdate)
+ mPictQCond.wait(lock);
+
+ return 0;
+}
+
+
+int MovieState::decode_interrupt_cb(void *ctx)
+{
+ return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
+}
+
+bool MovieState::prepare()
+{
+ AVIOContext *avioctx = nullptr;
+ AVIOInterruptCB intcb = { decode_interrupt_cb, this };
+ if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
+ {
+ std::cerr<< "Failed to open "<<mFilename <<std::endl;
+ return false;
+ }
+ mIOContext.reset(avioctx);
+
+ /* Open movie file. If avformat_open_input fails it will automatically free
+ * this context, so don't set it onto a smart pointer yet.
+ */
+ AVFormatContext *fmtctx = avformat_alloc_context();
+ fmtctx->pb = mIOContext.get();
+ fmtctx->interrupt_callback = intcb;
+ if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
+ {
+ std::cerr<< "Failed to open "<<mFilename <<std::endl;
+ return false;
+ }
+ mFormatCtx.reset(fmtctx);
+
+ /* Retrieve stream information */
+ if(avformat_find_stream_info(mFormatCtx.get(), nullptr) < 0)
+ {
+ std::cerr<< mFilename<<": failed to find stream info" <<std::endl;
+ return false;
+ }
+
+ mVideo.schedRefresh(milliseconds(40));
+
+ mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
+ return true;
+}
+
+void MovieState::setTitle(SDL_Window *window)
+{
+ auto pos1 = mFilename.rfind('/');
+ auto pos2 = mFilename.rfind('\\');
+ auto fpos = ((pos1 == std::string::npos) ? pos2 :
+ (pos2 == std::string::npos) ? pos1 :
+ std::max(pos1, pos2)) + 1;
+ SDL_SetWindowTitle(window, (mFilename.substr(fpos)+" - "+AppName).c_str());
+}
+
+nanoseconds MovieState::getClock()
+{
+ if(!mPlaying.load(std::memory_order_relaxed))
+ return nanoseconds::zero();
+ return get_avtime() - mClockBase;
+}
+
+nanoseconds MovieState::getMasterClock()
+{
+ if(mAVSyncType == SyncMaster::Video)
+ return mVideo.getClock();
+ if(mAVSyncType == SyncMaster::Audio)
+ return mAudio.getClock();
+ return getClock();
+}
+
+nanoseconds MovieState::getDuration()
+{ return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
+
+int MovieState::streamComponentOpen(int stream_index)
+{
+ if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
+ return -1;
+
+ /* Get a pointer to the codec context for the stream, and open the
+ * associated codec.
+ */
+ AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr));
+ if(!avctx) return -1;
+
+ if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
+ return -1;
+
+ AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
+ if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
+ {
+ std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
+ << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
+ return -1;
+ }
+
+ /* Initialize and start the media type handler */
+ switch(avctx->codec_type)
+ {
+ case AVMEDIA_TYPE_AUDIO:
+ mAudio.mStream = mFormatCtx->streams[stream_index];
+ mAudio.mCodecCtx = std::move(avctx);
+
+ mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
+ break;
+
+ case AVMEDIA_TYPE_VIDEO:
+ mVideo.mStream = mFormatCtx->streams[stream_index];
+ mVideo.mCodecCtx = std::move(avctx);
+
+ mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
+ break;
+
+ default:
+ return -1;
+ }
+
+ return stream_index;
+}
+
+int MovieState::parse_handler()
+{
+ int video_index = -1;
+ int audio_index = -1;
+
+ /* Dump information about file onto standard error */
+ av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
+
+ /* Find the first video and audio streams */
+ for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
+ {
+ auto codecpar = mFormatCtx->streams[i]->codecpar;
+ if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
+ video_index = streamComponentOpen(i);
+ else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
+ audio_index = streamComponentOpen(i);
+ }
+
+ if(video_index < 0 && audio_index < 0)
+ {
+ std::cerr<< mFilename<<": could not open codecs" <<std::endl;
+ mQuit = true;
+ }
+
+ PacketQueue audio_queue, video_queue;
+ bool input_finished = false;
+
+ /* Main packet reading/dispatching loop */
+ while(!mQuit.load(std::memory_order_relaxed) && !input_finished)
+ {
+ AVPacket packet;
+ if(av_read_frame(mFormatCtx.get(), &packet) < 0)
+ input_finished = true;
+ else
+ {
+ /* Copy the packet into the queue it's meant for. */
+ if(packet.stream_index == video_index)
+ video_queue.put(&packet);
+ else if(packet.stream_index == audio_index)
+ audio_queue.put(&packet);
+ av_packet_unref(&packet);
+ }
+
+ do {
+ /* Send whatever queued packets we have. */
+ if(!audio_queue.empty())
+ {
+ std::unique_lock<std::mutex> lock(mAudio.mQueueMtx);
+ int ret;
+ do {
+ ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front());
+ if(ret != AVERROR(EAGAIN)) audio_queue.pop();
+ } while(ret != AVERROR(EAGAIN) && !audio_queue.empty());
+ lock.unlock();
+ mAudio.mQueueCond.notify_one();
+ }
+ if(!video_queue.empty())
+ {
+ std::unique_lock<std::mutex> lock(mVideo.mQueueMtx);
+ int ret;
+ do {
+ ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front());
+ if(ret != AVERROR(EAGAIN)) video_queue.pop();
+ } while(ret != AVERROR(EAGAIN) && !video_queue.empty());
+ lock.unlock();
+ mVideo.mQueueCond.notify_one();
+ }
+ /* If the queues are completely empty, or it's not full and there's
+ * more input to read, go get more.
+ */
+ size_t queue_size = audio_queue.totalSize() + video_queue.totalSize();
+ if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished))
+ break;
+
+ if(!mPlaying.load(std::memory_order_relaxed))
+ {
+ if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) &&
+ (!mVideo.mCodecCtx || mVideo.isBufferFilled()))
+ {
+ /* Set the base time 50ms ahead of the current av time. */
+ mClockBase = get_avtime() + milliseconds(50);
+ mVideo.mCurrentPtsTime = mClockBase;
+ mVideo.mFrameTimer = mVideo.mCurrentPtsTime;
+ mAudio.startPlayback();
+ mPlaying.store(std::memory_order_release);
+ }
+ }
+ /* Nothing to send or get for now, wait a bit and try again. */
+ { std::unique_lock<std::mutex> lock(mSendMtx);
+ if(mSendDataGood.test_and_set(std::memory_order_relaxed))
+ mSendCond.wait_for(lock, milliseconds(10));
+ }
+ } while(!mQuit.load(std::memory_order_relaxed));
+ }
+ /* Pass a null packet to finish the send buffers (the receive functions
+ * will get AVERROR_EOF when emptied).
+ */
+ if(mVideo.mCodecCtx)
+ {
+ { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx);
+ avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr);
+ }
+ mVideo.mQueueCond.notify_one();
+ }
+ if(mAudio.mCodecCtx)
+ {
+ { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx);
+ avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr);
+ }
+ mAudio.mQueueCond.notify_one();
+ }
+ video_queue.clear();
+ audio_queue.clear();
+
+ /* all done - wait for it */
+ if(mVideoThread.joinable())
+ mVideoThread.join();
+ if(mAudioThread.joinable())
+ mAudioThread.join();
+
+ mVideo.mEOS = true;
+ std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
+ while(!mVideo.mFinalUpdate)
+ mVideo.mPictQCond.wait(lock);
+ lock.unlock();
+
+ SDL_Event evt{};
+ evt.user.type = FF_MOVIE_DONE_EVENT;
+ SDL_PushEvent(&evt);
+
+ return 0;
+}
+
+
+// Helper class+method to print the time with human-readable formatting.
+struct PrettyTime {
+ seconds mTime;
+};
+inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
+{
+ using hours = std::chrono::hours;
+ using minutes = std::chrono::minutes;
+ using std::chrono::duration_cast;
+
+ seconds t = rhs.mTime;
+ if(t.count() < 0)
+ {
+ os << '-';
+ t *= -1;
+ }
+
+ // Only handle up to hour formatting
+ if(t >= hours(1))
+ os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
+ << (duration_cast<minutes>(t).count() % 60) << 'm';
+ else
+ os << duration_cast<minutes>(t).count() << 'm' << std::setfill('0');
+ os << std::setw(2) << (duration_cast<seconds>(t).count() % 60) << 's' << std::setw(0)
+ << std::setfill(' ');
+ return os;
+}
+
+} // namespace
+
+
+int main(int argc, char *argv[])
+{
+ std::unique_ptr<MovieState> movState;
+
+ if(argc < 2)
+ {
+ std::cerr<< "Usage: "<<argv[0]<<" [-device <device name>] [-direct] <files...>" <<std::endl;
+ return 1;
+ }
+ /* Register all formats and codecs */
+ av_register_all();
+ /* Initialize networking protocols */
+ avformat_network_init();
+
+ if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
+ {
+ std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl;
+ return 1;
+ }
+
+ /* Make a window to put our video */
+ SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
+ if(!screen)
+ {
+ std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
+ return 1;
+ }
+ /* Make a renderer to handle the texture image surface and rendering. */
+ Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC;
+ SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags);
+ if(renderer)
+ {
+ SDL_RendererInfo rinf{};
+ bool ok = false;
+
+ /* Make sure the renderer supports IYUV textures. If not, fallback to a
+ * software renderer. */
+ if(SDL_GetRendererInfo(renderer, &rinf) == 0)
+ {
+ for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
+ ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
+ }
+ if(!ok)
+ {
+ std::cerr<< "IYUV pixelformat textures not supported on renderer "<<rinf.name <<std::endl;
+ SDL_DestroyRenderer(renderer);
+ renderer = nullptr;
+ }
+ }
+ if(!renderer)
+ {
+ render_flags = SDL_RENDERER_SOFTWARE | SDL_RENDERER_PRESENTVSYNC;
+ renderer = SDL_CreateRenderer(screen, -1, render_flags);
+ }
+ if(!renderer)
+ {
+ std::cerr<< "SDL: could not create renderer - exiting" <<std::endl;
+ return 1;
+ }
+ SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
+ SDL_RenderFillRect(renderer, nullptr);
+ SDL_RenderPresent(renderer);
+
+ /* Open an audio device */
+ ++argv; --argc;
+ if(InitAL(&argv, &argc))
+ {
+ std::cerr<< "Failed to set up audio device" <<std::endl;
+ return 1;
+ }
+
+ { auto device = alcGetContextsDevice(alcGetCurrentContext());
+ if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
+ {
+ std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
+ alcGetInteger64vSOFT = reinterpret_cast<LPALCGETINTEGER64VSOFT>(
+ alcGetProcAddress(device, "alcGetInteger64vSOFT")
+ );
+ }
+ }
+
+ if(alIsExtensionPresent("AL_SOFT_source_latency"))
+ {
+ std::cout<< "Found AL_SOFT_source_latency" <<std::endl;
+ alGetSourcei64vSOFT = reinterpret_cast<LPALGETSOURCEI64VSOFT>(
+ alGetProcAddress("alGetSourcei64vSOFT")
+ );
+ }
+#ifdef AL_SOFT_map_buffer
+ if(alIsExtensionPresent("AL_SOFTX_map_buffer"))
+ {
+ std::cout<< "Found AL_SOFT_map_buffer" <<std::endl;
+ alBufferStorageSOFT = reinterpret_cast<LPALBUFFERSTORAGESOFT>(
+ alGetProcAddress("alBufferStorageSOFT"));
+ alMapBufferSOFT = reinterpret_cast<LPALMAPBUFFERSOFT>(
+ alGetProcAddress("alMapBufferSOFT"));
+ alUnmapBufferSOFT = reinterpret_cast<LPALUNMAPBUFFERSOFT>(
+ alGetProcAddress("alUnmapBufferSOFT"));
+ }
+#endif
+#ifdef AL_SOFT_events
+ if(alIsExtensionPresent("AL_SOFTX_events"))
+ {
+ std::cout<< "Found AL_SOFT_events" <<std::endl;
+ alEventControlSOFT = reinterpret_cast<LPALEVENTCONTROLSOFT>(
+ alGetProcAddress("alEventControlSOFT"));
+ alEventCallbackSOFT = reinterpret_cast<LPALEVENTCALLBACKSOFT>(
+ alGetProcAddress("alEventCallbackSOFT"));
+ }
+#endif
+
+ int fileidx = 0;
+ for(;fileidx < argc;++fileidx)
+ {
+ if(strcmp(argv[fileidx], "-direct") == 0)
+ {
+ if(!alIsExtensionPresent("AL_SOFT_direct_channels"))
+ std::cerr<< "AL_SOFT_direct_channels not supported for direct output" <<std::endl;
+ else
+ {
+ std::cout<< "Found AL_SOFT_direct_channels" <<std::endl;
+ EnableDirectOut = true;
+ }
+ }
+ else if(strcmp(argv[fileidx], "-wide") == 0)
+ {
+ if(!alIsExtensionPresent("AL_EXT_STEREO_ANGLES"))
+ std::cerr<< "AL_EXT_STEREO_ANGLES not supported for wide stereo" <<std::endl;
+ else
+ {
+ std::cout<< "Found AL_EXT_STEREO_ANGLES" <<std::endl;
+ EnableWideStereo = true;
+ }
+ }
+ else
+ break;
+ }
+
+ while(fileidx < argc && !movState)
+ {
+ movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
+ if(!movState->prepare()) movState = nullptr;
+ }
+ if(!movState)
+ {
+ std::cerr<< "Could not start a video" <<std::endl;
+ return 1;
+ }
+ movState->setTitle(screen);
+
+ /* Default to going to the next movie at the end of one. */
+ enum class EomAction {
+ Next, Quit
+ } eom_action = EomAction::Next;
+ seconds last_time(-1);
+ SDL_Event event;
+ while(1)
+ {
+ int have_evt = SDL_WaitEventTimeout(&event, 10);
+
+ auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
+ if(cur_time != last_time)
+ {
+ auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
+ std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
+ last_time = cur_time;
+ }
+ if(!have_evt) continue;
+
+ switch(event.type)
+ {
+ case SDL_KEYDOWN:
+ switch(event.key.keysym.sym)
+ {
+ case SDLK_ESCAPE:
+ movState->mQuit = true;
+ eom_action = EomAction::Quit;
+ break;
+
+ case SDLK_n:
+ movState->mQuit = true;
+ eom_action = EomAction::Next;
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case SDL_WINDOWEVENT:
+ switch(event.window.event)
+ {
+ case SDL_WINDOWEVENT_RESIZED:
+ SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
+ SDL_RenderFillRect(renderer, nullptr);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case SDL_QUIT:
+ movState->mQuit = true;
+ eom_action = EomAction::Quit;
+ break;
+
+ case FF_UPDATE_EVENT:
+ reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
+ screen, renderer
+ );
+ break;
+
+ case FF_REFRESH_EVENT:
+ reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
+ screen, renderer
+ );
+ break;
+
+ case FF_MOVIE_DONE_EVENT:
+ std::cout<<'\n';
+ last_time = seconds(-1);
+ if(eom_action != EomAction::Quit)
+ {
+ movState = nullptr;
+ while(fileidx < argc && !movState)
+ {
+ movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
+ if(!movState->prepare()) movState = nullptr;
+ }
+ if(movState)
+ {
+ movState->setTitle(screen);
+ break;
+ }
+ }
+
+ /* Nothing more to play. Shut everything down and quit. */
+ movState = nullptr;
+
+ CloseAL();
+
+ SDL_DestroyRenderer(renderer);
+ renderer = nullptr;
+ SDL_DestroyWindow(screen);
+ screen = nullptr;
+
+ SDL_Quit();
+ exit(0);
+
+ default:
+ break;
+ }
+ }
+
+ std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
+ return 1;
+}
diff --git a/examples/alhrtf.c b/examples/alhrtf.c
index 6dac5308..f9150ae1 100644
--- a/examples/alhrtf.c
+++ b/examples/alhrtf.c
@@ -28,12 +28,13 @@
#include <assert.h>
#include <math.h>
+#include <SDL_sound.h>
+
#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"
#include "common/alhelpers.h"
-#include "common/sdl_sound.h"
#ifndef M_PI
@@ -44,47 +45,63 @@ static LPALCGETSTRINGISOFT alcGetStringiSOFT;
static LPALCRESETDEVICESOFT alcResetDeviceSOFT;
/* LoadBuffer loads the named audio file into an OpenAL buffer object, and
- * returns the new buffer ID. */
+ * returns the new buffer ID.
+ */
static ALuint LoadSound(const char *filename)
{
- ALenum err, format, type, channels;
- ALuint rate, buffer;
- size_t datalen;
- void *data;
- FilePtr sound;
+ Sound_Sample *sample;
+ ALenum err, format;
+ ALuint buffer;
+ Uint32 slen;
/* Open the audio file */
- sound = openAudioFile(filename, 1000);
- if(!sound)
+ sample = Sound_NewSampleFromFile(filename, NULL, 65536);
+ if(!sample)
{
fprintf(stderr, "Could not open audio in %s\n", filename);
- closeAudioFile(sound);
return 0;
}
/* Get the sound format, and figure out the OpenAL format */
- if(getAudioInfo(sound, &rate, &channels, &type) != 0)
+ if(sample->actual.channels == 1)
{
- fprintf(stderr, "Error getting audio info for %s\n", filename);
- closeAudioFile(sound);
- return 0;
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_MONO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_MONO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
}
-
- format = GetFormat(channels, type, NULL);
- if(format == AL_NONE)
+ else if(sample->actual.channels == 2)
{
- fprintf(stderr, "Unsupported format (%s, %s) for %s\n",
- ChannelsName(channels), TypeName(type), filename);
- closeAudioFile(sound);
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_STEREO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_STEREO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+ }
+ else
+ {
+ fprintf(stderr, "Unsupported channel count: %d\n", sample->actual.channels);
+ Sound_FreeSample(sample);
return 0;
}
/* Decode the whole audio stream to a buffer. */
- data = decodeAudioStream(sound, &datalen);
- if(!data)
+ slen = Sound_DecodeAll(sample);
+ if(!sample->buffer || slen == 0)
{
fprintf(stderr, "Failed to read audio from %s\n", filename);
- closeAudioFile(sound);
+ Sound_FreeSample(sample);
return 0;
}
@@ -92,9 +109,8 @@ static ALuint LoadSound(const char *filename)
* close the file. */
buffer = 0;
alGenBuffers(1, &buffer);
- alBufferData(buffer, format, data, datalen, rate);
- free(data);
- closeAudioFile(sound);
+ alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate);
+ Sound_FreeSample(sample);
/* Check if an error occured, and clean up if so. */
err = alGetError();
@@ -113,6 +129,7 @@ static ALuint LoadSound(const char *filename)
int main(int argc, char **argv)
{
ALCdevice *device;
+ ALboolean has_angle_ext;
ALuint source, buffer;
const char *soundname;
const char *hrtfname;
@@ -121,28 +138,18 @@ int main(int argc, char **argv)
ALdouble angle;
ALenum state;
- /* Print out usage if no file was specified */
- if(argc < 2 || (strcmp(argv[1], "-hrtf") == 0 && argc < 4))
+ /* Print out usage if no arguments were specified */
+ if(argc < 2)
{
- fprintf(stderr, "Usage: %s [-hrtf <name>] <soundfile>\n", argv[0]);
+ fprintf(stderr, "Usage: %s [-device <name>] [-hrtf <name>] <soundfile>\n", argv[0]);
return 1;
}
- /* Initialize OpenAL with the default device, and check for HRTF support. */
- if(InitAL() != 0)
+ /* Initialize OpenAL, and check for HRTF support. */
+ argv++; argc--;
+ if(InitAL(&argv, &argc) != 0)
return 1;
- if(strcmp(argv[1], "-hrtf") == 0)
- {
- hrtfname = argv[2];
- soundname = argv[3];
- }
- else
- {
- hrtfname = NULL;
- soundname = argv[1];
- }
-
device = alcGetContextsDevice(alcGetCurrentContext());
if(!alcIsExtensionPresent(device, "ALC_SOFT_HRTF"))
{
@@ -157,6 +164,24 @@ int main(int argc, char **argv)
LOAD_PROC(device, alcResetDeviceSOFT);
#undef LOAD_PROC
+ /* Check for the AL_EXT_STEREO_ANGLES extension to be able to also rotate
+ * stereo sources.
+ */
+ has_angle_ext = alIsExtensionPresent("AL_EXT_STEREO_ANGLES");
+ printf("AL_EXT_STEREO_ANGLES%s found\n", has_angle_ext?"":" not");
+
+ /* Check for user-preferred HRTF */
+ if(strcmp(argv[0], "-hrtf") == 0)
+ {
+ hrtfname = argv[1];
+ soundname = argv[2];
+ }
+ else
+ {
+ hrtfname = NULL;
+ soundname = argv[0];
+ }
+
/* Enumerate available HRTFs, and reset the device using one. */
alcGetIntegerv(device, ALC_NUM_HRTF_SPECIFIERS_SOFT, 1, &num_hrtf);
if(!num_hrtf)
@@ -178,19 +203,22 @@ int main(int argc, char **argv)
index = i;
}
+ i = 0;
+ attr[i++] = ALC_HRTF_SOFT;
+ attr[i++] = ALC_TRUE;
if(index == -1)
{
if(hrtfname)
printf("HRTF \"%s\" not found\n", hrtfname);
- index = 0;
+ printf("Using default HRTF...\n");
}
- printf("Selecting HRTF %d...\n", index);
-
- attr[0] = ALC_HRTF_SOFT;
- attr[1] = ALC_TRUE;
- attr[2] = ALC_HRTF_ID_SOFT;
- attr[3] = index;
- attr[4] = 0;
+ else
+ {
+ printf("Selecting HRTF %d...\n", index);
+ attr[i++] = ALC_HRTF_ID_SOFT;
+ attr[i++] = index;
+ }
+ attr[i] = 0;
if(!alcResetDeviceSOFT(device, attr))
printf("Failed to reset device: %s\n", alcGetString(device, alcGetError(device)));
@@ -207,10 +235,14 @@ int main(int argc, char **argv)
}
fflush(stdout);
+ /* Initialize SDL_sound. */
+ Sound_Init();
+
/* Load the sound into a buffer. */
buffer = LoadSound(soundname);
if(!buffer)
{
+ Sound_Quit();
CloseAL();
return 1;
}
@@ -227,21 +259,35 @@ int main(int argc, char **argv)
angle = 0.0;
alSourcePlay(source);
do {
- Sleep(10);
+ al_nssleep(10000000);
- /* Rotate the source around the listener by about 1/4 cycle per second.
- * Only affects mono sounds.
+ /* Rotate the source around the listener by about 1/4 cycle per second,
+ * and keep it within -pi...+pi.
*/
angle += 0.01 * M_PI * 0.5;
+ if(angle > M_PI)
+ angle -= M_PI*2.0;
+
+ /* This only rotates mono sounds. */
alSource3f(source, AL_POSITION, (ALfloat)sin(angle), 0.0f, -(ALfloat)cos(angle));
+ if(has_angle_ext)
+ {
+ /* This rotates stereo sounds with the AL_EXT_STEREO_ANGLES
+ * extension. Angles are specified counter-clockwise in radians.
+ */
+ ALfloat angles[2] = { (ALfloat)(M_PI/6.0 - angle), (ALfloat)(-M_PI/6.0 - angle) };
+ alSourcefv(source, AL_STEREO_ANGLES, angles);
+ }
+
alGetSourcei(source, AL_SOURCE_STATE, &state);
} while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);
- /* All done. Delete resources, and close OpenAL. */
+ /* All done. Delete resources, and close down SDL_sound and OpenAL. */
alDeleteSources(1, &source);
alDeleteBuffers(1, &buffer);
+ Sound_Quit();
CloseAL();
return 0;
diff --git a/examples/allatency.c b/examples/allatency.c
index deb13d3b..d561373f 100644
--- a/examples/allatency.c
+++ b/examples/allatency.c
@@ -27,16 +27,14 @@
#include <stdio.h>
#include <assert.h>
+#include <SDL_sound.h>
+
#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"
#include "common/alhelpers.h"
-#include "common/sdl_sound.h"
-
-static LPALBUFFERSAMPLESSOFT alBufferSamplesSOFT = wrap_BufferSamples;
-static LPALISBUFFERFORMATSUPPORTEDSOFT alIsBufferFormatSupportedSOFT;
static LPALSOURCEDSOFT alSourcedSOFT;
static LPALSOURCE3DSOFT alSource3dSOFT;
@@ -52,47 +50,63 @@ static LPALGETSOURCE3I64SOFT alGetSource3i64SOFT;
static LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
/* LoadBuffer loads the named audio file into an OpenAL buffer object, and
- * returns the new buffer ID. */
+ * returns the new buffer ID.
+ */
static ALuint LoadSound(const char *filename)
{
- ALenum err, format, type, channels;
- ALuint rate, buffer;
- size_t datalen;
- void *data;
- FilePtr sound;
+ Sound_Sample *sample;
+ ALenum err, format;
+ ALuint buffer;
+ Uint32 slen;
/* Open the audio file */
- sound = openAudioFile(filename, 1000);
- if(!sound)
+ sample = Sound_NewSampleFromFile(filename, NULL, 65536);
+ if(!sample)
{
fprintf(stderr, "Could not open audio in %s\n", filename);
- closeAudioFile(sound);
return 0;
}
/* Get the sound format, and figure out the OpenAL format */
- if(getAudioInfo(sound, &rate, &channels, &type) != 0)
+ if(sample->actual.channels == 1)
{
- fprintf(stderr, "Error getting audio info for %s\n", filename);
- closeAudioFile(sound);
- return 0;
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_MONO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_MONO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
}
-
- format = GetFormat(channels, type, alIsBufferFormatSupportedSOFT);
- if(format == AL_NONE)
+ else if(sample->actual.channels == 2)
{
- fprintf(stderr, "Unsupported format (%s, %s) for %s\n",
- ChannelsName(channels), TypeName(type), filename);
- closeAudioFile(sound);
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_STEREO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_STEREO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+ }
+ else
+ {
+ fprintf(stderr, "Unsupported channel count: %d\n", sample->actual.channels);
+ Sound_FreeSample(sample);
return 0;
}
/* Decode the whole audio stream to a buffer. */
- data = decodeAudioStream(sound, &datalen);
- if(!data)
+ slen = Sound_DecodeAll(sample);
+ if(!sample->buffer || slen == 0)
{
fprintf(stderr, "Failed to read audio from %s\n", filename);
- closeAudioFile(sound);
+ Sound_FreeSample(sample);
return 0;
}
@@ -100,17 +114,15 @@ static ALuint LoadSound(const char *filename)
* close the file. */
buffer = 0;
alGenBuffers(1, &buffer);
- alBufferSamplesSOFT(buffer, rate, format, BytesToFrames(datalen, channels, type),
- channels, type, data);
- free(data);
- closeAudioFile(sound);
+ alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate);
+ Sound_FreeSample(sample);
/* Check if an error occured, and clean up if so. */
err = alGetError();
if(err != AL_NO_ERROR)
{
fprintf(stderr, "OpenAL Error: %s\n", alGetString(err));
- if(alIsBuffer(buffer))
+ if(buffer && alIsBuffer(buffer))
alDeleteBuffers(1, &buffer);
return 0;
}
@@ -125,15 +137,16 @@ int main(int argc, char **argv)
ALdouble offsets[2];
ALenum state;
- /* Print out usage if no file was specified */
+ /* Print out usage if no arguments were specified */
if(argc < 2)
{
- fprintf(stderr, "Usage: %s <filename>\n", argv[0]);
+ fprintf(stderr, "Usage: %s [-device <name>] <filename>\n", argv[0]);
return 1;
}
- /* Initialize OpenAL with the default device, and check for EFX support. */
- if(InitAL() != 0)
+ /* Initialize OpenAL, and check for source_latency support. */
+ argv++; argc--;
+ if(InitAL(&argv, &argc) != 0)
return 1;
if(!alIsExtensionPresent("AL_SOFT_source_latency"))
@@ -157,18 +170,16 @@ int main(int argc, char **argv)
LOAD_PROC(alGetSourcei64SOFT);
LOAD_PROC(alGetSource3i64SOFT);
LOAD_PROC(alGetSourcei64vSOFT);
-
- if(alIsExtensionPresent("AL_SOFT_buffer_samples"))
- {
- LOAD_PROC(alBufferSamplesSOFT);
- LOAD_PROC(alIsBufferFormatSupportedSOFT);
- }
#undef LOAD_PROC
+ /* Initialize SDL_sound. */
+ Sound_Init();
+
/* Load the sound into a buffer. */
- buffer = LoadSound(argv[1]);
+ buffer = LoadSound(argv[0]);
if(!buffer)
{
+ Sound_Quit();
CloseAL();
return 1;
}
@@ -182,7 +193,7 @@ int main(int argc, char **argv)
/* Play the sound until it finishes. */
alSourcePlay(source);
do {
- Sleep(10);
+ al_nssleep(10000000);
alGetSourcei(source, AL_SOURCE_STATE, &state);
/* Get the source offset and latency. AL_SEC_OFFSET_LATENCY_SOFT will
@@ -194,10 +205,11 @@ int main(int argc, char **argv)
} while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);
printf("\n");
- /* All done. Delete resources, and close OpenAL. */
+ /* All done. Delete resources, and close down SDL_sound and OpenAL. */
alDeleteSources(1, &source);
alDeleteBuffers(1, &buffer);
+ Sound_Quit();
CloseAL();
return 0;
diff --git a/examples/alloopback.c b/examples/alloopback.c
index 04c92818..16553f9b 100644
--- a/examples/alloopback.c
+++ b/examples/alloopback.c
@@ -38,6 +38,13 @@
#include "common/alhelpers.h"
+#ifndef SDL_AUDIO_MASK_BITSIZE
+#define SDL_AUDIO_MASK_BITSIZE (0xFF)
+#endif
+#ifndef SDL_AUDIO_BITSIZE
+#define SDL_AUDIO_BITSIZE(x) (x & SDL_AUDIO_MASK_BITSIZE)
+#endif
+
#ifndef M_PI
#define M_PI (3.14159265358979323846)
#endif
@@ -61,6 +68,35 @@ void SDLCALL RenderSDLSamples(void *userdata, Uint8 *stream, int len)
}
+static const char *ChannelsName(ALCenum chans)
+{
+ switch(chans)
+ {
+ case ALC_MONO_SOFT: return "Mono";
+ case ALC_STEREO_SOFT: return "Stereo";
+ case ALC_QUAD_SOFT: return "Quadraphonic";
+ case ALC_5POINT1_SOFT: return "5.1 Surround";
+ case ALC_6POINT1_SOFT: return "6.1 Surround";
+ case ALC_7POINT1_SOFT: return "7.1 Surround";
+ }
+ return "Unknown Channels";
+}
+
+static const char *TypeName(ALCenum type)
+{
+ switch(type)
+ {
+ case ALC_BYTE_SOFT: return "S8";
+ case ALC_UNSIGNED_BYTE_SOFT: return "U8";
+ case ALC_SHORT_SOFT: return "S16";
+ case ALC_UNSIGNED_SHORT_SOFT: return "U16";
+ case ALC_INT_SOFT: return "S32";
+ case ALC_UNSIGNED_INT_SOFT: return "U32";
+ case ALC_FLOAT_SOFT: return "Float32";
+ }
+ return "Unknown Type";
+}
+
/* Creates a one second buffer containing a sine wave, and returns the new
* buffer ID. */
static ALuint CreateSineWave(void)
@@ -169,7 +205,7 @@ int main(int argc, char *argv[])
attrs[6] = 0; /* end of list */
- playback.FrameSize = FramesToBytes(1, attrs[1], attrs[3]);
+ playback.FrameSize = obtained.channels * SDL_AUDIO_BITSIZE(obtained.format) / 8;
/* Initialize OpenAL loopback device, using our format attributes. */
playback.Device = alcLoopbackOpenDeviceSOFT(NULL);
@@ -216,7 +252,7 @@ int main(int argc, char *argv[])
/* Play the sound until it finishes. */
alSourcePlay(source);
do {
- Sleep(10);
+ al_nssleep(10000000);
alGetSourcei(source, AL_SOURCE_STATE, &state);
} while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);
diff --git a/examples/almultireverb.c b/examples/almultireverb.c
new file mode 100644
index 00000000..a2587585
--- /dev/null
+++ b/examples/almultireverb.c
@@ -0,0 +1,696 @@
+/*
+ * OpenAL Multi-Zone Reverb Example
+ *
+ * Copyright (c) 2018 by Chris Robinson <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/* This file contains an example for controlling multiple reverb zones to
+ * smoothly transition between reverb environments. The general concept is to
+ * extend single-reverb by also tracking the closest adjacent environment, and
+ * utilize EAX Reverb's panning vectors to position them relative to the
+ * listener.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <math.h>
+
+#include <SDL_sound.h>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "AL/alext.h"
+#include "AL/efx-presets.h"
+
+#include "common/alhelpers.h"
+
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+
+
+/* Filter object functions */
+static LPALGENFILTERS alGenFilters;
+static LPALDELETEFILTERS alDeleteFilters;
+static LPALISFILTER alIsFilter;
+static LPALFILTERI alFilteri;
+static LPALFILTERIV alFilteriv;
+static LPALFILTERF alFilterf;
+static LPALFILTERFV alFilterfv;
+static LPALGETFILTERI alGetFilteri;
+static LPALGETFILTERIV alGetFilteriv;
+static LPALGETFILTERF alGetFilterf;
+static LPALGETFILTERFV alGetFilterfv;
+
+/* Effect object functions */
+static LPALGENEFFECTS alGenEffects;
+static LPALDELETEEFFECTS alDeleteEffects;
+static LPALISEFFECT alIsEffect;
+static LPALEFFECTI alEffecti;
+static LPALEFFECTIV alEffectiv;
+static LPALEFFECTF alEffectf;
+static LPALEFFECTFV alEffectfv;
+static LPALGETEFFECTI alGetEffecti;
+static LPALGETEFFECTIV alGetEffectiv;
+static LPALGETEFFECTF alGetEffectf;
+static LPALGETEFFECTFV alGetEffectfv;
+
+/* Auxiliary Effect Slot object functions */
+static LPALGENAUXILIARYEFFECTSLOTS alGenAuxiliaryEffectSlots;
+static LPALDELETEAUXILIARYEFFECTSLOTS alDeleteAuxiliaryEffectSlots;
+static LPALISAUXILIARYEFFECTSLOT alIsAuxiliaryEffectSlot;
+static LPALAUXILIARYEFFECTSLOTI alAuxiliaryEffectSloti;
+static LPALAUXILIARYEFFECTSLOTIV alAuxiliaryEffectSlotiv;
+static LPALAUXILIARYEFFECTSLOTF alAuxiliaryEffectSlotf;
+static LPALAUXILIARYEFFECTSLOTFV alAuxiliaryEffectSlotfv;
+static LPALGETAUXILIARYEFFECTSLOTI alGetAuxiliaryEffectSloti;
+static LPALGETAUXILIARYEFFECTSLOTIV alGetAuxiliaryEffectSlotiv;
+static LPALGETAUXILIARYEFFECTSLOTF alGetAuxiliaryEffectSlotf;
+static LPALGETAUXILIARYEFFECTSLOTFV alGetAuxiliaryEffectSlotfv;
+
+
+/* LoadEffect loads the given initial reverb properties into the given OpenAL
+ * effect object, and returns non-zero on success.
+ */
+static int LoadEffect(ALuint effect, const EFXEAXREVERBPROPERTIES *reverb)
+{
+ ALenum err;
+
+ alGetError();
+
+ /* Prepare the effect for EAX Reverb (standard reverb doesn't contain
+ * the needed panning vectors).
+ */
+ alEffecti(effect, AL_EFFECT_TYPE, AL_EFFECT_EAXREVERB);
+ if((err=alGetError()) != AL_NO_ERROR)
+ {
+ fprintf(stderr, "Failed to set EAX Reverb: %s (0x%04x)\n", alGetString(err), err);
+ return 0;
+ }
+
+ /* Load the reverb properties. */
+ alEffectf(effect, AL_EAXREVERB_DENSITY, reverb->flDensity);
+ alEffectf(effect, AL_EAXREVERB_DIFFUSION, reverb->flDiffusion);
+ alEffectf(effect, AL_EAXREVERB_GAIN, reverb->flGain);
+ alEffectf(effect, AL_EAXREVERB_GAINHF, reverb->flGainHF);
+ alEffectf(effect, AL_EAXREVERB_GAINLF, reverb->flGainLF);
+ alEffectf(effect, AL_EAXREVERB_DECAY_TIME, reverb->flDecayTime);
+ alEffectf(effect, AL_EAXREVERB_DECAY_HFRATIO, reverb->flDecayHFRatio);
+ alEffectf(effect, AL_EAXREVERB_DECAY_LFRATIO, reverb->flDecayLFRatio);
+ alEffectf(effect, AL_EAXREVERB_REFLECTIONS_GAIN, reverb->flReflectionsGain);
+ alEffectf(effect, AL_EAXREVERB_REFLECTIONS_DELAY, reverb->flReflectionsDelay);
+ alEffectfv(effect, AL_EAXREVERB_REFLECTIONS_PAN, reverb->flReflectionsPan);
+ alEffectf(effect, AL_EAXREVERB_LATE_REVERB_GAIN, reverb->flLateReverbGain);
+ alEffectf(effect, AL_EAXREVERB_LATE_REVERB_DELAY, reverb->flLateReverbDelay);
+ alEffectfv(effect, AL_EAXREVERB_LATE_REVERB_PAN, reverb->flLateReverbPan);
+ alEffectf(effect, AL_EAXREVERB_ECHO_TIME, reverb->flEchoTime);
+ alEffectf(effect, AL_EAXREVERB_ECHO_DEPTH, reverb->flEchoDepth);
+ alEffectf(effect, AL_EAXREVERB_MODULATION_TIME, reverb->flModulationTime);
+ alEffectf(effect, AL_EAXREVERB_MODULATION_DEPTH, reverb->flModulationDepth);
+ alEffectf(effect, AL_EAXREVERB_AIR_ABSORPTION_GAINHF, reverb->flAirAbsorptionGainHF);
+ alEffectf(effect, AL_EAXREVERB_HFREFERENCE, reverb->flHFReference);
+ alEffectf(effect, AL_EAXREVERB_LFREFERENCE, reverb->flLFReference);
+ alEffectf(effect, AL_EAXREVERB_ROOM_ROLLOFF_FACTOR, reverb->flRoomRolloffFactor);
+ alEffecti(effect, AL_EAXREVERB_DECAY_HFLIMIT, reverb->iDecayHFLimit);
+
+ /* Check if an error occured, and return failure if so. */
+ if((err=alGetError()) != AL_NO_ERROR)
+ {
+ fprintf(stderr, "Error setting up reverb: %s\n", alGetString(err));
+ return 0;
+ }
+
+ return 1;
+}
+
+
+/* LoadBuffer loads the named audio file into an OpenAL buffer object, and
+ * returns the new buffer ID.
+ */
+static ALuint LoadSound(const char *filename)
+{
+ Sound_Sample *sample;
+ ALenum err, format;
+ ALuint buffer;
+ Uint32 slen;
+
+ /* Open the audio file */
+ sample = Sound_NewSampleFromFile(filename, NULL, 65536);
+ if(!sample)
+ {
+ fprintf(stderr, "Could not open audio in %s\n", filename);
+ return 0;
+ }
+
+ /* Get the sound format, and figure out the OpenAL format */
+ if(sample->actual.channels == 1)
+ {
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_MONO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_MONO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+ }
+ else if(sample->actual.channels == 2)
+ {
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_STEREO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_STEREO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+ }
+ else
+ {
+ fprintf(stderr, "Unsupported channel count: %d\n", sample->actual.channels);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+
+ /* Decode the whole audio stream to a buffer. */
+ slen = Sound_DecodeAll(sample);
+ if(!sample->buffer || slen == 0)
+ {
+ fprintf(stderr, "Failed to read audio from %s\n", filename);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+
+ /* Buffer the audio data into a new buffer object, then free the data and
+ * close the file. */
+ buffer = 0;
+ alGenBuffers(1, &buffer);
+ alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate);
+ Sound_FreeSample(sample);
+
+ /* Check if an error occured, and clean up if so. */
+ err = alGetError();
+ if(err != AL_NO_ERROR)
+ {
+ fprintf(stderr, "OpenAL Error: %s\n", alGetString(err));
+ if(buffer && alIsBuffer(buffer))
+ alDeleteBuffers(1, &buffer);
+ return 0;
+ }
+
+ return buffer;
+}
+
+
+/* Helper to calculate the dot-product of the two given vectors. */
+static ALfloat dot_product(const ALfloat vec0[3], const ALfloat vec1[3])
+{
+ return vec0[0]*vec1[0] + vec0[1]*vec1[1] + vec0[2]*vec1[2];
+}
+
+/* Helper to normalize a given vector. */
+static void normalize(ALfloat vec[3])
+{
+ ALfloat mag = sqrtf(dot_product(vec, vec));
+ if(mag > 0.00001f)
+ {
+ vec[0] /= mag;
+ vec[1] /= mag;
+ vec[2] /= mag;
+ }
+ else
+ {
+ vec[0] = 0.0f;
+ vec[1] = 0.0f;
+ vec[2] = 0.0f;
+ }
+}
+
+
+/* The main update function to update the listener and environment effects. */
+static void UpdateListenerAndEffects(float timediff, const ALuint slots[2], const ALuint effects[2], const EFXEAXREVERBPROPERTIES reverbs[2])
+{
+ static const ALfloat listener_move_scale = 10.0f;
+ /* Individual reverb zones are connected via "portals". Each portal has a
+ * position (center point of the connecting area), a normal (facing
+ * direction), and a radius (approximate size of the connecting area).
+ */
+ const ALfloat portal_pos[3] = { 0.0f, 0.0f, 0.0f };
+ const ALfloat portal_norm[3] = { sqrtf(0.5f), 0.0f, -sqrtf(0.5f) };
+ const ALfloat portal_radius = 2.5f;
+ ALfloat other_dir[3], this_dir[3];
+ ALfloat listener_pos[3];
+ ALfloat local_norm[3];
+ ALfloat local_dir[3];
+ ALfloat near_edge[3];
+ ALfloat far_edge[3];
+ ALfloat dist, edist;
+
+ /* Update the listener position for the amount of time passed. This uses a
+ * simple triangular LFO to offset the position (moves along the X axis
+ * between -listener_move_scale and +listener_move_scale for each
+ * transition).
+ */
+ listener_pos[0] = (fabsf(2.0f - timediff/2.0f) - 1.0f) * listener_move_scale;
+ listener_pos[1] = 0.0f;
+ listener_pos[2] = 0.0f;
+ alListenerfv(AL_POSITION, listener_pos);
+
+ /* Calculate local_dir, which represents the listener-relative point to the
+ * adjacent zone (should also include orientation). Because EAX Reverb uses
+ * left-handed coordinates instead of right-handed like the rest of OpenAL,
+ * negate Z for the local values.
+ */
+ local_dir[0] = portal_pos[0] - listener_pos[0];
+ local_dir[1] = portal_pos[1] - listener_pos[1];
+ local_dir[2] = -(portal_pos[2] - listener_pos[2]);
+ /* A normal application would also rotate the portal's normal given the
+ * listener orientation, to get the listener-relative normal.
+ */
+ local_norm[0] = portal_norm[0];
+ local_norm[1] = portal_norm[1];
+ local_norm[2] = -portal_norm[2];
+
+ /* Calculate the distance from the listener to the portal, and ensure it's
+ * far enough away to not suffer severe floating-point precision issues.
+ */
+ dist = sqrtf(dot_product(local_dir, local_dir));
+ if(dist > 0.00001f)
+ {
+ const EFXEAXREVERBPROPERTIES *other_reverb, *this_reverb;
+ ALuint other_effect, this_effect;
+ ALfloat magnitude, dir_dot_norm;
+
+ /* Normalize the direction to the portal. */
+ local_dir[0] /= dist;
+ local_dir[1] /= dist;
+ local_dir[2] /= dist;
+
+ /* Calculate the dot product of the portal's local direction and local
+ * normal, which is used for angular and side checks later on.
+ */
+ dir_dot_norm = dot_product(local_dir, local_norm);
+
+ /* Figure out which zone we're in. */
+ if(dir_dot_norm <= 0.0f)
+ {
+ /* We're in front of the portal, so we're in Zone 0. */
+ this_effect = effects[0];
+ other_effect = effects[1];
+ this_reverb = &reverbs[0];
+ other_reverb = &reverbs[1];
+ }
+ else
+ {
+ /* We're behind the portal, so we're in Zone 1. */
+ this_effect = effects[1];
+ other_effect = effects[0];
+ this_reverb = &reverbs[1];
+ other_reverb = &reverbs[0];
+ }
+
+ /* Calculate the listener-relative extents of the portal. */
+ /* First, project the listener-to-portal vector onto the portal's plane
+ * to get the portal-relative direction along the plane that goes away
+ * from the listener (toward the farthest edge of the portal).
+ */
+ far_edge[0] = local_dir[0] - local_norm[0]*dir_dot_norm;
+ far_edge[1] = local_dir[1] - local_norm[1]*dir_dot_norm;
+ far_edge[2] = local_dir[2] - local_norm[2]*dir_dot_norm;
+
+ edist = sqrtf(dot_product(far_edge, far_edge));
+ if(edist > 0.0001f)
+ {
+ /* Rescale the portal-relative vector to be at the radius edge. */
+ ALfloat mag = portal_radius / edist;
+ far_edge[0] *= mag;
+ far_edge[1] *= mag;
+ far_edge[2] *= mag;
+
+ /* Calculate the closest edge of the portal by negating the
+ * farthest, and add an offset to make them both relative to the
+ * listener.
+ */
+ near_edge[0] = local_dir[0]*dist - far_edge[0];
+ near_edge[1] = local_dir[1]*dist - far_edge[1];
+ near_edge[2] = local_dir[2]*dist - far_edge[2];
+ far_edge[0] += local_dir[0]*dist;
+ far_edge[1] += local_dir[1]*dist;
+ far_edge[2] += local_dir[2]*dist;
+
+ /* Normalize the listener-relative extents of the portal, then
+ * calculate the panning magnitude for the other zone given the
+ * apparent size of the opening. The panning magnitude affects the
+ * envelopment of the environment, with 1 being a point, 0.5 being
+ * half coverage around the listener, and 0 being full coverage.
+ */
+ normalize(far_edge);
+ normalize(near_edge);
+ magnitude = 1.0f - acosf(dot_product(far_edge, near_edge))/(float)(M_PI*2.0);
+
+ /* Recalculate the panning direction, to be directly between the
+ * direction of the two extents.
+ */
+ local_dir[0] = far_edge[0] + near_edge[0];
+ local_dir[1] = far_edge[1] + near_edge[1];
+ local_dir[2] = far_edge[2] + near_edge[2];
+ normalize(local_dir);
+ }
+ else
+ {
+ /* If we get here, the listener is directly in front of or behind
+ * the center of the portal, making all aperture edges effectively
+ * equidistant. Calculating the panning magnitude is simplified,
+ * using the arctangent of the radius and distance.
+ */
+ magnitude = 1.0f - (atan2f(portal_radius, dist) / (float)M_PI);
+ }
+
+ /* Scale the other zone's panning vector. */
+ other_dir[0] = local_dir[0] * magnitude;
+ other_dir[1] = local_dir[1] * magnitude;
+ other_dir[2] = local_dir[2] * magnitude;
+ /* Pan the current zone to the opposite direction of the portal, and
+ * take the remaining percentage of the portal's magnitude.
+ */
+ this_dir[0] = local_dir[0] * (magnitude-1.0f);
+ this_dir[1] = local_dir[1] * (magnitude-1.0f);
+ this_dir[2] = local_dir[2] * (magnitude-1.0f);
+
+ /* Now set the effects' panning vectors and gain. Energy is shared
+ * between environments, so attenuate according to each zone's
+ * contribution (note: gain^2 = energy).
+ */
+ alEffectf(this_effect, AL_EAXREVERB_REFLECTIONS_GAIN, this_reverb->flReflectionsGain * sqrtf(magnitude));
+ alEffectf(this_effect, AL_EAXREVERB_LATE_REVERB_GAIN, this_reverb->flLateReverbGain * sqrtf(magnitude));
+ alEffectfv(this_effect, AL_EAXREVERB_REFLECTIONS_PAN, this_dir);
+ alEffectfv(this_effect, AL_EAXREVERB_LATE_REVERB_PAN, this_dir);
+
+ alEffectf(other_effect, AL_EAXREVERB_REFLECTIONS_GAIN, other_reverb->flReflectionsGain * sqrtf(1.0f-magnitude));
+ alEffectf(other_effect, AL_EAXREVERB_LATE_REVERB_GAIN, other_reverb->flLateReverbGain * sqrtf(1.0f-magnitude));
+ alEffectfv(other_effect, AL_EAXREVERB_REFLECTIONS_PAN, other_dir);
+ alEffectfv(other_effect, AL_EAXREVERB_LATE_REVERB_PAN, other_dir);
+ }
+ else
+ {
+ /* We're practically in the center of the portal. Give the panning
+ * vectors a 50/50 split, with Zone 0 covering the half in front of
+ * the normal, and Zone 1 covering the half behind.
+ */
+ this_dir[0] = local_norm[0] / 2.0f;
+ this_dir[1] = local_norm[1] / 2.0f;
+ this_dir[2] = local_norm[2] / 2.0f;
+
+ other_dir[0] = local_norm[0] / -2.0f;
+ other_dir[1] = local_norm[1] / -2.0f;
+ other_dir[2] = local_norm[2] / -2.0f;
+
+ alEffectf(effects[0], AL_EAXREVERB_REFLECTIONS_GAIN, reverbs[0].flReflectionsGain * sqrtf(0.5f));
+ alEffectf(effects[0], AL_EAXREVERB_LATE_REVERB_GAIN, reverbs[0].flLateReverbGain * sqrtf(0.5f));
+ alEffectfv(effects[0], AL_EAXREVERB_REFLECTIONS_PAN, this_dir);
+ alEffectfv(effects[0], AL_EAXREVERB_LATE_REVERB_PAN, this_dir);
+
+ alEffectf(effects[1], AL_EAXREVERB_REFLECTIONS_GAIN, reverbs[1].flReflectionsGain * sqrtf(0.5f));
+ alEffectf(effects[1], AL_EAXREVERB_LATE_REVERB_GAIN, reverbs[1].flLateReverbGain * sqrtf(0.5f));
+ alEffectfv(effects[1], AL_EAXREVERB_REFLECTIONS_PAN, other_dir);
+ alEffectfv(effects[1], AL_EAXREVERB_LATE_REVERB_PAN, other_dir);
+ }
+
+ /* Finally, update the effect slots with the updated effect parameters. */
+ alAuxiliaryEffectSloti(slots[0], AL_EFFECTSLOT_EFFECT, effects[0]);
+ alAuxiliaryEffectSloti(slots[1], AL_EFFECTSLOT_EFFECT, effects[1]);
+}
+
+
+int main(int argc, char **argv)
+{
+ static const int MaxTransitions = 8;
+ EFXEAXREVERBPROPERTIES reverbs[2] = {
+ EFX_REVERB_PRESET_CARPETEDHALLWAY,
+ EFX_REVERB_PRESET_BATHROOM
+ };
+ struct timespec basetime;
+ ALCdevice *device = NULL;
+ ALCcontext *context = NULL;
+ ALuint effects[2] = { 0, 0 };
+ ALuint slots[2] = { 0, 0 };
+ ALuint direct_filter = 0;
+ ALuint buffer = 0;
+ ALuint source = 0;
+ ALCint num_sends = 0;
+ ALenum state = AL_INITIAL;
+ ALfloat direct_gain = 1.0f;
+ int loops = 0;
+
+ /* Print out usage if no arguments were specified */
+ if(argc < 2)
+ {
+ fprintf(stderr, "Usage: %s [-device <name>] [options] <filename>\n\n"
+ "Options:\n"
+ "\t-nodirect\tSilence direct path output (easier to hear reverb)\n\n",
+ argv[0]);
+ return 1;
+ }
+
+ /* Initialize OpenAL, and check for EFX support with at least 2 auxiliary
+ * sends (if multiple sends are supported, 2 are provided by default; if
+ * you want more, you have to request it through alcCreateContext).
+ */
+ argv++; argc--;
+ if(InitAL(&argv, &argc) != 0)
+ return 1;
+
+ while(argc > 0)
+ {
+ if(strcmp(argv[0], "-nodirect") == 0)
+ direct_gain = 0.0f;
+ else
+ break;
+ argv++;
+ argc--;
+ }
+ if(argc < 1)
+ {
+ fprintf(stderr, "No filename spacified.\n");
+ CloseAL();
+ return 1;
+ }
+
+ context = alcGetCurrentContext();
+ device = alcGetContextsDevice(context);
+
+ if(!alcIsExtensionPresent(device, "ALC_EXT_EFX"))
+ {
+ fprintf(stderr, "Error: EFX not supported\n");
+ CloseAL();
+ return 1;
+ }
+
+ num_sends = 0;
+ alcGetIntegerv(device, ALC_MAX_AUXILIARY_SENDS, 1, &num_sends);
+ if(alcGetError(device) != ALC_NO_ERROR || num_sends < 2)
+ {
+ fprintf(stderr, "Error: Device does not support multiple sends (got %d, need 2)\n",
+ num_sends);
+ CloseAL();
+ return 1;
+ }
+
+ /* Define a macro to help load the function pointers. */
+#define LOAD_PROC(x) ((x) = alGetProcAddress(#x))
+ LOAD_PROC(alGenFilters);
+ LOAD_PROC(alDeleteFilters);
+ LOAD_PROC(alIsFilter);
+ LOAD_PROC(alFilteri);
+ LOAD_PROC(alFilteriv);
+ LOAD_PROC(alFilterf);
+ LOAD_PROC(alFilterfv);
+ LOAD_PROC(alGetFilteri);
+ LOAD_PROC(alGetFilteriv);
+ LOAD_PROC(alGetFilterf);
+ LOAD_PROC(alGetFilterfv);
+
+ LOAD_PROC(alGenEffects);
+ LOAD_PROC(alDeleteEffects);
+ LOAD_PROC(alIsEffect);
+ LOAD_PROC(alEffecti);
+ LOAD_PROC(alEffectiv);
+ LOAD_PROC(alEffectf);
+ LOAD_PROC(alEffectfv);
+ LOAD_PROC(alGetEffecti);
+ LOAD_PROC(alGetEffectiv);
+ LOAD_PROC(alGetEffectf);
+ LOAD_PROC(alGetEffectfv);
+
+ LOAD_PROC(alGenAuxiliaryEffectSlots);
+ LOAD_PROC(alDeleteAuxiliaryEffectSlots);
+ LOAD_PROC(alIsAuxiliaryEffectSlot);
+ LOAD_PROC(alAuxiliaryEffectSloti);
+ LOAD_PROC(alAuxiliaryEffectSlotiv);
+ LOAD_PROC(alAuxiliaryEffectSlotf);
+ LOAD_PROC(alAuxiliaryEffectSlotfv);
+ LOAD_PROC(alGetAuxiliaryEffectSloti);
+ LOAD_PROC(alGetAuxiliaryEffectSlotiv);
+ LOAD_PROC(alGetAuxiliaryEffectSlotf);
+ LOAD_PROC(alGetAuxiliaryEffectSlotfv);
+#undef LOAD_PROC
+
+ /* Initialize SDL_sound. */
+ Sound_Init();
+
+ /* Load the sound into a buffer. */
+ buffer = LoadSound(argv[0]);
+ if(!buffer)
+ {
+ CloseAL();
+ Sound_Quit();
+ return 1;
+ }
+
+ /* Generate two effects for two "zones", and load a reverb into each one.
+ * Note that unlike single-zone reverb, where you can store one effect per
+ * preset, for multi-zone reverb you should have one effect per environment
+ * instance, or one per audible zone. This is because we'll be changing the
+ * effects' properties in real-time based on the environment instance
+ * relative to the listener.
+ */
+ alGenEffects(2, effects);
+ if(!LoadEffect(effects[0], &reverbs[0]) || !LoadEffect(effects[1], &reverbs[1]))
+ {
+ alDeleteEffects(2, effects);
+ alDeleteBuffers(1, &buffer);
+ Sound_Quit();
+ CloseAL();
+ return 1;
+ }
+
+ /* Create the effect slot objects, one for each "active" effect. */
+ alGenAuxiliaryEffectSlots(2, slots);
+
+ /* Tell the effect slots to use the loaded effect objects, with slot 0 for
+ * Zone 0 and slot 1 for Zone 1. Note that this effectively copies the
+ * effect properties. Modifying or deleting the effect object afterward
+ * won't directly affect the effect slot until they're reapplied like this.
+ */
+ alAuxiliaryEffectSloti(slots[0], AL_EFFECTSLOT_EFFECT, effects[0]);
+ alAuxiliaryEffectSloti(slots[1], AL_EFFECTSLOT_EFFECT, effects[1]);
+ assert(alGetError()==AL_NO_ERROR && "Failed to set effect slot");
+
+ /* For the purposes of this example, prepare a filter that optionally
+ * silences the direct path which allows us to hear just the reverberation.
+ * A filter like this is normally used for obstruction, where the path
+ * directly between the listener and source is blocked (the exact
+ * properties depending on the type and thickness of the obstructing
+ * material).
+ */
+ alGenFilters(1, &direct_filter);
+ alFilteri(direct_filter, AL_FILTER_TYPE, AL_FILTER_LOWPASS);
+ alFilterf(direct_filter, AL_LOWPASS_GAIN, direct_gain);
+ assert(alGetError()==AL_NO_ERROR && "Failed to set direct filter");
+
+ /* Create the source to play the sound with, place it in front of the
+ * listener's path in the left zone.
+ */
+ source = 0;
+ alGenSources(1, &source);
+ alSourcei(source, AL_LOOPING, AL_TRUE);
+ alSource3f(source, AL_POSITION, -5.0f, 0.0f, -2.0f);
+ alSourcei(source, AL_DIRECT_FILTER, direct_filter);
+ alSourcei(source, AL_BUFFER, buffer);
+
+ /* Connect the source to the effect slots. Here, we connect source send 0
+ * to Zone 0's slot, and send 1 to Zone 1's slot. Filters can be specified
+ * to occlude the source from each zone by varying amounts; for example, a
+ * source within a particular zone would be unfiltered, while a source that
+ * can only see a zone through a window or thin wall may be attenuated for
+ * that zone.
+ */
+ alSource3i(source, AL_AUXILIARY_SEND_FILTER, slots[0], 0, AL_FILTER_NULL);
+ alSource3i(source, AL_AUXILIARY_SEND_FILTER, slots[1], 1, AL_FILTER_NULL);
+ assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source");
+
+ /* Get the current time as the base for timing in the main loop. */
+ altimespec_get(&basetime, AL_TIME_UTC);
+ loops = 0;
+ printf("Transition %d of %d...\n", loops+1, MaxTransitions);
+
+ /* Play the sound for a while. */
+ alSourcePlay(source);
+ do {
+ struct timespec curtime;
+ ALfloat timediff;
+
+ /* Start a batch update, to ensure all changes apply simultaneously. */
+ alcSuspendContext(context);
+
+ /* Get the current time to track the amount of time that passed.
+ * Convert the difference to seconds.
+ */
+ altimespec_get(&curtime, AL_TIME_UTC);
+ timediff = (ALfloat)(curtime.tv_sec - basetime.tv_sec);
+ timediff += (ALfloat)(curtime.tv_nsec - basetime.tv_nsec) / 1000000000.0f;
+
+ /* Avoid negative time deltas, in case of non-monotonic clocks. */
+ if(timediff < 0.0f)
+ timediff = 0.0f;
+ else while(timediff >= 4.0f*((loops&1)+1))
+ {
+ /* For this example, each transition occurs over 4 seconds, and
+ * there's 2 transitions per cycle.
+ */
+ if(++loops < MaxTransitions)
+ printf("Transition %d of %d...\n", loops+1, MaxTransitions);
+ if(!(loops&1))
+ {
+ /* Cycle completed. Decrease the delta and increase the base
+ * time to start a new cycle.
+ */
+ timediff -= 8.0f;
+ basetime.tv_sec += 8;
+ }
+ }
+
+ /* Update the listener and effects, and finish the batch. */
+ UpdateListenerAndEffects(timediff, slots, effects, reverbs);
+ alcProcessContext(context);
+
+ al_nssleep(10000000);
+
+ alGetSourcei(source, AL_SOURCE_STATE, &state);
+ } while(alGetError() == AL_NO_ERROR && state == AL_PLAYING && loops < MaxTransitions);
+
+ /* All done. Delete resources, and close down SDL_sound and OpenAL. */
+ alDeleteSources(1, &source);
+ alDeleteAuxiliaryEffectSlots(2, slots);
+ alDeleteEffects(2, effects);
+ alDeleteFilters(1, &direct_filter);
+ alDeleteBuffers(1, &buffer);
+
+ Sound_Quit();
+ CloseAL();
+
+ return 0;
+}
diff --git a/examples/alplay.c b/examples/alplay.c
new file mode 100644
index 00000000..81cb56d5
--- /dev/null
+++ b/examples/alplay.c
@@ -0,0 +1,177 @@
+/*
+ * OpenAL Source Play Example
+ *
+ * Copyright (c) 2017 by Chris Robinson <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/* This file contains an example for playing a sound buffer. */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include <SDL_sound.h>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+
+#include "common/alhelpers.h"
+
+
+/* LoadBuffer loads the named audio file into an OpenAL buffer object, and
+ * returns the new buffer ID.
+ */
+static ALuint LoadSound(const char *filename)
+{
+ Sound_Sample *sample;
+ ALenum err, format;
+ ALuint buffer;
+ Uint32 slen;
+
+ /* Open the audio file */
+ sample = Sound_NewSampleFromFile(filename, NULL, 65536);
+ if(!sample)
+ {
+ fprintf(stderr, "Could not open audio in %s\n", filename);
+ return 0;
+ }
+
+ /* Get the sound format, and figure out the OpenAL format */
+ if(sample->actual.channels == 1)
+ {
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_MONO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_MONO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+ }
+ else if(sample->actual.channels == 2)
+ {
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_STEREO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_STEREO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+ }
+ else
+ {
+ fprintf(stderr, "Unsupported channel count: %d\n", sample->actual.channels);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+
+ /* Decode the whole audio stream to a buffer. */
+ slen = Sound_DecodeAll(sample);
+ if(!sample->buffer || slen == 0)
+ {
+ fprintf(stderr, "Failed to read audio from %s\n", filename);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+
+ /* Buffer the audio data into a new buffer object, then free the data and
+ * close the file. */
+ buffer = 0;
+ alGenBuffers(1, &buffer);
+ alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate);
+ Sound_FreeSample(sample);
+
+ /* Check if an error occurred, and clean up if so. */
+ err = alGetError();
+ if(err != AL_NO_ERROR)
+ {
+ fprintf(stderr, "OpenAL Error: %s\n", alGetString(err));
+ if(buffer && alIsBuffer(buffer))
+ alDeleteBuffers(1, &buffer);
+ return 0;
+ }
+
+ return buffer;
+}
+
+
+int main(int argc, char **argv)
+{
+ ALuint source, buffer;
+ ALfloat offset;
+ ALenum state;
+
+ /* Print out usage if no arguments were specified */
+ if(argc < 2)
+ {
+ fprintf(stderr, "Usage: %s [-device <name>] <filename>\n", argv[0]);
+ return 1;
+ }
+
+ /* Initialize OpenAL. */
+ argv++; argc--;
+ if(InitAL(&argv, &argc) != 0)
+ return 1;
+
+ /* Initialize SDL_sound. */
+ Sound_Init();
+
+ /* Load the sound into a buffer. */
+ buffer = LoadSound(argv[0]);
+ if(!buffer)
+ {
+ Sound_Quit();
+ CloseAL();
+ return 1;
+ }
+
+ /* Create the source to play the sound with. */
+ source = 0;
+ alGenSources(1, &source);
+ alSourcei(source, AL_BUFFER, buffer);
+ assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source");
+
+ /* Play the sound until it finishes. */
+ alSourcePlay(source);
+ do {
+ al_nssleep(10000000);
+ alGetSourcei(source, AL_SOURCE_STATE, &state);
+
+ /* Get the source offset. */
+ alGetSourcef(source, AL_SEC_OFFSET, &offset);
+ printf("\rOffset: %f ", offset);
+ fflush(stdout);
+ } while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);
+ printf("\n");
+
+ /* All done. Delete resources, and close down SDL_sound and OpenAL. */
+ alDeleteSources(1, &source);
+ alDeleteBuffers(1, &buffer);
+
+ Sound_Quit();
+ CloseAL();
+
+ return 0;
+}
diff --git a/examples/alrecord.c b/examples/alrecord.c
new file mode 100644
index 00000000..43b26d35
--- /dev/null
+++ b/examples/alrecord.c
@@ -0,0 +1,394 @@
+/*
+ * OpenAL Recording Example
+ *
+ * Copyright (c) 2017 by Chris Robinson <[email protected]>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/* This file contains a relatively simple recorder. */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+
+#include "AL/al.h"
+#include "AL/alc.h"
+#include "AL/alext.h"
+
+#include "common/alhelpers.h"
+
+
+#if defined(_WIN64)
+#define SZFMT "%I64u"
+#elif defined(_WIN32)
+#define SZFMT "%u"
+#else
+#define SZFMT "%zu"
+#endif
+
+
+#if defined(_MSC_VER) && (_MSC_VER < 1900)
+static float msvc_strtof(const char *str, char **end)
+{ return (float)strtod(str, end); }
+#define strtof msvc_strtof
+#endif
+
+
+static void fwrite16le(ALushort val, FILE *f)
+{
+ ALubyte data[2] = { val&0xff, (val>>8)&0xff };
+ fwrite(data, 1, 2, f);
+}
+
+static void fwrite32le(ALuint val, FILE *f)
+{
+ ALubyte data[4] = { val&0xff, (val>>8)&0xff, (val>>16)&0xff, (val>>24)&0xff };
+ fwrite(data, 1, 4, f);
+}
+
+
+typedef struct Recorder {
+ ALCdevice *mDevice;
+
+ FILE *mFile;
+ long mDataSizeOffset;
+ ALuint mDataSize;
+ float mRecTime;
+
+ int mChannels;
+ int mBits;
+ int mSampleRate;
+ ALuint mFrameSize;
+ ALbyte *mBuffer;
+ ALsizei mBufferSize;
+} Recorder;
+
+int main(int argc, char **argv)
+{
+ static const char optlist[] =
+" --channels/-c <channels> Set channel count (1 or 2)\n"
+" --bits/-b <bits> Set sample bit depth (8, 16, or 32)\n"
+" --rate/-r <rate> Set sample rate (8000 to 96000)\n"
+" --time/-t <time> Time in seconds to record (1 to 10)\n"
+" --outfile/-o <filename> Output filename (default: record.wav)";
+ const char *fname = "record.wav";
+ const char *devname = NULL;
+ const char *progname;
+ Recorder recorder;
+ long total_size;
+ ALenum format;
+ ALCenum err;
+
+ progname = argv[0];
+ if(argc < 2)
+ {
+ fprintf(stderr, "Record from a device to a wav file.\n\n"
+ "Usage: %s [-device <name>] [options...]\n\n"
+ "Available options:\n%s\n", progname, optlist);
+ return 0;
+ }
+
+ recorder.mDevice = NULL;
+ recorder.mFile = NULL;
+ recorder.mDataSizeOffset = 0;
+ recorder.mDataSize = 0;
+ recorder.mRecTime = 4.0f;
+ recorder.mChannels = 1;
+ recorder.mBits = 16;
+ recorder.mSampleRate = 44100;
+ recorder.mFrameSize = recorder.mChannels * recorder.mBits / 8;
+ recorder.mBuffer = NULL;
+ recorder.mBufferSize = 0;
+
+ argv++; argc--;
+ if(argc > 1 && strcmp(argv[0], "-device") == 0)
+ {
+ devname = argv[1];
+ argv += 2;
+ argc -= 2;
+ }
+
+ while(argc > 0)
+ {
+ char *end;
+ if(strcmp(argv[0], "--") == 0)
+ break;
+ else if(strcmp(argv[0], "--channels") == 0 || strcmp(argv[0], "-c") == 0)
+ {
+ if(!(argc > 1))
+ {
+ fprintf(stderr, "Missing argument for option: %s\n", argv[0]);
+ return 1;
+ }
+
+ recorder.mChannels = strtol(argv[1], &end, 0);
+ if((recorder.mChannels != 1 && recorder.mChannels != 2) || (end && *end != '\0'))
+ {
+ fprintf(stderr, "Invalid channels: %s\n", argv[1]);
+ return 1;
+ }
+ argv += 2;
+ argc -= 2;
+ }
+ else if(strcmp(argv[0], "--bits") == 0 || strcmp(argv[0], "-b") == 0)
+ {
+ if(!(argc > 1))
+ {
+ fprintf(stderr, "Missing argument for option: %s\n", argv[0]);
+ return 1;
+ }
+
+ recorder.mBits = strtol(argv[1], &end, 0);
+ if((recorder.mBits != 8 && recorder.mBits != 16 && recorder.mBits != 32) ||
+ (end && *end != '\0'))
+ {
+ fprintf(stderr, "Invalid bit count: %s\n", argv[1]);
+ return 1;
+ }
+ argv += 2;
+ argc -= 2;
+ }
+ else if(strcmp(argv[0], "--rate") == 0 || strcmp(argv[0], "-r") == 0)
+ {
+ if(!(argc > 1))
+ {
+ fprintf(stderr, "Missing argument for option: %s\n", argv[0]);
+ return 1;
+ }
+
+ recorder.mSampleRate = strtol(argv[1], &end, 0);
+ if(!(recorder.mSampleRate >= 8000 && recorder.mSampleRate <= 96000) || (end && *end != '\0'))
+ {
+ fprintf(stderr, "Invalid sample rate: %s\n", argv[1]);
+ return 1;
+ }
+ argv += 2;
+ argc -= 2;
+ }
+ else if(strcmp(argv[0], "--time") == 0 || strcmp(argv[0], "-t") == 0)
+ {
+ if(!(argc > 1))
+ {
+ fprintf(stderr, "Missing argument for option: %s\n", argv[0]);
+ return 1;
+ }
+
+ recorder.mRecTime = strtof(argv[1], &end);
+ if(!(recorder.mRecTime >= 1.0f && recorder.mRecTime <= 10.0f) || (end && *end != '\0'))
+ {
+ fprintf(stderr, "Invalid record time: %s\n", argv[1]);
+ return 1;
+ }
+ argv += 2;
+ argc -= 2;
+ }
+ else if(strcmp(argv[0], "--outfile") == 0 || strcmp(argv[0], "-o") == 0)
+ {
+ if(!(argc > 1))
+ {
+ fprintf(stderr, "Missing argument for option: %s\n", argv[0]);
+ return 1;
+ }
+
+ fname = argv[1];
+ argv += 2;
+ argc -= 2;
+ }
+ else if(strcmp(argv[0], "--help") == 0 || strcmp(argv[0], "-h") == 0)
+ {
+ fprintf(stderr, "Record from a device to a wav file.\n\n"
+ "Usage: %s [-device <name>] [options...]\n\n"
+ "Available options:\n%s\n", progname, optlist);
+ return 0;
+ }
+ else
+ {
+ fprintf(stderr, "Invalid option '%s'.\n\n"
+ "Usage: %s [-device <name>] [options...]\n\n"
+ "Available options:\n%s\n", argv[0], progname, optlist);
+ return 0;
+ }
+ }
+
+ recorder.mFrameSize = recorder.mChannels * recorder.mBits / 8;
+
+ format = AL_NONE;
+ if(recorder.mChannels == 1)
+ {
+ if(recorder.mBits == 8)
+ format = AL_FORMAT_MONO8;
+ else if(recorder.mBits == 16)
+ format = AL_FORMAT_MONO16;
+ else if(recorder.mBits == 32)
+ format = AL_FORMAT_MONO_FLOAT32;
+ }
+ else if(recorder.mChannels == 2)
+ {
+ if(recorder.mBits == 8)
+ format = AL_FORMAT_STEREO8;
+ else if(recorder.mBits == 16)
+ format = AL_FORMAT_STEREO16;
+ else if(recorder.mBits == 32)
+ format = AL_FORMAT_STEREO_FLOAT32;
+ }
+
+ recorder.mDevice = alcCaptureOpenDevice(devname, recorder.mSampleRate, format, 32768);
+ if(!recorder.mDevice)
+ {
+ fprintf(stderr, "Failed to open %s, %s %d-bit, %s, %dhz (%d samples)\n",
+ devname ? devname : "default device",
+ (recorder.mBits == 32) ? "Float" :
+ (recorder.mBits != 8) ? "Signed" : "Unsigned", recorder.mBits,
+ (recorder.mChannels == 1) ? "Mono" : "Stereo", recorder.mSampleRate,
+ 32768
+ );
+ return 1;
+ }
+ fprintf(stderr, "Opened \"%s\"\n", alcGetString(
+ recorder.mDevice, ALC_CAPTURE_DEVICE_SPECIFIER
+ ));
+
+ recorder.mFile = fopen(fname, "wb");
+ if(!recorder.mFile)
+ {
+ fprintf(stderr, "Failed to open '%s' for writing\n", fname);
+ alcCaptureCloseDevice(recorder.mDevice);
+ return 1;
+ }
+
+ fputs("RIFF", recorder.mFile);
+ fwrite32le(0xFFFFFFFF, recorder.mFile); // 'RIFF' header len; filled in at close
+
+ fputs("WAVE", recorder.mFile);
+
+ fputs("fmt ", recorder.mFile);
+ fwrite32le(18, recorder.mFile); // 'fmt ' header len
+
+ // 16-bit val, format type id (1 = integer PCM, 3 = float PCM)
+ fwrite16le((recorder.mBits == 32) ? 0x0003 : 0x0001, recorder.mFile);
+ // 16-bit val, channel count
+ fwrite16le(recorder.mChannels, recorder.mFile);
+ // 32-bit val, frequency
+ fwrite32le(recorder.mSampleRate, recorder.mFile);
+ // 32-bit val, bytes per second
+ fwrite32le(recorder.mSampleRate * recorder.mFrameSize, recorder.mFile);
+ // 16-bit val, frame size
+ fwrite16le(recorder.mFrameSize, recorder.mFile);
+ // 16-bit val, bits per sample
+ fwrite16le(recorder.mBits, recorder.mFile);
+ // 16-bit val, extra byte count
+ fwrite16le(0, recorder.mFile);
+
+ fputs("data", recorder.mFile);
+ fwrite32le(0xFFFFFFFF, recorder.mFile); // 'data' header len; filled in at close
+
+ recorder.mDataSizeOffset = ftell(recorder.mFile) - 4;
+ if(ferror(recorder.mFile) || recorder.mDataSizeOffset < 0)
+ {
+ fprintf(stderr, "Error writing header: %s\n", strerror(errno));
+ fclose(recorder.mFile);
+ alcCaptureCloseDevice(recorder.mDevice);
+ return 1;
+ }
+
+ fprintf(stderr, "Recording '%s', %s %d-bit, %s, %dhz (%g second%s)\n", fname,
+ (recorder.mBits == 32) ? "Float" :
+ (recorder.mBits != 8) ? "Signed" : "Unsigned", recorder.mBits,
+ (recorder.mChannels == 1) ? "Mono" : "Stereo", recorder.mSampleRate,
+ recorder.mRecTime, (recorder.mRecTime != 1.0f) ? "s" : ""
+ );
+
+ alcCaptureStart(recorder.mDevice);
+ while((double)recorder.mDataSize/(double)recorder.mSampleRate < recorder.mRecTime &&
+ (err=alcGetError(recorder.mDevice)) == ALC_NO_ERROR && !ferror(recorder.mFile))
+ {
+ ALCint count = 0;
+ fprintf(stderr, "\rCaptured %u samples", recorder.mDataSize);
+ alcGetIntegerv(recorder.mDevice, ALC_CAPTURE_SAMPLES, 1, &count);
+ if(count < 1)
+ {
+ al_nssleep(10000000);
+ continue;
+ }
+ if(count > recorder.mBufferSize)
+ {
+ ALbyte *data = calloc(recorder.mFrameSize, count);
+ free(recorder.mBuffer);
+ recorder.mBuffer = data;
+ recorder.mBufferSize = count;
+ }
+ alcCaptureSamples(recorder.mDevice, recorder.mBuffer, count);
+#if defined(__BYTE_ORDER) && __BYTE_ORDER == __BIG_ENDIAN
+ /* Byteswap multibyte samples on big-endian systems (wav needs little-
+ * endian, and OpenAL gives the system's native-endian).
+ */
+ if(recorder.mBits == 16)
+ {
+ ALCint i;
+ for(i = 0;i < count*recorder.mChannels;i++)
+ {
+ ALbyte b = recorder.mBuffer[i*2 + 0];
+ recorder.mBuffer[i*2 + 0] = recorder.mBuffer[i*2 + 1];
+ recorder.mBuffer[i*2 + 1] = b;
+ }
+ }
+ else if(recorder.mBits == 32)
+ {
+ ALCint i;
+ for(i = 0;i < count*recorder.mChannels;i++)
+ {
+ ALbyte b0 = recorder.mBuffer[i*4 + 0];
+ ALbyte b1 = recorder.mBuffer[i*4 + 1];
+ recorder.mBuffer[i*4 + 0] = recorder.mBuffer[i*4 + 3];
+ recorder.mBuffer[i*4 + 1] = recorder.mBuffer[i*4 + 2];
+ recorder.mBuffer[i*4 + 2] = b1;
+ recorder.mBuffer[i*4 + 3] = b0;
+ }
+ }
+#endif
+ recorder.mDataSize += (ALuint)fwrite(recorder.mBuffer, recorder.mFrameSize, count,
+ recorder.mFile);
+ }
+ alcCaptureStop(recorder.mDevice);
+ fprintf(stderr, "\rCaptured %u samples\n", recorder.mDataSize);
+ if(err != ALC_NO_ERROR)
+ fprintf(stderr, "Got device error 0x%04x: %s\n", err, alcGetString(recorder.mDevice, err));
+
+ alcCaptureCloseDevice(recorder.mDevice);
+ recorder.mDevice = NULL;
+
+ free(recorder.mBuffer);
+ recorder.mBuffer = NULL;
+ recorder.mBufferSize = 0;
+
+ total_size = ftell(recorder.mFile);
+ if(fseek(recorder.mFile, recorder.mDataSizeOffset, SEEK_SET) == 0)
+ {
+ fwrite32le(recorder.mDataSize*recorder.mFrameSize, recorder.mFile);
+ if(fseek(recorder.mFile, 4, SEEK_SET) == 0)
+ fwrite32le(total_size - 8, recorder.mFile);
+ }
+
+ fclose(recorder.mFile);
+ recorder.mFile = NULL;
+
+ return 0;
+}
diff --git a/examples/alreverb.c b/examples/alreverb.c
index 420b1c55..e6c9e606 100644
--- a/examples/alreverb.c
+++ b/examples/alreverb.c
@@ -27,17 +27,15 @@
#include <stdio.h>
#include <assert.h>
+#include <SDL_sound.h>
+
#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"
#include "AL/efx-presets.h"
#include "common/alhelpers.h"
-#include "common/sdl_sound.h"
-
-static LPALBUFFERSAMPLESSOFT alBufferSamplesSOFT = wrap_BufferSamples;
-static LPALISBUFFERFORMATSUPPORTEDSOFT alIsBufferFormatSupportedSOFT;
/* Effect object functions */
static LPALGENEFFECTS alGenEffects;
@@ -145,46 +143,63 @@ static ALuint LoadEffect(const EFXEAXREVERBPROPERTIES *reverb)
/* LoadBuffer loads the named audio file into an OpenAL buffer object, and
- * returns the new buffer ID. */
+ * returns the new buffer ID.
+ */
static ALuint LoadSound(const char *filename)
{
- ALenum err, format, type, channels;
- ALuint rate, buffer;
- size_t datalen;
- void *data;
- FilePtr sound;
-
- /* Open the file and get the first stream from it */
- sound = openAudioFile(filename, 1000);
- if(!sound)
+ Sound_Sample *sample;
+ ALenum err, format;
+ ALuint buffer;
+ Uint32 slen;
+
+ /* Open the audio file */
+ sample = Sound_NewSampleFromFile(filename, NULL, 65536);
+ if(!sample)
{
fprintf(stderr, "Could not open audio in %s\n", filename);
return 0;
}
/* Get the sound format, and figure out the OpenAL format */
- if(getAudioInfo(sound, &rate, &channels, &type) != 0)
+ if(sample->actual.channels == 1)
{
- fprintf(stderr, "Error getting audio info for %s\n", filename);
- closeAudioFile(sound);
- return 0;
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_MONO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_MONO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
}
-
- format = GetFormat(channels, type, alIsBufferFormatSupportedSOFT);
- if(format == AL_NONE)
+ else if(sample->actual.channels == 2)
+ {
+ if(sample->actual.format == AUDIO_U8)
+ format = AL_FORMAT_STEREO8;
+ else if(sample->actual.format == AUDIO_S16SYS)
+ format = AL_FORMAT_STEREO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", sample->actual.format);
+ Sound_FreeSample(sample);
+ return 0;
+ }
+ }
+ else
{
- fprintf(stderr, "Unsupported format (%s, %s) for %s\n",
- ChannelsName(channels), TypeName(type), filename);
- closeAudioFile(sound);
+ fprintf(stderr, "Unsupported channel count: %d\n", sample->actual.channels);
+ Sound_FreeSample(sample);
return 0;
}
/* Decode the whole audio stream to a buffer. */
- data = decodeAudioStream(sound, &datalen);
- if(!data)
+ slen = Sound_DecodeAll(sample);
+ if(!sample->buffer || slen == 0)
{
fprintf(stderr, "Failed to read audio from %s\n", filename);
- closeAudioFile(sound);
+ Sound_FreeSample(sample);
return 0;
}
@@ -192,17 +207,15 @@ static ALuint LoadSound(const char *filename)
* close the file. */
buffer = 0;
alGenBuffers(1, &buffer);
- alBufferSamplesSOFT(buffer, rate, format, BytesToFrames(datalen, channels, type),
- channels, type, data);
- free(data);
- closeAudioFile(sound);
+ alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate);
+ Sound_FreeSample(sample);
/* Check if an error occured, and clean up if so. */
err = alGetError();
if(err != AL_NO_ERROR)
{
fprintf(stderr, "OpenAL Error: %s\n", alGetString(err));
- if(alIsBuffer(buffer))
+ if(buffer && alIsBuffer(buffer))
alDeleteBuffers(1, &buffer);
return 0;
}
@@ -217,15 +230,16 @@ int main(int argc, char **argv)
ALuint source, buffer, effect, slot;
ALenum state;
- /* Print out usage if no file was specified */
+ /* Print out usage if no arguments were specified */
if(argc < 2)
{
- fprintf(stderr, "Usage: %s <filename>\n", argv[0]);
+ fprintf(stderr, "Usage: %s [-device <name>] <filename>\n", argv[0]);
return 1;
}
- /* Initialize OpenAL with the default device, and check for EFX support. */
- if(InitAL() != 0)
+ /* Initialize OpenAL, and check for EFX support. */
+ argv++; argc--;
+ if(InitAL(&argv, &argc) != 0)
return 1;
if(!alcIsExtensionPresent(alcGetContextsDevice(alcGetCurrentContext()), "ALC_EXT_EFX"))
@@ -260,19 +274,17 @@ int main(int argc, char **argv)
LOAD_PROC(alGetAuxiliaryEffectSlotiv);
LOAD_PROC(alGetAuxiliaryEffectSlotf);
LOAD_PROC(alGetAuxiliaryEffectSlotfv);
-
- if(alIsExtensionPresent("AL_SOFT_buffer_samples"))
- {
- LOAD_PROC(alBufferSamplesSOFT);
- LOAD_PROC(alIsBufferFormatSupportedSOFT);
- }
#undef LOAD_PROC
+ /* Initialize SDL_sound. */
+ Sound_Init();
+
/* Load the sound into a buffer. */
- buffer = LoadSound(argv[1]);
+ buffer = LoadSound(argv[0]);
if(!buffer)
{
CloseAL();
+ Sound_Quit();
return 1;
}
@@ -281,6 +293,7 @@ int main(int argc, char **argv)
if(!effect)
{
alDeleteBuffers(1, &buffer);
+ Sound_Quit();
CloseAL();
return 1;
}
@@ -311,16 +324,17 @@ int main(int argc, char **argv)
/* Play the sound until it finishes. */
alSourcePlay(source);
do {
- Sleep(10);
+ al_nssleep(10000000);
alGetSourcei(source, AL_SOURCE_STATE, &state);
} while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);
- /* All done. Delete resources, and close OpenAL. */
+ /* All done. Delete resources, and close down SDL_sound and OpenAL. */
alDeleteSources(1, &source);
alDeleteAuxiliaryEffectSlots(1, &slot);
alDeleteEffects(1, &effect);
alDeleteBuffers(1, &buffer);
+ Sound_Quit();
CloseAL();
return 0;
diff --git a/examples/alstream.c b/examples/alstream.c
index 2972d375..68115e8d 100644
--- a/examples/alstream.c
+++ b/examples/alstream.c
@@ -30,17 +30,21 @@
#include <signal.h>
#include <assert.h>
+#include <SDL_sound.h>
+
#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"
#include "common/alhelpers.h"
-#include "common/sdl_sound.h"
-
-static LPALBUFFERSAMPLESSOFT alBufferSamplesSOFT = wrap_BufferSamples;
-static LPALISBUFFERFORMATSUPPORTEDSOFT alIsBufferFormatSupportedSOFT;
+#ifndef SDL_AUDIO_MASK_BITSIZE
+#define SDL_AUDIO_MASK_BITSIZE (0xFF)
+#endif
+#ifndef SDL_AUDIO_BITSIZE
+#define SDL_AUDIO_BITSIZE(x) (x & SDL_AUDIO_MASK_BITSIZE)
+#endif
/* Define the number of buffers and buffer size (in milliseconds) to use. 4
* buffers with 200ms each gives a nice per-chunk size, and lets the queue last
@@ -54,13 +58,11 @@ typedef struct StreamPlayer {
ALuint source;
/* Handle for the audio file */
- FilePtr file;
+ Sound_Sample *sample;
/* The format of the output stream */
ALenum format;
- ALenum channels;
- ALenum type;
- ALuint rate;
+ ALsizei srate;
} StreamPlayer;
static StreamPlayer *NewPlayer(void);
@@ -77,11 +79,9 @@ static StreamPlayer *NewPlayer(void)
{
StreamPlayer *player;
- player = malloc(sizeof(*player));
+ player = calloc(1, sizeof(*player));
assert(player != NULL);
- memset(player, 0, sizeof(*player));
-
/* Generate the buffers and source */
alGenBuffers(NUM_BUFFERS, player->buffers);
assert(alGetError() == AL_NO_ERROR && "Could not create buffers");
@@ -119,37 +119,63 @@ static void DeletePlayer(StreamPlayer *player)
* it will be closed first. */
static int OpenPlayerFile(StreamPlayer *player, const char *filename)
{
+ Uint32 frame_size;
+
ClosePlayerFile(player);
/* Open the file and get the first stream from it */
- player->file = openAudioFile(filename, BUFFER_TIME_MS);
- if(!player->file)
+ player->sample = Sound_NewSampleFromFile(filename, NULL, 0);
+ if(!player->sample)
{
fprintf(stderr, "Could not open audio in %s\n", filename);
goto error;
}
/* Get the stream format, and figure out the OpenAL format */
- if(getAudioInfo(player->file, &player->rate, &player->channels, &player->type) != 0)
+ if(player->sample->actual.channels == 1)
{
- fprintf(stderr, "Error getting audio info for %s\n", filename);
- goto error;
+ if(player->sample->actual.format == AUDIO_U8)
+ player->format = AL_FORMAT_MONO8;
+ else if(player->sample->actual.format == AUDIO_S16SYS)
+ player->format = AL_FORMAT_MONO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", player->sample->actual.format);
+ goto error;
+ }
}
-
- player->format = GetFormat(player->channels, player->type, alIsBufferFormatSupportedSOFT);
- if(player->format == 0)
+ else if(player->sample->actual.channels == 2)
+ {
+ if(player->sample->actual.format == AUDIO_U8)
+ player->format = AL_FORMAT_STEREO8;
+ else if(player->sample->actual.format == AUDIO_S16SYS)
+ player->format = AL_FORMAT_STEREO16;
+ else
+ {
+ fprintf(stderr, "Unsupported sample format: 0x%04x\n", player->sample->actual.format);
+ goto error;
+ }
+ }
+ else
{
- fprintf(stderr, "Unsupported format (%s, %s) for %s\n",
- ChannelsName(player->channels), TypeName(player->type),
- filename);
+ fprintf(stderr, "Unsupported channel count: %d\n", player->sample->actual.channels);
goto error;
}
+ player->srate = player->sample->actual.rate;
+
+ frame_size = player->sample->actual.channels *
+ SDL_AUDIO_BITSIZE(player->sample->actual.format) / 8;
+
+ /* Set the buffer size, given the desired millisecond length. */
+ Sound_SetBufferSize(player->sample, (Uint32)((Uint64)player->srate*BUFFER_TIME_MS/1000) *
+ frame_size);
return 1;
error:
- closeAudioFile(player->file);
- player->file = NULL;
+ if(player->sample)
+ Sound_FreeSample(player->sample);
+ player->sample = NULL;
return 0;
}
@@ -157,8 +183,9 @@ error:
/* Closes the audio file stream */
static void ClosePlayerFile(StreamPlayer *player)
{
- closeAudioFile(player->file);
- player->file = NULL;
+ if(player->sample)
+ Sound_FreeSample(player->sample);
+ player->sample = NULL;
}
@@ -174,16 +201,12 @@ static int StartPlayer(StreamPlayer *player)
/* Fill the buffer queue */
for(i = 0;i < NUM_BUFFERS;i++)
{
- uint8_t *data;
- size_t got;
-
/* Get some data to give it to the buffer */
- data = getAudioData(player->file, &got);
- if(!data) break;
+ Uint32 slen = Sound_Decode(player->sample);
+ if(slen == 0) break;
- alBufferSamplesSOFT(player->buffers[i], player->rate, player->format,
- BytesToFrames(got, player->channels, player->type),
- player->channels, player->type, data);
+ alBufferData(player->buffers[i], player->format,
+ player->sample->buffer, slen, player->srate);
}
if(alGetError() != AL_NO_ERROR)
{
@@ -220,20 +243,21 @@ static int UpdatePlayer(StreamPlayer *player)
while(processed > 0)
{
ALuint bufid;
- uint8_t *data;
- size_t got;
+ Uint32 slen;
alSourceUnqueueBuffers(player->source, 1, &bufid);
processed--;
+ if((player->sample->flags&(SOUND_SAMPLEFLAG_EOF|SOUND_SAMPLEFLAG_ERROR)))
+ continue;
+
/* Read the next chunk of data, refill the buffer, and queue it
* back on the source */
- data = getAudioData(player->file, &got);
- if(data != NULL)
+ slen = Sound_Decode(player->sample);
+ if(slen > 0)
{
- alBufferSamplesSOFT(bufid, player->rate, player->format,
- BytesToFrames(got, player->channels, player->type),
- player->channels, player->type, data);
+ alBufferData(bufid, player->format, player->sample->buffer, slen,
+ player->srate);
alSourceQueueBuffers(player->source, 1, &bufid);
}
if(alGetError() != AL_NO_ERROR)
@@ -270,29 +294,23 @@ int main(int argc, char **argv)
StreamPlayer *player;
int i;
- /* Print out usage if no file was specified */
+ /* Print out usage if no arguments were specified */
if(argc < 2)
{
- fprintf(stderr, "Usage: %s <filenames...>\n", argv[0]);
+ fprintf(stderr, "Usage: %s [-device <name>] <filenames...>\n", argv[0]);
return 1;
}
- if(InitAL() != 0)
+ argv++; argc--;
+ if(InitAL(&argv, &argc) != 0)
return 1;
- if(alIsExtensionPresent("AL_SOFT_buffer_samples"))
- {
- printf("AL_SOFT_buffer_samples supported!\n");
- alBufferSamplesSOFT = alGetProcAddress("alBufferSamplesSOFT");
- alIsBufferFormatSupportedSOFT = alGetProcAddress("alIsBufferFormatSupportedSOFT");
- }
- else
- printf("AL_SOFT_buffer_samples not supported\n");
+ Sound_Init();
player = NewPlayer();
/* Play each file listed on the command line */
- for(i = 1;i < argc;i++)
+ for(i = 0;i < argc;i++)
{
const char *namepart;
@@ -306,9 +324,8 @@ int main(int argc, char **argv)
else
namepart = argv[i];
- printf("Playing: %s (%s, %s, %dhz)\n", namepart,
- TypeName(player->type), ChannelsName(player->channels),
- player->rate);
+ printf("Playing: %s (%s, %dhz)\n", namepart, FormatName(player->format),
+ player->srate);
fflush(stdout);
if(!StartPlayer(player))
@@ -318,17 +335,18 @@ int main(int argc, char **argv)
}
while(UpdatePlayer(player))
- Sleep(10);
+ al_nssleep(10000000);
/* All done with this file. Close it and go to the next */
ClosePlayerFile(player);
}
printf("Done.\n");
- /* All files done. Delete the player, and close OpenAL */
+ /* All files done. Delete the player, and close down SDL_sound and OpenAL */
DeletePlayer(player);
player = NULL;
+ Sound_Quit();
CloseAL();
return 0;
diff --git a/examples/altonegen.c b/examples/altonegen.c
index 65980529..628e695d 100644
--- a/examples/altonegen.c
+++ b/examples/altonegen.c
@@ -35,6 +35,7 @@
#include <stdlib.h>
#include <string.h>
#include <assert.h>
+#include <limits.h>
#include <math.h>
#include "AL/al.h"
@@ -53,6 +54,7 @@ enum WaveType {
WT_Sawtooth,
WT_Triangle,
WT_Impulse,
+ WT_WhiteNoise,
};
static const char *GetWaveTypeName(enum WaveType type)
@@ -64,10 +66,17 @@ static const char *GetWaveTypeName(enum WaveType type)
case WT_Sawtooth: return "sawtooth";
case WT_Triangle: return "triangle";
case WT_Impulse: return "impulse";
+ case WT_WhiteNoise: return "noise";
}
return "(unknown)";
}
+static inline ALuint dither_rng(ALuint *seed)
+{
+ *seed = (*seed * 96314165) + 907633515;
+ return *seed;
+}
+
static void ApplySin(ALfloat *data, ALdouble g, ALuint srate, ALuint freq)
{
ALdouble smps_per_cycle = (ALdouble)srate / freq;
@@ -81,6 +90,7 @@ static void ApplySin(ALfloat *data, ALdouble g, ALuint srate, ALuint freq)
*/
static ALuint CreateWave(enum WaveType type, ALuint freq, ALuint srate)
{
+ ALuint seed = 22222;
ALint data_size;
ALfloat *data;
ALuint buffer;
@@ -89,25 +99,44 @@ static ALuint CreateWave(enum WaveType type, ALuint freq, ALuint srate)
data_size = srate * sizeof(ALfloat);
data = calloc(1, data_size);
- if(type == WT_Sine)
- ApplySin(data, 1.0, srate, freq);
- else if(type == WT_Square)
- for(i = 1;freq*i < srate/2;i+=2)
- ApplySin(data, 4.0/M_PI * 1.0/i, srate, freq*i);
- else if(type == WT_Sawtooth)
- for(i = 1;freq*i < srate/2;i++)
- ApplySin(data, 2.0/M_PI * ((i&1)*2 - 1.0) / i, srate, freq*i);
- else if(type == WT_Triangle)
- for(i = 1;freq*i < srate/2;i+=2)
- ApplySin(data, 8.0/(M_PI*M_PI) * (1.0 - (i&2)) / (i*i), srate, freq*i);
- else if(type == WT_Impulse)
+ switch(type)
{
- /* NOTE: Impulse isn't really a waveform, but it can still be useful to
- * test (other than resampling, the ALSOFT_DEFAULT_REVERB environment
- * variable can prove useful here to test the reverb response).
- */
- for(i = 0;i < srate;i++)
- data[i] = (i%(srate/freq)) ? 0.0f : 1.0f;
+ case WT_Sine:
+ ApplySin(data, 1.0, srate, freq);
+ break;
+ case WT_Square:
+ for(i = 1;freq*i < srate/2;i+=2)
+ ApplySin(data, 4.0/M_PI * 1.0/i, srate, freq*i);
+ break;
+ case WT_Sawtooth:
+ for(i = 1;freq*i < srate/2;i++)
+ ApplySin(data, 2.0/M_PI * ((i&1)*2 - 1.0) / i, srate, freq*i);
+ break;
+ case WT_Triangle:
+ for(i = 1;freq*i < srate/2;i+=2)
+ ApplySin(data, 8.0/(M_PI*M_PI) * (1.0 - (i&2)) / (i*i), srate, freq*i);
+ break;
+ case WT_Impulse:
+ /* NOTE: Impulse isn't handled using additive synthesis, and is
+ * instead just a non-0 sample at a given rate. This can still be
+ * useful to test (other than resampling, the ALSOFT_DEFAULT_REVERB
+ * environment variable can prove useful here to test the reverb
+ * response).
+ */
+ for(i = 0;i < srate;i++)
+ data[i] = (i%(srate/freq)) ? 0.0f : 1.0f;
+ break;
+ case WT_WhiteNoise:
+ /* NOTE: WhiteNoise is just uniform set of uncorrelated values, and
+ * is not influenced by the waveform frequency.
+ */
+ for(i = 0;i < srate;i++)
+ {
+ ALuint rng0 = dither_rng(&seed);
+ ALuint rng1 = dither_rng(&seed);
+ data[i] = (ALfloat)(rng0*(1.0/UINT_MAX) - rng1*(1.0/UINT_MAX));
+ }
+ break;
}
/* Buffer the audio data into a new buffer object. */
@@ -133,6 +162,7 @@ static ALuint CreateWave(enum WaveType type, ALuint freq, ALuint srate)
int main(int argc, char *argv[])
{
enum WaveType wavetype = WT_Sine;
+ const char *appname = argv[0];
ALuint source, buffer;
ALint last_pos, num_loops;
ALint max_loops = 4;
@@ -142,23 +172,35 @@ int main(int argc, char *argv[])
ALenum state;
int i;
- for(i = 1;i < argc;i++)
+ argv++; argc--;
+ if(InitAL(&argv, &argc) != 0)
+ return 1;
+
+ if(!alIsExtensionPresent("AL_EXT_FLOAT32"))
+ {
+ fprintf(stderr, "Required AL_EXT_FLOAT32 extension not supported on this device!\n");
+ CloseAL();
+ return 1;
+ }
+
+ for(i = 0;i < argc;i++)
{
if(strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0)
{
fprintf(stderr, "OpenAL Tone Generator\n"
"\n"
-"Usage: %s <options>\n"
+"Usage: %s [-device <name>] <options>\n"
"\n"
"Available options:\n"
" --help/-h This help text\n"
" -t <seconds> Time to play a tone (default 5 seconds)\n"
" --waveform/-w <type> Waveform type: sine (default), square, sawtooth,\n"
-" triangle, impulse\n"
+" triangle, impulse, noise\n"
" --freq/-f <hz> Tone frequency (default 1000 hz)\n"
" --srate/-s <sample rate> Sampling rate (default output rate)\n",
- argv[0]
+ appname
);
+ CloseAL();
return 1;
}
else if(i+1 < argc && strcmp(argv[i], "-t") == 0)
@@ -179,6 +221,8 @@ int main(int argc, char *argv[])
wavetype = WT_Triangle;
else if(strcmp(argv[i], "impulse") == 0)
wavetype = WT_Impulse;
+ else if(strcmp(argv[i], "noise") == 0)
+ wavetype = WT_WhiteNoise;
else
fprintf(stderr, "Unhandled waveform: %s\n", argv[i]);
}
@@ -204,15 +248,6 @@ int main(int argc, char *argv[])
}
}
- InitAL();
-
- if(!alIsExtensionPresent("AL_EXT_FLOAT32"))
- {
- fprintf(stderr, "Required AL_EXT_FLOAT32 extension not supported on this device!\n");
- CloseAL();
- return 1;
- }
-
{
ALCdevice *device = alcGetContextsDevice(alcGetCurrentContext());
alcGetIntegerv(device, ALC_FREQUENCY, 1, &dev_rate);
@@ -246,7 +281,7 @@ int main(int argc, char *argv[])
alSourcePlay(source);
do {
ALint pos;
- Sleep(10);
+ al_nssleep(10000000);
alGetSourcei(source, AL_SAMPLE_OFFSET, &pos);
alGetSourcei(source, AL_SOURCE_STATE, &state);
if(pos < last_pos && state == AL_PLAYING)
diff --git a/examples/common/alhelpers.c b/examples/common/alhelpers.c
index 4582321c..fab039e9 100644
--- a/examples/common/alhelpers.c
+++ b/examples/common/alhelpers.c
@@ -29,6 +29,7 @@
* channel configs and sample types. */
#include <stdio.h>
+#include <string.h>
#include "AL/al.h"
#include "AL/alc.h"
@@ -37,15 +38,26 @@
#include "alhelpers.h"
-/* InitAL opens the default device and sets up a context using default
- * attributes, making the program ready to call OpenAL functions. */
-int InitAL(void)
+/* InitAL opens a device and sets up a context using default attributes, making
+ * the program ready to call OpenAL functions. */
+int InitAL(char ***argv, int *argc)
{
+ const ALCchar *name;
ALCdevice *device;
ALCcontext *ctx;
- /* Open and initialize a device with default settings */
- device = alcOpenDevice(NULL);
+ /* Open and initialize a device */
+ device = NULL;
+ if(argc && argv && *argc > 1 && strcmp((*argv)[0], "-device") == 0)
+ {
+ device = alcOpenDevice((*argv)[1]);
+ if(!device)
+ fprintf(stderr, "Failed to open \"%s\", trying default\n", (*argv)[1]);
+ (*argv) += 2;
+ (*argc) -= 2;
+ }
+ if(!device)
+ device = alcOpenDevice(NULL);
if(!device)
{
fprintf(stderr, "Could not open a device!\n");
@@ -62,7 +74,13 @@ int InitAL(void)
return 1;
}
- printf("Opened \"%s\"\n", alcGetString(device, ALC_DEVICE_SPECIFIER));
+ name = NULL;
+ if(alcIsExtensionPresent(device, "ALC_ENUMERATE_ALL_EXT"))
+ name = alcGetString(device, ALC_ALL_DEVICES_SPECIFIER);
+ if(!name || alcGetError(device) != AL_NO_ERROR)
+ name = alcGetString(device, ALC_DEVICE_SPECIFIER);
+ printf("Opened \"%s\"\n", name);
+
return 0;
}
@@ -85,243 +103,14 @@ void CloseAL(void)
}
-/* GetFormat retrieves a compatible buffer format given the channel config and
- * sample type. If an alIsBufferFormatSupportedSOFT-compatible function is
- * provided, it will be called to find the closest-matching format from
- * AL_SOFT_buffer_samples. Returns AL_NONE (0) if no supported format can be
- * found. */
-ALenum GetFormat(ALenum channels, ALenum type, LPALISBUFFERFORMATSUPPORTEDSOFT palIsBufferFormatSupportedSOFT)
-{
- ALenum format = AL_NONE;
-
- /* If using AL_SOFT_buffer_samples, try looking through its formats */
- if(palIsBufferFormatSupportedSOFT)
- {
- /* AL_SOFT_buffer_samples is more lenient with matching formats. The
- * specified sample type does not need to match the returned format,
- * but it is nice to try to get something close. */
- if(type == AL_UNSIGNED_BYTE_SOFT || type == AL_BYTE_SOFT)
- {
- if(channels == AL_MONO_SOFT) format = AL_MONO8_SOFT;
- else if(channels == AL_STEREO_SOFT) format = AL_STEREO8_SOFT;
- else if(channels == AL_QUAD_SOFT) format = AL_QUAD8_SOFT;
- else if(channels == AL_5POINT1_SOFT) format = AL_5POINT1_8_SOFT;
- else if(channels == AL_6POINT1_SOFT) format = AL_6POINT1_8_SOFT;
- else if(channels == AL_7POINT1_SOFT) format = AL_7POINT1_8_SOFT;
- }
- else if(type == AL_UNSIGNED_SHORT_SOFT || type == AL_SHORT_SOFT)
- {
- if(channels == AL_MONO_SOFT) format = AL_MONO16_SOFT;
- else if(channels == AL_STEREO_SOFT) format = AL_STEREO16_SOFT;
- else if(channels == AL_QUAD_SOFT) format = AL_QUAD16_SOFT;
- else if(channels == AL_5POINT1_SOFT) format = AL_5POINT1_16_SOFT;
- else if(channels == AL_6POINT1_SOFT) format = AL_6POINT1_16_SOFT;
- else if(channels == AL_7POINT1_SOFT) format = AL_7POINT1_16_SOFT;
- }
- else if(type == AL_UNSIGNED_BYTE3_SOFT || type == AL_BYTE3_SOFT ||
- type == AL_UNSIGNED_INT_SOFT || type == AL_INT_SOFT ||
- type == AL_FLOAT_SOFT || type == AL_DOUBLE_SOFT)
- {
- if(channels == AL_MONO_SOFT) format = AL_MONO32F_SOFT;
- else if(channels == AL_STEREO_SOFT) format = AL_STEREO32F_SOFT;
- else if(channels == AL_QUAD_SOFT) format = AL_QUAD32F_SOFT;
- else if(channels == AL_5POINT1_SOFT) format = AL_5POINT1_32F_SOFT;
- else if(channels == AL_6POINT1_SOFT) format = AL_6POINT1_32F_SOFT;
- else if(channels == AL_7POINT1_SOFT) format = AL_7POINT1_32F_SOFT;
- }
-
- if(format != AL_NONE && !palIsBufferFormatSupportedSOFT(format))
- format = AL_NONE;
-
- /* A matching format was not found or supported. Try 32-bit float. */
- if(format == AL_NONE)
- {
- if(channels == AL_MONO_SOFT) format = AL_MONO32F_SOFT;
- else if(channels == AL_STEREO_SOFT) format = AL_STEREO32F_SOFT;
- else if(channels == AL_QUAD_SOFT) format = AL_QUAD32F_SOFT;
- else if(channels == AL_5POINT1_SOFT) format = AL_5POINT1_32F_SOFT;
- else if(channels == AL_6POINT1_SOFT) format = AL_6POINT1_32F_SOFT;
- else if(channels == AL_7POINT1_SOFT) format = AL_7POINT1_32F_SOFT;
-
- if(format != AL_NONE && !palIsBufferFormatSupportedSOFT(format))
- format = AL_NONE;
- }
- /* 32-bit float not supported. Try 16-bit int. */
- if(format == AL_NONE)
- {
- if(channels == AL_MONO_SOFT) format = AL_MONO16_SOFT;
- else if(channels == AL_STEREO_SOFT) format = AL_STEREO16_SOFT;
- else if(channels == AL_QUAD_SOFT) format = AL_QUAD16_SOFT;
- else if(channels == AL_5POINT1_SOFT) format = AL_5POINT1_16_SOFT;
- else if(channels == AL_6POINT1_SOFT) format = AL_6POINT1_16_SOFT;
- else if(channels == AL_7POINT1_SOFT) format = AL_7POINT1_16_SOFT;
-
- if(format != AL_NONE && !palIsBufferFormatSupportedSOFT(format))
- format = AL_NONE;
- }
- /* 16-bit int not supported. Try 8-bit int. */
- if(format == AL_NONE)
- {
- if(channels == AL_MONO_SOFT) format = AL_MONO8_SOFT;
- else if(channels == AL_STEREO_SOFT) format = AL_STEREO8_SOFT;
- else if(channels == AL_QUAD_SOFT) format = AL_QUAD8_SOFT;
- else if(channels == AL_5POINT1_SOFT) format = AL_5POINT1_8_SOFT;
- else if(channels == AL_6POINT1_SOFT) format = AL_6POINT1_8_SOFT;
- else if(channels == AL_7POINT1_SOFT) format = AL_7POINT1_8_SOFT;
-
- if(format != AL_NONE && !palIsBufferFormatSupportedSOFT(format))
- format = AL_NONE;
- }
-
- return format;
- }
-
- /* We use the AL_EXT_MCFORMATS extension to provide output of Quad, 5.1,
- * and 7.1 channel configs, AL_EXT_FLOAT32 for 32-bit float samples, and
- * AL_EXT_DOUBLE for 64-bit float samples. */
- if(type == AL_UNSIGNED_BYTE_SOFT)
- {
- if(channels == AL_MONO_SOFT)
- format = AL_FORMAT_MONO8;
- else if(channels == AL_STEREO_SOFT)
- format = AL_FORMAT_STEREO8;
- else if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
- {
- if(channels == AL_QUAD_SOFT)
- format = alGetEnumValue("AL_FORMAT_QUAD8");
- else if(channels == AL_5POINT1_SOFT)
- format = alGetEnumValue("AL_FORMAT_51CHN8");
- else if(channels == AL_6POINT1_SOFT)
- format = alGetEnumValue("AL_FORMAT_61CHN8");
- else if(channels == AL_7POINT1_SOFT)
- format = alGetEnumValue("AL_FORMAT_71CHN8");
- }
- }
- else if(type == AL_SHORT_SOFT)
- {
- if(channels == AL_MONO_SOFT)
- format = AL_FORMAT_MONO16;
- else if(channels == AL_STEREO_SOFT)
- format = AL_FORMAT_STEREO16;
- else if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
- {
- if(channels == AL_QUAD_SOFT)
- format = alGetEnumValue("AL_FORMAT_QUAD16");
- else if(channels == AL_5POINT1_SOFT)
- format = alGetEnumValue("AL_FORMAT_51CHN16");
- else if(channels == AL_6POINT1_SOFT)
- format = alGetEnumValue("AL_FORMAT_61CHN16");
- else if(channels == AL_7POINT1_SOFT)
- format = alGetEnumValue("AL_FORMAT_71CHN16");
- }
- }
- else if(type == AL_FLOAT_SOFT && alIsExtensionPresent("AL_EXT_FLOAT32"))
- {
- if(channels == AL_MONO_SOFT)
- format = alGetEnumValue("AL_FORMAT_MONO_FLOAT32");
- else if(channels == AL_STEREO_SOFT)
- format = alGetEnumValue("AL_FORMAT_STEREO_FLOAT32");
- else if(alIsExtensionPresent("AL_EXT_MCFORMATS"))
- {
- if(channels == AL_QUAD_SOFT)
- format = alGetEnumValue("AL_FORMAT_QUAD32");
- else if(channels == AL_5POINT1_SOFT)
- format = alGetEnumValue("AL_FORMAT_51CHN32");
- else if(channels == AL_6POINT1_SOFT)
- format = alGetEnumValue("AL_FORMAT_61CHN32");
- else if(channels == AL_7POINT1_SOFT)
- format = alGetEnumValue("AL_FORMAT_71CHN32");
- }
- }
- else if(type == AL_DOUBLE_SOFT && alIsExtensionPresent("AL_EXT_DOUBLE"))
- {
- if(channels == AL_MONO_SOFT)
- format = alGetEnumValue("AL_FORMAT_MONO_DOUBLE");
- else if(channels == AL_STEREO_SOFT)
- format = alGetEnumValue("AL_FORMAT_STEREO_DOUBLE");
- }
-
- /* NOTE: It seems OSX returns -1 from alGetEnumValue for unknown enums, as
- * opposed to 0. Correct it. */
- if(format == -1)
- format = 0;
-
- return format;
-}
-
-
-void AL_APIENTRY wrap_BufferSamples(ALuint buffer, ALuint samplerate,
- ALenum internalformat, ALsizei samples,
- ALenum channels, ALenum type,
- const ALvoid *data)
-{
- alBufferData(buffer, internalformat, data,
- FramesToBytes(samples, channels, type),
- samplerate);
-}
-
-
-const char *ChannelsName(ALenum chans)
+const char *FormatName(ALenum format)
{
- switch(chans)
+ switch(format)
{
- case AL_MONO_SOFT: return "Mono";
- case AL_STEREO_SOFT: return "Stereo";
- case AL_REAR_SOFT: return "Rear";
- case AL_QUAD_SOFT: return "Quadraphonic";
- case AL_5POINT1_SOFT: return "5.1 Surround";
- case AL_6POINT1_SOFT: return "6.1 Surround";
- case AL_7POINT1_SOFT: return "7.1 Surround";
+ case AL_FORMAT_MONO8: return "Mono, U8";
+ case AL_FORMAT_MONO16: return "Mono, S16";
+ case AL_FORMAT_STEREO8: return "Stereo, U8";
+ case AL_FORMAT_STEREO16: return "Stereo, S16";
}
- return "Unknown Channels";
-}
-
-const char *TypeName(ALenum type)
-{
- switch(type)
- {
- case AL_BYTE_SOFT: return "S8";
- case AL_UNSIGNED_BYTE_SOFT: return "U8";
- case AL_SHORT_SOFT: return "S16";
- case AL_UNSIGNED_SHORT_SOFT: return "U16";
- case AL_INT_SOFT: return "S32";
- case AL_UNSIGNED_INT_SOFT: return "U32";
- case AL_FLOAT_SOFT: return "Float32";
- case AL_DOUBLE_SOFT: return "Float64";
- }
- return "Unknown Type";
-}
-
-
-ALsizei FramesToBytes(ALsizei size, ALenum channels, ALenum type)
-{
- switch(channels)
- {
- case AL_MONO_SOFT: size *= 1; break;
- case AL_STEREO_SOFT: size *= 2; break;
- case AL_REAR_SOFT: size *= 2; break;
- case AL_QUAD_SOFT: size *= 4; break;
- case AL_5POINT1_SOFT: size *= 6; break;
- case AL_6POINT1_SOFT: size *= 7; break;
- case AL_7POINT1_SOFT: size *= 8; break;
- }
-
- switch(type)
- {
- case AL_BYTE_SOFT: size *= sizeof(ALbyte); break;
- case AL_UNSIGNED_BYTE_SOFT: size *= sizeof(ALubyte); break;
- case AL_SHORT_SOFT: size *= sizeof(ALshort); break;
- case AL_UNSIGNED_SHORT_SOFT: size *= sizeof(ALushort); break;
- case AL_INT_SOFT: size *= sizeof(ALint); break;
- case AL_UNSIGNED_INT_SOFT: size *= sizeof(ALuint); break;
- case AL_FLOAT_SOFT: size *= sizeof(ALfloat); break;
- case AL_DOUBLE_SOFT: size *= sizeof(ALdouble); break;
- }
-
- return size;
-}
-
-ALsizei BytesToFrames(ALsizei size, ALenum channels, ALenum type)
-{
- return size / FramesToBytes(1, channels, type);
+ return "Unknown Format";
}
diff --git a/examples/common/alhelpers.h b/examples/common/alhelpers.h
index 62ed5be2..41a7ce58 100644
--- a/examples/common/alhelpers.h
+++ b/examples/common/alhelpers.h
@@ -1,47 +1,21 @@
#ifndef ALHELPERS_H
#define ALHELPERS_H
-#ifndef _WIN32
-#include <unistd.h>
-#define Sleep(x) usleep((x)*1000)
-#else
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#endif
-
#include "AL/alc.h"
#include "AL/al.h"
#include "AL/alext.h"
+#include "threads.h"
+
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
-/* Some helper functions to get the name from the channel and type enums. */
-const char *ChannelsName(ALenum chans);
-const char *TypeName(ALenum type);
-
-/* Helpers to convert frame counts and byte lengths. */
-ALsizei FramesToBytes(ALsizei size, ALenum channels, ALenum type);
-ALsizei BytesToFrames(ALsizei size, ALenum channels, ALenum type);
-
-/* Retrieves a compatible buffer format given the channel configuration and
- * sample type. If an alIsBufferFormatSupportedSOFT-compatible function is
- * provided, it will be called to find the closest-matching format from
- * AL_SOFT_buffer_samples. Returns AL_NONE (0) if no supported format can be
- * found. */
-ALenum GetFormat(ALenum channels, ALenum type, LPALISBUFFERFORMATSUPPORTEDSOFT palIsBufferFormatSupportedSOFT);
-
-/* Loads samples into a buffer using the standard alBufferData call, but with a
- * LPALBUFFERSAMPLESSOFT-compatible prototype. Assumes internalformat is valid
- * for alBufferData, and that channels and type match it. */
-void AL_APIENTRY wrap_BufferSamples(ALuint buffer, ALuint samplerate,
- ALenum internalformat, ALsizei samples,
- ALenum channels, ALenum type,
- const ALvoid *data);
+/* Some helper functions to get the name from the format enums. */
+const char *FormatName(ALenum type);
/* Easy device init/deinit functions. InitAL returns 0 on success. */
-int InitAL(void);
+int InitAL(char ***argv, int *argc);
void CloseAL(void);
#ifdef __cplusplus
diff --git a/examples/common/sdl_sound.c b/examples/common/sdl_sound.c
deleted file mode 100644
index 79a5bf32..00000000
--- a/examples/common/sdl_sound.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * SDL_sound Decoder Helpers
- *
- * Copyright (c) 2013 by Chris Robinson <[email protected]>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-
-/* This file contains routines for helping to decode audio using SDL_sound.
- * There's very little OpenAL-specific code here.
- */
-#include "sdl_sound.h"
-
-#include <string.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <signal.h>
-#include <assert.h>
-
-#include <SDL_sound.h>
-
-#include "AL/al.h"
-#include "AL/alc.h"
-#include "AL/alext.h"
-
-#include "alhelpers.h"
-
-
-static int done_init = 0;
-
-FilePtr openAudioFile(const char *fname, size_t buftime_ms)
-{
- FilePtr file;
- ALuint rate;
- Uint32 bufsize;
- ALenum chans, type;
-
- /* We need to make sure SDL_sound is initialized. */
- if(!done_init)
- {
- Sound_Init();
- done_init = 1;
- }
-
- file = Sound_NewSampleFromFile(fname, NULL, 0);
- if(!file)
- {
- fprintf(stderr, "Failed to open %s: %s\n", fname, Sound_GetError());
- return NULL;
- }
-
- if(getAudioInfo(file, &rate, &chans, &type) != 0)
- {
- Sound_FreeSample(file);
- return NULL;
- }
-
- bufsize = FramesToBytes((ALsizei)(buftime_ms/1000.0*rate), chans, type);
- if(Sound_SetBufferSize(file, bufsize) == 0)
- {
- fprintf(stderr, "Failed to set buffer size to %u bytes: %s\n", bufsize, Sound_GetError());
- Sound_FreeSample(file);
- return NULL;
- }
-
- return file;
-}
-
-void closeAudioFile(FilePtr file)
-{
- if(file)
- Sound_FreeSample(file);
-}
-
-
-int getAudioInfo(FilePtr file, ALuint *rate, ALenum *channels, ALenum *type)
-{
- if(file->actual.channels == 1)
- *channels = AL_MONO_SOFT;
- else if(file->actual.channels == 2)
- *channels = AL_STEREO_SOFT;
- else
- {
- fprintf(stderr, "Unsupported channel count: %d\n", file->actual.channels);
- return 1;
- }
-
- if(file->actual.format == AUDIO_U8)
- *type = AL_UNSIGNED_BYTE_SOFT;
- else if(file->actual.format == AUDIO_S8)
- *type = AL_BYTE_SOFT;
- else if(file->actual.format == AUDIO_U16LSB || file->actual.format == AUDIO_U16MSB)
- *type = AL_UNSIGNED_SHORT_SOFT;
- else if(file->actual.format == AUDIO_S16LSB || file->actual.format == AUDIO_S16MSB)
- *type = AL_SHORT_SOFT;
- else
- {
- fprintf(stderr, "Unsupported sample format: 0x%04x\n", file->actual.format);
- return 1;
- }
-
- *rate = file->actual.rate;
-
- return 0;
-}
-
-
-uint8_t *getAudioData(FilePtr file, size_t *length)
-{
- *length = Sound_Decode(file);
- if(*length == 0)
- return NULL;
- if((file->actual.format == AUDIO_U16LSB && AUDIO_U16LSB != AUDIO_U16SYS) ||
- (file->actual.format == AUDIO_U16MSB && AUDIO_U16MSB != AUDIO_U16SYS) ||
- (file->actual.format == AUDIO_S16LSB && AUDIO_S16LSB != AUDIO_S16SYS) ||
- (file->actual.format == AUDIO_S16MSB && AUDIO_S16MSB != AUDIO_S16SYS))
- {
- /* Swap bytes if the decoded endianness doesn't match the system. */
- char *buffer = file->buffer;
- size_t i;
- for(i = 0;i < *length;i+=2)
- {
- char b = buffer[i];
- buffer[i] = buffer[i+1];
- buffer[i+1] = b;
- }
- }
- return file->buffer;
-}
-
-void *decodeAudioStream(FilePtr file, size_t *length)
-{
- Uint32 got;
- char *mem;
-
- got = Sound_DecodeAll(file);
- if(got == 0)
- {
- *length = 0;
- return NULL;
- }
-
- mem = malloc(got);
- memcpy(mem, file->buffer, got);
-
- *length = got;
- return mem;
-}
diff --git a/examples/common/sdl_sound.h b/examples/common/sdl_sound.h
deleted file mode 100644
index e93ab92b..00000000
--- a/examples/common/sdl_sound.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef EXAMPLES_SDL_SOUND_H
-#define EXAMPLES_SDL_SOUND_H
-
-#include "AL/al.h"
-
-#include <SDL_sound.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/* Opaque handles to files and streams. Apps don't need to concern themselves
- * with the internals */
-typedef Sound_Sample *FilePtr;
-
-/* Opens a file with SDL_sound, and specifies the size of the sample buffer in
- * milliseconds. */
-FilePtr openAudioFile(const char *fname, size_t buftime_ms);
-
-/* Closes/frees an opened file */
-void closeAudioFile(FilePtr file);
-
-/* Returns information about the given audio stream. Returns 0 on success. */
-int getAudioInfo(FilePtr file, ALuint *rate, ALenum *channels, ALenum *type);
-
-/* Returns a pointer to the next available chunk of decoded audio. The size (in
- * bytes) of the returned data buffer is stored in 'length', and the returned
- * pointer is only valid until the next call to getAudioData. */
-uint8_t *getAudioData(FilePtr file, size_t *length);
-
-/* Decodes all remaining data from the stream and returns a buffer containing
- * the audio data, with the size stored in 'length'. The returned pointer must
- * be freed with a call to free(). Note that since this decodes the whole
- * stream, using it on lengthy streams (eg, music) will use a lot of memory.
- * Such streams are better handled using getAudioData to keep smaller chunks in
- * memory at any given time. */
-void *decodeAudioStream(FilePtr, size_t *length);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* EXAMPLES_SDL_SOUND_H */