Diffstat (limited to 'examples')
-rw-r--r-- | examples/alffplay.cpp       | 1340
-rw-r--r-- | examples/alhrtf.c           |   26
-rw-r--r-- | examples/allatency.c        |   35
-rw-r--r-- | examples/alloopback.c       |   17
-rw-r--r-- | examples/almultireverb.c    |  114
-rw-r--r-- | examples/alplay.c           |    9
-rw-r--r-- | examples/alrecord.c         |   42
-rw-r--r-- | examples/alreverb.c         |   62
-rw-r--r-- | examples/alstream.c         |   24
-rw-r--r-- | examples/altonegen.c        |   15
-rw-r--r-- | examples/common/alhelpers.c |   74
-rw-r--r-- | examples/common/alhelpers.h |   14
12 files changed, 911 insertions, 861 deletions
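
Most of the churn is in examples/alffplay.cpp, which drops the old SDL user-event driven decode/refresh path (FF_UPDATE_EVENT/FF_REFRESH_EVENT and the shared send-condition handshake) in favor of per-stream, size-limited packet queues that the parser thread fills and the decoder threads drain, plus an atomic ring buffer of decoded pictures that the main thread turns into textures. The following is a minimal, simplified sketch of the bounded producer/consumer pattern behind the new PacketQueue<SizeLimit> shown in the diff below; the class and member names here are illustrative and abridged, not the verbatim code.

// Compressed sketch of the bounded packet queue pattern (names are illustrative).
#include <condition_variable>
#include <cstddef>
#include <deque>
#include <mutex>

extern "C" {
#include "libavcodec/avcodec.h"
}

template<size_t SizeLimit>
class BoundedPacketQueue {
    std::mutex mMutex;
    std::condition_variable mCondVar;
    std::deque<AVPacket> mPackets;
    size_t mTotalSize{0};
    bool mFinished{false};

public:
    ~BoundedPacketQueue()
    { for(AVPacket &pkt : mPackets) av_packet_unref(&pkt); }

    /* Producer (parser thread): refuse new packets once the byte limit is hit so the
     * caller can sleep and retry; a failed av_packet_ref simply drops the packet.
     */
    bool put(const AVPacket *pkt)
    {
        {
            std::lock_guard<std::mutex> lock{mMutex};
            if(mTotalSize >= SizeLimit) return false;

            mPackets.push_back(AVPacket{});
            if(av_packet_ref(&mPackets.back(), pkt) != 0)
            {
                mPackets.pop_back();
                return true;
            }
            mTotalSize += static_cast<size_t>(mPackets.back().size);
        }
        mCondVar.notify_one();
        return true;
    }

    /* Consumer (decoder thread): wait for a packet or the finished flag, feed the codec,
     * and pop the packet unless the codec is currently full (AVERROR(EAGAIN)). An empty,
     * finished queue sends a null packet to flush the decoder, after which
     * avcodec_receive_frame eventually returns AVERROR_EOF.
     */
    int sendTo(AVCodecContext *codecctx)
    {
        std::unique_lock<std::mutex> lock{mMutex};
        while(mPackets.empty() && !mFinished)
            mCondVar.wait(lock);
        if(mPackets.empty())
            return avcodec_send_packet(codecctx, nullptr);

        const int ret{avcodec_send_packet(codecctx, &mPackets.front())};
        if(ret != AVERROR(EAGAIN))
        {
            mTotalSize -= static_cast<size_t>(mPackets.front().size);
            av_packet_unref(&mPackets.front());
            mPackets.pop_front();
        }
        return ret;
    }

    /* Producer, at end of input: wake any waiting decoder so it can flush. */
    void setFinished()
    {
        { std::lock_guard<std::mutex> lock{mMutex}; mFinished = true; }
        mCondVar.notify_one();
    }
};
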
diff --git a/examples/alffplay.cpp b/examples/alffplay.cpp index 27520a6d..655ffc96 100644 --- a/examples/alffplay.cpp +++ b/examples/alffplay.cpp @@ -8,29 +8,44 @@ #include <functional> #include <algorithm> #include <iostream> +#include <utility> #include <iomanip> +#include <cstdint> #include <cstring> -#include <limits> -#include <thread> -#include <chrono> +#include <cstdlib> #include <atomic> +#include <cerrno> +#include <chrono> +#include <cstdio> +#include <memory> +#include <string> +#include <thread> #include <vector> -#include <mutex> -#include <deque> #include <array> #include <cmath> -#include <string> +#include <deque> +#include <mutex> +#include <ratio> extern "C" { #include "libavcodec/avcodec.h" #include "libavformat/avformat.h" #include "libavformat/avio.h" -#include "libavutil/time.h" +#include "libavformat/version.h" +#include "libavutil/avutil.h" +#include "libavutil/error.h" +#include "libavutil/frame.h" +#include "libavutil/mem.h" #include "libavutil/pixfmt.h" -#include "libavutil/avstring.h" +#include "libavutil/rational.h" +#include "libavutil/samplefmt.h" +#include "libavutil/time.h" +#include "libavutil/version.h" #include "libavutil/channel_layout.h" #include "libswscale/swscale.h" #include "libswresample/swresample.h" + +struct SwsContext; } #include "SDL.h" @@ -85,20 +100,24 @@ typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values); namespace { +inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept { return static_cast<int64_t>(n); } + #ifndef M_PI #define M_PI (3.14159265358979323846) #endif +using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1_i64<<32)>>; using nanoseconds = std::chrono::nanoseconds; using microseconds = std::chrono::microseconds; using milliseconds = std::chrono::milliseconds; using seconds = std::chrono::seconds; using seconds_d64 = std::chrono::duration<double>; -const std::string AppName("alffplay"); +const std::string AppName{"alffplay"}; -bool EnableDirectOut = false; -bool EnableWideStereo = false; +bool EnableDirectOut{false}; +bool EnableWideStereo{false}; +bool DisableVideo{false}; LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT; LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT; @@ -113,27 +132,23 @@ LPALEVENTCONTROLSOFT alEventControlSOFT; LPALEVENTCALLBACKSOFT alEventCallbackSOFT; #endif -const seconds AVNoSyncThreshold(10); +const seconds AVNoSyncThreshold{10}; -const milliseconds VideoSyncThreshold(10); -#define VIDEO_PICTURE_QUEUE_SIZE 16 +const milliseconds VideoSyncThreshold{10}; +#define VIDEO_PICTURE_QUEUE_SIZE 24 -const seconds_d64 AudioSyncThreshold(0.03); -const milliseconds AudioSampleCorrectionMax(50); +const seconds_d64 AudioSyncThreshold{0.03}; +const milliseconds AudioSampleCorrectionMax{50}; /* Averaging filter coefficient for audio sync. 
*/ #define AUDIO_DIFF_AVG_NB 20 -const double AudioAvgFilterCoeff = std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB); +const double AudioAvgFilterCoeff{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB)}; /* Per-buffer size, in time */ -const milliseconds AudioBufferTime(20); +const milliseconds AudioBufferTime{20}; /* Buffer total size, in time (should be divisible by the buffer time) */ -const milliseconds AudioBufferTotalTime(800); - -#define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */ +const milliseconds AudioBufferTotalTime{800}; enum { - FF_UPDATE_EVENT = SDL_USEREVENT, - FF_REFRESH_EVENT, - FF_MOVIE_DONE_EVENT + FF_MOVIE_DONE_EVENT = SDL_USEREVENT }; enum class SyncMaster { @@ -146,7 +161,7 @@ enum class SyncMaster { inline microseconds get_avtime() -{ return microseconds(av_gettime()); } +{ return microseconds{av_gettime()}; } /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */ struct AVIOContextDeleter { @@ -180,43 +195,83 @@ struct SwsContextDeleter { using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>; +template<size_t SizeLimit> class PacketQueue { + std::mutex mMutex; + std::condition_variable mCondVar; std::deque<AVPacket> mPackets; size_t mTotalSize{0}; + bool mFinished{false}; -public: - ~PacketQueue() { clear(); } - - bool empty() const noexcept { return mPackets.empty(); } - size_t totalSize() const noexcept { return mTotalSize; } - - void put(const AVPacket *pkt) + AVPacket *getPacket(std::unique_lock<std::mutex> &lock) { - mPackets.push_back(AVPacket{}); - if(av_packet_ref(&mPackets.back(), pkt) != 0) - mPackets.pop_back(); - else - mTotalSize += mPackets.back().size; + while(mPackets.empty() && !mFinished) + mCondVar.wait(lock); + return mPackets.empty() ? nullptr : &mPackets.front(); } - AVPacket *front() noexcept - { return &mPackets.front(); } - void pop() { AVPacket *pkt = &mPackets.front(); - mTotalSize -= pkt->size; + mTotalSize -= static_cast<unsigned int>(pkt->size); av_packet_unref(pkt); mPackets.pop_front(); } - void clear() +public: + ~PacketQueue() { for(AVPacket &pkt : mPackets) av_packet_unref(&pkt); mPackets.clear(); mTotalSize = 0; } + + int sendTo(AVCodecContext *codecctx) + { + std::unique_lock<std::mutex> lock{mMutex}; + + AVPacket *pkt{getPacket(lock)}; + if(!pkt) return avcodec_send_packet(codecctx, nullptr); + + const int ret{avcodec_send_packet(codecctx, pkt)}; + if(ret != AVERROR(EAGAIN)) + { + if(ret < 0) + std::cerr<< "Failed to send packet: "<<ret <<std::endl; + pop(); + } + return ret; + } + + void setFinished() + { + { + std::lock_guard<std::mutex> _{mMutex}; + mFinished = true; + } + mCondVar.notify_one(); + } + + bool put(const AVPacket *pkt) + { + { + std::unique_lock<std::mutex> lock{mMutex}; + if(mTotalSize >= SizeLimit) + return false; + + mPackets.push_back(AVPacket{}); + if(av_packet_ref(&mPackets.back(), pkt) != 0) + { + mPackets.pop_back(); + return true; + } + + mTotalSize += static_cast<unsigned int>(mPackets.back().size); + } + mCondVar.notify_one(); + return true; + } }; @@ -228,8 +283,7 @@ struct AudioState { AVStream *mStream{nullptr}; AVCodecCtxPtr mCodecCtx; - std::mutex mQueueMtx; - std::condition_variable mQueueCond; + PacketQueue<2*1024*1024> mPackets; /* Used for clock difference average computation */ seconds_d64 mClockDiffAvg{0}; @@ -245,7 +299,7 @@ struct AudioState { SwrContextPtr mSwresCtx; /* Conversion format, for what gets fed to OpenAL */ - int mDstChanLayout{0}; + uint64_t mDstChanLayout{0}; AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE}; /* Storage of converted 
samples */ @@ -256,14 +310,14 @@ struct AudioState { /* OpenAL format */ ALenum mFormat{AL_NONE}; - ALsizei mFrameSize{0}; + ALuint mFrameSize{0}; std::mutex mSrcMutex; std::condition_variable mSrcCond; std::atomic_flag mConnected; ALuint mSource{0}; std::vector<ALuint> mBuffers; - ALsizei mBufferIdx{0}; + ALuint mBufferIdx{0}; AudioState(MovieState &movie) : mMovie(movie) { mConnected.test_and_set(std::memory_order_relaxed); } @@ -272,7 +326,7 @@ struct AudioState { if(mSource) alDeleteSources(1, &mSource); if(!mBuffers.empty()) - alDeleteBuffers(mBuffers.size(), mBuffers.data()); + alDeleteBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data()); av_freep(&mSamples); } @@ -286,16 +340,15 @@ struct AudioState { nanoseconds getClockNoLock(); nanoseconds getClock() { - std::lock_guard<std::mutex> lock(mSrcMutex); + std::lock_guard<std::mutex> lock{mSrcMutex}; return getClockNoLock(); } - bool isBufferFilled(); void startPlayback(); int getSync(); int decodeFrame(); - bool readAudio(uint8_t *samples, int length); + bool readAudio(uint8_t *samples, unsigned int length); int handler(); }; @@ -306,53 +359,46 @@ struct VideoState { AVStream *mStream{nullptr}; AVCodecCtxPtr mCodecCtx; - std::mutex mQueueMtx; - std::condition_variable mQueueCond; + PacketQueue<14*1024*1024> mPackets; - nanoseconds mClock{0}; - nanoseconds mFrameTimer{0}; - nanoseconds mFrameLastPts{0}; - nanoseconds mFrameLastDelay{0}; - nanoseconds mCurrentPts{0}; - /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */ - microseconds mCurrentPtsTime{0}; + /* The pts of the currently displayed frame, and the time (av_gettime) it + * was last updated - used to have running video pts + */ + nanoseconds mDisplayPts{0}; + microseconds mDisplayPtsTime{microseconds::min()}; + std::mutex mDispPtsMutex; - /* Decompressed video frame, and swscale context for conversion */ - AVFramePtr mDecodedFrame; + /* Swscale context for format conversion */ SwsContextPtr mSwscaleCtx; struct Picture { - SDL_Texture *mImage{nullptr}; - int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */ - std::atomic<bool> mUpdated{false}; - nanoseconds mPts{0}; - - ~Picture() - { - if(mImage) - SDL_DestroyTexture(mImage); - mImage = nullptr; - } + AVFramePtr mFrame{}; + nanoseconds mPts{nanoseconds::min()}; }; std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ; - size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0}; + std::atomic<size_t> mPictQRead{0u}, mPictQWrite{1u}; std::mutex mPictQMutex; std::condition_variable mPictQCond; + + SDL_Texture *mImage{nullptr}; + int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */ bool mFirstUpdate{true}; + std::atomic<bool> mEOS{false}; std::atomic<bool> mFinalUpdate{false}; VideoState(MovieState &movie) : mMovie(movie) { } + ~VideoState() + { + if(mImage) + SDL_DestroyTexture(mImage); + mImage = nullptr; + } nanoseconds getClock(); - bool isBufferFilled(); - static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque); - void schedRefresh(milliseconds delay); void display(SDL_Window *screen, SDL_Renderer *renderer); - void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer); - void updatePicture(SDL_Window *screen, SDL_Renderer *renderer); - int queuePicture(nanoseconds pts); + void updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw); int handler(); }; @@ -362,13 +408,7 @@ struct MovieState { SyncMaster mAVSyncType{SyncMaster::Default}; - microseconds mClockBase{0}; - std::atomic<bool> mPlaying{false}; 
- - std::mutex mSendMtx; - std::condition_variable mSendCond; - /* NOTE: false/clear = need data, true/set = no data needed */ - std::atomic_flag mSendDataGood; + microseconds mClockBase{microseconds::min()}; std::atomic<bool> mQuit{false}; @@ -401,7 +441,7 @@ struct MovieState { nanoseconds getDuration(); - int streamComponentOpen(int stream_index); + int streamComponentOpen(unsigned int stream_index); int parse_handler(); }; @@ -417,10 +457,10 @@ nanoseconds AudioState::getClockNoLock() // Get the current device clock time and latency. auto device = alcGetContextsDevice(alcGetCurrentContext()); - ALCint64SOFT devtimes[2] = {0,0}; + ALCint64SOFT devtimes[2]{0,0}; alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes); - auto latency = nanoseconds(devtimes[1]); - auto device_time = nanoseconds(devtimes[0]); + auto latency = nanoseconds{devtimes[1]}; + auto device_time = nanoseconds{devtimes[0]}; // The clock is simply the current device time relative to the recorded // start time. We can also subtract the latency to get more a accurate @@ -443,12 +483,10 @@ nanoseconds AudioState::getClockNoLock() * sample at OpenAL's current position, and subtracting the source latency * from that gives the timestamp of the sample currently at the DAC. */ - nanoseconds pts = mCurrentPts; + nanoseconds pts{mCurrentPts}; if(mSource) { ALint64SOFT offset[2]; - ALint queued; - ALint status; /* NOTE: The source state must be checked last, in case an underrun * occurs and the source stops between retrieving the offset+latency @@ -459,9 +497,10 @@ nanoseconds AudioState::getClockNoLock() { ALint ioffset; alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset); - offset[0] = (ALint64SOFT)ioffset << 32; + offset[0] = ALint64SOFT{ioffset} << 32; offset[1] = 0; } + ALint queued, status; alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued); alGetSourcei(mSource, AL_SOURCE_STATE, &status); @@ -471,46 +510,31 @@ nanoseconds AudioState::getClockNoLock() * when it starts recovery. */ if(status != AL_STOPPED) { - using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>; - pts -= AudioBufferTime*queued; pts += std::chrono::duration_cast<nanoseconds>( - fixed32(offset[0] / mCodecCtx->sample_rate) - ); + fixed32{offset[0] / mCodecCtx->sample_rate}); } /* Don't offset by the latency if the source isn't playing. */ if(status == AL_PLAYING) - pts -= nanoseconds(offset[1]); + pts -= nanoseconds{offset[1]}; } return std::max(pts, nanoseconds::zero()); } -bool AudioState::isBufferFilled() -{ - /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as - * does the source gen. So when we're able to grab the lock and the source - * is valid, the queue must be full. - */ - std::lock_guard<std::mutex> lock(mSrcMutex); - return mSource != 0; -} - void AudioState::startPlayback() { alSourcePlay(mSource); if(alcGetInteger64vSOFT) { - using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>; - // Subtract the total buffer queue time from the current pts to get the // pts of the start of the queue. 
- nanoseconds startpts = mCurrentPts - AudioBufferTotalTime; - int64_t srctimes[2]={0,0}; + nanoseconds startpts{mCurrentPts - AudioBufferTotalTime}; + int64_t srctimes[2]{0,0}; alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes); - auto device_time = nanoseconds(srctimes[1]); - auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32(srctimes[0])) / - mCodecCtx->sample_rate; + auto device_time = nanoseconds{srctimes[1]}; + auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32{srctimes[0]}) / + mCodecCtx->sample_rate; // The mixer may have ticked and incremented the device time and sample // offset, so subtract the source offset from the device time to get @@ -543,47 +567,31 @@ int AudioState::getSync() return 0; /* Constrain the per-update difference to avoid exceedingly large skips */ - diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax), - AudioSampleCorrectionMax); - return (int)std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count(); + diff = std::min<nanoseconds>(diff, AudioSampleCorrectionMax); + return static_cast<int>(std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count()); } int AudioState::decodeFrame() { while(!mMovie.mQuit.load(std::memory_order_relaxed)) { - std::unique_lock<std::mutex> lock(mQueueMtx); - int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get()); - if(ret == AVERROR(EAGAIN)) - { - mMovie.mSendDataGood.clear(std::memory_order_relaxed); - std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock(); - mMovie.mSendCond.notify_one(); - do { - mQueueCond.wait(lock); - ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get()); - } while(ret == AVERROR(EAGAIN)); - } - lock.unlock(); - if(ret == AVERROR_EOF) break; - mMovie.mSendDataGood.clear(std::memory_order_relaxed); - mMovie.mSendCond.notify_one(); - if(ret < 0) + int ret; + while((ret=avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get())) == AVERROR(EAGAIN)) + mPackets.sendTo(mCodecCtx.get()); + if(ret != 0) { - std::cerr<< "Failed to decode frame: "<<ret <<std::endl; - return 0; + if(ret == AVERROR_EOF) break; + std::cerr<< "Failed to receive frame: "<<ret <<std::endl; + continue; } if(mDecodedFrame->nb_samples <= 0) - { - av_frame_unref(mDecodedFrame.get()); continue; - } /* If provided, update w/ pts */ if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE) mCurrentPts = std::chrono::duration_cast<nanoseconds>( - seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp) + seconds_d64{av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp} ); if(mDecodedFrame->nb_samples > mSamplesMax) @@ -596,9 +604,8 @@ int AudioState::decodeFrame() mSamplesMax = mDecodedFrame->nb_samples; } /* Return the amount of sample frames converted */ - int data_size = swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples, - (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples - ); + int data_size{swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples, + const_cast<const uint8_t**>(mDecodedFrame->data), mDecodedFrame->nb_samples)}; av_frame_unref(mDecodedFrame.get()); return data_size; @@ -611,17 +618,17 @@ int AudioState::decodeFrame() * multiple of the template type size. 
*/ template<typename T> -static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size) +static void sample_dup(uint8_t *out, const uint8_t *in, unsigned int count, size_t frame_size) { - const T *sample = reinterpret_cast<const T*>(in); - T *dst = reinterpret_cast<T*>(out); + auto *sample = reinterpret_cast<const T*>(in); + auto *dst = reinterpret_cast<T*>(out); if(frame_size == sizeof(T)) std::fill_n(dst, count, *sample); else { /* NOTE: frame_size is a multiple of sizeof(T). */ - int type_mult = frame_size / sizeof(T); - int i = 0; + size_t type_mult{frame_size / sizeof(T)}; + size_t i{0}; std::generate_n(dst, count*type_mult, [sample,type_mult,&i]() -> T { @@ -634,10 +641,10 @@ static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_siz } -bool AudioState::readAudio(uint8_t *samples, int length) +bool AudioState::readAudio(uint8_t *samples, unsigned int length) { - int sample_skip = getSync(); - int audio_size = 0; + int sample_skip{getSync()}; + unsigned int audio_size{0}; /* Read the next chunk of data, refill the buffer, and queue it * on the source */ @@ -656,22 +663,23 @@ bool AudioState::readAudio(uint8_t *samples, int length) // Adjust the device start time and current pts by the amount we're // skipping/duplicating, so that the clock remains correct for the // current stream position. - auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate; + auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate; mDeviceStartTime -= skip; mCurrentPts += skip; continue; } - int rem = length - audio_size; + unsigned int rem{length - audio_size}; if(mSamplesPos >= 0) { - int len = mSamplesLen - mSamplesPos; + const auto len = static_cast<unsigned int>(mSamplesLen - mSamplesPos); if(rem > len) rem = len; - memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize); + std::copy_n(mSamples + static_cast<unsigned int>(mSamplesPos)*mFrameSize, + rem*mFrameSize, samples); } else { - rem = std::min(rem, -mSamplesPos); + rem = std::min(rem, static_cast<unsigned int>(-mSamplesPos)); /* Add samples by copying the first sample */ if((mFrameSize&7) == 0) @@ -685,7 +693,7 @@ bool AudioState::readAudio(uint8_t *samples, int length) } mSamplesPos += rem; - mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate; + mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate; samples += rem*mFrameSize; audio_size += rem; } @@ -694,10 +702,10 @@ bool AudioState::readAudio(uint8_t *samples, int length) if(audio_size < length) { - int rem = length - audio_size; + const unsigned int rem{length - audio_size}; std::fill_n(samples, rem*mFrameSize, (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00); - mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate; + mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate; audio_size += rem; } return true; @@ -709,14 +717,14 @@ void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALui ALsizei length, const ALchar *message, void *userParam) { - AudioState *self = reinterpret_cast<AudioState*>(userParam); + auto self = static_cast<AudioState*>(userParam); if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT) { /* Temporarily lock the source mutex to ensure it's not between * checking the processed count and going to sleep. 
*/ - std::unique_lock<std::mutex>(self->mSrcMutex).unlock(); + std::unique_lock<std::mutex>{self->mSrcMutex}.unlock(); self->mSrcCond.notify_one(); return; } @@ -736,15 +744,15 @@ void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALui std::cout<< "\n" "Object ID: "<<object<<"\n" "Parameter: "<<param<<"\n" - "Message: "<<std::string(message, length)<<"\n----"<< + "Message: "<<std::string{message, static_cast<ALuint>(length)}<<"\n----"<< std::endl; if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT) { - { std::lock_guard<std::mutex> lock(self->mSrcMutex); + { + std::lock_guard<std::mutex> lock{self->mSrcMutex}; self->mConnected.clear(std::memory_order_release); } - std::unique_lock<std::mutex>(self->mSrcMutex).unlock(); self->mSrcCond.notify_one(); } } @@ -752,8 +760,8 @@ void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALui int AudioState::handler() { - std::unique_lock<std::mutex> lock(mSrcMutex); - milliseconds sleep_time = AudioBufferTime / 3; + std::unique_lock<std::mutex> srclock{mSrcMutex, std::defer_lock}; + milliseconds sleep_time{AudioBufferTime / 3}; ALenum fmt; #ifdef AL_SOFT_events @@ -772,13 +780,15 @@ int AudioState::handler() /* Find a suitable format for OpenAL. */ mDstChanLayout = 0; - if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P) + mFormat = AL_NONE; + if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) && + alIsExtensionPresent("AL_EXT_FLOAT32")) { - mDstSampleFmt = AV_SAMPLE_FMT_U8; - mFrameSize = 1; + mDstSampleFmt = AV_SAMPLE_FMT_FLT; + mFrameSize = 4; if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 && alIsExtensionPresent("AL_EXT_MCFORMATS") && - (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1) + (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1) { mDstChanLayout = mCodecCtx->channel_layout; mFrameSize *= 8; @@ -787,7 +797,7 @@ int AudioState::handler() if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 || mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) && alIsExtensionPresent("AL_EXT_MCFORMATS") && - (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1) + (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1) { mDstChanLayout = mCodecCtx->channel_layout; mFrameSize *= 6; @@ -797,23 +807,42 @@ int AudioState::handler() { mDstChanLayout = mCodecCtx->channel_layout; mFrameSize *= 1; - mFormat = AL_FORMAT_MONO8; + mFormat = AL_FORMAT_MONO_FLOAT32; } - if(!mDstChanLayout) + /* Assume 3D B-Format (ambisonics) if the channel layout is blank and + * there's 4 or more channels. FFmpeg/libavcodec otherwise seems to + * have no way to specify if the source is actually B-Format (let alone + * if it's 2D or 3D). + */ + if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 && + alIsExtensionPresent("AL_EXT_BFORMAT") && + (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32")) != AL_NONE && fmt != -1) + { + int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1}; + if((order+1)*(order+1) == mCodecCtx->channels || + (order+1)*(order+1) + 2 == mCodecCtx->channels) + { + /* OpenAL only supports first-order with AL_EXT_BFORMAT, which + * is 4 channels for 3D buffers. 
+ */ + mFrameSize *= 4; + mFormat = fmt; + } + } + if(!mFormat) { mDstChanLayout = AV_CH_LAYOUT_STEREO; mFrameSize *= 2; - mFormat = AL_FORMAT_STEREO8; + mFormat = AL_FORMAT_STEREO_FLOAT32; } } - if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) && - alIsExtensionPresent("AL_EXT_FLOAT32")) + if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P) { - mDstSampleFmt = AV_SAMPLE_FMT_FLT; - mFrameSize = 4; + mDstSampleFmt = AV_SAMPLE_FMT_U8; + mFrameSize = 1; if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 && alIsExtensionPresent("AL_EXT_MCFORMATS") && - (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1) + (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1) { mDstChanLayout = mCodecCtx->channel_layout; mFrameSize *= 8; @@ -822,7 +851,7 @@ int AudioState::handler() if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 || mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) && alIsExtensionPresent("AL_EXT_MCFORMATS") && - (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1) + (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1) { mDstChanLayout = mCodecCtx->channel_layout; mFrameSize *= 6; @@ -832,16 +861,28 @@ int AudioState::handler() { mDstChanLayout = mCodecCtx->channel_layout; mFrameSize *= 1; - mFormat = AL_FORMAT_MONO_FLOAT32; + mFormat = AL_FORMAT_MONO8; } - if(!mDstChanLayout) + if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 && + alIsExtensionPresent("AL_EXT_BFORMAT") && + (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D8")) != AL_NONE && fmt != -1) + { + int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1}; + if((order+1)*(order+1) == mCodecCtx->channels || + (order+1)*(order+1) + 2 == mCodecCtx->channels) + { + mFrameSize *= 4; + mFormat = fmt; + } + } + if(!mFormat) { mDstChanLayout = AV_CH_LAYOUT_STEREO; mFrameSize *= 2; - mFormat = AL_FORMAT_STEREO_FLOAT32; + mFormat = AL_FORMAT_STEREO8; } } - if(!mDstChanLayout) + if(!mFormat) { mDstSampleFmt = AV_SAMPLE_FMT_S16; mFrameSize = 2; @@ -868,18 +909,30 @@ int AudioState::handler() mFrameSize *= 1; mFormat = AL_FORMAT_MONO16; } - if(!mDstChanLayout) + if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 && + alIsExtensionPresent("AL_EXT_BFORMAT") && + (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D16")) != AL_NONE && fmt != -1) + { + int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1}; + if((order+1)*(order+1) == mCodecCtx->channels || + (order+1)*(order+1) + 2 == mCodecCtx->channels) + { + mFrameSize *= 4; + mFormat = fmt; + } + } + if(!mFormat) { mDstChanLayout = AV_CH_LAYOUT_STEREO; mFrameSize *= 2; mFormat = AL_FORMAT_STEREO16; } } - void *samples = nullptr; - ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>( - mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize; + void *samples{nullptr}; + ALsizei buffer_len = static_cast<int>(std::chrono::duration_cast<seconds>( + mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize); - mSamples = NULL; + mSamples = nullptr; mSamplesMax = 0; mSamplesPos = 0; mSamplesLen = 0; @@ -891,13 +944,36 @@ int AudioState::handler() goto finish; } - mSwresCtx.reset(swr_alloc_set_opts(nullptr, - mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate, - mCodecCtx->channel_layout ? 
mCodecCtx->channel_layout : - (uint64_t)av_get_default_channel_layout(mCodecCtx->channels), - mCodecCtx->sample_fmt, mCodecCtx->sample_rate, - 0, nullptr - )); + if(!mDstChanLayout) + { + /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so + * we have to drop any extra channels. It also only supports FuMa + * channel ordering and normalization, so a custom matrix is needed to + * scale and reorder the source from AmbiX. + */ + mSwresCtx.reset(swr_alloc_set_opts(nullptr, + (1_i64<<4)-1, mDstSampleFmt, mCodecCtx->sample_rate, + (1_i64<<mCodecCtx->channels)-1, mCodecCtx->sample_fmt, mCodecCtx->sample_rate, + 0, nullptr)); + + /* Note that ffmpeg/libavcodec has no method to check the ambisonic + * channel order and normalization, so we can only assume AmbiX as the + * defacto-standard. This is not true for .amb files, which use FuMa. + */ + std::vector<double> mtx(64*64, 0.0); + mtx[0 + 0*64] = std::sqrt(0.5); + mtx[3 + 1*64] = 1.0; + mtx[1 + 2*64] = 1.0; + mtx[2 + 3*64] = 1.0; + swr_set_matrix(mSwresCtx.get(), mtx.data(), 64); + } + else + mSwresCtx.reset(swr_alloc_set_opts(nullptr, + static_cast<int64_t>(mDstChanLayout), mDstSampleFmt, mCodecCtx->sample_rate, + mCodecCtx->channel_layout ? static_cast<int64_t>(mCodecCtx->channel_layout) : + av_get_default_channel_layout(mCodecCtx->channels), + mCodecCtx->sample_fmt, mCodecCtx->sample_rate, + 0, nullptr)); if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0) { std::cerr<< "Failed to initialize audio converter" <<std::endl; @@ -905,14 +981,14 @@ int AudioState::handler() } mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0); - alGenBuffers(mBuffers.size(), mBuffers.data()); + alGenBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data()); alGenSources(1, &mSource); if(EnableDirectOut) alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE); - if(EnableWideStereo) - { - ALfloat angles[2] = { (ALfloat)(M_PI/3.0), (ALfloat)(-M_PI/3.0) }; + if (EnableWideStereo) { + ALfloat angles[2] = {static_cast<ALfloat>(M_PI / 3.0), + static_cast<ALfloat>(-M_PI / 3.0)}; alSourcefv(mSource, AL_STEREO_ANGLES, angles); } @@ -928,13 +1004,28 @@ int AudioState::handler() if(alGetError() != AL_NO_ERROR) { fprintf(stderr, "Failed to use mapped buffers\n"); - samples = av_malloc(buffer_len); + samples = av_malloc(static_cast<ALuint>(buffer_len)); } } else #endif - samples = av_malloc(buffer_len); + samples = av_malloc(static_cast<ALuint>(buffer_len)); + /* Prefill the codec buffer. */ + do { + const int ret{mPackets.sendTo(mCodecCtx.get())}; + if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + break; + } while(1); + + srclock.lock(); + if(alcGetInteger64vSOFT) + { + int64_t devtime{}; + alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT, + 1, &devtime); + mDeviceStartTime = nanoseconds{devtime} - mCurrentPts; + } while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed) && mConnected.test_and_set(std::memory_order_relaxed)) { @@ -944,35 +1035,37 @@ int AudioState::handler() while(processed > 0) { std::array<ALuint,4> bids; - alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed), - bids.data()); - processed -= std::min<ALsizei>(bids.size(), processed); + const ALsizei todq{std::min<ALsizei>(bids.size(), processed)}; + alSourceUnqueueBuffers(mSource, todq, bids.data()); + processed -= todq; } /* Refill the buffer queue. 
*/ ALint queued; alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued); - while((ALuint)queued < mBuffers.size()) + while(static_cast<ALuint>(queued) < mBuffers.size()) { - ALuint bufid = mBuffers[mBufferIdx]; - - uint8_t *ptr = reinterpret_cast<uint8_t*>(samples -#ifdef AL_SOFT_map_buffer - ? samples : alMapBufferSOFT(bufid, 0, buffer_len, AL_MAP_WRITE_BIT_SOFT) -#endif - ); - if(!ptr) break; - + const ALuint bufid{mBuffers[mBufferIdx]}; /* Read the next chunk of data, filling the buffer, and queue it on - * the source */ - bool got_audio = readAudio(ptr, buffer_len); + * the source. + */ #ifdef AL_SOFT_map_buffer - if(!samples) alUnmapBufferSOFT(bufid); + if(!samples) + { + auto ptr = static_cast<uint8_t*>(alMapBufferSOFT(bufid, 0, buffer_len, + AL_MAP_WRITE_BIT_SOFT)); + bool got_audio{readAudio(ptr, static_cast<unsigned int>(buffer_len))}; + alUnmapBufferSOFT(bufid); + if(!got_audio) break; + } + else #endif - if(!got_audio) break; - - if(samples) + { + auto ptr = static_cast<uint8_t*>(samples); + if(!readAudio(ptr, static_cast<unsigned int>(buffer_len))) + break; alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate); + } alSourceQueueBuffers(mSource, 1, &bufid); mBufferIdx = (mBufferIdx+1) % mBuffers.size(); @@ -992,19 +1085,29 @@ int AudioState::handler() */ alSourceRewind(mSource); alSourcei(mSource, AL_BUFFER, 0); + if(alcGetInteger64vSOFT) + { + /* Also update the device start time with the current device + * clock, so the decoder knows we're running behind. + */ + int64_t devtime{}; + alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), + ALC_DEVICE_CLOCK_SOFT, 1, &devtime); + mDeviceStartTime = nanoseconds{devtime} - mCurrentPts; + } continue; } /* (re)start the source if needed, and wait for a buffer to finish */ - if(state != AL_PLAYING && state != AL_PAUSED && - mMovie.mPlaying.load(std::memory_order_relaxed)) + if(state != AL_PLAYING && state != AL_PAUSED) startPlayback(); - mSrcCond.wait_for(lock, sleep_time); + mSrcCond.wait_for(srclock, sleep_time); } alSourceRewind(mSource); alSourcei(mSource, AL_BUFFER, 0); + srclock.unlock(); finish: av_freep(&samples); @@ -1024,350 +1127,267 @@ finish: nanoseconds VideoState::getClock() { /* NOTE: This returns incorrect times while not playing. */ - auto delta = get_avtime() - mCurrentPtsTime; - return mCurrentPts + delta; -} - -bool VideoState::isBufferFilled() -{ - std::unique_lock<std::mutex> lock(mPictQMutex); - return mPictQSize >= mPictQ.size(); -} - -Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque) -{ - SDL_Event evt{}; - evt.user.type = FF_REFRESH_EVENT; - evt.user.data1 = opaque; - SDL_PushEvent(&evt); - return 0; /* 0 means stop timer */ -} - -/* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */ -void VideoState::schedRefresh(milliseconds delay) -{ - SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this); + std::lock_guard<std::mutex> _{mDispPtsMutex}; + if(mDisplayPtsTime == microseconds::min()) + return nanoseconds::zero(); + auto delta = get_avtime() - mDisplayPtsTime; + return mDisplayPts + delta; } -/* Called by VideoState::refreshTimer to display the next video frame. */ +/* Called by VideoState::updateVideo to display the next video frame. 
*/ void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer) { - Picture *vp = &mPictQ[mPictQRead]; - - if(!vp->mImage) + if(!mImage) return; - float aspect_ratio; + double aspect_ratio; int win_w, win_h; int w, h, x, y; if(mCodecCtx->sample_aspect_ratio.num == 0) - aspect_ratio = 0.0f; + aspect_ratio = 0.0; else { aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width / mCodecCtx->height; } - if(aspect_ratio <= 0.0f) - aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height; + if(aspect_ratio <= 0.0) + aspect_ratio = static_cast<double>(mCodecCtx->width) / mCodecCtx->height; SDL_GetWindowSize(screen, &win_w, &win_h); h = win_h; - w = ((int)rint(h * aspect_ratio) + 3) & ~3; + w = (static_cast<int>(std::rint(h * aspect_ratio)) + 3) & ~3; if(w > win_w) { w = win_w; - h = ((int)rint(w / aspect_ratio) + 3) & ~3; + h = (static_cast<int>(std::rint(w / aspect_ratio)) + 3) & ~3; } x = (win_w - w) / 2; y = (win_h - h) / 2; - SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight }; + SDL_Rect src_rect{ 0, 0, mWidth, mHeight }; SDL_Rect dst_rect{ x, y, w, h }; - SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect); + SDL_RenderCopy(renderer, mImage, &src_rect, &dst_rect); SDL_RenderPresent(renderer); } -/* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer - * was created. It handles the display of the next decoded video frame (if not - * falling behind), and sets up the timer for the following video frame. +/* Called regularly on the main thread where the SDL_Renderer was created. It + * handles updating the textures of decoded frames and displaying the latest + * frame. */ -void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer) +void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw) { - if(!mStream) - { - if(mEOS) - { - mFinalUpdate = true; - std::unique_lock<std::mutex>(mPictQMutex).unlock(); - mPictQCond.notify_all(); - return; - } - schedRefresh(milliseconds(100)); - return; - } - if(!mMovie.mPlaying.load(std::memory_order_relaxed)) + size_t read_idx{mPictQRead.load(std::memory_order_relaxed)}; + Picture *vp{&mPictQ[read_idx]}; + + auto clocktime = mMovie.getMasterClock(); + bool updated{false}; + while(1) { - schedRefresh(milliseconds(1)); - return; - } + size_t next_idx{(read_idx+1)%mPictQ.size()}; + if(next_idx == mPictQWrite.load(std::memory_order_acquire)) + break; + Picture *nextvp{&mPictQ[next_idx]}; + if(clocktime < nextvp->mPts) + break; - std::unique_lock<std::mutex> lock(mPictQMutex); -retry: - if(mPictQSize == 0) + vp = nextvp; + updated = true; + read_idx = next_idx; + } + if(mMovie.mQuit.load(std::memory_order_relaxed)) { if(mEOS) mFinalUpdate = true; - else - schedRefresh(milliseconds(1)); - lock.unlock(); - mPictQCond.notify_all(); + mPictQRead.store(read_idx, std::memory_order_release); + std::unique_lock<std::mutex>{mPictQMutex}.unlock(); + mPictQCond.notify_one(); return; } - Picture *vp = &mPictQ[mPictQRead]; - mCurrentPts = vp->mPts; - mCurrentPtsTime = get_avtime(); - - /* Get delay using the frame pts and the pts from last frame. */ - auto delay = vp->mPts - mFrameLastPts; - if(delay <= seconds::zero() || delay >= seconds(1)) - { - /* If incorrect delay, use previous one. */ - delay = mFrameLastDelay; - } - /* Save for next frame. */ - mFrameLastDelay = delay; - mFrameLastPts = vp->mPts; - - /* Update delay to sync to clock if not master source. 
*/ - if(mMovie.mAVSyncType != SyncMaster::Video) + if(updated) { - auto ref_clock = mMovie.getMasterClock(); - auto diff = vp->mPts - ref_clock; + mPictQRead.store(read_idx, std::memory_order_release); + std::unique_lock<std::mutex>{mPictQMutex}.unlock(); + mPictQCond.notify_one(); - /* Skip or repeat the frame. Take delay into account. */ - auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold); - if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold)) + /* allocate or resize the buffer! */ + bool fmt_updated{false}; + if(!mImage || mWidth != mCodecCtx->width || mHeight != mCodecCtx->height) { - if(diff <= -sync_threshold) - delay = nanoseconds::zero(); - else if(diff >= sync_threshold) - delay *= 2; - } - } - - mFrameTimer += delay; - /* Compute the REAL delay. */ - auto actual_delay = mFrameTimer - get_avtime(); - if(!(actual_delay >= VideoSyncThreshold)) - { - /* We don't have time to handle this picture, just skip to the next one. */ - mPictQRead = (mPictQRead+1)%mPictQ.size(); - mPictQSize--; - goto retry; - } - schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay)); + fmt_updated = true; + if(mImage) + SDL_DestroyTexture(mImage); + mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, + mCodecCtx->coded_width, mCodecCtx->coded_height); + if(!mImage) + std::cerr<< "Failed to create YV12 texture!" <<std::endl; + mWidth = mCodecCtx->width; + mHeight = mCodecCtx->height; + + if(mFirstUpdate && mWidth > 0 && mHeight > 0) + { + /* For the first update, set the window size to the video size. */ + mFirstUpdate = false; - /* Show the picture! */ - display(screen, renderer); + int w{mWidth}; + int h{mHeight}; + if(mCodecCtx->sample_aspect_ratio.den != 0) + { + double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio); + if(aspect_ratio >= 1.0) + w = static_cast<int>(w*aspect_ratio + 0.5); + else if(aspect_ratio > 0.0) + h = static_cast<int>(h/aspect_ratio + 0.5); + } + SDL_SetWindowSize(screen, w, h); + } + } - /* Update queue for next picture. */ - mPictQRead = (mPictQRead+1)%mPictQ.size(); - mPictQSize--; - lock.unlock(); - mPictQCond.notify_all(); -} + if(mImage) + { + AVFrame *frame{vp->mFrame.get()}; + void *pixels{nullptr}; + int pitch{0}; + + if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P) + SDL_UpdateYUVTexture(mImage, nullptr, + frame->data[0], frame->linesize[0], + frame->data[1], frame->linesize[1], + frame->data[2], frame->linesize[2] + ); + else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0) + std::cerr<< "Failed to lock texture" <<std::endl; + else + { + // Convert the image into YUV format that SDL uses + int coded_w{mCodecCtx->coded_width}; + int coded_h{mCodecCtx->coded_height}; + int w{mCodecCtx->width}; + int h{mCodecCtx->height}; + if(!mSwscaleCtx || fmt_updated) + { + mSwscaleCtx.reset(sws_getContext( + w, h, mCodecCtx->pix_fmt, + w, h, AV_PIX_FMT_YUV420P, 0, + nullptr, nullptr, nullptr + )); + } -/* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the - * main thread where the renderer was created. - */ -void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer) -{ - Picture *vp = &mPictQ[mPictQWrite]; - bool fmt_updated = false; - - /* allocate or resize the buffer! 
*/ - if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height) - { - fmt_updated = true; - if(vp->mImage) - SDL_DestroyTexture(vp->mImage); - vp->mImage = SDL_CreateTexture( - renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, - mCodecCtx->coded_width, mCodecCtx->coded_height - ); - if(!vp->mImage) - std::cerr<< "Failed to create YV12 texture!" <<std::endl; - vp->mWidth = mCodecCtx->width; - vp->mHeight = mCodecCtx->height; + /* point pict at the queue */ + uint8_t *pict_data[3]; + pict_data[0] = static_cast<uint8_t*>(pixels); + pict_data[1] = pict_data[0] + coded_w*coded_h; + pict_data[2] = pict_data[1] + coded_w*coded_h/4; - if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0) - { - /* For the first update, set the window size to the video size. */ - mFirstUpdate = false; + int pict_linesize[3]; + pict_linesize[0] = pitch; + pict_linesize[1] = pitch / 2; + pict_linesize[2] = pitch / 2; - int w = vp->mWidth; - int h = vp->mHeight; - if(mCodecCtx->sample_aspect_ratio.den != 0) - { - double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio); - if(aspect_ratio >= 1.0) - w = (int)(w*aspect_ratio + 0.5); - else if(aspect_ratio > 0.0) - h = (int)(h/aspect_ratio + 0.5); + sws_scale(mSwscaleCtx.get(), reinterpret_cast<uint8_t**>(frame->data), frame->linesize, + 0, h, pict_data, pict_linesize); + SDL_UnlockTexture(mImage); } - SDL_SetWindowSize(screen, w, h); } + + redraw = true; } - if(vp->mImage) + if(redraw) { - AVFrame *frame = mDecodedFrame.get(); - void *pixels = nullptr; - int pitch = 0; - - if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P) - SDL_UpdateYUVTexture(vp->mImage, nullptr, - frame->data[0], frame->linesize[0], - frame->data[1], frame->linesize[1], - frame->data[2], frame->linesize[2] - ); - else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0) - std::cerr<< "Failed to lock texture" <<std::endl; - else - { - // Convert the image into YUV format that SDL uses - int coded_w = mCodecCtx->coded_width; - int coded_h = mCodecCtx->coded_height; - int w = mCodecCtx->width; - int h = mCodecCtx->height; - if(!mSwscaleCtx || fmt_updated) - { - mSwscaleCtx.reset(sws_getContext( - w, h, mCodecCtx->pix_fmt, - w, h, AV_PIX_FMT_YUV420P, 0, - nullptr, nullptr, nullptr - )); - } - - /* point pict at the queue */ - uint8_t *pict_data[3]; - pict_data[0] = reinterpret_cast<uint8_t*>(pixels); - pict_data[1] = pict_data[0] + coded_w*coded_h; - pict_data[2] = pict_data[1] + coded_w*coded_h/4; + /* Show the picture! 
*/ + display(screen, renderer); + } - int pict_linesize[3]; - pict_linesize[0] = pitch; - pict_linesize[1] = pitch / 2; - pict_linesize[2] = pitch / 2; + if(updated) + { + auto disp_time = get_avtime(); - sws_scale(mSwscaleCtx.get(), (const uint8_t**)frame->data, - frame->linesize, 0, h, pict_data, pict_linesize); - SDL_UnlockTexture(vp->mImage); + std::lock_guard<std::mutex> _{mDispPtsMutex}; + mDisplayPts = vp->mPts; + mDisplayPtsTime = disp_time; + } + if(mEOS.load(std::memory_order_acquire)) + { + if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire)) + { + mFinalUpdate = true; + std::unique_lock<std::mutex>{mPictQMutex}.unlock(); + mPictQCond.notify_one(); } } - - vp->mUpdated.store(true, std::memory_order_release); - std::unique_lock<std::mutex>(mPictQMutex).unlock(); - mPictQCond.notify_one(); } -int VideoState::queuePicture(nanoseconds pts) +int VideoState::handler() { - /* Wait until we have space for a new pic */ - std::unique_lock<std::mutex> lock(mPictQMutex); - while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed)) - mPictQCond.wait(lock); - lock.unlock(); - - if(mMovie.mQuit.load(std::memory_order_relaxed)) - return -1; - - Picture *vp = &mPictQ[mPictQWrite]; - - /* We have to create/update the picture in the main thread */ - vp->mUpdated.store(false, std::memory_order_relaxed); - SDL_Event evt{}; - evt.user.type = FF_UPDATE_EVENT; - evt.user.data1 = this; - SDL_PushEvent(&evt); + std::for_each(mPictQ.begin(), mPictQ.end(), + [](Picture &pict) -> void + { pict.mFrame = AVFramePtr{av_frame_alloc()}; }); + + /* Prefill the codec buffer. */ + do { + const int ret{mPackets.sendTo(mCodecCtx.get())}; + if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + break; + } while(1); - /* Wait until the picture is updated. */ - lock.lock(); - while(!vp->mUpdated.load(std::memory_order_relaxed)) { - if(mMovie.mQuit.load(std::memory_order_relaxed)) - return -1; - mPictQCond.wait(lock); + std::lock_guard<std::mutex> _{mDispPtsMutex}; + mDisplayPtsTime = get_avtime(); } - if(mMovie.mQuit.load(std::memory_order_relaxed)) - return -1; - vp->mPts = pts; - - mPictQWrite = (mPictQWrite+1)%mPictQ.size(); - mPictQSize++; - lock.unlock(); - return 0; -} - -int VideoState::handler() -{ - mDecodedFrame.reset(av_frame_alloc()); + auto current_pts = nanoseconds::zero(); while(!mMovie.mQuit.load(std::memory_order_relaxed)) { - std::unique_lock<std::mutex> lock(mQueueMtx); - /* Decode video frame */ - int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get()); - if(ret == AVERROR(EAGAIN)) - { - mMovie.mSendDataGood.clear(std::memory_order_relaxed); - std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock(); - mMovie.mSendCond.notify_one(); - do { - mQueueCond.wait(lock); - ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get()); - } while(ret == AVERROR(EAGAIN)); - } - lock.unlock(); - if(ret == AVERROR_EOF) break; - mMovie.mSendDataGood.clear(std::memory_order_relaxed); - mMovie.mSendCond.notify_one(); - if(ret < 0) + size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)}; + Picture *vp{&mPictQ[write_idx]}; + + /* Retrieve video frame. */ + AVFrame *decoded_frame{vp->mFrame.get()}; + int ret; + while((ret=avcodec_receive_frame(mCodecCtx.get(), decoded_frame)) == AVERROR(EAGAIN)) + mPackets.sendTo(mCodecCtx.get()); + if(ret != 0) { - std::cerr<< "Failed to decode frame: "<<ret <<std::endl; + if(ret == AVERROR_EOF) break; + std::cerr<< "Failed to receive frame: "<<ret <<std::endl; continue; } /* Get the PTS for this frame. 
*/ - nanoseconds pts; - if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE) - mClock = std::chrono::duration_cast<nanoseconds>( - seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp) - ); - pts = mClock; + if(decoded_frame->best_effort_timestamp != AV_NOPTS_VALUE) + current_pts = std::chrono::duration_cast<nanoseconds>( + seconds_d64{av_q2d(mStream->time_base)*decoded_frame->best_effort_timestamp}); + vp->mPts = current_pts; /* Update the video clock to the next expected PTS. */ auto frame_delay = av_q2d(mCodecCtx->time_base); - frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5); - mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay)); + frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5); + current_pts += std::chrono::duration_cast<nanoseconds>(seconds_d64{frame_delay}); - if(queuePicture(pts) < 0) - break; - av_frame_unref(mDecodedFrame.get()); + /* Put the frame in the queue to be loaded into a texture and displayed + * by the rendering thread. + */ + write_idx = (write_idx+1)%mPictQ.size(); + mPictQWrite.store(write_idx, std::memory_order_release); + + /* Send a packet now so it's hopefully ready by the time it's needed. */ + mPackets.sendTo(mCodecCtx.get()); + + if(write_idx == mPictQRead.load(std::memory_order_acquire)) + { + /* Wait until we have space for a new pic */ + std::unique_lock<std::mutex> lock{mPictQMutex}; + while(write_idx == mPictQRead.load(std::memory_order_acquire) && + !mMovie.mQuit.load(std::memory_order_relaxed)) + mPictQCond.wait(lock); + } } mEOS = true; - std::unique_lock<std::mutex> lock(mPictQMutex); - if(mMovie.mQuit.load(std::memory_order_relaxed)) - { - mPictQRead = 0; - mPictQWrite = 0; - mPictQSize = 0; - } - while(!mFinalUpdate) - mPictQCond.wait(lock); + std::unique_lock<std::mutex> lock{mPictQMutex}; + while(!mFinalUpdate) mPictQCond.wait(lock); return 0; } @@ -1375,13 +1395,13 @@ int VideoState::handler() int MovieState::decode_interrupt_cb(void *ctx) { - return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed); + return static_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed); } bool MovieState::prepare() { - AVIOContext *avioctx = nullptr; - AVIOInterruptCB intcb = { decode_interrupt_cb, this }; + AVIOContext *avioctx{nullptr}; + AVIOInterruptCB intcb{decode_interrupt_cb, this}; if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr)) { std::cerr<< "Failed to open "<<mFilename <<std::endl; @@ -1392,7 +1412,7 @@ bool MovieState::prepare() /* Open movie file. If avformat_open_input fails it will automatically free * this context, so don't set it onto a smart pointer yet. 
*/ - AVFormatContext *fmtctx = avformat_alloc_context(); + AVFormatContext *fmtctx{avformat_alloc_context()}; fmtctx->pb = mIOContext.get(); fmtctx->interrupt_callback = intcb; if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0) @@ -1409,9 +1429,7 @@ bool MovieState::prepare() return false; } - mVideo.schedRefresh(milliseconds(40)); - - mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this); + mParseThread = std::thread{std::mem_fn(&MovieState::parse_handler), this}; return true; } @@ -1427,7 +1445,7 @@ void MovieState::setTitle(SDL_Window *window) nanoseconds MovieState::getClock() { - if(!mPlaying.load(std::memory_order_relaxed)) + if(mClockBase == microseconds::min()) return nanoseconds::zero(); return get_avtime() - mClockBase; } @@ -1444,25 +1462,25 @@ nanoseconds MovieState::getMasterClock() nanoseconds MovieState::getDuration() { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); } -int MovieState::streamComponentOpen(int stream_index) +int MovieState::streamComponentOpen(unsigned int stream_index) { - if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams) + if(stream_index >= mFormatCtx->nb_streams) return -1; /* Get a pointer to the codec context for the stream, and open the * associated codec. */ - AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr)); + AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)}; if(!avctx) return -1; if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar)) return -1; - AVCodec *codec = avcodec_find_decoder(avctx->codec_id); + AVCodec *codec{avcodec_find_decoder(avctx->codec_id)}; if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0) { std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id) - << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl; + << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl; return -1; } @@ -1472,37 +1490,36 @@ int MovieState::streamComponentOpen(int stream_index) case AVMEDIA_TYPE_AUDIO: mAudio.mStream = mFormatCtx->streams[stream_index]; mAudio.mCodecCtx = std::move(avctx); - - mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio); break; case AVMEDIA_TYPE_VIDEO: mVideo.mStream = mFormatCtx->streams[stream_index]; mVideo.mCodecCtx = std::move(avctx); - - mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo); break; default: return -1; } - return stream_index; + return static_cast<int>(stream_index); } int MovieState::parse_handler() { - int video_index = -1; - int audio_index = -1; + auto &audio_queue = mAudio.mPackets; + auto &video_queue = mVideo.mPackets; + + int video_index{-1}; + int audio_index{-1}; /* Dump information about file onto standard error */ av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0); /* Find the first video and audio streams */ - for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++) + for(unsigned int i{0u};i < mFormatCtx->nb_streams;i++) { auto codecpar = mFormatCtx->streams[i]->codecpar; - if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0) + if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0) video_index = streamComponentOpen(i); else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) audio_index = streamComponentOpen(i); @@ -1514,95 +1531,38 @@ int MovieState::parse_handler() mQuit = true; } - PacketQueue audio_queue, video_queue; - bool input_finished = false; + /* Set the base time 750ms ahead of the current av time. 
*/ + mClockBase = get_avtime() + milliseconds{750}; + + if(audio_index >= 0) + mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio}; + if(video_index >= 0) + mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo}; /* Main packet reading/dispatching loop */ - while(!mQuit.load(std::memory_order_relaxed) && !input_finished) + while(!mQuit.load(std::memory_order_relaxed)) { AVPacket packet; if(av_read_frame(mFormatCtx.get(), &packet) < 0) - input_finished = true; - else - { - /* Copy the packet into the queue it's meant for. */ - if(packet.stream_index == video_index) - video_queue.put(&packet); - else if(packet.stream_index == audio_index) - audio_queue.put(&packet); - av_packet_unref(&packet); - } - - do { - /* Send whatever queued packets we have. */ - if(!audio_queue.empty()) - { - std::unique_lock<std::mutex> lock(mAudio.mQueueMtx); - int ret; - do { - ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front()); - if(ret != AVERROR(EAGAIN)) audio_queue.pop(); - } while(ret != AVERROR(EAGAIN) && !audio_queue.empty()); - lock.unlock(); - mAudio.mQueueCond.notify_one(); - } - if(!video_queue.empty()) - { - std::unique_lock<std::mutex> lock(mVideo.mQueueMtx); - int ret; - do { - ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front()); - if(ret != AVERROR(EAGAIN)) video_queue.pop(); - } while(ret != AVERROR(EAGAIN) && !video_queue.empty()); - lock.unlock(); - mVideo.mQueueCond.notify_one(); - } - /* If the queues are completely empty, or it's not full and there's - * more input to read, go get more. - */ - size_t queue_size = audio_queue.totalSize() + video_queue.totalSize(); - if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished)) - break; + break; - if(!mPlaying.load(std::memory_order_relaxed)) - { - if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) && - (!mVideo.mCodecCtx || mVideo.isBufferFilled())) - { - /* Set the base time 50ms ahead of the current av time. */ - mClockBase = get_avtime() + milliseconds(50); - mVideo.mCurrentPtsTime = mClockBase; - mVideo.mFrameTimer = mVideo.mCurrentPtsTime; - mAudio.startPlayback(); - mPlaying.store(std::memory_order_release); - } - } - /* Nothing to send or get for now, wait a bit and try again. */ - { std::unique_lock<std::mutex> lock(mSendMtx); - if(mSendDataGood.test_and_set(std::memory_order_relaxed)) - mSendCond.wait_for(lock, milliseconds(10)); - } - } while(!mQuit.load(std::memory_order_relaxed)); - } - /* Pass a null packet to finish the send buffers (the receive functions - * will get AVERROR_EOF when emptied). - */ - if(mVideo.mCodecCtx) - { - { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx); - avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr); + /* Copy the packet into the queue it's meant for. */ + if(packet.stream_index == video_index) + { + while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(&packet)) + std::this_thread::sleep_for(milliseconds{100}); } - mVideo.mQueueCond.notify_one(); - } - if(mAudio.mCodecCtx) - { - { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx); - avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr); + else if(packet.stream_index == audio_index) + { + while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(&packet)) + std::this_thread::sleep_for(milliseconds{100}); } - mAudio.mQueueCond.notify_one(); + + av_packet_unref(&packet); } - video_queue.clear(); - audio_queue.clear(); + /* Finish the queues so the receivers know nothing more is coming. 
*/ + if(mVideo.mCodecCtx) video_queue.setFinished(); + if(mAudio.mCodecCtx) audio_queue.setFinished(); /* all done - wait for it */ if(mVideoThread.joinable()) @@ -1611,7 +1571,7 @@ int MovieState::parse_handler() mAudioThread.join(); mVideo.mEOS = true; - std::unique_lock<std::mutex> lock(mVideo.mPictQMutex); + std::unique_lock<std::mutex> lock{mVideo.mPictQMutex}; while(!mVideo.mFinalUpdate) mVideo.mPictQCond.wait(lock); lock.unlock(); @@ -1628,13 +1588,13 @@ int MovieState::parse_handler() struct PrettyTime { seconds mTime; }; -inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs) +std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs) { using hours = std::chrono::hours; using minutes = std::chrono::minutes; using std::chrono::duration_cast; - seconds t = rhs.mTime; + seconds t{rhs.mTime}; if(t.count() < 0) { os << '-'; @@ -1642,7 +1602,7 @@ inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs) } // Only handle up to hour formatting - if(t >= hours(1)) + if(t >= hours{1}) os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2) << (duration_cast<minutes>(t).count() % 60) << 'm'; else @@ -1665,36 +1625,38 @@ int main(int argc, char *argv[]) return 1; } /* Register all formats and codecs */ +#if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100)) av_register_all(); +#endif /* Initialize networking protocols */ avformat_network_init(); - if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER)) + if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS)) { std::cerr<< "Could not initialize SDL - <<"<<SDL_GetError() <<std::endl; return 1; } /* Make a window to put our video */ - SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE); + SDL_Window *screen{SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE)}; if(!screen) { std::cerr<< "SDL: could not set video mode - exiting" <<std::endl; return 1; } /* Make a renderer to handle the texture image surface and rendering. */ - Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC; - SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags); + Uint32 render_flags{SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC}; + SDL_Renderer *renderer{SDL_CreateRenderer(screen, -1, render_flags)}; if(renderer) { SDL_RendererInfo rinf{}; - bool ok = false; + bool ok{false}; /* Make sure the renderer supports IYUV textures. If not, fallback to a * software renderer. 
*/ if(SDL_GetRendererInfo(renderer, &rinf) == 0) { - for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++) + for(Uint32 i{0u};!ok && i < rinf.num_texture_formats;i++) ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV); } if(!ok) @@ -1726,7 +1688,8 @@ int main(int argc, char *argv[]) return 1; } - { auto device = alcGetContextsDevice(alcGetCurrentContext()); + { + auto device = alcGetContextsDevice(alcGetCurrentContext()); if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock")) { std::cout<< "Found ALC_SOFT_device_clock" <<std::endl; @@ -1766,7 +1729,7 @@ int main(int argc, char *argv[]) } #endif - int fileidx = 0; + int fileidx{0}; for(;fileidx < argc;++fileidx) { if(strcmp(argv[fileidx], "-direct") == 0) @@ -1789,13 +1752,15 @@ int main(int argc, char *argv[]) EnableWideStereo = true; } } + else if(strcmp(argv[fileidx], "-novideo") == 0) + DisableVideo = true; else break; } while(fileidx < argc && !movState) { - movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++])); + movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}}; if(!movState->prepare()) movState = nullptr; } if(!movState) @@ -1808,106 +1773,103 @@ int main(int argc, char *argv[]) /* Default to going to the next movie at the end of one. */ enum class EomAction { Next, Quit - } eom_action = EomAction::Next; - seconds last_time(-1); - SDL_Event event; + } eom_action{EomAction::Next}; + seconds last_time{seconds::min()}; while(1) { - int have_evt = SDL_WaitEventTimeout(&event, 10); + SDL_Event event{}; + int have_evt{SDL_WaitEventTimeout(&event, 10)}; auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock()); if(cur_time != last_time) { auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration()); - std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush; + std::cout<< " \r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush; last_time = cur_time; } - if(!have_evt) continue; - - switch(event.type) - { - case SDL_KEYDOWN: - switch(event.key.keysym.sym) - { - case SDLK_ESCAPE: - movState->mQuit = true; - eom_action = EomAction::Quit; - break; - - case SDLK_n: - movState->mQuit = true; - eom_action = EomAction::Next; - break; - - default: - break; - } - break; - - case SDL_WINDOWEVENT: - switch(event.window.event) - { - case SDL_WINDOWEVENT_RESIZED: - SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255); - SDL_RenderFillRect(renderer, nullptr); - break; - - default: - break; - } - break; - - case SDL_QUIT: - movState->mQuit = true; - eom_action = EomAction::Quit; - break; - - case FF_UPDATE_EVENT: - reinterpret_cast<VideoState*>(event.user.data1)->updatePicture( - screen, renderer - ); - break; - case FF_REFRESH_EVENT: - reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer( - screen, renderer - ); - break; + bool force_redraw{false}; + if(have_evt) do { + switch(event.type) + { + case SDL_KEYDOWN: + switch(event.key.keysym.sym) + { + case SDLK_ESCAPE: + movState->mQuit = true; + eom_action = EomAction::Quit; + break; + + case SDLK_n: + movState->mQuit = true; + eom_action = EomAction::Next; + break; + + default: + break; + } + break; - case FF_MOVIE_DONE_EVENT: - std::cout<<'\n'; - last_time = seconds(-1); - if(eom_action != EomAction::Quit) - { - movState = nullptr; - while(fileidx < argc && !movState) + case SDL_WINDOWEVENT: + switch(event.window.event) { - movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++])); - if(!movState->prepare()) movState = nullptr; + case SDL_WINDOWEVENT_RESIZED: + 
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255); + SDL_RenderFillRect(renderer, nullptr); + force_redraw = true; + break; + + case SDL_WINDOWEVENT_EXPOSED: + force_redraw = true; + break; + + default: + break; } - if(movState) + break; + + case SDL_QUIT: + movState->mQuit = true; + eom_action = EomAction::Quit; + break; + + case FF_MOVIE_DONE_EVENT: + std::cout<<'\n'; + last_time = seconds::min(); + if(eom_action != EomAction::Quit) { - movState->setTitle(screen); - break; + movState = nullptr; + while(fileidx < argc && !movState) + { + movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}}; + if(!movState->prepare()) movState = nullptr; + } + if(movState) + { + movState->setTitle(screen); + break; + } } - } - /* Nothing more to play. Shut everything down and quit. */ - movState = nullptr; + /* Nothing more to play. Shut everything down and quit. */ + movState = nullptr; - CloseAL(); + CloseAL(); - SDL_DestroyRenderer(renderer); - renderer = nullptr; - SDL_DestroyWindow(screen); - screen = nullptr; + SDL_DestroyRenderer(renderer); + renderer = nullptr; + SDL_DestroyWindow(screen); + screen = nullptr; - SDL_Quit(); - exit(0); + SDL_Quit(); + exit(0); - default: - break; - } + default: + break; + } + } while(SDL_PollEvent(&event)); + + movState->mVideo.updateVideo(screen, renderer, force_redraw); } std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl; diff --git a/examples/alhrtf.c b/examples/alhrtf.c index f9150ae1..2be28a91 100644 --- a/examples/alhrtf.c +++ b/examples/alhrtf.c @@ -24,11 +24,14 @@ /* This file contains an example for selecting an HRTF. */ -#include <stdio.h> #include <assert.h> #include <math.h> +#include <stdio.h> +#include <string.h> -#include <SDL_sound.h> +#include "SDL_sound.h" +#include "SDL_audio.h" +#include "SDL_stdinc.h" #include "AL/al.h" #include "AL/alc.h" @@ -109,7 +112,7 @@ static ALuint LoadSound(const char *filename) * close the file. */ buffer = 0; alGenBuffers(1, &buffer); - alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate); + alBufferData(buffer, format, sample->buffer, (ALsizei)slen, (ALsizei)sample->actual.rate); Sound_FreeSample(sample); /* Check if an error occured, and clean up if so. */ @@ -129,6 +132,7 @@ static ALuint LoadSound(const char *filename) int main(int argc, char **argv) { ALCdevice *device; + ALCcontext *context; ALboolean has_angle_ext; ALuint source, buffer; const char *soundname; @@ -150,7 +154,8 @@ int main(int argc, char **argv) if(InitAL(&argv, &argc) != 0) return 1; - device = alcGetContextsDevice(alcGetCurrentContext()); + context = alcGetCurrentContext(); + device = alcGetContextsDevice(context); if(!alcIsExtensionPresent(device, "ALC_SOFT_HRTF")) { fprintf(stderr, "Error: ALC_SOFT_HRTF not supported\n"); @@ -159,16 +164,16 @@ int main(int argc, char **argv) } /* Define a macro to help load the function pointers. */ -#define LOAD_PROC(d, x) ((x) = alcGetProcAddress((d), #x)) - LOAD_PROC(device, alcGetStringiSOFT); - LOAD_PROC(device, alcResetDeviceSOFT); +#define LOAD_PROC(d, T, x) ((x) = (T)alcGetProcAddress((d), #x)) + LOAD_PROC(device, LPALCGETSTRINGISOFT, alcGetStringiSOFT); + LOAD_PROC(device, LPALCRESETDEVICESOFT, alcResetDeviceSOFT); #undef LOAD_PROC /* Check for the AL_EXT_STEREO_ANGLES extension to be able to also rotate * stereo sources. 
*/ has_angle_ext = alIsExtensionPresent("AL_EXT_STEREO_ANGLES"); - printf("AL_EXT_STEREO_ANGLES%s found\n", has_angle_ext?"":" not"); + printf("AL_EXT_STEREO_ANGLES %sfound\n", has_angle_ext?"":"not "); /* Check for user-preferred HRTF */ if(strcmp(argv[0], "-hrtf") == 0) @@ -252,7 +257,7 @@ int main(int argc, char **argv) alGenSources(1, &source); alSourcei(source, AL_SOURCE_RELATIVE, AL_TRUE); alSource3f(source, AL_POSITION, 0.0f, 0.0f, -1.0f); - alSourcei(source, AL_BUFFER, buffer); + alSourcei(source, AL_BUFFER, (ALint)buffer); assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source"); /* Play the sound until it finishes. */ @@ -261,6 +266,8 @@ int main(int argc, char **argv) do { al_nssleep(10000000); + alcSuspendContext(context); + /* Rotate the source around the listener by about 1/4 cycle per second, * and keep it within -pi...+pi. */ @@ -279,6 +286,7 @@ int main(int argc, char **argv) ALfloat angles[2] = { (ALfloat)(M_PI/6.0 - angle), (ALfloat)(-M_PI/6.0 - angle) }; alSourcefv(source, AL_STEREO_ANGLES, angles); } + alcProcessContext(context); alGetSourcei(source, AL_SOURCE_STATE, &state); } while(alGetError() == AL_NO_ERROR && state == AL_PLAYING); diff --git a/examples/allatency.c b/examples/allatency.c index d561373f..a61fb820 100644 --- a/examples/allatency.c +++ b/examples/allatency.c @@ -27,10 +27,11 @@ #include <stdio.h> #include <assert.h> -#include <SDL_sound.h> +#include "SDL_sound.h" +#include "SDL_audio.h" +#include "SDL_stdinc.h" #include "AL/al.h" -#include "AL/alc.h" #include "AL/alext.h" #include "common/alhelpers.h" @@ -114,7 +115,7 @@ static ALuint LoadSound(const char *filename) * close the file. */ buffer = 0; alGenBuffers(1, &buffer); - alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate); + alBufferData(buffer, format, sample->buffer, (ALsizei)slen, (ALsizei)sample->actual.rate); Sound_FreeSample(sample); /* Check if an error occured, and clean up if so. */ @@ -157,19 +158,19 @@ int main(int argc, char **argv) } /* Define a macro to help load the function pointers. */ -#define LOAD_PROC(x) ((x) = alGetProcAddress(#x)) - LOAD_PROC(alSourcedSOFT); - LOAD_PROC(alSource3dSOFT); - LOAD_PROC(alSourcedvSOFT); - LOAD_PROC(alGetSourcedSOFT); - LOAD_PROC(alGetSource3dSOFT); - LOAD_PROC(alGetSourcedvSOFT); - LOAD_PROC(alSourcei64SOFT); - LOAD_PROC(alSource3i64SOFT); - LOAD_PROC(alSourcei64vSOFT); - LOAD_PROC(alGetSourcei64SOFT); - LOAD_PROC(alGetSource3i64SOFT); - LOAD_PROC(alGetSourcei64vSOFT); +#define LOAD_PROC(T, x) ((x) = (T)alGetProcAddress(#x)) + LOAD_PROC(LPALSOURCEDSOFT, alSourcedSOFT); + LOAD_PROC(LPALSOURCE3DSOFT, alSource3dSOFT); + LOAD_PROC(LPALSOURCEDVSOFT, alSourcedvSOFT); + LOAD_PROC(LPALGETSOURCEDSOFT, alGetSourcedSOFT); + LOAD_PROC(LPALGETSOURCE3DSOFT, alGetSource3dSOFT); + LOAD_PROC(LPALGETSOURCEDVSOFT, alGetSourcedvSOFT); + LOAD_PROC(LPALSOURCEI64SOFT, alSourcei64SOFT); + LOAD_PROC(LPALSOURCE3I64SOFT, alSource3i64SOFT); + LOAD_PROC(LPALSOURCEI64VSOFT, alSourcei64vSOFT); + LOAD_PROC(LPALGETSOURCEI64SOFT, alGetSourcei64SOFT); + LOAD_PROC(LPALGETSOURCE3I64SOFT, alGetSource3i64SOFT); + LOAD_PROC(LPALGETSOURCEI64VSOFT, alGetSourcei64vSOFT); #undef LOAD_PROC /* Initialize SDL_sound. */ @@ -187,7 +188,7 @@ int main(int argc, char **argv) /* Create the source to play the sound with. */ source = 0; alGenSources(1, &source); - alSourcei(source, AL_BUFFER, buffer); + alSourcei(source, AL_BUFFER, (ALint)buffer); assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source"); /* Play the sound until it finishes. 
*/ diff --git a/examples/alloopback.c b/examples/alloopback.c index 16553f9b..844efa74 100644 --- a/examples/alloopback.c +++ b/examples/alloopback.c @@ -26,11 +26,14 @@ * output handling. */ -#include <stdio.h> #include <assert.h> #include <math.h> +#include <stdio.h> -#include <SDL.h> +#include "SDL.h" +#include "SDL_audio.h" +#include "SDL_error.h" +#include "SDL_stdinc.h" #include "AL/al.h" #include "AL/alc.h" @@ -146,10 +149,10 @@ int main(int argc, char *argv[]) } /* Define a macro to help load the function pointers. */ -#define LOAD_PROC(x) ((x) = alcGetProcAddress(NULL, #x)) - LOAD_PROC(alcLoopbackOpenDeviceSOFT); - LOAD_PROC(alcIsRenderFormatSupportedSOFT); - LOAD_PROC(alcRenderSamplesSOFT); +#define LOAD_PROC(T, x) ((x) = (T)alcGetProcAddress(NULL, #x)) + LOAD_PROC(LPALCLOOPBACKOPENDEVICESOFT, alcLoopbackOpenDeviceSOFT); + LOAD_PROC(LPALCISRENDERFORMATSUPPORTEDSOFT, alcIsRenderFormatSupportedSOFT); + LOAD_PROC(LPALCRENDERSAMPLESSOFT, alcRenderSamplesSOFT); #undef LOAD_PROC if(SDL_Init(SDL_INIT_AUDIO) == -1) @@ -246,7 +249,7 @@ int main(int argc, char *argv[]) /* Create the source to play the sound with. */ source = 0; alGenSources(1, &source); - alSourcei(source, AL_BUFFER, buffer); + alSourcei(source, AL_BUFFER, (ALint)buffer); assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source"); /* Play the sound until it finishes. */ diff --git a/examples/almultireverb.c b/examples/almultireverb.c index a2587585..a90b3368 100644 --- a/examples/almultireverb.c +++ b/examples/almultireverb.c @@ -29,15 +29,18 @@ * listener. */ -#include <stdio.h> #include <assert.h> #include <math.h> +#include <stdio.h> +#include <string.h> -#include <SDL_sound.h> +#include "SDL_sound.h" +#include "SDL_audio.h" +#include "SDL_stdinc.h" #include "AL/al.h" #include "AL/alc.h" -#include "AL/alext.h" +#include "AL/efx.h" #include "AL/efx-presets.h" #include "common/alhelpers.h" @@ -208,7 +211,7 @@ static ALuint LoadSound(const char *filename) * close the file. */ buffer = 0; alGenBuffers(1, &buffer); - alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate); + alBufferData(buffer, format, sample->buffer, (ALsizei)slen, (ALsizei)sample->actual.rate); Sound_FreeSample(sample); /* Check if an error occured, and clean up if so. */ @@ -440,8 +443,8 @@ static void UpdateListenerAndEffects(float timediff, const ALuint slots[2], cons } /* Finally, update the effect slots with the updated effect parameters. */ - alAuxiliaryEffectSloti(slots[0], AL_EFFECTSLOT_EFFECT, effects[0]); - alAuxiliaryEffectSloti(slots[1], AL_EFFECTSLOT_EFFECT, effects[1]); + alAuxiliaryEffectSloti(slots[0], AL_EFFECTSLOT_EFFECT, (ALint)effects[0]); + alAuxiliaryEffectSloti(slots[1], AL_EFFECTSLOT_EFFECT, (ALint)effects[1]); } @@ -452,7 +455,6 @@ int main(int argc, char **argv) EFX_REVERB_PRESET_CARPETEDHALLWAY, EFX_REVERB_PRESET_BATHROOM }; - struct timespec basetime; ALCdevice *device = NULL; ALCcontext *context = NULL; ALuint effects[2] = { 0, 0 }; @@ -463,6 +465,7 @@ int main(int argc, char **argv) ALCint num_sends = 0; ALenum state = AL_INITIAL; ALfloat direct_gain = 1.0f; + int basetime = 0; int loops = 0; /* Print out usage if no arguments were specified */ @@ -520,42 +523,42 @@ int main(int argc, char **argv) } /* Define a macro to help load the function pointers. 
*/ -#define LOAD_PROC(x) ((x) = alGetProcAddress(#x)) - LOAD_PROC(alGenFilters); - LOAD_PROC(alDeleteFilters); - LOAD_PROC(alIsFilter); - LOAD_PROC(alFilteri); - LOAD_PROC(alFilteriv); - LOAD_PROC(alFilterf); - LOAD_PROC(alFilterfv); - LOAD_PROC(alGetFilteri); - LOAD_PROC(alGetFilteriv); - LOAD_PROC(alGetFilterf); - LOAD_PROC(alGetFilterfv); - - LOAD_PROC(alGenEffects); - LOAD_PROC(alDeleteEffects); - LOAD_PROC(alIsEffect); - LOAD_PROC(alEffecti); - LOAD_PROC(alEffectiv); - LOAD_PROC(alEffectf); - LOAD_PROC(alEffectfv); - LOAD_PROC(alGetEffecti); - LOAD_PROC(alGetEffectiv); - LOAD_PROC(alGetEffectf); - LOAD_PROC(alGetEffectfv); - - LOAD_PROC(alGenAuxiliaryEffectSlots); - LOAD_PROC(alDeleteAuxiliaryEffectSlots); - LOAD_PROC(alIsAuxiliaryEffectSlot); - LOAD_PROC(alAuxiliaryEffectSloti); - LOAD_PROC(alAuxiliaryEffectSlotiv); - LOAD_PROC(alAuxiliaryEffectSlotf); - LOAD_PROC(alAuxiliaryEffectSlotfv); - LOAD_PROC(alGetAuxiliaryEffectSloti); - LOAD_PROC(alGetAuxiliaryEffectSlotiv); - LOAD_PROC(alGetAuxiliaryEffectSlotf); - LOAD_PROC(alGetAuxiliaryEffectSlotfv); +#define LOAD_PROC(T, x) ((x) = (T)alGetProcAddress(#x)) + LOAD_PROC(LPALGENFILTERS, alGenFilters); + LOAD_PROC(LPALDELETEFILTERS, alDeleteFilters); + LOAD_PROC(LPALISFILTER, alIsFilter); + LOAD_PROC(LPALFILTERI, alFilteri); + LOAD_PROC(LPALFILTERIV, alFilteriv); + LOAD_PROC(LPALFILTERF, alFilterf); + LOAD_PROC(LPALFILTERFV, alFilterfv); + LOAD_PROC(LPALGETFILTERI, alGetFilteri); + LOAD_PROC(LPALGETFILTERIV, alGetFilteriv); + LOAD_PROC(LPALGETFILTERF, alGetFilterf); + LOAD_PROC(LPALGETFILTERFV, alGetFilterfv); + + LOAD_PROC(LPALGENEFFECTS, alGenEffects); + LOAD_PROC(LPALDELETEEFFECTS, alDeleteEffects); + LOAD_PROC(LPALISEFFECT, alIsEffect); + LOAD_PROC(LPALEFFECTI, alEffecti); + LOAD_PROC(LPALEFFECTIV, alEffectiv); + LOAD_PROC(LPALEFFECTF, alEffectf); + LOAD_PROC(LPALEFFECTFV, alEffectfv); + LOAD_PROC(LPALGETEFFECTI, alGetEffecti); + LOAD_PROC(LPALGETEFFECTIV, alGetEffectiv); + LOAD_PROC(LPALGETEFFECTF, alGetEffectf); + LOAD_PROC(LPALGETEFFECTFV, alGetEffectfv); + + LOAD_PROC(LPALGENAUXILIARYEFFECTSLOTS, alGenAuxiliaryEffectSlots); + LOAD_PROC(LPALDELETEAUXILIARYEFFECTSLOTS, alDeleteAuxiliaryEffectSlots); + LOAD_PROC(LPALISAUXILIARYEFFECTSLOT, alIsAuxiliaryEffectSlot); + LOAD_PROC(LPALAUXILIARYEFFECTSLOTI, alAuxiliaryEffectSloti); + LOAD_PROC(LPALAUXILIARYEFFECTSLOTIV, alAuxiliaryEffectSlotiv); + LOAD_PROC(LPALAUXILIARYEFFECTSLOTF, alAuxiliaryEffectSlotf); + LOAD_PROC(LPALAUXILIARYEFFECTSLOTFV, alAuxiliaryEffectSlotfv); + LOAD_PROC(LPALGETAUXILIARYEFFECTSLOTI, alGetAuxiliaryEffectSloti); + LOAD_PROC(LPALGETAUXILIARYEFFECTSLOTIV, alGetAuxiliaryEffectSlotiv); + LOAD_PROC(LPALGETAUXILIARYEFFECTSLOTF, alGetAuxiliaryEffectSlotf); + LOAD_PROC(LPALGETAUXILIARYEFFECTSLOTFV, alGetAuxiliaryEffectSlotfv); #undef LOAD_PROC /* Initialize SDL_sound. */ @@ -595,8 +598,8 @@ int main(int argc, char **argv) * effect properties. Modifying or deleting the effect object afterward * won't directly affect the effect slot until they're reapplied like this. 
*/ - alAuxiliaryEffectSloti(slots[0], AL_EFFECTSLOT_EFFECT, effects[0]); - alAuxiliaryEffectSloti(slots[1], AL_EFFECTSLOT_EFFECT, effects[1]); + alAuxiliaryEffectSloti(slots[0], AL_EFFECTSLOT_EFFECT, (ALint)effects[0]); + alAuxiliaryEffectSloti(slots[1], AL_EFFECTSLOT_EFFECT, (ALint)effects[1]); assert(alGetError()==AL_NO_ERROR && "Failed to set effect slot"); /* For the purposes of this example, prepare a filter that optionally @@ -618,8 +621,8 @@ int main(int argc, char **argv) alGenSources(1, &source); alSourcei(source, AL_LOOPING, AL_TRUE); alSource3f(source, AL_POSITION, -5.0f, 0.0f, -2.0f); - alSourcei(source, AL_DIRECT_FILTER, direct_filter); - alSourcei(source, AL_BUFFER, buffer); + alSourcei(source, AL_DIRECT_FILTER, (ALint)direct_filter); + alSourcei(source, AL_BUFFER, (ALint)buffer); /* Connect the source to the effect slots. Here, we connect source send 0 * to Zone 0's slot, and send 1 to Zone 1's slot. Filters can be specified @@ -628,19 +631,19 @@ int main(int argc, char **argv) * can only see a zone through a window or thin wall may be attenuated for * that zone. */ - alSource3i(source, AL_AUXILIARY_SEND_FILTER, slots[0], 0, AL_FILTER_NULL); - alSource3i(source, AL_AUXILIARY_SEND_FILTER, slots[1], 1, AL_FILTER_NULL); + alSource3i(source, AL_AUXILIARY_SEND_FILTER, (ALint)slots[0], 0, AL_FILTER_NULL); + alSource3i(source, AL_AUXILIARY_SEND_FILTER, (ALint)slots[1], 1, AL_FILTER_NULL); assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source"); /* Get the current time as the base for timing in the main loop. */ - altimespec_get(&basetime, AL_TIME_UTC); + basetime = altime_get(); loops = 0; printf("Transition %d of %d...\n", loops+1, MaxTransitions); /* Play the sound for a while. */ alSourcePlay(source); do { - struct timespec curtime; + int curtime; ALfloat timediff; /* Start a batch update, to ensure all changes apply simultaneously. */ @@ -649,14 +652,13 @@ int main(int argc, char **argv) /* Get the current time to track the amount of time that passed. * Convert the difference to seconds. */ - altimespec_get(&curtime, AL_TIME_UTC); - timediff = (ALfloat)(curtime.tv_sec - basetime.tv_sec); - timediff += (ALfloat)(curtime.tv_nsec - basetime.tv_nsec) / 1000000000.0f; + curtime = altime_get(); + timediff = (float)(curtime - basetime) / 1000.0f; /* Avoid negative time deltas, in case of non-monotonic clocks. */ if(timediff < 0.0f) timediff = 0.0f; - else while(timediff >= 4.0f*((loops&1)+1)) + else while(timediff >= 4.0f*(float)((loops&1)+1)) { /* For this example, each transition occurs over 4 seconds, and * there's 2 transitions per cycle. @@ -669,7 +671,7 @@ int main(int argc, char **argv) * time to start a new cycle. */ timediff -= 8.0f; - basetime.tv_sec += 8; + basetime += 8000; } } diff --git a/examples/alplay.c b/examples/alplay.c index 81cb56d5..09ad96b4 100644 --- a/examples/alplay.c +++ b/examples/alplay.c @@ -27,10 +27,11 @@ #include <stdio.h> #include <assert.h> -#include <SDL_sound.h> +#include "SDL_sound.h" +#include "SDL_audio.h" +#include "SDL_stdinc.h" #include "AL/al.h" -#include "AL/alc.h" #include "common/alhelpers.h" @@ -100,7 +101,7 @@ static ALuint LoadSound(const char *filename) * close the file. */ buffer = 0; alGenBuffers(1, &buffer); - alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate); + alBufferData(buffer, format, sample->buffer, (ALsizei)slen, (ALsizei)sample->actual.rate); Sound_FreeSample(sample); /* Check if an error occured, and clean up if so. 
*/ @@ -150,7 +151,7 @@ int main(int argc, char **argv) /* Create the source to play the sound with. */ source = 0; alGenSources(1, &source); - alSourcei(source, AL_BUFFER, buffer); + alSourcei(source, AL_BUFFER, (ALint)buffer); assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source"); /* Play the sound until it finishes. */ diff --git a/examples/alrecord.c b/examples/alrecord.c index 43b26d35..a66e5471 100644 --- a/examples/alrecord.c +++ b/examples/alrecord.c @@ -27,7 +27,7 @@ #include <string.h> #include <stdlib.h> #include <stdio.h> -#include <math.h> +#include <errno.h> #include "AL/al.h" #include "AL/alc.h" @@ -54,13 +54,14 @@ static float msvc_strtof(const char *str, char **end) static void fwrite16le(ALushort val, FILE *f) { - ALubyte data[2] = { val&0xff, (val>>8)&0xff }; + ALubyte data[2] = { (ALubyte)(val&0xff), (ALubyte)((val>>8)&0xff) }; fwrite(data, 1, 2, f); } static void fwrite32le(ALuint val, FILE *f) { - ALubyte data[4] = { val&0xff, (val>>8)&0xff, (val>>16)&0xff, (val>>24)&0xff }; + ALubyte data[4] = { (ALubyte)(val&0xff), (ALubyte)((val>>8)&0xff), (ALubyte)((val>>16)&0xff), + (ALubyte)((val>>24)&0xff) }; fwrite(data, 1, 4, f); } @@ -73,9 +74,9 @@ typedef struct Recorder { ALuint mDataSize; float mRecTime; - int mChannels; - int mBits; - int mSampleRate; + ALuint mChannels; + ALuint mBits; + ALuint mSampleRate; ALuint mFrameSize; ALbyte *mBuffer; ALsizei mBufferSize; @@ -133,13 +134,13 @@ int main(int argc, char **argv) break; else if(strcmp(argv[0], "--channels") == 0 || strcmp(argv[0], "-c") == 0) { - if(!(argc > 1)) + if(argc < 2) { fprintf(stderr, "Missing argument for option: %s\n", argv[0]); return 1; } - recorder.mChannels = strtol(argv[1], &end, 0); + recorder.mChannels = (ALuint)strtoul(argv[1], &end, 0); if((recorder.mChannels != 1 && recorder.mChannels != 2) || (end && *end != '\0')) { fprintf(stderr, "Invalid channels: %s\n", argv[1]); @@ -150,13 +151,13 @@ int main(int argc, char **argv) } else if(strcmp(argv[0], "--bits") == 0 || strcmp(argv[0], "-b") == 0) { - if(!(argc > 1)) + if(argc < 2) { fprintf(stderr, "Missing argument for option: %s\n", argv[0]); return 1; } - recorder.mBits = strtol(argv[1], &end, 0); + recorder.mBits = (ALuint)strtoul(argv[1], &end, 0); if((recorder.mBits != 8 && recorder.mBits != 16 && recorder.mBits != 32) || (end && *end != '\0')) { @@ -168,13 +169,13 @@ int main(int argc, char **argv) } else if(strcmp(argv[0], "--rate") == 0 || strcmp(argv[0], "-r") == 0) { - if(!(argc > 1)) + if(argc < 2) { fprintf(stderr, "Missing argument for option: %s\n", argv[0]); return 1; } - recorder.mSampleRate = strtol(argv[1], &end, 0); + recorder.mSampleRate = (ALuint)strtoul(argv[1], &end, 0); if(!(recorder.mSampleRate >= 8000 && recorder.mSampleRate <= 96000) || (end && *end != '\0')) { fprintf(stderr, "Invalid sample rate: %s\n", argv[1]); @@ -185,7 +186,7 @@ int main(int argc, char **argv) } else if(strcmp(argv[0], "--time") == 0 || strcmp(argv[0], "-t") == 0) { - if(!(argc > 1)) + if(argc < 2) { fprintf(stderr, "Missing argument for option: %s\n", argv[0]); return 1; @@ -202,7 +203,7 @@ int main(int argc, char **argv) } else if(strcmp(argv[0], "--outfile") == 0 || strcmp(argv[0], "-o") == 0) { - if(!(argc > 1)) + if(argc < 2) { fprintf(stderr, "Missing argument for option: %s\n", argv[0]); return 1; @@ -285,15 +286,15 @@ int main(int argc, char **argv) // 16-bit val, format type id (1 = integer PCM, 3 = float PCM) fwrite16le((recorder.mBits == 32) ? 
0x0003 : 0x0001, recorder.mFile); // 16-bit val, channel count - fwrite16le(recorder.mChannels, recorder.mFile); + fwrite16le((ALushort)recorder.mChannels, recorder.mFile); // 32-bit val, frequency fwrite32le(recorder.mSampleRate, recorder.mFile); // 32-bit val, bytes per second fwrite32le(recorder.mSampleRate * recorder.mFrameSize, recorder.mFile); // 16-bit val, frame size - fwrite16le(recorder.mFrameSize, recorder.mFile); + fwrite16le((ALushort)recorder.mFrameSize, recorder.mFile); // 16-bit val, bits per sample - fwrite16le(recorder.mBits, recorder.mFile); + fwrite16le((ALushort)recorder.mBits, recorder.mFile); // 16-bit val, extra byte count fwrite16le(0, recorder.mFile); @@ -316,6 +317,7 @@ int main(int argc, char **argv) recorder.mRecTime, (recorder.mRecTime != 1.0f) ? "s" : "" ); + err = ALC_NO_ERROR; alcCaptureStart(recorder.mDevice); while((double)recorder.mDataSize/(double)recorder.mSampleRate < recorder.mRecTime && (err=alcGetError(recorder.mDevice)) == ALC_NO_ERROR && !ferror(recorder.mFile)) @@ -330,7 +332,7 @@ int main(int argc, char **argv) } if(count > recorder.mBufferSize) { - ALbyte *data = calloc(recorder.mFrameSize, count); + ALbyte *data = calloc(recorder.mFrameSize, (ALuint)count); free(recorder.mBuffer); recorder.mBuffer = data; recorder.mBufferSize = count; @@ -364,7 +366,7 @@ int main(int argc, char **argv) } } #endif - recorder.mDataSize += (ALuint)fwrite(recorder.mBuffer, recorder.mFrameSize, count, + recorder.mDataSize += (ALuint)fwrite(recorder.mBuffer, recorder.mFrameSize, (ALuint)count, recorder.mFile); } alcCaptureStop(recorder.mDevice); @@ -384,7 +386,7 @@ int main(int argc, char **argv) { fwrite32le(recorder.mDataSize*recorder.mFrameSize, recorder.mFile); if(fseek(recorder.mFile, 4, SEEK_SET) == 0) - fwrite32le(total_size - 8, recorder.mFile); + fwrite32le((ALuint)total_size - 8, recorder.mFile); } fclose(recorder.mFile); diff --git a/examples/alreverb.c b/examples/alreverb.c index e6c9e606..d789dffe 100644 --- a/examples/alreverb.c +++ b/examples/alreverb.c @@ -27,11 +27,13 @@ #include <stdio.h> #include <assert.h> -#include <SDL_sound.h> +#include "SDL_sound.h" +#include "SDL_audio.h" +#include "SDL_stdinc.h" #include "AL/al.h" #include "AL/alc.h" -#include "AL/alext.h" +#include "AL/efx.h" #include "AL/efx-presets.h" #include "common/alhelpers.h" @@ -207,7 +209,7 @@ static ALuint LoadSound(const char *filename) * close the file. */ buffer = 0; alGenBuffers(1, &buffer); - alBufferData(buffer, format, sample->buffer, slen, sample->actual.rate); + alBufferData(buffer, format, sample->buffer, (ALsizei)slen, (ALsizei)sample->actual.rate); Sound_FreeSample(sample); /* Check if an error occured, and clean up if so. */ @@ -250,30 +252,30 @@ int main(int argc, char **argv) } /* Define a macro to help load the function pointers. 
*/ -#define LOAD_PROC(x) ((x) = alGetProcAddress(#x)) - LOAD_PROC(alGenEffects); - LOAD_PROC(alDeleteEffects); - LOAD_PROC(alIsEffect); - LOAD_PROC(alEffecti); - LOAD_PROC(alEffectiv); - LOAD_PROC(alEffectf); - LOAD_PROC(alEffectfv); - LOAD_PROC(alGetEffecti); - LOAD_PROC(alGetEffectiv); - LOAD_PROC(alGetEffectf); - LOAD_PROC(alGetEffectfv); - - LOAD_PROC(alGenAuxiliaryEffectSlots); - LOAD_PROC(alDeleteAuxiliaryEffectSlots); - LOAD_PROC(alIsAuxiliaryEffectSlot); - LOAD_PROC(alAuxiliaryEffectSloti); - LOAD_PROC(alAuxiliaryEffectSlotiv); - LOAD_PROC(alAuxiliaryEffectSlotf); - LOAD_PROC(alAuxiliaryEffectSlotfv); - LOAD_PROC(alGetAuxiliaryEffectSloti); - LOAD_PROC(alGetAuxiliaryEffectSlotiv); - LOAD_PROC(alGetAuxiliaryEffectSlotf); - LOAD_PROC(alGetAuxiliaryEffectSlotfv); +#define LOAD_PROC(T, x) ((x) = (T)alGetProcAddress(#x)) + LOAD_PROC(LPALGENEFFECTS, alGenEffects); + LOAD_PROC(LPALDELETEEFFECTS, alDeleteEffects); + LOAD_PROC(LPALISEFFECT, alIsEffect); + LOAD_PROC(LPALEFFECTI, alEffecti); + LOAD_PROC(LPALEFFECTIV, alEffectiv); + LOAD_PROC(LPALEFFECTF, alEffectf); + LOAD_PROC(LPALEFFECTFV, alEffectfv); + LOAD_PROC(LPALGETEFFECTI, alGetEffecti); + LOAD_PROC(LPALGETEFFECTIV, alGetEffectiv); + LOAD_PROC(LPALGETEFFECTF, alGetEffectf); + LOAD_PROC(LPALGETEFFECTFV, alGetEffectfv); + + LOAD_PROC(LPALGENAUXILIARYEFFECTSLOTS, alGenAuxiliaryEffectSlots); + LOAD_PROC(LPALDELETEAUXILIARYEFFECTSLOTS, alDeleteAuxiliaryEffectSlots); + LOAD_PROC(LPALISAUXILIARYEFFECTSLOT, alIsAuxiliaryEffectSlot); + LOAD_PROC(LPALAUXILIARYEFFECTSLOTI, alAuxiliaryEffectSloti); + LOAD_PROC(LPALAUXILIARYEFFECTSLOTIV, alAuxiliaryEffectSlotiv); + LOAD_PROC(LPALAUXILIARYEFFECTSLOTF, alAuxiliaryEffectSlotf); + LOAD_PROC(LPALAUXILIARYEFFECTSLOTFV, alAuxiliaryEffectSlotfv); + LOAD_PROC(LPALGETAUXILIARYEFFECTSLOTI, alGetAuxiliaryEffectSloti); + LOAD_PROC(LPALGETAUXILIARYEFFECTSLOTIV, alGetAuxiliaryEffectSlotiv); + LOAD_PROC(LPALGETAUXILIARYEFFECTSLOTF, alGetAuxiliaryEffectSlotf); + LOAD_PROC(LPALGETAUXILIARYEFFECTSLOTFV, alGetAuxiliaryEffectSlotfv); #undef LOAD_PROC /* Initialize SDL_sound. */ @@ -307,18 +309,18 @@ int main(int argc, char **argv) * effectively copies the effect properties. You can modify or delete the * effect object afterward without affecting the effect slot. */ - alAuxiliaryEffectSloti(slot, AL_EFFECTSLOT_EFFECT, effect); + alAuxiliaryEffectSloti(slot, AL_EFFECTSLOT_EFFECT, (ALint)effect); assert(alGetError()==AL_NO_ERROR && "Failed to set effect slot"); /* Create the source to play the sound with. */ source = 0; alGenSources(1, &source); - alSourcei(source, AL_BUFFER, buffer); + alSourcei(source, AL_BUFFER, (ALint)buffer); /* Connect the source to the effect slot. This tells the source to use the * effect slot 'slot', on send #0 with the AL_FILTER_NULL filter object. */ - alSource3i(source, AL_AUXILIARY_SEND_FILTER, slot, 0, AL_FILTER_NULL); + alSource3i(source, AL_AUXILIARY_SEND_FILTER, (ALint)slot, 0, AL_FILTER_NULL); assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source"); /* Play the sound until it finishes. 
*/ diff --git a/examples/alstream.c b/examples/alstream.c index 68115e8d..56505ddb 100644 --- a/examples/alstream.c +++ b/examples/alstream.c @@ -27,14 +27,13 @@ #include <string.h> #include <stdlib.h> #include <stdio.h> -#include <signal.h> #include <assert.h> -#include <SDL_sound.h> +#include "SDL_sound.h" +#include "SDL_audio.h" +#include "SDL_stdinc.h" #include "AL/al.h" -#include "AL/alc.h" -#include "AL/alext.h" #include "common/alhelpers.h" @@ -161,10 +160,10 @@ static int OpenPlayerFile(StreamPlayer *player, const char *filename) fprintf(stderr, "Unsupported channel count: %d\n", player->sample->actual.channels); goto error; } - player->srate = player->sample->actual.rate; + player->srate = (ALsizei)player->sample->actual.rate; frame_size = player->sample->actual.channels * - SDL_AUDIO_BITSIZE(player->sample->actual.format) / 8; + SDL_AUDIO_BITSIZE(player->sample->actual.format) / 8; /* Set the buffer size, given the desired millisecond length. */ Sound_SetBufferSize(player->sample, (Uint32)((Uint64)player->srate*BUFFER_TIME_MS/1000) * @@ -192,7 +191,7 @@ static void ClosePlayerFile(StreamPlayer *player) /* Prebuffers some audio from the file, and starts playing the source */ static int StartPlayer(StreamPlayer *player) { - size_t i; + ALsizei i; /* Rewind the source position and clear the buffer queue */ alSourceRewind(player->source); @@ -205,8 +204,8 @@ static int StartPlayer(StreamPlayer *player) Uint32 slen = Sound_Decode(player->sample); if(slen == 0) break; - alBufferData(player->buffers[i], player->format, - player->sample->buffer, slen, player->srate); + alBufferData(player->buffers[i], player->format, player->sample->buffer, (ALsizei)slen, + player->srate); } if(alGetError() != AL_NO_ERROR) { @@ -256,8 +255,8 @@ static int UpdatePlayer(StreamPlayer *player) slen = Sound_Decode(player->sample); if(slen > 0) { - alBufferData(bufid, player->format, player->sample->buffer, slen, - player->srate); + alBufferData(bufid, player->format, player->sample->buffer, (ALsizei)slen, + player->srate); alSourceQueueBuffers(player->source, 1, &bufid); } if(alGetError() != AL_NO_ERROR) @@ -324,8 +323,7 @@ int main(int argc, char **argv) else namepart = argv[i]; - printf("Playing: %s (%s, %dhz)\n", namepart, FormatName(player->format), - player->srate); + printf("Playing: %s (%s, %dhz)\n", namepart, FormatName(player->format), player->srate); fflush(stdout); if(!StartPlayer(player)) diff --git a/examples/altonegen.c b/examples/altonegen.c index 628e695d..553bc996 100644 --- a/examples/altonegen.c +++ b/examples/altonegen.c @@ -82,7 +82,10 @@ static void ApplySin(ALfloat *data, ALdouble g, ALuint srate, ALuint freq) ALdouble smps_per_cycle = (ALdouble)srate / freq; ALuint i; for(i = 0;i < srate;i++) - data[i] += (ALfloat)(sin(i/smps_per_cycle * 2.0*M_PI) * g); + { + ALdouble ival; + data[i] += (ALfloat)(sin(modf(i/smps_per_cycle, &ival) * 2.0*M_PI) * g); + } } /* Generates waveforms using additive synthesis. Each waveform is constructed @@ -91,13 +94,13 @@ static void ApplySin(ALfloat *data, ALdouble g, ALuint srate, ALuint freq) static ALuint CreateWave(enum WaveType type, ALuint freq, ALuint srate) { ALuint seed = 22222; - ALint data_size; + ALuint data_size; ALfloat *data; ALuint buffer; ALenum err; ALuint i; - data_size = srate * sizeof(ALfloat); + data_size = (ALuint)(srate * sizeof(ALfloat)); data = calloc(1, data_size); switch(type) { @@ -142,7 +145,7 @@ static ALuint CreateWave(enum WaveType type, ALuint freq, ALuint srate) /* Buffer the audio data into a new buffer object. 
*/ buffer = 0; alGenBuffers(1, &buffer); - alBufferData(buffer, AL_FORMAT_MONO_FLOAT32, data, data_size, srate); + alBufferData(buffer, AL_FORMAT_MONO_FLOAT32, data, (ALsizei)data_size, (ALsizei)srate); free(data); /* Check if an error occured, and clean up if so. */ @@ -257,7 +260,7 @@ int main(int argc, char *argv[]) srate = dev_rate; /* Load the sound into a buffer. */ - buffer = CreateWave(wavetype, tone_freq, srate); + buffer = CreateWave(wavetype, (ALuint)tone_freq, (ALuint)srate); if(!buffer) { CloseAL(); @@ -271,7 +274,7 @@ int main(int argc, char *argv[]) /* Create the source to play the sound with. */ source = 0; alGenSources(1, &source); - alSourcei(source, AL_BUFFER, buffer); + alSourcei(source, AL_BUFFER, (ALint)buffer); assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source"); /* Play the sound for a while. */ diff --git a/examples/common/alhelpers.c b/examples/common/alhelpers.c index fab039e9..0febef43 100644 --- a/examples/common/alhelpers.c +++ b/examples/common/alhelpers.c @@ -28,14 +28,14 @@ * finding an appropriate buffer format, and getting readable strings for * channel configs and sample types. */ +#include "alhelpers.h" + #include <stdio.h> +#include <errno.h> #include <string.h> #include "AL/al.h" #include "AL/alc.h" -#include "AL/alext.h" - -#include "alhelpers.h" /* InitAL opens a device and sets up a context using default attributes, making @@ -114,3 +114,71 @@ const char *FormatName(ALenum format) } return "Unknown Format"; } + + +#ifdef _WIN32 + +#define WIN32_LEAN_AND_MEAN +#include <windows.h> +#include <mmsystem.h> + +int altime_get(void) +{ + static int start_time = 0; + int cur_time; + union { + FILETIME ftime; + ULARGE_INTEGER ulint; + } systime; + GetSystemTimeAsFileTime(&systime.ftime); + /* FILETIME is in 100-nanosecond units, or 1/10th of a microsecond. */ + cur_time = (int)(systime.ulint.QuadPart/10000); + + if(!start_time) + start_time = cur_time; + return cur_time - start_time; +} + +void al_nssleep(unsigned long nsec) +{ + Sleep(nsec / 1000000); +} + +#else + +#include <sys/time.h> +#include <unistd.h> +#include <time.h> + +int altime_get(void) +{ + static int start_time = 0u; + int cur_time; + +#if _POSIX_TIMERS > 0 + struct timespec ts; + int ret = clock_gettime(CLOCK_REALTIME, &ts); + if(ret != 0) return 0; + cur_time = (int)(ts.tv_sec*1000 + ts.tv_nsec/1000000); +#else /* _POSIX_TIMERS > 0 */ + struct timeval tv; + int ret = gettimeofday(&tv, NULL); + if(ret != 0) return 0; + cur_time = (int)(tv.tv_sec*1000 + tv.tv_usec/1000); +#endif + + if(!start_time) + start_time = cur_time; + return cur_time - start_time; +} + +void al_nssleep(unsigned long nsec) +{ + struct timespec ts, rem; + ts.tv_sec = (time_t)(nsec / 1000000000ul); + ts.tv_nsec = (long)(nsec % 1000000000ul); + while(nanosleep(&ts, &rem) == -1 && errno == EINTR) + ts = rem; +} + +#endif diff --git a/examples/common/alhelpers.h b/examples/common/alhelpers.h index 41a7ce58..3752d218 100644 --- a/examples/common/alhelpers.h +++ b/examples/common/alhelpers.h @@ -1,15 +1,11 @@ #ifndef ALHELPERS_H #define ALHELPERS_H -#include "AL/alc.h" #include "AL/al.h" -#include "AL/alext.h" - -#include "threads.h" #ifdef __cplusplus extern "C" { -#endif /* __cplusplus */ +#endif /* Some helper functions to get the name from the format enums. */ const char *FormatName(ALenum type); @@ -18,8 +14,12 @@ const char *FormatName(ALenum type); int InitAL(char ***argv, int *argc); void CloseAL(void); +/* Cross-platform timeget and sleep functions. 
*/ +int altime_get(void); +void al_nssleep(unsigned long nsec); + #ifdef __cplusplus -} -#endif /* __cplusplus */ +} // extern "C" +#endif #endif /* ALHELPERS_H */
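altime_get() and al_nssleep() are the new cross-platform helpers declared at the end of alhelpers.h above: the former reports milliseconds elapsed since its first call, the latter sleeps for a nanosecond count. A minimal usage sketch follows, assuming only that the examples' common/alhelpers.h is on the include path; the 2-second duration and 10ms interval are illustrative and not taken from the commit.

#include <stdio.h>
#include "common/alhelpers.h"

int main(void)
{
    int start = altime_get();   /* milliseconds, measured from the first call */

    while(altime_get() - start < 2000)
    {
        /* ...poll playback state, update source positions, etc... */
        al_nssleep(10000000);   /* argument is in nanoseconds; this is ~10ms */
    }

    printf("elapsed: %dms\n", altime_get() - start);
    return 0;
}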
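The LOAD_PROC changes repeated across alhrtf.c, allatency.c, alloopback.c, almultireverb.c, and alreverb.c all follow the same idea: alGetProcAddress() returns an untyped pointer, so each extension entry point is cast to its LPAL*/LPALC* typedef at load time. The standalone sketch below illustrates that pattern for two EFX functions; it is not part of the commit, and the extension check and error handling are only assumptions about how a caller might use it.

#include <stdio.h>
#include "AL/al.h"
#include "AL/alc.h"
#include "AL/efx.h"

static LPALGENEFFECTS alGenEffects;
static LPALDELETEEFFECTS alDeleteEffects;

static int load_efx_procs(ALCdevice *device)
{
    /* The EFX extension must be present before querying its entry points. */
    if(!alcIsExtensionPresent(device, "ALC_EXT_EFX"))
    {
        fprintf(stderr, "EFX not supported on this device\n");
        return 0;
    }

    /* Cast each result to the matching function-pointer typedef, as the
     * patched examples do. */
#define LOAD_PROC(T, x)  ((x) = (T)alGetProcAddress(#x))
    LOAD_PROC(LPALGENEFFECTS, alGenEffects);
    LOAD_PROC(LPALDELETEEFFECTS, alDeleteEffects);
#undef LOAD_PROC

    return alGenEffects && alDeleteEffects;
}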