Diffstat (limited to 'examples/alffplay.cpp')
-rw-r--r-- | examples/alffplay.cpp | 1340
1 file changed, 651 insertions, 689 deletions
diff --git a/examples/alffplay.cpp b/examples/alffplay.cpp
index 27520a6d..655ffc96 100644
--- a/examples/alffplay.cpp
+++ b/examples/alffplay.cpp
@@ -8,29 +8,44 @@
 #include <functional>
 #include <algorithm>
 #include <iostream>
+#include <utility>
 #include <iomanip>
+#include <cstdint>
 #include <cstring>
-#include <limits>
-#include <thread>
-#include <chrono>
+#include <cstdlib>
 #include <atomic>
+#include <cerrno>
+#include <chrono>
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <thread>
 #include <vector>
-#include <mutex>
-#include <deque>
 #include <array>
 #include <cmath>
-#include <string>
+#include <deque>
+#include <mutex>
+#include <ratio>
 
 extern "C" {
 #include "libavcodec/avcodec.h"
 #include "libavformat/avformat.h"
 #include "libavformat/avio.h"
-#include "libavutil/time.h"
+#include "libavformat/version.h"
+#include "libavutil/avutil.h"
+#include "libavutil/error.h"
+#include "libavutil/frame.h"
+#include "libavutil/mem.h"
 #include "libavutil/pixfmt.h"
-#include "libavutil/avstring.h"
+#include "libavutil/rational.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/time.h"
+#include "libavutil/version.h"
 #include "libavutil/channel_layout.h"
 #include "libswscale/swscale.h"
 #include "libswresample/swresample.h"
+
+struct SwsContext;
 }
 
 #include "SDL.h"
@@ -85,20 +100,24 @@ typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values);
 
 namespace {
 
+inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept
+{ return static_cast<int64_t>(n); }
+
 #ifndef M_PI
 #define M_PI (3.14159265358979323846)
 #endif
 
+using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1_i64<<32)>>;
 using nanoseconds = std::chrono::nanoseconds;
 using microseconds = std::chrono::microseconds;
 using milliseconds = std::chrono::milliseconds;
 using seconds = std::chrono::seconds;
 using seconds_d64 = std::chrono::duration<double>;
 
-const std::string AppName("alffplay");
+const std::string AppName{"alffplay"};
 
-bool EnableDirectOut = false;
-bool EnableWideStereo = false;
+bool EnableDirectOut{false};
+bool EnableWideStereo{false};
+bool DisableVideo{false};
 
 LPALGETSOURCEI64VSOFT alGetSourcei64vSOFT;
 LPALCGETINTEGER64VSOFT alcGetInteger64vSOFT;
@@ -113,27 +132,23 @@ LPALEVENTCONTROLSOFT alEventControlSOFT;
 LPALEVENTCALLBACKSOFT alEventCallbackSOFT;
 #endif
 
-const seconds AVNoSyncThreshold(10);
+const seconds AVNoSyncThreshold{10};
 
-const milliseconds VideoSyncThreshold(10);
-#define VIDEO_PICTURE_QUEUE_SIZE 16
+const milliseconds VideoSyncThreshold{10};
+#define VIDEO_PICTURE_QUEUE_SIZE 24
 
-const seconds_d64 AudioSyncThreshold(0.03);
-const milliseconds AudioSampleCorrectionMax(50);
+const seconds_d64 AudioSyncThreshold{0.03};
+const milliseconds AudioSampleCorrectionMax{50};
 /* Averaging filter coefficient for audio sync. */
 #define AUDIO_DIFF_AVG_NB 20
-const double AudioAvgFilterCoeff = std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB);
+const double AudioAvgFilterCoeff{std::pow(0.01, 1.0/AUDIO_DIFF_AVG_NB)};
 
 /* Per-buffer size, in time */
-const milliseconds AudioBufferTime(20);
+const milliseconds AudioBufferTime{20};
 /* Buffer total size, in time (should be divisible by the buffer time) */
-const milliseconds AudioBufferTotalTime(800);
-
-#define MAX_QUEUE_SIZE (15 * 1024 * 1024) /* Bytes of compressed data to keep queued */
+const milliseconds AudioBufferTotalTime{800};
 
 enum {
-    FF_UPDATE_EVENT = SDL_USEREVENT,
-    FF_REFRESH_EVENT,
-    FF_MOVIE_DONE_EVENT
+    FF_MOVIE_DONE_EVENT = SDL_USEREVENT
 };
 
 enum class SyncMaster {
@@ -146,7 +161,7 @@ enum class SyncMaster {
 
 
 inline microseconds get_avtime()
-{ return microseconds(av_gettime()); }
+{ return microseconds{av_gettime()}; }
 
 /* Define unique_ptrs to auto-cleanup associated ffmpeg objects. */
 struct AVIOContextDeleter {
@@ -180,43 +195,83 @@ struct SwsContextDeleter {
 using SwsContextPtr = std::unique_ptr<SwsContext,SwsContextDeleter>;
 
 
+template<size_t SizeLimit>
 class PacketQueue {
+    std::mutex mMutex;
+    std::condition_variable mCondVar;
     std::deque<AVPacket> mPackets;
     size_t mTotalSize{0};
+    bool mFinished{false};
 
-public:
-    ~PacketQueue() { clear(); }
-
-    bool empty() const noexcept { return mPackets.empty(); }
-    size_t totalSize() const noexcept { return mTotalSize; }
-
-    void put(const AVPacket *pkt)
+    AVPacket *getPacket(std::unique_lock<std::mutex> &lock)
     {
-        mPackets.push_back(AVPacket{});
-        if(av_packet_ref(&mPackets.back(), pkt) != 0)
-            mPackets.pop_back();
-        else
-            mTotalSize += mPackets.back().size;
+        while(mPackets.empty() && !mFinished)
+            mCondVar.wait(lock);
+        return mPackets.empty() ? nullptr : &mPackets.front();
     }
 
-    AVPacket *front() noexcept
-    { return &mPackets.front(); }
-
     void pop()
     {
         AVPacket *pkt = &mPackets.front();
-        mTotalSize -= pkt->size;
+        mTotalSize -= static_cast<unsigned int>(pkt->size);
         av_packet_unref(pkt);
         mPackets.pop_front();
     }
 
-    void clear()
+public:
+    ~PacketQueue()
     {
         for(AVPacket &pkt : mPackets)
            av_packet_unref(&pkt);
         mPackets.clear();
         mTotalSize = 0;
     }
+
+    int sendTo(AVCodecContext *codecctx)
+    {
+        std::unique_lock<std::mutex> lock{mMutex};
+
+        AVPacket *pkt{getPacket(lock)};
+        if(!pkt) return avcodec_send_packet(codecctx, nullptr);
+
+        const int ret{avcodec_send_packet(codecctx, pkt)};
+        if(ret != AVERROR(EAGAIN))
+        {
+            if(ret < 0)
+                std::cerr<< "Failed to send packet: "<<ret <<std::endl;
+            pop();
+        }
+        return ret;
+    }
+
+    void setFinished()
+    {
+        {
+            std::lock_guard<std::mutex> _{mMutex};
+            mFinished = true;
+        }
+        mCondVar.notify_one();
+    }
+
+    bool put(const AVPacket *pkt)
+    {
+        {
+            std::unique_lock<std::mutex> lock{mMutex};
+            if(mTotalSize >= SizeLimit)
+                return false;
+
+            mPackets.push_back(AVPacket{});
+            if(av_packet_ref(&mPackets.back(), pkt) != 0)
+            {
+                mPackets.pop_back();
+                return true;
+            }
+
+            mTotalSize += static_cast<unsigned int>(mPackets.back().size);
+        }
+        mCondVar.notify_one();
+        return true;
+    }
 };
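The rewritten PacketQueue replaces the old externally-locked deque with a self-contained, size-capped producer/consumer channel: put() refuses packets once the byte total hits SizeLimit, while sendTo() blocks on the condition variable until a packet (or the finished flag) arrives and feeds it straight to avcodec_send_packet(). A minimal standalone sketch of the same bounded-queue discipline, using plain ints instead of AVPackets (the names here are illustrative, not from the patch):

#include <condition_variable>
#include <cstddef>
#include <deque>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>

template<std::size_t SizeLimit>
class BoundedQueue {
    std::mutex mMutex;
    std::condition_variable mCondVar;
    std::deque<int> mItems;
    std::size_t mTotalSize{0};
    bool mFinished{false};

public:
    /* Non-blocking put, mirroring PacketQueue::put(): reject when over the cap. */
    bool put(int value)
    {
        {
            std::lock_guard<std::mutex> lock{mMutex};
            if(mTotalSize >= SizeLimit) return false;
            mItems.push_back(value);
            mTotalSize += sizeof(int);
        }
        mCondVar.notify_one();
        return true;
    }

    void setFinished()
    {
        {
            std::lock_guard<std::mutex> lock{mMutex};
            mFinished = true;
        }
        mCondVar.notify_one();
    }

    /* Blocking get, mirroring getPacket(): wait until an item arrives or the
     * producer marks the queue finished. */
    std::optional<int> get()
    {
        std::unique_lock<std::mutex> lock{mMutex};
        mCondVar.wait(lock, [this]{ return !mItems.empty() || mFinished; });
        if(mItems.empty()) return std::nullopt; /* finished and fully drained */
        int value = mItems.front();
        mItems.pop_front();
        mTotalSize -= sizeof(int);
        return value;
    }
};

int main()
{
    BoundedQueue<64*sizeof(int)> queue;
    std::thread consumer{[&queue]{
        while(auto item = queue.get())
            std::cout<< "got "<<*item <<'\n';
    }};
    for(int i{0};i < 10;++i)
    {
        while(!queue.put(i)) /* full: back off, as the demuxer loop does */
            std::this_thread::yield();
    }
    queue.setFinished();
    consumer.join();
    return 0;
}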
 
 
@@ -228,8 +283,7 @@ struct AudioState {
     AVStream *mStream{nullptr};
     AVCodecCtxPtr mCodecCtx;
 
-    std::mutex mQueueMtx;
-    std::condition_variable mQueueCond;
+    PacketQueue<2*1024*1024> mPackets;
 
     /* Used for clock difference average computation */
     seconds_d64 mClockDiffAvg{0};
@@ -245,7 +299,7 @@ struct AudioState {
     SwrContextPtr mSwresCtx;
 
     /* Conversion format, for what gets fed to OpenAL */
-    int mDstChanLayout{0};
+    uint64_t mDstChanLayout{0};
     AVSampleFormat mDstSampleFmt{AV_SAMPLE_FMT_NONE};
 
     /* Storage of converted samples */
@@ -256,14 +310,14 @@ struct AudioState {
 
     /* OpenAL format */
     ALenum mFormat{AL_NONE};
-    ALsizei mFrameSize{0};
+    ALuint mFrameSize{0};
 
     std::mutex mSrcMutex;
     std::condition_variable mSrcCond;
     std::atomic_flag mConnected;
     ALuint mSource{0};
     std::vector<ALuint> mBuffers;
-    ALsizei mBufferIdx{0};
+    ALuint mBufferIdx{0};
 
     AudioState(MovieState &movie) : mMovie(movie)
     { mConnected.test_and_set(std::memory_order_relaxed); }
@@ -272,7 +326,7 @@ struct AudioState {
         if(mSource)
             alDeleteSources(1, &mSource);
         if(!mBuffers.empty())
-            alDeleteBuffers(mBuffers.size(), mBuffers.data());
+            alDeleteBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());
         av_freep(&mSamples);
     }
@@ -286,16 +340,15 @@ struct AudioState {
     nanoseconds getClockNoLock();
     nanoseconds getClock()
     {
-        std::lock_guard<std::mutex> lock(mSrcMutex);
+        std::lock_guard<std::mutex> lock{mSrcMutex};
         return getClockNoLock();
     }
 
-    bool isBufferFilled();
     void startPlayback();
 
     int getSync();
     int decodeFrame();
-    bool readAudio(uint8_t *samples, int length);
+    bool readAudio(uint8_t *samples, unsigned int length);
 
     int handler();
 };
@@ -306,53 +359,46 @@ struct VideoState {
     AVStream *mStream{nullptr};
     AVCodecCtxPtr mCodecCtx;
 
-    std::mutex mQueueMtx;
-    std::condition_variable mQueueCond;
+    PacketQueue<14*1024*1024> mPackets;
 
-    nanoseconds mClock{0};
-    nanoseconds mFrameTimer{0};
-    nanoseconds mFrameLastPts{0};
-    nanoseconds mFrameLastDelay{0};
-    nanoseconds mCurrentPts{0};
-    /* time (av_gettime) at which we updated mCurrentPts - used to have running video pts */
-    microseconds mCurrentPtsTime{0};
+    /* The pts of the currently displayed frame, and the time (av_gettime) it
+     * was last updated - used to have running video pts
+     */
+    nanoseconds mDisplayPts{0};
+    microseconds mDisplayPtsTime{microseconds::min()};
+    std::mutex mDispPtsMutex;
 
-    /* Decompressed video frame, and swscale context for conversion */
-    AVFramePtr mDecodedFrame;
+    /* Swscale context for format conversion */
     SwsContextPtr mSwscaleCtx;
 
     struct Picture {
-        SDL_Texture *mImage{nullptr};
-        int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
-        std::atomic<bool> mUpdated{false};
-        nanoseconds mPts{0};
-
-        ~Picture()
-        {
-            if(mImage)
-                SDL_DestroyTexture(mImage);
-            mImage = nullptr;
-        }
+        AVFramePtr mFrame{};
+        nanoseconds mPts{nanoseconds::min()};
     };
     std::array<Picture,VIDEO_PICTURE_QUEUE_SIZE> mPictQ;
-    size_t mPictQSize{0}, mPictQRead{0}, mPictQWrite{0};
+    std::atomic<size_t> mPictQRead{0u}, mPictQWrite{1u};
     std::mutex mPictQMutex;
     std::condition_variable mPictQCond;
+
+    SDL_Texture *mImage{nullptr};
+    int mWidth{0}, mHeight{0}; /* Logical image size (actual size may be larger) */
     bool mFirstUpdate{true};
+
     std::atomic<bool> mEOS{false};
     std::atomic<bool> mFinalUpdate{false};
 
     VideoState(MovieState &movie) : mMovie(movie) { }
+    ~VideoState()
+    {
+        if(mImage)
+            SDL_DestroyTexture(mImage);
+        mImage = nullptr;
+    }
 
     nanoseconds getClock();
-    bool isBufferFilled();
 
-    static Uint32 SDLCALL sdl_refresh_timer_cb(Uint32 interval, void *opaque);
-    void schedRefresh(milliseconds delay);
     void display(SDL_Window *screen, SDL_Renderer *renderer);
-    void refreshTimer(SDL_Window *screen, SDL_Renderer *renderer);
-    void updatePicture(SDL_Window *screen, SDL_Renderer *renderer);
-    int queuePicture(nanoseconds pts);
+    void updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw);
     int handler();
 };
 
@@ -362,13 +408,7 @@ struct MovieState {
 
     SyncMaster mAVSyncType{SyncMaster::Default};
 
-    microseconds mClockBase{0};
-    std::atomic<bool> mPlaying{false};
-
-    std::mutex mSendMtx;
-    std::condition_variable mSendCond;
-    /* NOTE: false/clear = need data, true/set = no data needed */
-    std::atomic_flag mSendDataGood;
+    microseconds mClockBase{microseconds::min()};
 
     std::atomic<bool> mQuit{false};
@@ -401,7 +441,7 @@ struct MovieState {
 
     nanoseconds getDuration();
 
-    int streamComponentOpen(int stream_index);
+    int streamComponentOpen(unsigned int stream_index);
     int parse_handler();
 };
@@ -417,10 +457,10 @@ nanoseconds AudioState::getClockNoLock()
         // Get the current device clock time and latency.
         auto device = alcGetContextsDevice(alcGetCurrentContext());
-        ALCint64SOFT devtimes[2] = {0,0};
+        ALCint64SOFT devtimes[2]{0,0};
         alcGetInteger64vSOFT(device, ALC_DEVICE_CLOCK_LATENCY_SOFT, 2, devtimes);
-        auto latency = nanoseconds(devtimes[1]);
-        auto device_time = nanoseconds(devtimes[0]);
+        auto latency = nanoseconds{devtimes[1]};
+        auto device_time = nanoseconds{devtimes[0]};
 
         // The clock is simply the current device time relative to the recorded
         // start time. We can also subtract the latency to get more a accurate
@@ -443,12 +483,10 @@ nanoseconds AudioState::getClockNoLock()
      * sample at OpenAL's current position, and subtracting the source latency
      * from that gives the timestamp of the sample currently at the DAC.
      */
-    nanoseconds pts = mCurrentPts;
+    nanoseconds pts{mCurrentPts};
     if(mSource)
     {
         ALint64SOFT offset[2];
-        ALint queued;
-        ALint status;
 
         /* NOTE: The source state must be checked last, in case an underrun
          * occurs and the source stops between retrieving the offset+latency
@@ -459,9 +497,10 @@ nanoseconds AudioState::getClockNoLock()
         {
             ALint ioffset;
             alGetSourcei(mSource, AL_SAMPLE_OFFSET, &ioffset);
-            offset[0] = (ALint64SOFT)ioffset << 32;
+            offset[0] = ALint64SOFT{ioffset} << 32;
             offset[1] = 0;
         }
+        ALint queued, status;
         alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
         alGetSourcei(mSource, AL_SOURCE_STATE, &status);
 
@@ -471,46 +510,31 @@ nanoseconds AudioState::getClockNoLock()
          * when it starts recovery. */
         if(status != AL_STOPPED)
         {
-            using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
-
             pts -= AudioBufferTime*queued;
             pts += std::chrono::duration_cast<nanoseconds>(
-                fixed32(offset[0] / mCodecCtx->sample_rate)
-            );
+                fixed32{offset[0] / mCodecCtx->sample_rate});
         }
         /* Don't offset by the latency if the source isn't playing. */
         if(status == AL_PLAYING)
-            pts -= nanoseconds(offset[1]);
+            pts -= nanoseconds{offset[1]};
     }
 
     return std::max(pts, nanoseconds::zero());
 }
 
-bool AudioState::isBufferFilled()
-{
-    /* All of OpenAL's buffer queueing happens under the mSrcMutex lock, as
-     * does the source gen. So when we're able to grab the lock and the source
-     * is valid, the queue must be full.
-     */
-    std::lock_guard<std::mutex> lock(mSrcMutex);
-    return mSource != 0;
-}
-
 void AudioState::startPlayback()
 {
     alSourcePlay(mSource);
     if(alcGetInteger64vSOFT)
     {
-        using fixed32 = std::chrono::duration<int64_t,std::ratio<1,(1ll<<32)>>;
-
         // Subtract the total buffer queue time from the current pts to get the
         // pts of the start of the queue.
-        nanoseconds startpts = mCurrentPts - AudioBufferTotalTime;
-        int64_t srctimes[2]={0,0};
+        nanoseconds startpts{mCurrentPts - AudioBufferTotalTime};
+        int64_t srctimes[2]{0,0};
         alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
-        auto device_time = nanoseconds(srctimes[1]);
-        auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32(srctimes[0])) /
-                          mCodecCtx->sample_rate;
+        auto device_time = nanoseconds{srctimes[1]};
+        auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32{srctimes[0]}) /
+            mCodecCtx->sample_rate;
 
         // The mixer may have ticked and incremented the device time and sample
         // offset, so subtract the source offset from the device time to get
@@ -543,47 +567,31 @@ int AudioState::getSync()
         return 0;
 
     /* Constrain the per-update difference to avoid exceedingly large skips */
-    diff = std::min<nanoseconds>(std::max<nanoseconds>(diff, -AudioSampleCorrectionMax),
-                                 AudioSampleCorrectionMax);
-    return (int)std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count();
+    diff = std::min<nanoseconds>(diff, AudioSampleCorrectionMax);
+    return static_cast<int>(std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count());
 }
 
 int AudioState::decodeFrame()
 {
     while(!mMovie.mQuit.load(std::memory_order_relaxed))
     {
-        std::unique_lock<std::mutex> lock(mQueueMtx);
-        int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
-        if(ret == AVERROR(EAGAIN))
-        {
-            mMovie.mSendDataGood.clear(std::memory_order_relaxed);
-            std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
-            mMovie.mSendCond.notify_one();
-            do {
-                mQueueCond.wait(lock);
-                ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
-            } while(ret == AVERROR(EAGAIN));
-        }
-        lock.unlock();
-        if(ret == AVERROR_EOF) break;
-        mMovie.mSendDataGood.clear(std::memory_order_relaxed);
-        mMovie.mSendCond.notify_one();
-        if(ret < 0)
+        int ret;
+        while((ret=avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get())) == AVERROR(EAGAIN))
+            mPackets.sendTo(mCodecCtx.get());
+        if(ret != 0)
         {
-            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
-            return 0;
+            if(ret == AVERROR_EOF) break;
+            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
+            continue;
        }
 
         if(mDecodedFrame->nb_samples <= 0)
-        {
-            av_frame_unref(mDecodedFrame.get());
             continue;
-        }
 
         /* If provided, update w/ pts */
         if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
             mCurrentPts = std::chrono::duration_cast<nanoseconds>(
-                seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
+                seconds_d64{av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp}
             );
 
         if(mDecodedFrame->nb_samples > mSamplesMax)
@@ -596,9 +604,8 @@ int AudioState::decodeFrame()
             mSamplesMax = mDecodedFrame->nb_samples;
         }
         /* Return the amount of sample frames converted */
-        int data_size = swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
-            (const uint8_t**)mDecodedFrame->data, mDecodedFrame->nb_samples
-        );
+        int data_size{swr_convert(mSwresCtx.get(), &mSamples, mDecodedFrame->nb_samples,
+            const_cast<const uint8_t**>(mDecodedFrame->data), mDecodedFrame->nb_samples)};
 
         av_frame_unref(mDecodedFrame.get());
         return data_size;
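decodeFrame() now drives libavcodec's push/pull decode API directly: it keeps calling avcodec_receive_frame() and, whenever that reports AVERROR(EAGAIN), pushes another packet into the codec. A compilable sketch of that loop against the FFmpeg 3.1+ decode API; the packet-supplier and frame callbacks here are stand-ins, not functions from the patch:

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavutil/frame.h"
}
#include <functional>

/* next_packet() should behave like PacketQueue::sendTo(): feed one packet to
 * the codec, or send nullptr to signal end-of-stream once input runs out. */
int decode_all(AVCodecContext *ctx, std::function<int(AVCodecContext*)> next_packet,
    std::function<void(AVFrame*)> on_frame)
{
    AVFrame *frame{av_frame_alloc()};
    if(!frame) return -1;
    while(true)
    {
        int ret;
        /* Pull until the codec produces a frame, pushing packets whenever it
         * reports EAGAIN (it needs more input before it can output). */
        while((ret=avcodec_receive_frame(ctx, frame)) == AVERROR(EAGAIN))
            next_packet(ctx);
        if(ret == AVERROR_EOF) break;   /* codec fully drained */
        if(ret < 0) continue;           /* decode error: skip this iteration */
        on_frame(frame);
        av_frame_unref(frame);
    }
    av_frame_free(&frame);
    return 0;
}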
@@ -611,17 +618,17 @@ int AudioState::decodeFrame()
  * multiple of the template type size.
  */
 template<typename T>
-static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_size)
+static void sample_dup(uint8_t *out, const uint8_t *in, unsigned int count, size_t frame_size)
 {
-    const T *sample = reinterpret_cast<const T*>(in);
-    T *dst = reinterpret_cast<T*>(out);
+    auto *sample = reinterpret_cast<const T*>(in);
+    auto *dst = reinterpret_cast<T*>(out);
     if(frame_size == sizeof(T))
         std::fill_n(dst, count, *sample);
     else
     {
         /* NOTE: frame_size is a multiple of sizeof(T). */
-        int type_mult = frame_size / sizeof(T);
-        int i = 0;
+        size_t type_mult{frame_size / sizeof(T)};
+        size_t i{0};
         std::generate_n(dst, count*type_mult,
             [sample,type_mult,&i]() -> T
             {
@@ -634,10 +641,10 @@ static void sample_dup(uint8_t *out, const uint8_t *in, int count, int frame_siz
 }
 
 
-bool AudioState::readAudio(uint8_t *samples, int length)
+bool AudioState::readAudio(uint8_t *samples, unsigned int length)
 {
-    int sample_skip = getSync();
-    int audio_size = 0;
+    int sample_skip{getSync()};
+    unsigned int audio_size{0};
 
     /* Read the next chunk of data, refill the buffer, and queue it
      * on the source */
@@ -656,22 +663,23 @@ bool AudioState::readAudio(uint8_t *samples, int length)
             // Adjust the device start time and current pts by the amount we're
             // skipping/duplicating, so that the clock remains correct for the
             // current stream position.
-            auto skip = nanoseconds(seconds(mSamplesPos)) / mCodecCtx->sample_rate;
+            auto skip = nanoseconds{seconds{mSamplesPos}} / mCodecCtx->sample_rate;
             mDeviceStartTime -= skip;
            mCurrentPts += skip;
             continue;
         }
 
-        int rem = length - audio_size;
+        unsigned int rem{length - audio_size};
         if(mSamplesPos >= 0)
         {
-            int len = mSamplesLen - mSamplesPos;
+            const auto len = static_cast<unsigned int>(mSamplesLen - mSamplesPos);
             if(rem > len) rem = len;
-            memcpy(samples, mSamples + mSamplesPos*mFrameSize, rem*mFrameSize);
+            std::copy_n(mSamples + static_cast<unsigned int>(mSamplesPos)*mFrameSize,
+                rem*mFrameSize, samples);
         }
         else
         {
-            rem = std::min(rem, -mSamplesPos);
+            rem = std::min(rem, static_cast<unsigned int>(-mSamplesPos));
 
             /* Add samples by copying the first sample */
             if((mFrameSize&7) == 0)
@@ -685,7 +693,7 @@ bool AudioState::readAudio(uint8_t *samples, int length)
         }
 
         mSamplesPos += rem;
-        mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
+        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
         samples += rem*mFrameSize;
         audio_size += rem;
     }
@@ -694,10 +702,10 @@ bool AudioState::readAudio(uint8_t *samples, int length)
 
     if(audio_size < length)
     {
-        int rem = length - audio_size;
+        const unsigned int rem{length - audio_size};
         std::fill_n(samples, rem*mFrameSize,
             (mDstSampleFmt == AV_SAMPLE_FMT_U8) ? 0x80 : 0x00);
-        mCurrentPts += nanoseconds(seconds(rem)) / mCodecCtx->sample_rate;
+        mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
         audio_size += rem;
     }
     return true;
@@ -709,14 +717,14 @@ void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALui
                                            ALsizei length, const ALchar *message,
                                            void *userParam)
 {
-    AudioState *self = reinterpret_cast<AudioState*>(userParam);
+    auto self = static_cast<AudioState*>(userParam);
 
     if(eventType == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT)
     {
         /* Temporarily lock the source mutex to ensure it's not between
          * checking the processed count and going to sleep.
          */
-        std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
+        std::unique_lock<std::mutex>{self->mSrcMutex}.unlock();
         self->mSrcCond.notify_one();
         return;
     }
@@ -736,15 +744,15 @@ void AL_APIENTRY AudioState::EventCallback(ALenum eventType, ALuint object, ALui
     std::cout<< "\n"
         "Object ID: "<<object<<"\n"
         "Parameter: "<<param<<"\n"
-        "Message: "<<std::string(message, length)<<"\n----"<<
+        "Message: "<<std::string{message, static_cast<ALuint>(length)}<<"\n----"<<
         std::endl;
 
     if(eventType == AL_EVENT_TYPE_DISCONNECTED_SOFT)
     {
-        { std::lock_guard<std::mutex> lock(self->mSrcMutex);
+        {
+            std::lock_guard<std::mutex> lock{self->mSrcMutex};
             self->mConnected.clear(std::memory_order_release);
         }
-        std::unique_lock<std::mutex>(self->mSrcMutex).unlock();
         self->mSrcCond.notify_one();
     }
 }
@@ -752,8 +760,8 @@
 
 int AudioState::handler()
 {
-    std::unique_lock<std::mutex> lock(mSrcMutex);
-    milliseconds sleep_time = AudioBufferTime / 3;
+    std::unique_lock<std::mutex> srclock{mSrcMutex, std::defer_lock};
+    milliseconds sleep_time{AudioBufferTime / 3};
     ALenum fmt;
 
 #ifdef AL_SOFT_events
@@ -772,13 +780,15 @@ int AudioState::handler()
 
     /* Find a suitable format for OpenAL. */
     mDstChanLayout = 0;
-    if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
+    mFormat = AL_NONE;
+    if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
+       alIsExtensionPresent("AL_EXT_FLOAT32"))
     {
-        mDstSampleFmt = AV_SAMPLE_FMT_U8;
-        mFrameSize = 1;
+        mDstSampleFmt = AV_SAMPLE_FMT_FLT;
+        mFrameSize = 4;
         if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
            alIsExtensionPresent("AL_EXT_MCFORMATS") &&
-           (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
+           (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
         {
             mDstChanLayout = mCodecCtx->channel_layout;
             mFrameSize *= 8;
@@ -787,7 +797,7 @@ int AudioState::handler()
         if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
             mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
-           (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
+           (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
        {
             mDstChanLayout = mCodecCtx->channel_layout;
             mFrameSize *= 6;
@@ -797,23 +807,42 @@ int AudioState::handler()
         {
             mDstChanLayout = mCodecCtx->channel_layout;
             mFrameSize *= 1;
-            mFormat = AL_FORMAT_MONO8;
+            mFormat = AL_FORMAT_MONO_FLOAT32;
         }
-        if(!mDstChanLayout)
+        /* Assume 3D B-Format (ambisonics) if the channel layout is blank and
+         * there's 4 or more channels. FFmpeg/libavcodec otherwise seems to
+         * have no way to specify if the source is actually B-Format (let alone
+         * if it's 2D or 3D).
+         */
+        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
+           alIsExtensionPresent("AL_EXT_BFORMAT") &&
+           (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D_FLOAT32")) != AL_NONE && fmt != -1)
+        {
+            int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
+            if((order+1)*(order+1) == mCodecCtx->channels ||
+               (order+1)*(order+1) + 2 == mCodecCtx->channels)
+            {
+                /* OpenAL only supports first-order with AL_EXT_BFORMAT, which
+                 * is 4 channels for 3D buffers.
                 */
+                mFrameSize *= 4;
+                mFormat = fmt;
+            }
+        }
+        if(!mFormat)
         {
             mDstChanLayout = AV_CH_LAYOUT_STEREO;
             mFrameSize *= 2;
-            mFormat = AL_FORMAT_STEREO8;
+            mFormat = AL_FORMAT_STEREO_FLOAT32;
         }
     }
-    if((mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_FLTP) &&
-       alIsExtensionPresent("AL_EXT_FLOAT32"))
+    if(mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8 || mCodecCtx->sample_fmt == AV_SAMPLE_FMT_U8P)
     {
-        mDstSampleFmt = AV_SAMPLE_FMT_FLT;
-        mFrameSize = 4;
+        mDstSampleFmt = AV_SAMPLE_FMT_U8;
+        mFrameSize = 1;
         if(mCodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1 &&
           alIsExtensionPresent("AL_EXT_MCFORMATS") &&
-           (fmt=alGetEnumValue("AL_FORMAT_71CHN32")) != AL_NONE && fmt != -1)
+           (fmt=alGetEnumValue("AL_FORMAT_71CHN8")) != AL_NONE && fmt != -1)
        {
             mDstChanLayout = mCodecCtx->channel_layout;
             mFrameSize *= 8;
@@ -822,7 +851,7 @@ int AudioState::handler()
         if((mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1 ||
             mCodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) &&
            alIsExtensionPresent("AL_EXT_MCFORMATS") &&
-           (fmt=alGetEnumValue("AL_FORMAT_51CHN32")) != AL_NONE && fmt != -1)
+           (fmt=alGetEnumValue("AL_FORMAT_51CHN8")) != AL_NONE && fmt != -1)
         {
             mDstChanLayout = mCodecCtx->channel_layout;
             mFrameSize *= 6;
@@ -832,16 +861,28 @@ int AudioState::handler()
         {
             mDstChanLayout = mCodecCtx->channel_layout;
             mFrameSize *= 1;
-            mFormat = AL_FORMAT_MONO_FLOAT32;
+            mFormat = AL_FORMAT_MONO8;
         }
-        if(!mDstChanLayout)
+        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
+           alIsExtensionPresent("AL_EXT_BFORMAT") &&
+           (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D8")) != AL_NONE && fmt != -1)
+        {
+            int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
+            if((order+1)*(order+1) == mCodecCtx->channels ||
+               (order+1)*(order+1) + 2 == mCodecCtx->channels)
+            {
+                mFrameSize *= 4;
+                mFormat = fmt;
+            }
+        }
+        if(!mFormat)
        {
             mDstChanLayout = AV_CH_LAYOUT_STEREO;
             mFrameSize *= 2;
-            mFormat = AL_FORMAT_STEREO_FLOAT32;
+            mFormat = AL_FORMAT_STEREO8;
         }
     }
-    if(!mDstChanLayout)
+    if(!mFormat)
     {
         mDstSampleFmt = AV_SAMPLE_FMT_S16;
         mFrameSize = 2;
@@ -868,18 +909,30 @@ int AudioState::handler()
             mFrameSize *= 1;
             mFormat = AL_FORMAT_MONO16;
         }
-        if(!mDstChanLayout)
+        if(mCodecCtx->channel_layout == 0 && mCodecCtx->channels >= 4 &&
+           alIsExtensionPresent("AL_EXT_BFORMAT") &&
+           (fmt=alGetEnumValue("AL_FORMAT_BFORMAT3D16")) != AL_NONE && fmt != -1)
+        {
+            int order{static_cast<int>(std::sqrt(mCodecCtx->channels)) - 1};
+            if((order+1)*(order+1) == mCodecCtx->channels ||
+               (order+1)*(order+1) + 2 == mCodecCtx->channels)
+            {
+                mFrameSize *= 4;
+                mFormat = fmt;
+            }
+        }
+        if(!mFormat)
        {
             mDstChanLayout = AV_CH_LAYOUT_STEREO;
             mFrameSize *= 2;
             mFormat = AL_FORMAT_STEREO16;
        }
     }
-    void *samples = nullptr;
-    ALsizei buffer_len = std::chrono::duration_cast<std::chrono::duration<int>>(
-        mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize;
+    void *samples{nullptr};
+    ALsizei buffer_len = static_cast<int>(std::chrono::duration_cast<seconds>(
+        mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize);
 
-    mSamples = NULL;
+    mSamples = nullptr;
     mSamplesMax = 0;
     mSamplesPos = 0;
     mSamplesLen = 0;
@@ -891,13 +944,36 @@ int AudioState::handler()
         goto finish;
     }
 
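The B-Format guess above relies only on arithmetic: a full 3D ambisonic set of order N has (N+1)^2 channels, so a blank channel layout with a matching count (or a count two over, allowing an extra channel pair) is treated as B-Format. A small sketch of that check in isolation:

#include <cmath>
#include <iostream>

/* Returns the guessed ambisonic order for a blank-layout stream, or -1 when
 * the channel count doesn't fit the (order+1)^2 or (order+1)^2 + 2 pattern. */
int guess_ambisonic_order(int channels)
{
    if(channels < 4) return -1;
    int order{static_cast<int>(std::sqrt(channels)) - 1};
    if((order+1)*(order+1) == channels || (order+1)*(order+1) + 2 == channels)
        return order;
    return -1;
}

int main()
{
    /* 4 and 6 -> first order, 9 and 11 -> second order, 16 -> third order */
    for(int chans : {4, 6, 9, 11, 16})
        std::cout<< chans<<" channels -> order "<<guess_ambisonic_order(chans) <<'\n';
    return 0;
}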
-    mSwresCtx.reset(swr_alloc_set_opts(nullptr,
-        mDstChanLayout, mDstSampleFmt, mCodecCtx->sample_rate,
-        mCodecCtx->channel_layout ? mCodecCtx->channel_layout :
-            (uint64_t)av_get_default_channel_layout(mCodecCtx->channels),
-        mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
-        0, nullptr
-    ));
+    if(!mDstChanLayout)
+    {
+        /* OpenAL only supports first-order ambisonics with AL_EXT_BFORMAT, so
+         * we have to drop any extra channels. It also only supports FuMa
+         * channel ordering and normalization, so a custom matrix is needed to
+         * scale and reorder the source from AmbiX.
+         */
+        mSwresCtx.reset(swr_alloc_set_opts(nullptr,
+            (1_i64<<4)-1, mDstSampleFmt, mCodecCtx->sample_rate,
+            (1_i64<<mCodecCtx->channels)-1, mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
+            0, nullptr));
+
+        /* Note that ffmpeg/libavcodec has no method to check the ambisonic
+         * channel order and normalization, so we can only assume AmbiX as the
+         * defacto-standard. This is not true for .amb files, which use FuMa.
+         */
+        std::vector<double> mtx(64*64, 0.0);
+        mtx[0 + 0*64] = std::sqrt(0.5);
+        mtx[3 + 1*64] = 1.0;
+        mtx[1 + 2*64] = 1.0;
+        mtx[2 + 3*64] = 1.0;
+        swr_set_matrix(mSwresCtx.get(), mtx.data(), 64);
+    }
+    else
+        mSwresCtx.reset(swr_alloc_set_opts(nullptr,
+            static_cast<int64_t>(mDstChanLayout), mDstSampleFmt, mCodecCtx->sample_rate,
+            mCodecCtx->channel_layout ? static_cast<int64_t>(mCodecCtx->channel_layout) :
+                av_get_default_channel_layout(mCodecCtx->channels),
+            mCodecCtx->sample_fmt, mCodecCtx->sample_rate,
+            0, nullptr));
     if(!mSwresCtx || swr_init(mSwresCtx.get()) != 0)
     {
         std::cerr<< "Failed to initialize audio converter" <<std::endl;
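The remix matrix fed to swr_set_matrix() encodes the two conventions the comment above assumes: AmbiX stores first-order channels in ACN order (W, Y, Z, X), while FuMa wants W, X, Y, Z with W attenuated by 1/sqrt(2); at first order the remaining channels carry over unscaled. A toy demonstration of applying that 4x4 reorder/scale to one sample frame (the input values are made up):

#include <array>
#include <cmath>
#include <cstddef>
#include <iostream>

int main()
{
    /* Output channel o = sum over i of mtx[o][i] * input channel i. */
    const std::array<std::array<double,4>,4> mtx{{
        {std::sqrt(0.5), 0.0, 0.0, 0.0}, /* FuMa W <- AmbiX W (ACN 0), scaled */
        {0.0,            0.0, 0.0, 1.0}, /* FuMa X <- AmbiX X (ACN 3) */
        {0.0,            1.0, 0.0, 0.0}, /* FuMa Y <- AmbiX Y (ACN 1) */
        {0.0,            0.0, 1.0, 0.0}, /* FuMa Z <- AmbiX Z (ACN 2) */
    }};
    const std::array<double,4> ambix{1.0, 0.25, 0.5, 0.75}; /* one sample frame */

    for(const auto &row : mtx)
    {
        double out{0.0};
        for(std::size_t i{0};i < row.size();++i)
            out += row[i]*ambix[i];
        std::cout<< out <<'\n';
    }
    return 0;
}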
@@ -905,14 +981,14 @@
     }
 
     mBuffers.assign(AudioBufferTotalTime / AudioBufferTime, 0);
-    alGenBuffers(mBuffers.size(), mBuffers.data());
+    alGenBuffers(static_cast<ALsizei>(mBuffers.size()), mBuffers.data());
     alGenSources(1, &mSource);
 
     if(EnableDirectOut)
         alSourcei(mSource, AL_DIRECT_CHANNELS_SOFT, AL_TRUE);
-    if(EnableWideStereo)
-    {
-        ALfloat angles[2] = { (ALfloat)(M_PI/3.0), (ALfloat)(-M_PI/3.0) };
+    if(EnableWideStereo)
+    {
+        ALfloat angles[2] = {static_cast<ALfloat>(M_PI/3.0),
+            static_cast<ALfloat>(-M_PI/3.0)};
         alSourcefv(mSource, AL_STEREO_ANGLES, angles);
     }
@@ -928,13 +1004,28 @@
         if(alGetError() != AL_NO_ERROR)
         {
             fprintf(stderr, "Failed to use mapped buffers\n");
-            samples = av_malloc(buffer_len);
+            samples = av_malloc(static_cast<ALuint>(buffer_len));
         }
     }
     else
 #endif
-        samples = av_malloc(buffer_len);
+        samples = av_malloc(static_cast<ALuint>(buffer_len));
 
+    /* Prefill the codec buffer. */
+    do {
+        const int ret{mPackets.sendTo(mCodecCtx.get())};
+        if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+            break;
+    } while(1);
+
+    srclock.lock();
+    if(alcGetInteger64vSOFT)
+    {
+        int64_t devtime{};
+        alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()), ALC_DEVICE_CLOCK_SOFT,
+            1, &devtime);
+        mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
+    }
     while(alGetError() == AL_NO_ERROR && !mMovie.mQuit.load(std::memory_order_relaxed) &&
           mConnected.test_and_set(std::memory_order_relaxed))
     {
@@ -944,35 +1035,37 @@
         while(processed > 0)
         {
             std::array<ALuint,4> bids;
-            alSourceUnqueueBuffers(mSource, std::min<ALsizei>(bids.size(), processed),
-                                   bids.data());
-            processed -= std::min<ALsizei>(bids.size(), processed);
+            const ALsizei todq{std::min<ALsizei>(bids.size(), processed)};
+            alSourceUnqueueBuffers(mSource, todq, bids.data());
+            processed -= todq;
         }
 
         /* Refill the buffer queue. */
         ALint queued;
         alGetSourcei(mSource, AL_BUFFERS_QUEUED, &queued);
-        while((ALuint)queued < mBuffers.size())
+        while(static_cast<ALuint>(queued) < mBuffers.size())
         {
-            ALuint bufid = mBuffers[mBufferIdx];
-
-            uint8_t *ptr = reinterpret_cast<uint8_t*>(samples
-#ifdef AL_SOFT_map_buffer
-                ? samples : alMapBufferSOFT(bufid, 0, buffer_len, AL_MAP_WRITE_BIT_SOFT)
-#endif
-            );
-            if(!ptr) break;
-
+            const ALuint bufid{mBuffers[mBufferIdx]};
             /* Read the next chunk of data, filling the buffer, and queue it on
-             * the source */
-            bool got_audio = readAudio(ptr, buffer_len);
+             * the source.
+             */
 #ifdef AL_SOFT_map_buffer
-            if(!samples) alUnmapBufferSOFT(bufid);
+            if(!samples)
+            {
+                auto ptr = static_cast<uint8_t*>(alMapBufferSOFT(bufid, 0, buffer_len,
+                    AL_MAP_WRITE_BIT_SOFT));
+                bool got_audio{readAudio(ptr, static_cast<unsigned int>(buffer_len))};
+                alUnmapBufferSOFT(bufid);
+                if(!got_audio) break;
+            }
+            else
 #endif
-            if(!got_audio) break;
-
-            if(samples)
+            {
+                auto ptr = static_cast<uint8_t*>(samples);
+                if(!readAudio(ptr, static_cast<unsigned int>(buffer_len)))
+                    break;
                 alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);
+            }
 
             alSourceQueueBuffers(mSource, 1, &bufid);
             mBufferIdx = (mBufferIdx+1) % mBuffers.size();
@@ -992,19 +1085,29 @@
              */
             alSourceRewind(mSource);
             alSourcei(mSource, AL_BUFFER, 0);
+            if(alcGetInteger64vSOFT)
+            {
+                /* Also update the device start time with the current device
+                 * clock, so the decoder knows we're running behind.
+                 */
+                int64_t devtime{};
+                alcGetInteger64vSOFT(alcGetContextsDevice(alcGetCurrentContext()),
+                    ALC_DEVICE_CLOCK_SOFT, 1, &devtime);
+                mDeviceStartTime = nanoseconds{devtime} - mCurrentPts;
+            }
             continue;
         }
 
         /* (re)start the source if needed, and wait for a buffer to finish */
-        if(state != AL_PLAYING && state != AL_PAUSED &&
-           mMovie.mPlaying.load(std::memory_order_relaxed))
+        if(state != AL_PLAYING && state != AL_PAUSED)
             startPlayback();
 
-        mSrcCond.wait_for(lock, sleep_time);
+        mSrcCond.wait_for(srclock, sleep_time);
     }
 
     alSourceRewind(mSource);
     alSourcei(mSource, AL_BUFFER, 0);
+    srclock.unlock();
 
finish:
     av_freep(&samples);
@@ -1024,350 +1127,267 @@ finish:
 
 nanoseconds VideoState::getClock()
 {
     /* NOTE: This returns incorrect times while not playing. */
-    auto delta = get_avtime() - mCurrentPtsTime;
-    return mCurrentPts + delta;
-}
-
-bool VideoState::isBufferFilled()
-{
-    std::unique_lock<std::mutex> lock(mPictQMutex);
-    return mPictQSize >= mPictQ.size();
-}
-
-Uint32 SDLCALL VideoState::sdl_refresh_timer_cb(Uint32 /*interval*/, void *opaque)
-{
-    SDL_Event evt{};
-    evt.user.type = FF_REFRESH_EVENT;
-    evt.user.data1 = opaque;
-    SDL_PushEvent(&evt);
-    return 0; /* 0 means stop timer */
-}
-
-/* Schedules an FF_REFRESH_EVENT event to occur in 'delay' ms. */
-void VideoState::schedRefresh(milliseconds delay)
-{
-    SDL_AddTimer(delay.count(), sdl_refresh_timer_cb, this);
+    std::lock_guard<std::mutex> _{mDispPtsMutex};
+    if(mDisplayPtsTime == microseconds::min())
+        return nanoseconds::zero();
+    auto delta = get_avtime() - mDisplayPtsTime;
+    return mDisplayPts + delta;
 }
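Both decoders now anchor their stream clocks to the ALC device clock: on (re)start the code records start = device_time - current_pts, and the running playback position can later be recovered as device_time - start (further adjusted by the source latency while playing). A small chrono sketch of that bookkeeping, with invented numbers:

#include <chrono>
#include <iostream>

int main()
{
    using namespace std::chrono;

    nanoseconds device_time{5'000'000'000}; /* device clock when (re)starting */
    nanoseconds current_pts{2'000'000'000}; /* stream position at that moment */
    nanoseconds start_time{device_time - current_pts};

    /* Later, the mixer has advanced the device clock by 750ms... */
    device_time += 750ms;
    /* ...so the playback clock now reads 2750ms into the stream. */
    std::cout<< duration_cast<milliseconds>(device_time - start_time).count()
        <<"ms\n";
    return 0;
}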
 
-/* Called by VideoState::refreshTimer to display the next video frame. */
+/* Called by VideoState::updateVideo to display the next video frame. */
 void VideoState::display(SDL_Window *screen, SDL_Renderer *renderer)
 {
-    Picture *vp = &mPictQ[mPictQRead];
-
-    if(!vp->mImage)
+    if(!mImage)
         return;
 
-    float aspect_ratio;
+    double aspect_ratio;
     int win_w, win_h;
     int w, h, x, y;
 
     if(mCodecCtx->sample_aspect_ratio.num == 0)
-        aspect_ratio = 0.0f;
+        aspect_ratio = 0.0;
     else
     {
         aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio) * mCodecCtx->width /
                        mCodecCtx->height;
     }
-    if(aspect_ratio <= 0.0f)
-        aspect_ratio = (float)mCodecCtx->width / (float)mCodecCtx->height;
+    if(aspect_ratio <= 0.0)
+        aspect_ratio = static_cast<double>(mCodecCtx->width) / mCodecCtx->height;
 
     SDL_GetWindowSize(screen, &win_w, &win_h);
     h = win_h;
-    w = ((int)rint(h * aspect_ratio) + 3) & ~3;
+    w = (static_cast<int>(std::rint(h * aspect_ratio)) + 3) & ~3;
     if(w > win_w)
     {
         w = win_w;
-        h = ((int)rint(w / aspect_ratio) + 3) & ~3;
+        h = (static_cast<int>(std::rint(w / aspect_ratio)) + 3) & ~3;
     }
     x = (win_w - w) / 2;
     y = (win_h - h) / 2;
 
-    SDL_Rect src_rect{ 0, 0, vp->mWidth, vp->mHeight };
+    SDL_Rect src_rect{ 0, 0, mWidth, mHeight };
     SDL_Rect dst_rect{ x, y, w, h };
-    SDL_RenderCopy(renderer, vp->mImage, &src_rect, &dst_rect);
+    SDL_RenderCopy(renderer, mImage, &src_rect, &dst_rect);
     SDL_RenderPresent(renderer);
 }
 
-/* FF_REFRESH_EVENT handler called on the main thread where the SDL_Renderer
- * was created. It handles the display of the next decoded video frame (if not
- * falling behind), and sets up the timer for the following video frame.
+/* Called regularly on the main thread where the SDL_Renderer was created. It
+ * handles updating the textures of decoded frames and displaying the latest
+ * frame.
 */
-void VideoState::refreshTimer(SDL_Window *screen, SDL_Renderer *renderer)
+void VideoState::updateVideo(SDL_Window *screen, SDL_Renderer *renderer, bool redraw)
 {
-    if(!mStream)
-    {
-        if(mEOS)
-        {
-            mFinalUpdate = true;
-            std::unique_lock<std::mutex>(mPictQMutex).unlock();
-            mPictQCond.notify_all();
-            return;
-        }
-        schedRefresh(milliseconds(100));
-        return;
-    }
-    if(!mMovie.mPlaying.load(std::memory_order_relaxed))
+    size_t read_idx{mPictQRead.load(std::memory_order_relaxed)};
+    Picture *vp{&mPictQ[read_idx]};
+
+    auto clocktime = mMovie.getMasterClock();
+    bool updated{false};
+    while(1)
     {
-        schedRefresh(milliseconds(1));
-        return;
-    }
+        size_t next_idx{(read_idx+1)%mPictQ.size()};
+        if(next_idx == mPictQWrite.load(std::memory_order_acquire))
+            break;
+        Picture *nextvp{&mPictQ[next_idx]};
+        if(clocktime < nextvp->mPts)
+            break;
 
-    std::unique_lock<std::mutex> lock(mPictQMutex);
-retry:
-    if(mPictQSize == 0)
+        vp = nextvp;
+        updated = true;
+        read_idx = next_idx;
+    }
+    if(mMovie.mQuit.load(std::memory_order_relaxed))
     {
         if(mEOS)
             mFinalUpdate = true;
-        else
-            schedRefresh(milliseconds(1));
-        lock.unlock();
-        mPictQCond.notify_all();
+        mPictQRead.store(read_idx, std::memory_order_release);
+        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
+        mPictQCond.notify_one();
         return;
     }
 
-    Picture *vp = &mPictQ[mPictQRead];
-    mCurrentPts = vp->mPts;
-    mCurrentPtsTime = get_avtime();
-
-    /* Get delay using the frame pts and the pts from last frame. */
-    auto delay = vp->mPts - mFrameLastPts;
-    if(delay <= seconds::zero() || delay >= seconds(1))
-    {
-        /* If incorrect delay, use previous one. */
-        delay = mFrameLastDelay;
-    }
-    /* Save for next frame. */
-    mFrameLastDelay = delay;
-    mFrameLastPts = vp->mPts;
-
-    /* Update delay to sync to clock if not master source. */
-    if(mMovie.mAVSyncType != SyncMaster::Video)
+    if(updated)
     {
-        auto ref_clock = mMovie.getMasterClock();
-        auto diff = vp->mPts - ref_clock;
+        mPictQRead.store(read_idx, std::memory_order_release);
+        std::unique_lock<std::mutex>{mPictQMutex}.unlock();
+        mPictQCond.notify_one();
 
-        /* Skip or repeat the frame. Take delay into account. */
-        auto sync_threshold = std::min<nanoseconds>(delay, VideoSyncThreshold);
-        if(!(diff < AVNoSyncThreshold && diff > -AVNoSyncThreshold))
+        /* allocate or resize the buffer! */
+        bool fmt_updated{false};
+        if(!mImage || mWidth != mCodecCtx->width || mHeight != mCodecCtx->height)
         {
-            if(diff <= -sync_threshold)
-                delay = nanoseconds::zero();
-            else if(diff >= sync_threshold)
-                delay *= 2;
-        }
-    }
-
-    mFrameTimer += delay;
-    /* Compute the REAL delay. */
-    auto actual_delay = mFrameTimer - get_avtime();
-    if(!(actual_delay >= VideoSyncThreshold))
-    {
-        /* We don't have time to handle this picture, just skip to the next one. */
-        mPictQRead = (mPictQRead+1)%mPictQ.size();
-        mPictQSize--;
-        goto retry;
-    }
-    schedRefresh(std::chrono::duration_cast<milliseconds>(actual_delay));
+            fmt_updated = true;
+            if(mImage)
+                SDL_DestroyTexture(mImage);
+            mImage = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
+                mCodecCtx->coded_width, mCodecCtx->coded_height);
+            if(!mImage)
+                std::cerr<< "Failed to create YV12 texture!" <<std::endl;
+            mWidth = mCodecCtx->width;
+            mHeight = mCodecCtx->height;
+
+            if(mFirstUpdate && mWidth > 0 && mHeight > 0)
+            {
+                /* For the first update, set the window size to the video size. */
+                mFirstUpdate = false;
 
-    /* Show the picture! */
-    display(screen, renderer);
+                int w{mWidth};
+                int h{mHeight};
+                if(mCodecCtx->sample_aspect_ratio.den != 0)
+                {
+                    double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
+                    if(aspect_ratio >= 1.0)
+                        w = static_cast<int>(w*aspect_ratio + 0.5);
+                    else if(aspect_ratio > 0.0)
+                        h = static_cast<int>(h/aspect_ratio + 0.5);
+                }
+                SDL_SetWindowSize(screen, w, h);
+            }
+        }
 
-    /* Update queue for next picture. */
-    mPictQRead = (mPictQRead+1)%mPictQ.size();
-    mPictQSize--;
-    lock.unlock();
-    mPictQCond.notify_all();
-}
+        if(mImage)
+        {
+            AVFrame *frame{vp->mFrame.get()};
+            void *pixels{nullptr};
+            int pitch{0};
+
+            if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
+                SDL_UpdateYUVTexture(mImage, nullptr,
+                    frame->data[0], frame->linesize[0],
+                    frame->data[1], frame->linesize[1],
+                    frame->data[2], frame->linesize[2]
+                );
+            else if(SDL_LockTexture(mImage, nullptr, &pixels, &pitch) != 0)
+                std::cerr<< "Failed to lock texture" <<std::endl;
+            else
+            {
+                // Convert the image into YUV format that SDL uses
+                int coded_w{mCodecCtx->coded_width};
+                int coded_h{mCodecCtx->coded_height};
+                int w{mCodecCtx->width};
+                int h{mCodecCtx->height};
+                if(!mSwscaleCtx || fmt_updated)
+                {
+                    mSwscaleCtx.reset(sws_getContext(
+                        w, h, mCodecCtx->pix_fmt,
+                        w, h, AV_PIX_FMT_YUV420P, 0,
+                        nullptr, nullptr, nullptr
+                    ));
+                }
 
-/* FF_UPDATE_EVENT handler, updates the picture's texture. It's called on the
- * main thread where the renderer was created.
- */
-void VideoState::updatePicture(SDL_Window *screen, SDL_Renderer *renderer)
-{
-    Picture *vp = &mPictQ[mPictQWrite];
-    bool fmt_updated = false;
-
-    /* allocate or resize the buffer! */
-    if(!vp->mImage || vp->mWidth != mCodecCtx->width || vp->mHeight != mCodecCtx->height)
-    {
-        fmt_updated = true;
-        if(vp->mImage)
-            SDL_DestroyTexture(vp->mImage);
-        vp->mImage = SDL_CreateTexture(
-            renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
-            mCodecCtx->coded_width, mCodecCtx->coded_height
-        );
-        if(!vp->mImage)
-            std::cerr<< "Failed to create YV12 texture!" <<std::endl;
-        vp->mWidth = mCodecCtx->width;
-        vp->mHeight = mCodecCtx->height;
+                /* point pict at the queue */
+                uint8_t *pict_data[3];
+                pict_data[0] = static_cast<uint8_t*>(pixels);
+                pict_data[1] = pict_data[0] + coded_w*coded_h;
+                pict_data[2] = pict_data[1] + coded_w*coded_h/4;
 
-        if(mFirstUpdate && vp->mWidth > 0 && vp->mHeight > 0)
-        {
-            /* For the first update, set the window size to the video size. */
-            mFirstUpdate = false;
+                int pict_linesize[3];
+                pict_linesize[0] = pitch;
+                pict_linesize[1] = pitch / 2;
+                pict_linesize[2] = pitch / 2;
 
-            int w = vp->mWidth;
-            int h = vp->mHeight;
-            if(mCodecCtx->sample_aspect_ratio.den != 0)
-            {
-                double aspect_ratio = av_q2d(mCodecCtx->sample_aspect_ratio);
-                if(aspect_ratio >= 1.0)
-                    w = (int)(w*aspect_ratio + 0.5);
-                else if(aspect_ratio > 0.0)
-                    h = (int)(h/aspect_ratio + 0.5);
+                sws_scale(mSwscaleCtx.get(), reinterpret_cast<uint8_t**>(frame->data), frame->linesize,
+                    0, h, pict_data, pict_linesize);
+                SDL_UnlockTexture(mImage);
             }
-            SDL_SetWindowSize(screen, w, h);
         }
+
+        redraw = true;
     }
 
-    if(vp->mImage)
+    if(redraw)
     {
-        AVFrame *frame = mDecodedFrame.get();
-        void *pixels = nullptr;
-        int pitch = 0;
-
-        if(mCodecCtx->pix_fmt == AV_PIX_FMT_YUV420P)
-            SDL_UpdateYUVTexture(vp->mImage, nullptr,
-                frame->data[0], frame->linesize[0],
-                frame->data[1], frame->linesize[1],
-                frame->data[2], frame->linesize[2]
-            );
-        else if(SDL_LockTexture(vp->mImage, nullptr, &pixels, &pitch) != 0)
-            std::cerr<< "Failed to lock texture" <<std::endl;
-        else
-        {
-            // Convert the image into YUV format that SDL uses
-            int coded_w = mCodecCtx->coded_width;
-            int coded_h = mCodecCtx->coded_height;
-            int w = mCodecCtx->width;
-            int h = mCodecCtx->height;
-            if(!mSwscaleCtx || fmt_updated)
-            {
-                mSwscaleCtx.reset(sws_getContext(
-                    w, h, mCodecCtx->pix_fmt,
-                    w, h, AV_PIX_FMT_YUV420P, 0,
-                    nullptr, nullptr, nullptr
-                ));
-            }
-
-            /* point pict at the queue */
-            uint8_t *pict_data[3];
-            pict_data[0] = reinterpret_cast<uint8_t*>(pixels);
-            pict_data[1] = pict_data[0] + coded_w*coded_h;
-            pict_data[2] = pict_data[1] + coded_w*coded_h/4;
 
-            int pict_linesize[3];
-            pict_linesize[0] = pitch;
-            pict_linesize[1] = pitch / 2;
-            pict_linesize[2] = pitch / 2;
 
-            sws_scale(mSwscaleCtx.get(), (const uint8_t**)frame->data,
-                frame->linesize, 0, h, pict_data, pict_linesize);
-            SDL_UnlockTexture(vp->mImage);
+        /* Show the picture! */
+        display(screen, renderer);
+    }
+
+    if(updated)
+    {
+        auto disp_time = get_avtime();
+
+        std::lock_guard<std::mutex> _{mDispPtsMutex};
+        mDisplayPts = vp->mPts;
+        mDisplayPtsTime = disp_time;
+    }
+    if(mEOS.load(std::memory_order_acquire))
+    {
+        if((read_idx+1)%mPictQ.size() == mPictQWrite.load(std::memory_order_acquire))
+        {
+            mFinalUpdate = true;
+            std::unique_lock<std::mutex>{mPictQMutex}.unlock();
+            mPictQCond.notify_one();
+        }
     }
-
-    vp->mUpdated.store(true, std::memory_order_release);
-    std::unique_lock<std::mutex>(mPictQMutex).unlock();
-    mPictQCond.notify_one();
 }
 
-int VideoState::queuePicture(nanoseconds pts)
+int VideoState::handler()
 {
-    /* Wait until we have space for a new pic */
-    std::unique_lock<std::mutex> lock(mPictQMutex);
-    while(mPictQSize >= mPictQ.size() && !mMovie.mQuit.load(std::memory_order_relaxed))
-        mPictQCond.wait(lock);
-    lock.unlock();
-
-    if(mMovie.mQuit.load(std::memory_order_relaxed))
-        return -1;
-
-    Picture *vp = &mPictQ[mPictQWrite];
-
-    /* We have to create/update the picture in the main thread */
-    vp->mUpdated.store(false, std::memory_order_relaxed);
-    SDL_Event evt{};
-    evt.user.type = FF_UPDATE_EVENT;
-    evt.user.data1 = this;
-    SDL_PushEvent(&evt);
+    std::for_each(mPictQ.begin(), mPictQ.end(),
+        [](Picture &pict) -> void
+        { pict.mFrame = AVFramePtr{av_frame_alloc()}; });
+
+    /* Prefill the codec buffer. */
+    do {
+        const int ret{mPackets.sendTo(mCodecCtx.get())};
+        if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+            break;
+    } while(1);
 
-    /* Wait until the picture is updated. */
-    lock.lock();
-    while(!vp->mUpdated.load(std::memory_order_relaxed))
     {
-        if(mMovie.mQuit.load(std::memory_order_relaxed))
-            return -1;
-        mPictQCond.wait(lock);
+        std::lock_guard<std::mutex> _{mDispPtsMutex};
+        mDisplayPtsTime = get_avtime();
     }
-    if(mMovie.mQuit.load(std::memory_order_relaxed))
-        return -1;
-    vp->mPts = pts;
-
-    mPictQWrite = (mPictQWrite+1)%mPictQ.size();
-    mPictQSize++;
-    lock.unlock();
-    return 0;
-}
-
-int VideoState::handler()
-{
-    mDecodedFrame.reset(av_frame_alloc());
+    auto current_pts = nanoseconds::zero();
     while(!mMovie.mQuit.load(std::memory_order_relaxed))
     {
-        std::unique_lock<std::mutex> lock(mQueueMtx);
-        /* Decode video frame */
-        int ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
-        if(ret == AVERROR(EAGAIN))
-        {
-            mMovie.mSendDataGood.clear(std::memory_order_relaxed);
-            std::unique_lock<std::mutex>(mMovie.mSendMtx).unlock();
-            mMovie.mSendCond.notify_one();
-            do {
-                mQueueCond.wait(lock);
-                ret = avcodec_receive_frame(mCodecCtx.get(), mDecodedFrame.get());
-            } while(ret == AVERROR(EAGAIN));
-        }
-        lock.unlock();
-        if(ret == AVERROR_EOF) break;
-        mMovie.mSendDataGood.clear(std::memory_order_relaxed);
-        mMovie.mSendCond.notify_one();
-        if(ret < 0)
+        size_t write_idx{mPictQWrite.load(std::memory_order_relaxed)};
+        Picture *vp{&mPictQ[write_idx]};
+
+        /* Retrieve video frame. */
+        AVFrame *decoded_frame{vp->mFrame.get()};
+        int ret;
+        while((ret=avcodec_receive_frame(mCodecCtx.get(), decoded_frame)) == AVERROR(EAGAIN))
+            mPackets.sendTo(mCodecCtx.get());
+        if(ret != 0)
         {
-            std::cerr<< "Failed to decode frame: "<<ret <<std::endl;
+            if(ret == AVERROR_EOF) break;
+            std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
             continue;
        }
 
         /* Get the PTS for this frame. */
-        nanoseconds pts;
-        if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
-            mClock = std::chrono::duration_cast<nanoseconds>(
-                seconds_d64(av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp)
-            );
-        pts = mClock;
+        if(decoded_frame->best_effort_timestamp != AV_NOPTS_VALUE)
+            current_pts = std::chrono::duration_cast<nanoseconds>(
+                seconds_d64{av_q2d(mStream->time_base)*decoded_frame->best_effort_timestamp});
+        vp->mPts = current_pts;
 
         /* Update the video clock to the next expected PTS. */
         auto frame_delay = av_q2d(mCodecCtx->time_base);
-        frame_delay += mDecodedFrame->repeat_pict * (frame_delay * 0.5);
-        mClock += std::chrono::duration_cast<nanoseconds>(seconds_d64(frame_delay));
+        frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
+        current_pts += std::chrono::duration_cast<nanoseconds>(seconds_d64{frame_delay});
 
-        if(queuePicture(pts) < 0)
-            break;
-        av_frame_unref(mDecodedFrame.get());
+        /* Put the frame in the queue to be loaded into a texture and displayed
+         * by the rendering thread.
+         */
+        write_idx = (write_idx+1)%mPictQ.size();
+        mPictQWrite.store(write_idx, std::memory_order_release);
+
+        /* Send a packet now so it's hopefully ready by the time it's needed. */
+        mPackets.sendTo(mCodecCtx.get());
+
+        if(write_idx == mPictQRead.load(std::memory_order_acquire))
+        {
+            /* Wait until we have space for a new pic */
+            std::unique_lock<std::mutex> lock{mPictQMutex};
+            while(write_idx == mPictQRead.load(std::memory_order_acquire) &&
+                  !mMovie.mQuit.load(std::memory_order_relaxed))
+                mPictQCond.wait(lock);
+        }
     }
     mEOS = true;
 
-    std::unique_lock<std::mutex> lock(mPictQMutex);
-    if(mMovie.mQuit.load(std::memory_order_relaxed))
-    {
-        mPictQRead = 0;
-        mPictQWrite = 0;
-        mPictQSize = 0;
-    }
-    while(!mFinalUpdate)
-        mPictQCond.wait(lock);
+    std::unique_lock<std::mutex> lock{mPictQMutex};
+    while(!mFinalUpdate) mPictQCond.wait(lock);
 
     return 0;
 }
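The picture queue that handler() fills is effectively a single-producer/single-consumer ring: the decoder stores a frame into the slot at mPictQWrite and publishes it with a release store, the renderer consumes with acquire loads, and the mutex/condvar pair only puts the producer to sleep while the ring is full. A standalone sketch of that index discipline (this sketch uses the common one-empty-slot convention and busy-yields instead of a condvar; the patch itself starts with read=0/write=1):

#include <array>
#include <atomic>
#include <cstddef>
#include <iostream>
#include <thread>

template<typename T, std::size_t N>
class SpscRing {
    std::array<T,N> mSlots{};
    std::atomic<std::size_t> mRead{0}, mWrite{0};

public:
    bool push(const T &value)
    {
        std::size_t w{mWrite.load(std::memory_order_relaxed)};
        std::size_t next{(w+1)%N};
        if(next == mRead.load(std::memory_order_acquire))
            return false; /* full */
        mSlots[w] = value;
        mWrite.store(next, std::memory_order_release); /* publish the slot */
        return true;
    }

    bool pop(T &out)
    {
        std::size_t r{mRead.load(std::memory_order_relaxed)};
        if(r == mWrite.load(std::memory_order_acquire))
            return false; /* empty */
        out = mSlots[r];
        mRead.store((r+1)%N, std::memory_order_release); /* free the slot */
        return true;
    }
};

int main()
{
    SpscRing<int,24> ring; /* VIDEO_PICTURE_QUEUE_SIZE is 24 in the patch */
    std::thread producer{[&ring]{
        for(int i{0};i < 100;++i)
            while(!ring.push(i)) std::this_thread::yield();
    }};
    int item{};
    for(int got{0};got < 100;)
    {
        if(ring.pop(item)) ++got;
        else std::this_thread::yield();
    }
    producer.join();
    std::cout<< "all frames consumed\n";
    return 0;
}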
@@ -1375,13 +1395,13 @@ int VideoState::handler()
 
 int MovieState::decode_interrupt_cb(void *ctx)
 {
-    return reinterpret_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
+    return static_cast<MovieState*>(ctx)->mQuit.load(std::memory_order_relaxed);
 }
 
 bool MovieState::prepare()
 {
-    AVIOContext *avioctx = nullptr;
-    AVIOInterruptCB intcb = { decode_interrupt_cb, this };
+    AVIOContext *avioctx{nullptr};
+    AVIOInterruptCB intcb{decode_interrupt_cb, this};
     if(avio_open2(&avioctx, mFilename.c_str(), AVIO_FLAG_READ, &intcb, nullptr))
     {
        std::cerr<< "Failed to open "<<mFilename <<std::endl;
@@ -1392,7 +1412,7 @@ bool MovieState::prepare()
     /* Open movie file. If avformat_open_input fails it will automatically free
      * this context, so don't set it onto a smart pointer yet.
      */
-    AVFormatContext *fmtctx = avformat_alloc_context();
+    AVFormatContext *fmtctx{avformat_alloc_context()};
     fmtctx->pb = mIOContext.get();
     fmtctx->interrupt_callback = intcb;
     if(avformat_open_input(&fmtctx, mFilename.c_str(), nullptr, nullptr) != 0)
@@ -1409,9 +1429,7 @@ bool MovieState::prepare()
         return false;
     }
 
-    mVideo.schedRefresh(milliseconds(40));
-
-    mParseThread = std::thread(std::mem_fn(&MovieState::parse_handler), this);
+    mParseThread = std::thread{std::mem_fn(&MovieState::parse_handler), this};
     return true;
 }
 
@@ -1427,7 +1445,7 @@ void MovieState::setTitle(SDL_Window *window)
 
 nanoseconds MovieState::getClock()
 {
-    if(!mPlaying.load(std::memory_order_relaxed))
+    if(mClockBase == microseconds::min())
         return nanoseconds::zero();
     return get_avtime() - mClockBase;
 }
@@ -1444,25 +1462,25 @@ nanoseconds MovieState::getMasterClock()
 
 nanoseconds MovieState::getDuration()
 { return std::chrono::duration<int64_t,std::ratio<1,AV_TIME_BASE>>(mFormatCtx->duration); }
 
-int MovieState::streamComponentOpen(int stream_index)
+int MovieState::streamComponentOpen(unsigned int stream_index)
 {
-    if(stream_index < 0 || (unsigned int)stream_index >= mFormatCtx->nb_streams)
+    if(stream_index >= mFormatCtx->nb_streams)
         return -1;
 
     /* Get a pointer to the codec context for the stream, and open the
     * associated codec.
     */
-    AVCodecCtxPtr avctx(avcodec_alloc_context3(nullptr));
+    AVCodecCtxPtr avctx{avcodec_alloc_context3(nullptr)};
     if(!avctx) return -1;
 
     if(avcodec_parameters_to_context(avctx.get(), mFormatCtx->streams[stream_index]->codecpar))
         return -1;
 
-    AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
+    AVCodec *codec{avcodec_find_decoder(avctx->codec_id)};
     if(!codec || avcodec_open2(avctx.get(), codec, nullptr) < 0)
     {
         std::cerr<< "Unsupported codec: "<<avcodec_get_name(avctx->codec_id)
-                 << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
+            << " (0x"<<std::hex<<avctx->codec_id<<std::dec<<")" <<std::endl;
         return -1;
     }
 
@@ -1472,37 +1490,36 @@ int MovieState::streamComponentOpen(int stream_index)
     case AVMEDIA_TYPE_AUDIO:
         mAudio.mStream = mFormatCtx->streams[stream_index];
         mAudio.mCodecCtx = std::move(avctx);
-
-        mAudioThread = std::thread(std::mem_fn(&AudioState::handler), &mAudio);
         break;
 
     case AVMEDIA_TYPE_VIDEO:
         mVideo.mStream = mFormatCtx->streams[stream_index];
         mVideo.mCodecCtx = std::move(avctx);
-
-        mVideoThread = std::thread(std::mem_fn(&VideoState::handler), &mVideo);
         break;
 
     default:
         return -1;
     }
 
-    return stream_index;
+    return static_cast<int>(stream_index);
 }
 
 int MovieState::parse_handler()
 {
-    int video_index = -1;
-    int audio_index = -1;
+    auto &audio_queue = mAudio.mPackets;
+    auto &video_queue = mVideo.mPackets;
+
+    int video_index{-1};
+    int audio_index{-1};
 
     /* Dump information about file onto standard error */
     av_dump_format(mFormatCtx.get(), 0, mFilename.c_str(), 0);
 
     /* Find the first video and audio streams */
-    for(unsigned int i = 0;i < mFormatCtx->nb_streams;i++)
+    for(unsigned int i{0u};i < mFormatCtx->nb_streams;i++)
     {
         auto codecpar = mFormatCtx->streams[i]->codecpar;
-        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0)
+        if(codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !DisableVideo && video_index < 0)
             video_index = streamComponentOpen(i);
         else if(codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0)
             audio_index = streamComponentOpen(i);
@@ -1514,95 +1531,38 @@ int MovieState::parse_handler()
         mQuit = true;
     }
 
-    PacketQueue audio_queue, video_queue;
-    bool input_finished = false;
+    /* Set the base time 750ms ahead of the current av time. */
+    mClockBase = get_avtime() + milliseconds{750};
+
+    if(audio_index >= 0)
+        mAudioThread = std::thread{std::mem_fn(&AudioState::handler), &mAudio};
+    if(video_index >= 0)
+        mVideoThread = std::thread{std::mem_fn(&VideoState::handler), &mVideo};
 
     /* Main packet reading/dispatching loop */
-    while(!mQuit.load(std::memory_order_relaxed) && !input_finished)
+    while(!mQuit.load(std::memory_order_relaxed))
     {
         AVPacket packet;
         if(av_read_frame(mFormatCtx.get(), &packet) < 0)
-            input_finished = true;
-        else
-        {
-            /* Copy the packet into the queue it's meant for. */
-            if(packet.stream_index == video_index)
-                video_queue.put(&packet);
-            else if(packet.stream_index == audio_index)
-                audio_queue.put(&packet);
-            av_packet_unref(&packet);
-        }
-
-        do {
-            /* Send whatever queued packets we have. */
-            if(!audio_queue.empty())
-            {
-                std::unique_lock<std::mutex> lock(mAudio.mQueueMtx);
-                int ret;
-                do {
-                    ret = avcodec_send_packet(mAudio.mCodecCtx.get(), audio_queue.front());
-                    if(ret != AVERROR(EAGAIN)) audio_queue.pop();
-                } while(ret != AVERROR(EAGAIN) && !audio_queue.empty());
-                lock.unlock();
-                mAudio.mQueueCond.notify_one();
-            }
-            if(!video_queue.empty())
-            {
-                std::unique_lock<std::mutex> lock(mVideo.mQueueMtx);
-                int ret;
-                do {
-                    ret = avcodec_send_packet(mVideo.mCodecCtx.get(), video_queue.front());
-                    if(ret != AVERROR(EAGAIN)) video_queue.pop();
-                } while(ret != AVERROR(EAGAIN) && !video_queue.empty());
-                lock.unlock();
-                mVideo.mQueueCond.notify_one();
-            }
-            /* If the queues are completely empty, or it's not full and there's
-             * more input to read, go get more.
-             */
-            size_t queue_size = audio_queue.totalSize() + video_queue.totalSize();
-            if(queue_size == 0 || (queue_size < MAX_QUEUE_SIZE && !input_finished))
-                break;
+            break;
 
-            if(!mPlaying.load(std::memory_order_relaxed))
-            {
-                if((!mAudio.mCodecCtx || mAudio.isBufferFilled()) &&
-                   (!mVideo.mCodecCtx || mVideo.isBufferFilled()))
-                {
-                    /* Set the base time 50ms ahead of the current av time. */
-                    mClockBase = get_avtime() + milliseconds(50);
-                    mVideo.mCurrentPtsTime = mClockBase;
-                    mVideo.mFrameTimer = mVideo.mCurrentPtsTime;
-                    mAudio.startPlayback();
-                    mPlaying.store(std::memory_order_release);
-                }
-            }
-            /* Nothing to send or get for now, wait a bit and try again. */
-            { std::unique_lock<std::mutex> lock(mSendMtx);
-                if(mSendDataGood.test_and_set(std::memory_order_relaxed))
-                    mSendCond.wait_for(lock, milliseconds(10));
-            }
-        } while(!mQuit.load(std::memory_order_relaxed));
-    }
-    /* Pass a null packet to finish the send buffers (the receive functions
-     * will get AVERROR_EOF when emptied).
-     */
-    if(mVideo.mCodecCtx)
-    {
-        { std::lock_guard<std::mutex> lock(mVideo.mQueueMtx);
-            avcodec_send_packet(mVideo.mCodecCtx.get(), nullptr);
+        /* Copy the packet into the queue it's meant for. */
+        if(packet.stream_index == video_index)
+        {
+            while(!mQuit.load(std::memory_order_acquire) && !video_queue.put(&packet))
+                std::this_thread::sleep_for(milliseconds{100});
         }
-        mVideo.mQueueCond.notify_one();
-    }
-    if(mAudio.mCodecCtx)
-    {
-        { std::lock_guard<std::mutex> lock(mAudio.mQueueMtx);
-            avcodec_send_packet(mAudio.mCodecCtx.get(), nullptr);
+        else if(packet.stream_index == audio_index)
+        {
+            while(!mQuit.load(std::memory_order_acquire) && !audio_queue.put(&packet))
+                std::this_thread::sleep_for(milliseconds{100});
         }
-        mAudio.mQueueCond.notify_one();
+
+        av_packet_unref(&packet);
     }
-    video_queue.clear();
-    audio_queue.clear();
+    /* Finish the queues so the receivers know nothing more is coming. */
+    if(mVideo.mCodecCtx) video_queue.setFinished();
+    if(mAudio.mCodecCtx) audio_queue.setFinished();
 
     /* all done - wait for it */
     if(mVideoThread.joinable())
         mVideoThread.join();
     if(mAudioThread.joinable())
         mAudioThread.join();
 
     mVideo.mEOS = true;
-    std::unique_lock<std::mutex> lock(mVideo.mPictQMutex);
+    std::unique_lock<std::mutex> lock{mVideo.mPictQMutex};
     while(!mVideo.mFinalUpdate)
         mVideo.mPictQCond.wait(lock);
     lock.unlock();
@@ -1628,13 +1588,13 @@
 
 struct PrettyTime {
     seconds mTime;
 };
-inline std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
+std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
 {
     using hours = std::chrono::hours;
     using minutes = std::chrono::minutes;
     using std::chrono::duration_cast;
 
-    seconds t = rhs.mTime;
+    seconds t{rhs.mTime};
     if(t.count() < 0)
     {
         os << '-';
@@ -1642,7 +1602,7 @@
         t *= -1;
     }
 
     // Only handle up to hour formatting
-    if(t >= hours(1))
+    if(t >= hours{1})
         os << duration_cast<hours>(t).count() << 'h' << std::setfill('0') << std::setw(2)
            << (duration_cast<minutes>(t).count() % 60) << 'm';
     else
@@ -1665,36 +1625,38 @@ int main(int argc, char *argv[])
         return 1;
     }
     /* Register all formats and codecs */
+#if !(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(58, 9, 100))
     av_register_all();
+#endif
     /* Initialize networking protocols */
     avformat_network_init();
 
-    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER))
+    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS))
     {
         std::cerr<< "Could not initialize SDL - "<<SDL_GetError() <<std::endl;
         return 1;
     }
 
     /* Make a window to put our video */
-    SDL_Window *screen = SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE);
+    SDL_Window *screen{SDL_CreateWindow(AppName.c_str(), 0, 0, 640, 480, SDL_WINDOW_RESIZABLE)};
     if(!screen)
     {
         std::cerr<< "SDL: could not set video mode - exiting" <<std::endl;
         return 1;
     }
     /* Make a renderer to handle the texture image surface and rendering. */
-    Uint32 render_flags = SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC;
-    SDL_Renderer *renderer = SDL_CreateRenderer(screen, -1, render_flags);
+    Uint32 render_flags{SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC};
+    SDL_Renderer *renderer{SDL_CreateRenderer(screen, -1, render_flags)};
     if(renderer)
     {
         SDL_RendererInfo rinf{};
-        bool ok = false;
+        bool ok{false};
 
         /* Make sure the renderer supports IYUV textures. If not, fallback to a
         * software renderer.
         */
        if(SDL_GetRendererInfo(renderer, &rinf) == 0)
         {
-            for(Uint32 i = 0;!ok && i < rinf.num_texture_formats;i++)
+            for(Uint32 i{0u};!ok && i < rinf.num_texture_formats;i++)
                 ok = (rinf.texture_formats[i] == SDL_PIXELFORMAT_IYUV);
         }
         if(!ok)
@@ -1726,7 +1688,8 @@ int main(int argc, char *argv[])
         return 1;
     }
 
-    { auto device = alcGetContextsDevice(alcGetCurrentContext());
+    {
+        auto device = alcGetContextsDevice(alcGetCurrentContext());
         if(alcIsExtensionPresent(device, "ALC_SOFT_device_clock"))
         {
             std::cout<< "Found ALC_SOFT_device_clock" <<std::endl;
@@ -1766,7 +1729,7 @@ int main(int argc, char *argv[])
     }
 #endif
 
-    int fileidx = 0;
+    int fileidx{0};
     for(;fileidx < argc;++fileidx)
     {
         if(strcmp(argv[fileidx], "-direct") == 0)
@@ -1789,13 +1752,15 @@ int main(int argc, char *argv[])
                 EnableWideStereo = true;
             }
         }
+        else if(strcmp(argv[fileidx], "-novideo") == 0)
+            DisableVideo = true;
         else
             break;
     }
 
     while(fileidx < argc && !movState)
     {
-        movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
+        movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
         if(!movState->prepare()) movState = nullptr;
     }
     if(!movState)
@@ -1808,106 +1773,103 @@ int main(int argc, char *argv[])
     /* Default to going to the next movie at the end of one. */
     enum class EomAction {
         Next, Quit
-    } eom_action = EomAction::Next;
-    seconds last_time(-1);
-    SDL_Event event;
+    } eom_action{EomAction::Next};
+    seconds last_time{seconds::min()};
     while(1)
     {
-        int have_evt = SDL_WaitEventTimeout(&event, 10);
+        SDL_Event event{};
+        int have_evt{SDL_WaitEventTimeout(&event, 10)};
 
         auto cur_time = std::chrono::duration_cast<seconds>(movState->getMasterClock());
         if(cur_time != last_time)
        {
             auto end_time = std::chrono::duration_cast<seconds>(movState->getDuration());
-            std::cout<< "\r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
+            std::cout<< " \r "<<PrettyTime{cur_time}<<" / "<<PrettyTime{end_time} <<std::flush;
             last_time = cur_time;
         }
-        if(!have_evt) continue;
 
-        switch(event.type)
-        {
-        case SDL_KEYDOWN:
-            switch(event.key.keysym.sym)
-            {
-            case SDLK_ESCAPE:
-                movState->mQuit = true;
-                eom_action = EomAction::Quit;
-                break;
-
-            case SDLK_n:
-                movState->mQuit = true;
-                eom_action = EomAction::Next;
-                break;
-
-            default:
-                break;
-            }
-            break;
-
-        case SDL_WINDOWEVENT:
-            switch(event.window.event)
-            {
-            case SDL_WINDOWEVENT_RESIZED:
-                SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
-                SDL_RenderFillRect(renderer, nullptr);
-                break;
-
-            default:
-                break;
-            }
-            break;
-
-        case SDL_QUIT:
-            movState->mQuit = true;
-            eom_action = EomAction::Quit;
-            break;
-
-        case FF_UPDATE_EVENT:
-            reinterpret_cast<VideoState*>(event.user.data1)->updatePicture(
-                screen, renderer
-            );
-            break;
-
-        case FF_REFRESH_EVENT:
-            reinterpret_cast<VideoState*>(event.user.data1)->refreshTimer(
-                screen, renderer
-            );
-            break;
+        bool force_redraw{false};
+        if(have_evt) do {
+            switch(event.type)
+            {
+            case SDL_KEYDOWN:
+                switch(event.key.keysym.sym)
+                {
+                case SDLK_ESCAPE:
+                    movState->mQuit = true;
+                    eom_action = EomAction::Quit;
+                    break;
+
+                case SDLK_n:
+                    movState->mQuit = true;
+                    eom_action = EomAction::Next;
+                    break;
+
+                default:
+                    break;
+                }
+                break;
 
-        case FF_MOVIE_DONE_EVENT:
-            std::cout<<'\n';
-            last_time = seconds(-1);
-            if(eom_action != EomAction::Quit)
-            {
-                movState = nullptr;
-                while(fileidx < argc && !movState)
+            case SDL_WINDOWEVENT:
+                switch(event.window.event)
                 {
-                    movState = std::unique_ptr<MovieState>(new MovieState(argv[fileidx++]));
-                    if(!movState->prepare()) movState = nullptr;
+                case SDL_WINDOWEVENT_RESIZED:
+                    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
+                    SDL_RenderFillRect(renderer, nullptr);
+                    force_redraw = true;
+                    break;
+
+                case SDL_WINDOWEVENT_EXPOSED:
+                    force_redraw = true;
+                    break;
+
+                default:
+                    break;
                 }
-                if(movState)
+                break;
+
+            case SDL_QUIT:
+                movState->mQuit = true;
+                eom_action = EomAction::Quit;
+                break;
+
+            case FF_MOVIE_DONE_EVENT:
+                std::cout<<'\n';
+                last_time = seconds::min();
+                if(eom_action != EomAction::Quit)
                 {
-                    movState->setTitle(screen);
-                    break;
+                    movState = nullptr;
+                    while(fileidx < argc && !movState)
+                    {
+                        movState = std::unique_ptr<MovieState>{new MovieState{argv[fileidx++]}};
+                        if(!movState->prepare()) movState = nullptr;
+                    }
+                    if(movState)
+                    {
+                        movState->setTitle(screen);
+                        break;
+                    }
                 }
-            }
 
-            /* Nothing more to play. Shut everything down and quit. */
-            movState = nullptr;
+                /* Nothing more to play. Shut everything down and quit. */
+                movState = nullptr;
 
-            CloseAL();
+                CloseAL();
 
-            SDL_DestroyRenderer(renderer);
-            renderer = nullptr;
-            SDL_DestroyWindow(screen);
-            screen = nullptr;
+                SDL_DestroyRenderer(renderer);
+                renderer = nullptr;
+                SDL_DestroyWindow(screen);
+                screen = nullptr;
 
-            SDL_Quit();
-            exit(0);
+                SDL_Quit();
+                exit(0);
 
-        default:
-            break;
-        }
+            default:
+                break;
+            }
+        } while(SDL_PollEvent(&event));
+
+        movState->mVideo.updateVideo(screen, renderer, force_redraw);
     }
     std::cerr<< "SDL_WaitEvent error - "<<SDL_GetError() <<std::endl;
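With this change the example also accepts a -novideo switch alongside the existing -direct option (and the wide-stereo toggle), so an audio-only run would look something like the following; the file name is a placeholder:

alffplay -novideo some-movie.mkv
alffplay -direct some-movie.mkv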