From 15bebb8fb67cb0b33a9cdb02aea484b67fd86367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Fri, 12 Apr 2024 12:17:26 +0200 Subject: [PATCH 01/10] Remove unnecessary downcast to `CGraphics_Threaded` --- src/engine/client/client.cpp | 2 +- src/engine/client/video.cpp | 4 ++-- src/engine/client/video.h | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/engine/client/client.cpp b/src/engine/client/client.cpp index 1e19546d9..2b4472a6a 100644 --- a/src/engine/client/client.cpp +++ b/src/engine/client/client.cpp @@ -3301,7 +3301,7 @@ void CClient::StartVideo(const char *pFilename, bool WithTimestamp) Graphics()->WaitForIdle(); // pause the sound device while creating the video instance Sound()->PauseAudioDevice(); - new CVideo((CGraphics_Threaded *)m_pGraphics, Sound(), Storage(), Graphics()->ScreenWidth(), Graphics()->ScreenHeight(), aFilename); + new CVideo(Graphics(), Sound(), Storage(), Graphics()->ScreenWidth(), Graphics()->ScreenHeight(), aFilename); Sound()->UnpauseAudioDevice(); IVideo::Current()->Start(); if(m_DemoPlayer.Info()->m_Info.m_Paused) diff --git a/src/engine/client/video.cpp b/src/engine/client/video.cpp index 4dd32c689..d353e1388 100644 --- a/src/engine/client/video.cpp +++ b/src/engine/client/video.cpp @@ -2,7 +2,7 @@ #include "video.h" -#include +#include #include #include #include @@ -34,7 +34,7 @@ using namespace std::chrono_literals; const size_t FORMAT_GL_NCHANNELS = 4; CLock g_WriteLock; -CVideo::CVideo(CGraphics_Threaded *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, const char *pName) : +CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, const char *pName) : m_pGraphics(pGraphics), m_pStorage(pStorage), m_pSound(pSound) diff --git a/src/engine/client/video.h b/src/engine/client/video.h index 0575cdee7..6a4364d5e 100644 --- a/src/engine/client/video.h +++ b/src/engine/client/video.h @@ -17,7 +17,7 @@ extern "C" { #include 
#define ALEN 2048 -class CGraphics_Threaded; +class IGraphics; class ISound; class IStorage; @@ -44,7 +44,7 @@ struct OutputStream class CVideo : public IVideo { public: - CVideo(CGraphics_Threaded *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, const char *pName); + CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, const char *pName); ~CVideo(); void Start() override REQUIRES(!g_WriteLock); @@ -81,7 +81,7 @@ private: bool AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCodec **ppCodec, enum AVCodecID CodecId) const; - CGraphics_Threaded *m_pGraphics; + IGraphics *m_pGraphics; IStorage *m_pStorage; ISound *m_pSound; From c4c1518709e35bec38db7a478c11ac8b76e5df7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Fri, 12 Apr 2024 12:27:11 +0200 Subject: [PATCH 02/10] Remove dead code --- src/engine/client/video.cpp | 14 -------------- src/engine/client/video.h | 1 - 2 files changed, 15 deletions(-) diff --git a/src/engine/client/video.cpp b/src/engine/client/video.cpp index d353e1388..c8de0d882 100644 --- a/src/engine/client/video.cpp +++ b/src/engine/client/video.cpp @@ -275,7 +275,6 @@ void CVideo::Stop() if(m_HasAudio) CloseStream(&m_AudioStream); - //fclose(m_dbgfile); if(!(m_pFormat->flags & AVFMT_NOFILE)) avio_closep(&m_pFormatContext->pb); @@ -294,9 +293,6 @@ void CVideo::NextVideoFrameThread() { if(m_Recording) { - // #ifdef CONF_PLATFORM_MACOS - // CAutoreleasePool AutoreleasePool; - // #endif m_VSeq += 1; if(m_VSeq >= 2) { @@ -318,8 +314,6 @@ void CVideo::NextVideoFrameThread() } } - //dbg_msg("video_recorder", "vframe: %d", m_VideoStream.pEnc->FRAME_NUM); - // after reading the graphic libraries' frame buffer, go threaded { auto *pVideoThread = m_vVideoThreads[m_CurVideoThreadIndex].get(); @@ -344,9 +338,6 @@ void CVideo::NextVideoFrameThread() if(m_CurVideoThreadIndex == m_VideoThreads) m_CurVideoThreadIndex = 0; } - - // sync_barrier(); - // 
m_Semaphore.signal(); } } @@ -364,7 +355,6 @@ void CVideo::NextAudioFrameTimeline(ISoundMixFunc Mix) { if(m_Recording && m_HasAudio) { - //if(m_VideoStream.pEnc->FRAME_NUM * (double)m_AudioStream.pEnc->sample_rate / m_FPS >= (double)m_AudioStream.pEnc->FRAME_NUM * m_AudioStream.pEnc->frame_size) double SamplesPerFrame = (double)m_AudioStream.pEnc->sample_rate / m_FPS; while(m_AudioStream.m_SamplesFrameCount >= m_AudioStream.m_SamplesCount) { @@ -491,9 +481,6 @@ void CVideo::FillAudioFrame(size_t ThreadIndex) 0 // align ); - // dbg_msg("video_recorder", "DstNbSamples: %d", DstNbSamples); - // fwrite(m_aBuffer, sizeof(short), 2048, m_dbgfile); - int Ret = av_frame_make_writable(m_AudioStream.m_vpFrames[ThreadIndex]); if(Ret < 0) { @@ -709,7 +696,6 @@ bool CVideo::OpenAudio() pContext = m_AudioStream.pEnc; /* open it */ - //m_dbgfile = fopen("/tmp/pcm_dbg", "wb"); av_dict_copy(&pOptions, m_pOptDict, 0); Ret = avcodec_open2(pContext, m_pAudioCodec, &pOptions); av_dict_free(&pOptions); diff --git a/src/engine/client/video.h b/src/engine/client/video.h index 6a4364d5e..77904b95e 100644 --- a/src/engine/client/video.h +++ b/src/engine/client/video.h @@ -88,7 +88,6 @@ private: int m_Width; int m_Height; char m_aName[256]; - //FILE *m_dbgfile; uint64_t m_VSeq = 0; uint64_t m_ASeq = 0; uint64_t m_Vframe; From c78eebfc679230ec744c60977401a4877e479540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Fri, 12 Apr 2024 12:44:25 +0200 Subject: [PATCH 03/10] Use `nullptr` instead of `0` and `NULL` --- src/engine/client/video.cpp | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/engine/client/video.cpp b/src/engine/client/video.cpp index c8de0d882..d9b8d7825 100644 --- a/src/engine/client/video.cpp +++ b/src/engine/client/video.cpp @@ -39,12 +39,12 @@ CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Wid m_pStorage(pStorage), m_pSound(pSound) { - m_pFormatContext = 0; - m_pFormat = 0; - 
m_pOptDict = 0; + m_pFormatContext = nullptr; + m_pFormat = nullptr; + m_pOptDict = nullptr; - m_pVideoCodec = 0; - m_pAudioCodec = 0; + m_pVideoCodec = nullptr; + m_pAudioCodec = nullptr; m_Width = Width; m_Height = Height; @@ -59,7 +59,7 @@ CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Wid m_HasAudio = g_Config.m_ClVideoSndEnable; - dbg_assert(ms_pCurrentVideo == 0, "ms_pCurrentVideo is NOT set to NULL while creating a new Video."); + dbg_assert(ms_pCurrentVideo == nullptr, "ms_pCurrentVideo is NOT set to nullptr while creating a new Video."); ms_TickTime = time_freq() / m_FPS; ms_pCurrentVideo = this; @@ -67,7 +67,7 @@ CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Wid CVideo::~CVideo() { - ms_pCurrentVideo = 0; + ms_pCurrentVideo = nullptr; } void CVideo::Start() @@ -473,7 +473,7 @@ void CVideo::FillAudioFrame(size_t ThreadIndex) { av_samples_fill_arrays( (uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data, - 0, // pointer to linesize (int*) + nullptr, // pointer to linesize (int*) (const uint8_t *)m_vBuffer[ThreadIndex].m_aBuffer, 2, // channels m_AudioStream.m_vpTmpFrames[ThreadIndex]->nb_samples, @@ -570,7 +570,7 @@ AVFrame *CVideo::AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height) pPicture = av_frame_alloc(); if(!pPicture) - return NULL; + return nullptr; pPicture->format = PixFmt; pPicture->width = Width; @@ -624,8 +624,7 @@ bool CVideo::OpenVideo() { int Ret; AVCodecContext *pContext = m_VideoStream.pEnc; - AVDictionary *pOptions = 0; - + AVDictionary *pOptions = nullptr; av_dict_copy(&pOptions, m_pOptDict, 0); /* open the codec */ @@ -691,7 +690,7 @@ bool CVideo::OpenAudio() AVCodecContext *pContext; int NbSamples; int Ret; - AVDictionary *pOptions = NULL; + AVDictionary *pOptions = nullptr; pContext = m_AudioStream.pEnc; From 33c5bfb09b74939cfbd345541bfc93c76db13dad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Fri, 12 Apr 2024 12:53:37 +0200 
Subject: [PATCH 04/10] Fix indentation of comments --- src/engine/client/video.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/engine/client/video.cpp b/src/engine/client/video.cpp index d9b8d7825..d6b6b339a 100644 --- a/src/engine/client/video.cpp +++ b/src/engine/client/video.cpp @@ -856,9 +856,9 @@ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCode pContext->width = m_Width; pContext->height = m_Height % 2 == 0 ? m_Height : m_Height - 1; /* timebase: This is the fundamental unit of time (in seconds) in terms - * of which frame timestamps are represented. For fixed-fps content, - * timebase should be 1/framerate and timestamp increments should be - * identical to 1. */ + * of which frame timestamps are represented. For fixed-fps content, + * timebase should be 1/framerate and timestamp increments should be + * identical to 1. */ pStream->pSt->time_base.num = 1; pStream->pSt->time_base.den = m_FPS; pContext->time_base = pStream->pSt->time_base; @@ -873,8 +873,8 @@ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCode if(pContext->codec_id == AV_CODEC_ID_MPEG1VIDEO) { /* Needed to avoid using macroblocks in which some coeffs overflow. - * This does not happen with normal video, it just happens here as - * the motion of the chroma plane does not match the luma plane. */ + * This does not happen with normal video, it just happens here as + * the motion of the chroma plane does not match the luma plane. */ pContext->mb_decision = 2; } if(CodecId == AV_CODEC_ID_H264) From e595545cd6fb64dc13bb25293031968928d7c9e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Sat, 4 May 2024 12:26:51 +0200 Subject: [PATCH 05/10] Fix duplicate file extension `.mp4` for demos rendered from menu The file extension is always added in `CClient::StartVideo` now. 
--- src/game/client/components/menus.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/game/client/components/menus.cpp b/src/game/client/components/menus.cpp index 3dd902c74..8c109bdbd 100644 --- a/src/game/client/components/menus.cpp +++ b/src/game/client/components/menus.cpp @@ -1962,8 +1962,6 @@ void CMenus::PopupConfirmDemoReplaceVideo() str_format(aBuf, sizeof(aBuf), "%s/%s.demo", m_aCurrentDemoFolder, m_aCurrentDemoSelectionName); char aVideoName[IO_MAX_PATH_LENGTH]; str_copy(aVideoName, m_DemoRenderInput.GetString()); - if(!str_endswith(aVideoName, ".mp4")) - str_append(aVideoName, ".mp4"); const char *pError = Client()->DemoPlayer_Render(aBuf, m_DemolistStorageType, aVideoName, m_Speed, m_StartPaused); m_Speed = 4; m_StartPaused = false; From 9d2c11d7ee671fa02d08e99040e80ae78c6d3f78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Fri, 12 Apr 2024 12:28:51 +0200 Subject: [PATCH 06/10] Improve error handling and logging of video recorder Use `log_error` for all errors and consistently format all error messages. Handle all ffmpeg errors and output the formatted ffmpeg error message when possible. Register a log callback for ffmpeg log messages to delegate them to our logging system, to fix the log messages being interleaved with our log messages and not using the correct line breaks on Windows. Stop video and demo immediately and show an error message popup if the video could not be started successfully. Remove unnecessary debug output from ffmpeg. 
--- src/engine/client/client.cpp | 7 +- src/engine/client/video.cpp | 319 +++++++++++++--------- src/engine/client/video.h | 4 +- src/engine/demo.h | 1 + src/engine/shared/demo.h | 4 +- src/engine/shared/video.h | 2 +- src/game/client/components/menus_demo.cpp | 9 +- 7 files changed, 210 insertions(+), 136 deletions(-) diff --git a/src/engine/client/client.cpp b/src/engine/client/client.cpp index 2b4472a6a..370775d4a 100644 --- a/src/engine/client/client.cpp +++ b/src/engine/client/client.cpp @@ -3303,7 +3303,12 @@ void CClient::StartVideo(const char *pFilename, bool WithTimestamp) Sound()->PauseAudioDevice(); new CVideo(Graphics(), Sound(), Storage(), Graphics()->ScreenWidth(), Graphics()->ScreenHeight(), aFilename); Sound()->UnpauseAudioDevice(); - IVideo::Current()->Start(); + if(!IVideo::Current()->Start()) + { + log_error("videorecorder", "Failed to start recording to '%s'", aFilename); + m_DemoPlayer.Stop("Failed to start video recording. See local console for details."); + return; + } if(m_DemoPlayer.Info()->m_Info.m_Paused) { IVideo::Current()->Pause(true); diff --git a/src/engine/client/video.cpp b/src/engine/client/video.cpp index d6b6b339a..9de6af37f 100644 --- a/src/engine/client/video.cpp +++ b/src/engine/client/video.cpp @@ -2,6 +2,8 @@ #include "video.h" +#include + #include #include #include @@ -34,6 +36,46 @@ using namespace std::chrono_literals; const size_t FORMAT_GL_NCHANNELS = 4; CLock g_WriteLock; +static LEVEL AvLevelToLogLevel(int Level) +{ + switch(Level) + { + case AV_LOG_PANIC: + case AV_LOG_FATAL: + case AV_LOG_ERROR: + return LEVEL_ERROR; + case AV_LOG_WARNING: + return LEVEL_WARN; + case AV_LOG_INFO: + return LEVEL_INFO; + case AV_LOG_VERBOSE: + case AV_LOG_DEBUG: + return LEVEL_DEBUG; + case AV_LOG_TRACE: + return LEVEL_TRACE; + default: + dbg_assert(false, "invalid log level"); + dbg_break(); + } +} + +void AvLogCallback(void *pUser, int Level, const char *pFormat, va_list VarArgs) + GNUC_ATTRIBUTE((format(printf, 3, 0))); + +void 
AvLogCallback(void *pUser, int Level, const char *pFormat, va_list VarArgs) +{ + const LEVEL LogLevel = AvLevelToLogLevel(Level); + if(LogLevel <= LEVEL_INFO) + { + log_log_v(LogLevel, "videorecorder/libav", pFormat, VarArgs); + } +} + +void CVideo::Init() +{ + av_log_set_callback(AvLogCallback); +} + CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, const char *pName) : m_pGraphics(pGraphics), m_pStorage(pStorage), @@ -70,7 +112,7 @@ CVideo::~CVideo() ms_pCurrentVideo = nullptr; } -void CVideo::Start() +bool CVideo::Start() { // wait for the graphic thread to idle m_pGraphics->WaitForIdle(); @@ -78,24 +120,25 @@ void CVideo::Start() m_AudioStream = {}; m_VideoStream = {}; - char aWholePath[1024]; + char aWholePath[IO_MAX_PATH_LENGTH]; IOHANDLE File = m_pStorage->OpenFile(m_aName, IOFLAG_WRITE, IStorage::TYPE_SAVE, aWholePath, sizeof(aWholePath)); - if(File) { io_close(File); } else { - dbg_msg("video_recorder", "Failed to open file for recoding video."); - return; + log_error("videorecorder", "Could not open file '%s'", aWholePath); + return false; } - avformat_alloc_output_context2(&m_pFormatContext, 0, "mp4", aWholePath); - if(!m_pFormatContext) + const int FormatAllocResult = avformat_alloc_output_context2(&m_pFormatContext, nullptr, "mp4", aWholePath); + if(FormatAllocResult < 0 || !m_pFormatContext) { - dbg_msg("video_recorder", "Failed to create formatcontext for recoding video."); - return; + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(FormatAllocResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not create format context: %s", aError); + return false; } m_pFormat = m_pFormatContext->oformat; @@ -127,21 +170,26 @@ void CVideo::Start() if(m_pFormat->video_codec != AV_CODEC_ID_NONE) { if(!AddStream(&m_VideoStream, m_pFormatContext, &m_pVideoCodec, m_pFormat->video_codec)) - return; + return false; } else { - dbg_msg("video_recorder", "Failed to add VideoStream for recoding video."); + 
log_error("videorecorder", "Could not determine default video stream codec"); + return false; } - if(m_HasAudio && m_pFormat->audio_codec != AV_CODEC_ID_NONE) + if(m_HasAudio) { - if(!AddStream(&m_AudioStream, m_pFormatContext, &m_pAudioCodec, m_pFormat->audio_codec)) - return; - } - else - { - dbg_msg("video_recorder", "No audio."); + if(m_pFormat->audio_codec != AV_CODEC_ID_NONE) + { + if(!AddStream(&m_AudioStream, m_pFormatContext, &m_pAudioCodec, m_pFormat->audio_codec)) + return false; + } + else + { + log_error("videorecorder", "Could not determine default audio stream codec"); + return false; + } } m_vVideoThreads.resize(m_VideoThreads); @@ -171,25 +219,21 @@ void CVideo::Start() /* Now that all the parameters are set, we can open the audio and * video codecs and allocate the necessary encode buffers. */ if(!OpenVideo()) - return; + return false; - if(m_HasAudio) - if(!OpenAudio()) - return; - - // TODO: remove/comment: - av_dump_format(m_pFormatContext, 0, aWholePath, 1); + if(m_HasAudio && !OpenAudio()) + return false; /* open the output file, if needed */ if(!(m_pFormat->flags & AVFMT_NOFILE)) { - int Ret = avio_open(&m_pFormatContext->pb, aWholePath, AVIO_FLAG_WRITE); - if(Ret < 0) + const int OpenResult = avio_open(&m_pFormatContext->pb, aWholePath, AVIO_FLAG_WRITE); + if(OpenResult < 0) { char aError[AV_ERROR_MAX_STRING_SIZE]; - av_strerror(Ret, aError, sizeof(aError)); - dbg_msg("video_recorder", "Could not open '%s': %s", aWholePath, aError); - return; + av_strerror(OpenResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not open file '%s': %s", aWholePath, aError); + return false; } } @@ -211,18 +255,20 @@ void CVideo::Start() } /* Write the stream header, if any. 
*/ - int Ret = avformat_write_header(m_pFormatContext, &m_pOptDict); - if(Ret < 0) + const int WriteHeaderResult = avformat_write_header(m_pFormatContext, &m_pOptDict); + if(WriteHeaderResult < 0) { char aError[AV_ERROR_MAX_STRING_SIZE]; - av_strerror(Ret, aError, sizeof(aError)); - dbg_msg("video_recorder", "Error occurred when opening output file: %s", aError); - return; + av_strerror(WriteHeaderResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not write header: %s", aError); + return false; } + m_Recording = true; m_Started = true; ms_Time = time_get(); m_Vframe = 0; + return true; } void CVideo::Pause(bool Pause) @@ -303,7 +349,6 @@ void CVideo::NextVideoFrameThread() NextVideoThreadIndex = 0; // always wait for the next video thread too, to prevent a dead lock - { auto *pVideoThread = m_vVideoThreads[NextVideoThreadIndex].get(); std::unique_lock Lock(pVideoThread->m_Mutex); @@ -471,7 +516,7 @@ void CVideo::RunAudioThread(size_t ParentThreadIndex, size_t ThreadIndex) void CVideo::FillAudioFrame(size_t ThreadIndex) { - av_samples_fill_arrays( + const int FillArrayResult = av_samples_fill_arrays( (uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data, nullptr, // pointer to linesize (int*) (const uint8_t *)m_vBuffer[ThreadIndex].m_aBuffer, @@ -480,25 +525,35 @@ void CVideo::FillAudioFrame(size_t ThreadIndex) AV_SAMPLE_FMT_S16, 0 // align ); - - int Ret = av_frame_make_writable(m_AudioStream.m_vpFrames[ThreadIndex]); - if(Ret < 0) + if(FillArrayResult < 0) { - dbg_msg("video_recorder", "Error making frame writable"); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(FillArrayResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not fill audio frame: %s", aError); + return; + } + + const int MakeWriteableResult = av_frame_make_writable(m_AudioStream.m_vpFrames[ThreadIndex]); + if(MakeWriteableResult < 0) + { + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(MakeWriteableResult, aError, sizeof(aError)); + 
log_error("videorecorder", "Could not make audio frame writeable: %s", aError); return; } /* convert to destination format */ - Ret = swr_convert( + const int ConvertResult = swr_convert( m_AudioStream.m_vpSwrCtxs[ThreadIndex], m_AudioStream.m_vpFrames[ThreadIndex]->data, m_AudioStream.m_vpFrames[ThreadIndex]->nb_samples, (const uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data, m_AudioStream.m_vpTmpFrames[ThreadIndex]->nb_samples); - - if(Ret < 0) + if(ConvertResult < 0) { - dbg_msg("video_recorder", "Error while converting"); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(ConvertResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not convert audio frame: %s", aError); return; } } @@ -565,22 +620,24 @@ void CVideo::ReadRGBFromGL(size_t ThreadIndex) AVFrame *CVideo::AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height) { - AVFrame *pPicture; - int Ret; - - pPicture = av_frame_alloc(); + AVFrame *pPicture = av_frame_alloc(); if(!pPicture) + { + log_error("videorecorder", "Could not allocate video frame"); return nullptr; + } pPicture->format = PixFmt; pPicture->width = Width; pPicture->height = Height; /* allocate the buffers for the frame data */ - Ret = av_frame_get_buffer(pPicture, 32); - if(Ret < 0) + const int FrameBufferAllocResult = av_frame_get_buffer(pPicture, 32); + if(FrameBufferAllocResult < 0) { - dbg_msg("video_recorder", "Could not allocate frame data."); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(FrameBufferAllocResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not allocate video frame buffer: %s", aError); return nullptr; } @@ -590,11 +647,9 @@ AVFrame *CVideo::AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height) AVFrame *CVideo::AllocAudioFrame(enum AVSampleFormat SampleFmt, uint64_t ChannelLayout, int SampleRate, int NbSamples) { AVFrame *pFrame = av_frame_alloc(); - int Ret; - if(!pFrame) { - dbg_msg("video_recorder", "Error allocating an audio frame"); + 
log_error("videorecorder", "Could not allocate audio frame"); return nullptr; } @@ -609,10 +664,12 @@ AVFrame *CVideo::AllocAudioFrame(enum AVSampleFormat SampleFmt, uint64_t Channel if(NbSamples) { - Ret = av_frame_get_buffer(pFrame, 0); - if(Ret < 0) + const int FrameBufferAllocResult = av_frame_get_buffer(pFrame, 0); + if(FrameBufferAllocResult < 0) { - dbg_msg("video_recorder", "Error allocating an audio buffer"); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(FrameBufferAllocResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not allocate audio frame buffer: %s", aError); return nullptr; } } @@ -622,19 +679,18 @@ AVFrame *CVideo::AllocAudioFrame(enum AVSampleFormat SampleFmt, uint64_t Channel bool CVideo::OpenVideo() { - int Ret; AVCodecContext *pContext = m_VideoStream.pEnc; AVDictionary *pOptions = nullptr; av_dict_copy(&pOptions, m_pOptDict, 0); /* open the codec */ - Ret = avcodec_open2(pContext, m_pVideoCodec, &pOptions); + const int VideoOpenResult = avcodec_open2(pContext, m_pVideoCodec, &pOptions); av_dict_free(&pOptions); - if(Ret < 0) + if(VideoOpenResult < 0) { - char aBuf[AV_ERROR_MAX_STRING_SIZE]; - av_strerror(Ret, aBuf, sizeof(aBuf)); - dbg_msg("video_recorder", "Could not open video codec: %s", aBuf); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(VideoOpenResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not open video codec: %s", aError); return false; } @@ -648,7 +704,6 @@ bool CVideo::OpenVideo() m_VideoStream.m_vpFrames[i] = AllocPicture(pContext->pix_fmt, pContext->width, pContext->height); if(!m_VideoStream.m_vpFrames[i]) { - dbg_msg("video_recorder", "Could not allocate video frame"); return false; } } @@ -668,17 +723,18 @@ bool CVideo::OpenVideo() m_VideoStream.m_vpTmpFrames[i] = AllocPicture(AV_PIX_FMT_YUV420P, pContext->width, pContext->height); if(!m_VideoStream.m_vpTmpFrames[i]) { - dbg_msg("video_recorder", "Could not allocate temporary video frame"); return false; } } } /* copy 
the stream parameters to the muxer */ - Ret = avcodec_parameters_from_context(m_VideoStream.pSt->codecpar, pContext); - if(Ret < 0) + const int AudioStreamCopyResult = avcodec_parameters_from_context(m_VideoStream.pSt->codecpar, pContext); + if(AudioStreamCopyResult < 0) { - dbg_msg("video_recorder", "Could not copy the stream parameters"); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(AudioStreamCopyResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not copy video stream parameters: %s", aError); return false; } m_VSeq = 0; @@ -687,25 +743,22 @@ bool CVideo::OpenVideo() bool CVideo::OpenAudio() { - AVCodecContext *pContext; - int NbSamples; - int Ret; + AVCodecContext *pContext = m_AudioStream.pEnc; AVDictionary *pOptions = nullptr; - - pContext = m_AudioStream.pEnc; + av_dict_copy(&pOptions, m_pOptDict, 0); /* open it */ - av_dict_copy(&pOptions, m_pOptDict, 0); - Ret = avcodec_open2(pContext, m_pAudioCodec, &pOptions); + const int AudioOpenResult = avcodec_open2(pContext, m_pAudioCodec, &pOptions); av_dict_free(&pOptions); - if(Ret < 0) + if(AudioOpenResult < 0) { - char aBuf[AV_ERROR_MAX_STRING_SIZE]; - av_strerror(Ret, aBuf, sizeof(aBuf)); - dbg_msg("video_recorder", "Could not open audio codec: %s", aBuf); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(AudioOpenResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not open audio codec: %s", aError); return false; } + int NbSamples; if(pContext->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE) NbSamples = 10000; else @@ -728,7 +781,6 @@ bool CVideo::OpenAudio() #endif if(!m_AudioStream.m_vpFrames[i]) { - dbg_msg("video_recorder", "Could not allocate audio frame"); return false; } @@ -736,20 +788,21 @@ bool CVideo::OpenAudio() m_AudioStream.m_vpTmpFrames[i] = AllocAudioFrame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO, g_Config.m_SndRate, NbSamples); if(!m_AudioStream.m_vpTmpFrames[i]) { - dbg_msg("video_recorder", "Could not allocate audio frame"); return 
false; } } /* copy the stream parameters to the muxer */ - Ret = avcodec_parameters_from_context(m_AudioStream.pSt->codecpar, pContext); - if(Ret < 0) + const int AudioStreamCopyResult = avcodec_parameters_from_context(m_AudioStream.pSt->codecpar, pContext); + if(AudioStreamCopyResult < 0) { - dbg_msg("video_recorder", "Could not copy the stream parameters"); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(AudioStreamCopyResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not copy audio stream parameters: %s", aError); return false; } - /* create resampler context */ + /* create resampling context */ m_AudioStream.m_vpSwrCtxs.clear(); m_AudioStream.m_vpSwrCtxs.resize(m_AudioThreads); for(size_t i = 0; i < m_AudioThreads; ++i) @@ -757,26 +810,33 @@ bool CVideo::OpenAudio() m_AudioStream.m_vpSwrCtxs[i] = swr_alloc(); if(!m_AudioStream.m_vpSwrCtxs[i]) { - dbg_msg("video_recorder", "Could not allocate resampler context"); + log_error("videorecorder", "Could not allocate resampling context"); return false; } /* set options */ - av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "in_channel_count", 2, 0); - av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "in_sample_rate", g_Config.m_SndRate, 0); - av_opt_set_sample_fmt(m_AudioStream.m_vpSwrCtxs[i], "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); + dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "in_channel_count", 2, 0) == 0, "invalid option"); + if(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "in_sample_rate", g_Config.m_SndRate, 0) != 0) + { + log_error("videorecorder", "Could not set audio sample rate to %d", g_Config.m_SndRate); + return false; + } + dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrCtxs[i], "in_sample_fmt", AV_SAMPLE_FMT_S16, 0) == 0, "invalid option"); #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100) - av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "out_channel_count", pContext->ch_layout.nb_channels, 0); + dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], 
"out_channel_count", pContext->ch_layout.nb_channels, 0) == 0, "invalid option"); #else - av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "out_channel_count", pContext->channels, 0); + dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "out_channel_count", pContext->channels, 0) == 0, "invalid option"); #endif - av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "out_sample_rate", pContext->sample_rate, 0); - av_opt_set_sample_fmt(m_AudioStream.m_vpSwrCtxs[i], "out_sample_fmt", pContext->sample_fmt, 0); + dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "out_sample_rate", pContext->sample_rate, 0) == 0, "invalid option"); + dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrCtxs[i], "out_sample_fmt", pContext->sample_fmt, 0) == 0, "invalid option"); /* initialize the resampling context */ - if(swr_init(m_AudioStream.m_vpSwrCtxs[i]) < 0) + const int ResamplingContextInitResult = swr_init(m_AudioStream.m_vpSwrCtxs[i]); + if(ResamplingContextInitResult < 0) { - dbg_msg("video_recorder", "Failed to initialize the resampling context"); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(ResamplingContextInitResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not initialize resampling context: %s", aError); return false; } } @@ -788,28 +848,25 @@ bool CVideo::OpenAudio() /* Add an output stream. 
*/ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCodec **ppCodec, enum AVCodecID CodecId) const { - AVCodecContext *pContext; - /* find the encoder */ *ppCodec = avcodec_find_encoder(CodecId); if(!(*ppCodec)) { - dbg_msg("video_recorder", "Could not find encoder for '%s'", - avcodec_get_name(CodecId)); + log_error("videorecorder", "Could not find encoder for codec '%s'", avcodec_get_name(CodecId)); return false; } - pStream->pSt = avformat_new_stream(pOC, NULL); + pStream->pSt = avformat_new_stream(pOC, nullptr); if(!pStream->pSt) { - dbg_msg("video_recorder", "Could not allocate stream"); + log_error("videorecorder", "Could not allocate stream"); return false; } pStream->pSt->id = pOC->nb_streams - 1; - pContext = avcodec_alloc_context3(*ppCodec); + AVCodecContext *pContext = avcodec_alloc_context3(*ppCodec); if(!pContext) { - dbg_msg("video_recorder", "Could not alloc an encoding context"); + log_error("videorecorder", "Could not allocate encoding context"); return false; } pStream->pEnc = pContext; @@ -880,8 +937,9 @@ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCode if(CodecId == AV_CODEC_ID_H264) { static const char *s_apPresets[10] = {"ultrafast", "superfast", "veryfast", "faster", "fast", "medium", "slow", "slower", "veryslow", "placebo"}; - av_opt_set(pContext->priv_data, "preset", s_apPresets[g_Config.m_ClVideoX264Preset], 0); - av_opt_set_int(pContext->priv_data, "crf", g_Config.m_ClVideoX264Crf, 0); + dbg_assert(g_Config.m_ClVideoX264Preset < (int)std::size(s_apPresets), "preset index invalid"); + dbg_assert(av_opt_set(pContext->priv_data, "preset", s_apPresets[g_Config.m_ClVideoX264Preset], 0) == 0, "invalid option"); + dbg_assert(av_opt_set_int(pContext->priv_data, "crf", g_Config.m_ClVideoX264Crf, 0) == 0, "invalid option"); } break; @@ -898,12 +956,10 @@ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCode void CVideo::WriteFrame(OutputStream *pStream, size_t 
ThreadIndex) { - int RetRecv = 0; - AVPacket *pPacket = av_packet_alloc(); if(pPacket == nullptr) { - dbg_msg("video_recorder", "Failed allocating packet"); + log_error("videorecorder", "Could not allocate packet"); return; } @@ -911,29 +967,33 @@ void CVideo::WriteFrame(OutputStream *pStream, size_t ThreadIndex) pPacket->size = 0; avcodec_send_frame(pStream->pEnc, pStream->m_vpFrames[ThreadIndex]); + int RecvResult = 0; do { - RetRecv = avcodec_receive_packet(pStream->pEnc, pPacket); - if(!RetRecv) + RecvResult = avcodec_receive_packet(pStream->pEnc, pPacket); + if(!RecvResult) { /* rescale output packet timestamp values from codec to stream timebase */ av_packet_rescale_ts(pPacket, pStream->pEnc->time_base, pStream->pSt->time_base); pPacket->stream_index = pStream->pSt->index; - if(int Ret = av_interleaved_write_frame(m_pFormatContext, pPacket)) + const int WriteFrameResult = av_interleaved_write_frame(m_pFormatContext, pPacket); + if(WriteFrameResult < 0) { - char aBuf[AV_ERROR_MAX_STRING_SIZE]; - av_strerror(Ret, aBuf, sizeof(aBuf)); - dbg_msg("video_recorder", "Error while writing video frame: %s", aBuf); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(WriteFrameResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not write video frame: %s", aError); } } else break; } while(true); - if(RetRecv && RetRecv != AVERROR(EAGAIN)) + if(RecvResult && RecvResult != AVERROR(EAGAIN)) { - dbg_msg("video_recorder", "Error encoding frame, error: %d", RetRecv); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(RecvResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not encode video frame: %s", aError); } av_packet_free(&pPacket); @@ -941,13 +1001,10 @@ void CVideo::WriteFrame(OutputStream *pStream, size_t ThreadIndex) void CVideo::FinishFrames(OutputStream *pStream) { - dbg_msg("video_recorder", "------------"); - int RetRecv = 0; - AVPacket *pPacket = av_packet_alloc(); if(pPacket == nullptr) { - dbg_msg("video_recorder", "Failed 
allocating packet"); + log_error("videorecorder", "Could not allocate packet"); return; } @@ -955,29 +1012,33 @@ void CVideo::FinishFrames(OutputStream *pStream) pPacket->size = 0; avcodec_send_frame(pStream->pEnc, 0); + int RecvResult = 0; do { - RetRecv = avcodec_receive_packet(pStream->pEnc, pPacket); - if(!RetRecv) + RecvResult = avcodec_receive_packet(pStream->pEnc, pPacket); + if(!RecvResult) { /* rescale output packet timestamp values from codec to stream timebase */ av_packet_rescale_ts(pPacket, pStream->pEnc->time_base, pStream->pSt->time_base); pPacket->stream_index = pStream->pSt->index; - if(int Ret = av_interleaved_write_frame(m_pFormatContext, pPacket)) + const int WriteFrameResult = av_interleaved_write_frame(m_pFormatContext, pPacket); + if(WriteFrameResult < 0) { - char aBuf[AV_ERROR_MAX_STRING_SIZE]; - av_strerror(Ret, aBuf, sizeof(aBuf)); - dbg_msg("video_recorder", "Error while writing video frame: %s", aBuf); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(WriteFrameResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not write video frame: %s", aError); } } else break; } while(true); - if(RetRecv && RetRecv != AVERROR_EOF) + if(RecvResult && RecvResult != AVERROR_EOF) { - dbg_msg("video_recorder", "failed to finish recoding, error: %d", RetRecv); + char aError[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(RecvResult, aError, sizeof(aError)); + log_error("videorecorder", "Could not finish recording: %s", aError); } av_packet_free(&pPacket); diff --git a/src/engine/client/video.h b/src/engine/client/video.h index 77904b95e..eb00c9f6b 100644 --- a/src/engine/client/video.h +++ b/src/engine/client/video.h @@ -47,7 +47,7 @@ public: CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Width, int Height, const char *pName); ~CVideo(); - void Start() override REQUIRES(!g_WriteLock); + bool Start() override REQUIRES(!g_WriteLock); void Stop() override; void Pause(bool Pause) override; bool IsRecording() override { 
return m_Recording; } @@ -60,7 +60,7 @@ public: static IVideo *Current() { return IVideo::ms_pCurrentVideo; } - static void Init() { av_log_set_level(AV_LOG_DEBUG); } + static void Init(); private: void RunVideoThread(size_t ParentThreadIndex, size_t ThreadIndex) REQUIRES(!g_WriteLock); diff --git a/src/engine/demo.h b/src/engine/demo.h index 5f5bf6a85..9a0e74d03 100644 --- a/src/engine/demo.h +++ b/src/engine/demo.h @@ -91,6 +91,7 @@ public: virtual int SetPos(int WantedTick) = 0; virtual void Pause() = 0; virtual void Unpause() = 0; + virtual const char *ErrorMessage() const = 0; virtual bool IsPlaying() const = 0; virtual const CInfo *BaseInfo() const = 0; virtual void GetDemoName(char *pBuffer, size_t BufferSize) const = 0; diff --git a/src/engine/shared/demo.h b/src/engine/shared/demo.h index c8060c541..6c20c28d2 100644 --- a/src/engine/shared/demo.h +++ b/src/engine/shared/demo.h @@ -167,8 +167,8 @@ public: const CInfo *BaseInfo() const override { return &m_Info.m_Info; } void GetDemoName(char *pBuffer, size_t BufferSize) const override; bool GetDemoInfo(class IStorage *pStorage, class IConsole *pConsole, const char *pFilename, int StorageType, CDemoHeader *pDemoHeader, CTimelineMarkers *pTimelineMarkers, CMapInfo *pMapInfo, IOHANDLE *pFile = nullptr, char *pErrorMessage = nullptr, size_t ErrorMessageSize = 0) const override; - const char *Filename() { return m_aFilename; } - const char *ErrorMessage() { return m_aErrorMessage; } + const char *Filename() const { return m_aFilename; } + const char *ErrorMessage() const override { return m_aErrorMessage; } int Update(bool RealTime = true); diff --git a/src/engine/shared/video.h b/src/engine/shared/video.h index c93cdd80a..1a97f5a52 100644 --- a/src/engine/shared/video.h +++ b/src/engine/shared/video.h @@ -12,7 +12,7 @@ class IVideo public: virtual ~IVideo(){}; - virtual void Start() = 0; + virtual bool Start() = 0; virtual void Stop() = 0; virtual void Pause(bool Pause) = 0; virtual bool IsRecording() = 0; diff 
--git a/src/game/client/components/menus_demo.cpp b/src/game/client/components/menus_demo.cpp index 5b88a05ee..6d3ef8e61 100644 --- a/src/game/client/components/menus_demo.cpp +++ b/src/game/client/components/menus_demo.cpp @@ -1085,7 +1085,14 @@ void CMenus::RenderDemoBrowserList(CUIRect ListView, bool &WasListboxItemActivat #if defined(CONF_VIDEORECORDER) if(!m_DemoRenderInput.IsEmpty()) { - m_Popup = POPUP_RENDER_DONE; + if(DemoPlayer()->ErrorMessage()[0] == '\0') + { + m_Popup = POPUP_RENDER_DONE; + } + else + { + m_DemoRenderInput.Clear(); + } } #endif From 5f647b97ebfb39b52a8a3e4912cb6980e3b54c19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Fri, 3 May 2024 20:39:14 +0200 Subject: [PATCH 07/10] Ensure correct sample and bit rate are used for video recording The wrong sampling rate was being used for video recording if the client is not restarted after changing the `snd_rate` config variable. Ensure that the correct bit rate is used if the sample rate was adjusted because the selected value is not supported. 
--- src/engine/client/sound.h | 2 ++ src/engine/client/video.cpp | 19 +++++++++++-------- src/engine/sound.h | 2 ++ 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/engine/client/sound.h b/src/engine/client/sound.h index e5c71652a..d688ed955 100644 --- a/src/engine/client/sound.h +++ b/src/engine/client/sound.h @@ -131,7 +131,9 @@ public: void StopVoice(CVoiceHandle Voice) override REQUIRES(!m_SoundLock); bool IsPlaying(int SampleId) override REQUIRES(!m_SoundLock); + int MixingRate() const override { return m_MixingRate; } void Mix(short *pFinalOut, unsigned Frames) override REQUIRES(!m_SoundLock); + void PauseAudioDevice() override; void UnpauseAudioDevice() override; }; diff --git a/src/engine/client/video.cpp b/src/engine/client/video.cpp index 9de6af37f..866d37d69 100644 --- a/src/engine/client/video.cpp +++ b/src/engine/client/video.cpp @@ -99,7 +99,7 @@ CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Wid m_ProcessingVideoFrame = 0; m_ProcessingAudioFrame = 0; - m_HasAudio = g_Config.m_ClVideoSndEnable; + m_HasAudio = m_pSound->IsSoundEnabled() && g_Config.m_ClVideoSndEnable; dbg_assert(ms_pCurrentVideo == nullptr, "ms_pCurrentVideo is NOT set to nullptr while creating a new Video."); @@ -785,7 +785,7 @@ bool CVideo::OpenAudio() } m_AudioStream.m_vpTmpFrames.emplace_back(nullptr); - m_AudioStream.m_vpTmpFrames[i] = AllocAudioFrame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO, g_Config.m_SndRate, NbSamples); + m_AudioStream.m_vpTmpFrames[i] = AllocAudioFrame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO, m_pSound->MixingRate(), NbSamples); if(!m_AudioStream.m_vpTmpFrames[i]) { return false; @@ -816,9 +816,9 @@ bool CVideo::OpenAudio() /* set options */ dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "in_channel_count", 2, 0) == 0, "invalid option"); - if(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "in_sample_rate", g_Config.m_SndRate, 0) != 0) + if(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "in_sample_rate", 
m_pSound->MixingRate(), 0) != 0) { - log_error("videorecorder", "Could not set audio sample rate to %d", g_Config.m_SndRate); + log_error("videorecorder", "Could not set audio sample rate to %d", m_pSound->MixingRate()); return false; } dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrCtxs[i], "in_sample_fmt", AV_SAMPLE_FMT_S16, 0) == 0, "invalid option"); @@ -880,20 +880,23 @@ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCode { case AVMEDIA_TYPE_AUDIO: pContext->sample_fmt = (*ppCodec)->sample_fmts ? (*ppCodec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP; - pContext->bit_rate = g_Config.m_SndRate * 2 * 16; - pContext->sample_rate = g_Config.m_SndRate; if((*ppCodec)->supported_samplerates) { pContext->sample_rate = (*ppCodec)->supported_samplerates[0]; for(int i = 0; (*ppCodec)->supported_samplerates[i]; i++) { - if((*ppCodec)->supported_samplerates[i] == g_Config.m_SndRate) + if((*ppCodec)->supported_samplerates[i] == m_pSound->MixingRate()) { - pContext->sample_rate = g_Config.m_SndRate; + pContext->sample_rate = m_pSound->MixingRate(); break; } } } + else + { + pContext->sample_rate = m_pSound->MixingRate(); + } + pContext->bit_rate = pContext->sample_rate * 2 * 16; #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100) dbg_assert(av_channel_layout_from_mask(&pContext->ch_layout, AV_CH_LAYOUT_STEREO) == 0, "Failed to set channel layout"); #else diff --git a/src/engine/sound.h b/src/engine/sound.h index 025eaac1c..43c354124 100644 --- a/src/engine/sound.h +++ b/src/engine/sound.h @@ -92,7 +92,9 @@ public: virtual void StopVoice(CVoiceHandle Voice) = 0; virtual bool IsPlaying(int SampleId) = 0; + virtual int MixingRate() const = 0; virtual void Mix(short *pFinalOut, unsigned Frames) = 0; + // useful for thread synchronization virtual void PauseAudioDevice() = 0; virtual void UnpauseAudioDevice() = 0; From a0465b67dddd6fc6a2536faa4af9f7b7b1a300fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Sun, 14 Apr 2024 
12:50:56 +0200 Subject: [PATCH 08/10] Improve various variable and class names of the video recorder - Rename variable `m_vBuffer` -> `m_vAudioBuffers` - Rename class `SAudioBuffer` -> `CAudioBuffer` - Rename variable `m_vPixelHelper` -> `m_vVideoBuffers` - Add class `CVideoBuffer` as wrapper - Remove unused variable `m_Vframe` - Rename variable `m_ASeq` -> `m_AudioFrameIndex` - Rename variable `m_VSeq` -> `m_VideoFrameIndex` - Rename variable `m_vVideoThreads` -> `m_vpVideoThreads` - Rename variable `m_vAudioThreads` -> `m_vpAudioThreads` - Rename function `ReadRGBFromGL` -> `UpdateVideoBufferFromGraphics` - Remove unnecessary `ALEN` definition - Remove unused variable `NextPts` - Rename class `OutputStream` -> `COutputStream` - Rename variable `pSt` -> `m_pStream` - Rename variable `pEnc` -> `m_pCodecContext` - Rename variable `m_vpSwsCtxs` -> `m_vpSwsContexts` - Rename variable `m_vpSwrCtxs` -> `m_vpSwrContexts` - Rename variable `pOC` -> `pFormatContext` - Rename class `SVideoRecorderThread` -> `CVideoRecorderThread` - Rename class `SAudioRecorderThread` -> `CAudioRecorderThread` --- src/engine/client/video.cpp | 212 ++++++++++++++++++------------------ src/engine/client/video.h | 56 +++++----- 2 files changed, 136 insertions(+), 132 deletions(-) diff --git a/src/engine/client/video.cpp b/src/engine/client/video.cpp index 866d37d69..ffff1687b 100644 --- a/src/engine/client/video.cpp +++ b/src/engine/client/video.cpp @@ -157,13 +157,13 @@ bool CVideo::Start() m_CurAudioThreadIndex = 0; size_t GLNVals = FORMAT_GL_NCHANNELS * m_Width * m_Height; - m_vPixelHelper.resize(m_VideoThreads); + m_vVideoBuffers.resize(m_VideoThreads); for(size_t i = 0; i < m_VideoThreads; ++i) { - m_vPixelHelper[i].resize(GLNVals * sizeof(uint8_t)); + m_vVideoBuffers[i].m_vBuffer.resize(GLNVals * sizeof(uint8_t)); } - m_vBuffer.resize(m_AudioThreads); + m_vAudioBuffers.resize(m_AudioThreads); /* Add the audio and video streams using the default format codecs * and initialize the codecs. 
*/ @@ -192,28 +192,28 @@ bool CVideo::Start() } } - m_vVideoThreads.resize(m_VideoThreads); + m_vpVideoThreads.resize(m_VideoThreads); for(size_t i = 0; i < m_VideoThreads; ++i) { - m_vVideoThreads[i] = std::make_unique(); + m_vpVideoThreads[i] = std::make_unique(); } for(size_t i = 0; i < m_VideoThreads; ++i) { - std::unique_lock Lock(m_vVideoThreads[i]->m_Mutex); - m_vVideoThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!g_WriteLock) { RunVideoThread(i == 0 ? (m_VideoThreads - 1) : (i - 1), i); }); - m_vVideoThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vVideoThreads[i]->m_Started; }); + std::unique_lock Lock(m_vpVideoThreads[i]->m_Mutex); + m_vpVideoThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!g_WriteLock) { RunVideoThread(i == 0 ? (m_VideoThreads - 1) : (i - 1), i); }); + m_vpVideoThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vpVideoThreads[i]->m_Started; }); } - m_vAudioThreads.resize(m_AudioThreads); + m_vpAudioThreads.resize(m_AudioThreads); for(size_t i = 0; i < m_AudioThreads; ++i) { - m_vAudioThreads[i] = std::make_unique(); + m_vpAudioThreads[i] = std::make_unique(); } for(size_t i = 0; i < m_AudioThreads; ++i) { - std::unique_lock Lock(m_vAudioThreads[i]->m_Mutex); - m_vAudioThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!g_WriteLock) { RunAudioThread(i == 0 ? (m_AudioThreads - 1) : (i - 1), i); }); - m_vAudioThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vAudioThreads[i]->m_Started; }); + std::unique_lock Lock(m_vpAudioThreads[i]->m_Mutex); + m_vpAudioThreads[i]->m_Thread = std::thread([this, i]() REQUIRES(!g_WriteLock) { RunAudioThread(i == 0 ? 
(m_AudioThreads - 1) : (i - 1), i); }); + m_vpAudioThreads[i]->m_Cond.wait(Lock, [this, i]() -> bool { return m_vpAudioThreads[i]->m_Started; }); } /* Now that all the parameters are set, we can open the audio and @@ -237,19 +237,19 @@ bool CVideo::Start() } } - m_VideoStream.m_vpSwsCtxs.reserve(m_VideoThreads); + m_VideoStream.m_vpSwsContexts.reserve(m_VideoThreads); for(size_t i = 0; i < m_VideoThreads; ++i) { - if(m_VideoStream.m_vpSwsCtxs.size() <= i) - m_VideoStream.m_vpSwsCtxs.emplace_back(nullptr); + if(m_VideoStream.m_vpSwsContexts.size() <= i) + m_VideoStream.m_vpSwsContexts.emplace_back(nullptr); - if(!m_VideoStream.m_vpSwsCtxs[i]) + if(!m_VideoStream.m_vpSwsContexts[i]) { - m_VideoStream.m_vpSwsCtxs[i] = sws_getCachedContext( - m_VideoStream.m_vpSwsCtxs[i], - m_VideoStream.pEnc->width, m_VideoStream.pEnc->height, AV_PIX_FMT_RGBA, - m_VideoStream.pEnc->width, m_VideoStream.pEnc->height, AV_PIX_FMT_YUV420P, + m_VideoStream.m_vpSwsContexts[i] = sws_getCachedContext( + m_VideoStream.m_vpSwsContexts[i], + m_VideoStream.m_pCodecContext->width, m_VideoStream.m_pCodecContext->height, AV_PIX_FMT_RGBA, + m_VideoStream.m_pCodecContext->width, m_VideoStream.m_pCodecContext->height, AV_PIX_FMT_YUV420P, 0, 0, 0, 0); } } @@ -267,7 +267,6 @@ bool CVideo::Start() m_Recording = true; m_Started = true; ms_Time = time_get(); - m_Vframe = 0; return true; } @@ -284,26 +283,26 @@ void CVideo::Stop() for(size_t i = 0; i < m_VideoThreads; ++i) { { - std::unique_lock Lock(m_vVideoThreads[i]->m_Mutex); - m_vVideoThreads[i]->m_Finished = true; - m_vVideoThreads[i]->m_Cond.notify_all(); + std::unique_lock Lock(m_vpVideoThreads[i]->m_Mutex); + m_vpVideoThreads[i]->m_Finished = true; + m_vpVideoThreads[i]->m_Cond.notify_all(); } - m_vVideoThreads[i]->m_Thread.join(); + m_vpVideoThreads[i]->m_Thread.join(); } - m_vVideoThreads.clear(); + m_vpVideoThreads.clear(); for(size_t i = 0; i < m_AudioThreads; ++i) { { - std::unique_lock Lock(m_vAudioThreads[i]->m_Mutex); - 
m_vAudioThreads[i]->m_Finished = true; - m_vAudioThreads[i]->m_Cond.notify_all(); + std::unique_lock Lock(m_vpAudioThreads[i]->m_Mutex); + m_vpAudioThreads[i]->m_Finished = true; + m_vpAudioThreads[i]->m_Cond.notify_all(); } - m_vAudioThreads[i]->m_Thread.join(); + m_vpAudioThreads[i]->m_Thread.join(); } - m_vAudioThreads.clear(); + m_vpAudioThreads.clear(); while(m_ProcessingVideoFrame > 0 || m_ProcessingAudioFrame > 0) std::this_thread::sleep_for(10us); @@ -339,8 +338,8 @@ void CVideo::NextVideoFrameThread() { if(m_Recording) { - m_VSeq += 1; - if(m_VSeq >= 2) + m_VideoFrameIndex += 1; + if(m_VideoFrameIndex >= 2) { m_ProcessingVideoFrame.fetch_add(1); @@ -350,7 +349,7 @@ void CVideo::NextVideoFrameThread() // always wait for the next video thread too, to prevent a dead lock { - auto *pVideoThread = m_vVideoThreads[NextVideoThreadIndex].get(); + auto *pVideoThread = m_vpVideoThreads[NextVideoThreadIndex].get(); std::unique_lock Lock(pVideoThread->m_Mutex); if(pVideoThread->m_HasVideoFrame) @@ -361,7 +360,7 @@ void CVideo::NextVideoFrameThread() // after reading the graphic libraries' frame buffer, go threaded { - auto *pVideoThread = m_vVideoThreads[m_CurVideoThreadIndex].get(); + auto *pVideoThread = m_vpVideoThreads[m_CurVideoThreadIndex].get(); std::unique_lock Lock(pVideoThread->m_Mutex); if(pVideoThread->m_HasVideoFrame) @@ -369,12 +368,12 @@ void CVideo::NextVideoFrameThread() pVideoThread->m_Cond.wait(Lock, [&pVideoThread]() -> bool { return !pVideoThread->m_HasVideoFrame; }); } - ReadRGBFromGL(m_CurVideoThreadIndex); + UpdateVideoBufferFromGraphics(m_CurVideoThreadIndex); pVideoThread->m_HasVideoFrame = true; { std::unique_lock LockParent(pVideoThread->m_VideoFillMutex); - pVideoThread->m_VideoFrameToFill = m_VSeq; + pVideoThread->m_VideoFrameToFill = m_VideoFrameIndex; } pVideoThread->m_Cond.notify_all(); } @@ -392,7 +391,6 @@ void CVideo::NextVideoFrame() { ms_Time += ms_TickTime; ms_LocalTime = (ms_Time - ms_LocalStartTime) / (float)time_freq(); - 
m_Vframe += 1; } } @@ -400,7 +398,7 @@ void CVideo::NextAudioFrameTimeline(ISoundMixFunc Mix) { if(m_Recording && m_HasAudio) { - double SamplesPerFrame = (double)m_AudioStream.pEnc->sample_rate / m_FPS; + double SamplesPerFrame = (double)m_AudioStream.m_pCodecContext->sample_rate / m_FPS; while(m_AudioStream.m_SamplesFrameCount >= m_AudioStream.m_SamplesCount) { NextAudioFrame(Mix); @@ -413,7 +411,7 @@ void CVideo::NextAudioFrame(ISoundMixFunc Mix) { if(m_Recording && m_HasAudio) { - m_ASeq += 1; + m_AudioFrameIndex += 1; m_ProcessingAudioFrame.fetch_add(1); @@ -424,7 +422,7 @@ void CVideo::NextAudioFrame(ISoundMixFunc Mix) // always wait for the next Audio thread too, to prevent a dead lock { - auto *pAudioThread = m_vAudioThreads[NextAudioThreadIndex].get(); + auto *pAudioThread = m_vpAudioThreads[NextAudioThreadIndex].get(); std::unique_lock Lock(pAudioThread->m_Mutex); if(pAudioThread->m_HasAudioFrame) @@ -435,7 +433,7 @@ void CVideo::NextAudioFrame(ISoundMixFunc Mix) // after reading the graphic libraries' frame buffer, go threaded { - auto *pAudioThread = m_vAudioThreads[m_CurAudioThreadIndex].get(); + auto *pAudioThread = m_vpAudioThreads[m_CurAudioThreadIndex].get(); std::unique_lock Lock(pAudioThread->m_Mutex); @@ -444,13 +442,13 @@ void CVideo::NextAudioFrame(ISoundMixFunc Mix) pAudioThread->m_Cond.wait(Lock, [&pAudioThread]() -> bool { return !pAudioThread->m_HasAudioFrame; }); } - Mix(m_vBuffer[m_CurAudioThreadIndex].m_aBuffer, ALEN / 2); // two channels + Mix(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer, std::size(m_vAudioBuffers[m_CurAudioThreadIndex].m_aBuffer) / 2 / 2); // two channels int64_t DstNbSamples = av_rescale_rnd( - swr_get_delay(m_AudioStream.m_vpSwrCtxs[m_CurAudioThreadIndex], m_AudioStream.pEnc->sample_rate) + + swr_get_delay(m_AudioStream.m_vpSwrContexts[m_CurAudioThreadIndex], m_AudioStream.m_pCodecContext->sample_rate) + m_AudioStream.m_vpFrames[m_CurAudioThreadIndex]->nb_samples, - m_AudioStream.pEnc->sample_rate, - 
m_AudioStream.pEnc->sample_rate, AV_ROUND_UP); + m_AudioStream.m_pCodecContext->sample_rate, + m_AudioStream.m_pCodecContext->sample_rate, AV_ROUND_UP); pAudioThread->m_SampleCountStart = m_AudioStream.m_SamplesCount; m_AudioStream.m_SamplesCount += DstNbSamples; @@ -458,7 +456,7 @@ void CVideo::NextAudioFrame(ISoundMixFunc Mix) pAudioThread->m_HasAudioFrame = true; { std::unique_lock LockParent(pAudioThread->m_AudioFillMutex); - pAudioThread->m_AudioFrameToFill = m_ASeq; + pAudioThread->m_AudioFrameToFill = m_AudioFrameIndex; } pAudioThread->m_Cond.notify_all(); } @@ -471,8 +469,8 @@ void CVideo::NextAudioFrame(ISoundMixFunc Mix) void CVideo::RunAudioThread(size_t ParentThreadIndex, size_t ThreadIndex) { - auto *pThreadData = m_vAudioThreads[ThreadIndex].get(); - auto *pParentThreadData = m_vAudioThreads[ParentThreadIndex].get(); + auto *pThreadData = m_vpAudioThreads[ThreadIndex].get(); + auto *pParentThreadData = m_vpAudioThreads[ParentThreadIndex].get(); std::unique_lock Lock(pThreadData->m_Mutex); pThreadData->m_Started = true; pThreadData->m_Cond.notify_all(); @@ -499,7 +497,7 @@ void CVideo::RunAudioThread(size_t ParentThreadIndex, size_t ThreadIndex) { CLockScope ls(g_WriteLock); - m_AudioStream.m_vpFrames[ThreadIndex]->pts = av_rescale_q(pThreadData->m_SampleCountStart, AVRational{1, m_AudioStream.pEnc->sample_rate}, m_AudioStream.pEnc->time_base); + m_AudioStream.m_vpFrames[ThreadIndex]->pts = av_rescale_q(pThreadData->m_SampleCountStart, AVRational{1, m_AudioStream.m_pCodecContext->sample_rate}, m_AudioStream.m_pCodecContext->time_base); WriteFrame(&m_AudioStream, ThreadIndex); } @@ -519,7 +517,7 @@ void CVideo::FillAudioFrame(size_t ThreadIndex) const int FillArrayResult = av_samples_fill_arrays( (uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data, nullptr, // pointer to linesize (int*) - (const uint8_t *)m_vBuffer[ThreadIndex].m_aBuffer, + (const uint8_t *)m_vAudioBuffers[ThreadIndex].m_aBuffer, 2, // channels 
m_AudioStream.m_vpTmpFrames[ThreadIndex]->nb_samples, AV_SAMPLE_FMT_S16, @@ -544,7 +542,7 @@ void CVideo::FillAudioFrame(size_t ThreadIndex) /* convert to destination format */ const int ConvertResult = swr_convert( - m_AudioStream.m_vpSwrCtxs[ThreadIndex], + m_AudioStream.m_vpSwrContexts[ThreadIndex], m_AudioStream.m_vpFrames[ThreadIndex]->data, m_AudioStream.m_vpFrames[ThreadIndex]->nb_samples, (const uint8_t **)m_AudioStream.m_vpTmpFrames[ThreadIndex]->data, @@ -560,8 +558,8 @@ void CVideo::FillAudioFrame(size_t ThreadIndex) void CVideo::RunVideoThread(size_t ParentThreadIndex, size_t ThreadIndex) { - auto *pThreadData = m_vVideoThreads[ThreadIndex].get(); - auto *pParentThreadData = m_vVideoThreads[ParentThreadIndex].get(); + auto *pThreadData = m_vpVideoThreads[ThreadIndex].get(); + auto *pParentThreadData = m_vpVideoThreads[ParentThreadIndex].get(); std::unique_lock Lock(pThreadData->m_Mutex); pThreadData->m_Started = true; pThreadData->m_Cond.notify_all(); @@ -587,7 +585,7 @@ void CVideo::RunVideoThread(size_t ParentThreadIndex, size_t ThreadIndex) std::unique_lock LockVideo(pThreadData->m_VideoFillMutex); { CLockScope ls(g_WriteLock); - m_VideoStream.m_vpFrames[ThreadIndex]->pts = (int64_t)m_VideoStream.pEnc->FRAME_NUM; + m_VideoStream.m_vpFrames[ThreadIndex]->pts = (int64_t)m_VideoStream.m_pCodecContext->FRAME_NUM; WriteFrame(&m_VideoStream, ThreadIndex); } @@ -604,18 +602,18 @@ void CVideo::RunVideoThread(size_t ParentThreadIndex, size_t ThreadIndex) void CVideo::FillVideoFrame(size_t ThreadIndex) { - const int InLineSize = 4 * m_VideoStream.pEnc->width; - auto *pRGBAData = m_vPixelHelper[ThreadIndex].data(); - sws_scale(m_VideoStream.m_vpSwsCtxs[ThreadIndex], (const uint8_t *const *)&pRGBAData, &InLineSize, 0, - m_VideoStream.pEnc->height, m_VideoStream.m_vpFrames[ThreadIndex]->data, m_VideoStream.m_vpFrames[ThreadIndex]->linesize); + const int InLineSize = 4 * m_VideoStream.m_pCodecContext->width; + auto *pRGBAData = 
m_vVideoBuffers[ThreadIndex].m_vBuffer.data(); + sws_scale(m_VideoStream.m_vpSwsContexts[ThreadIndex], (const uint8_t *const *)&pRGBAData, &InLineSize, 0, + m_VideoStream.m_pCodecContext->height, m_VideoStream.m_vpFrames[ThreadIndex]->data, m_VideoStream.m_vpFrames[ThreadIndex]->linesize); } -void CVideo::ReadRGBFromGL(size_t ThreadIndex) +void CVideo::UpdateVideoBufferFromGraphics(size_t ThreadIndex) { uint32_t Width; uint32_t Height; CImageInfo::EImageFormat Format; - m_pGraphics->GetReadPresentedImageDataFuncUnsafe()(Width, Height, Format, m_vPixelHelper[ThreadIndex]); + m_pGraphics->GetReadPresentedImageDataFuncUnsafe()(Width, Height, Format, m_vVideoBuffers[ThreadIndex].m_vBuffer); } AVFrame *CVideo::AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height) @@ -679,7 +677,7 @@ AVFrame *CVideo::AllocAudioFrame(enum AVSampleFormat SampleFmt, uint64_t Channel bool CVideo::OpenVideo() { - AVCodecContext *pContext = m_VideoStream.pEnc; + AVCodecContext *pContext = m_VideoStream.m_pCodecContext; AVDictionary *pOptions = nullptr; av_dict_copy(&pOptions, m_pOptDict, 0); @@ -729,7 +727,7 @@ bool CVideo::OpenVideo() } /* copy the stream parameters to the muxer */ - const int AudioStreamCopyResult = avcodec_parameters_from_context(m_VideoStream.pSt->codecpar, pContext); + const int AudioStreamCopyResult = avcodec_parameters_from_context(m_VideoStream.m_pStream->codecpar, pContext); if(AudioStreamCopyResult < 0) { char aError[AV_ERROR_MAX_STRING_SIZE]; @@ -737,13 +735,13 @@ bool CVideo::OpenVideo() log_error("videorecorder", "Could not copy video stream parameters: %s", aError); return false; } - m_VSeq = 0; + m_VideoFrameIndex = 0; return true; } bool CVideo::OpenAudio() { - AVCodecContext *pContext = m_AudioStream.pEnc; + AVCodecContext *pContext = m_AudioStream.m_pCodecContext; AVDictionary *pOptions = nullptr; av_dict_copy(&pOptions, m_pOptDict, 0); @@ -793,7 +791,7 @@ bool CVideo::OpenAudio() } /* copy the stream parameters to the muxer */ - const int 
AudioStreamCopyResult = avcodec_parameters_from_context(m_AudioStream.pSt->codecpar, pContext); + const int AudioStreamCopyResult = avcodec_parameters_from_context(m_AudioStream.m_pStream->codecpar, pContext); if(AudioStreamCopyResult < 0) { char aError[AV_ERROR_MAX_STRING_SIZE]; @@ -803,35 +801,35 @@ bool CVideo::OpenAudio() } /* create resampling context */ - m_AudioStream.m_vpSwrCtxs.clear(); - m_AudioStream.m_vpSwrCtxs.resize(m_AudioThreads); + m_AudioStream.m_vpSwrContexts.clear(); + m_AudioStream.m_vpSwrContexts.resize(m_AudioThreads); for(size_t i = 0; i < m_AudioThreads; ++i) { - m_AudioStream.m_vpSwrCtxs[i] = swr_alloc(); - if(!m_AudioStream.m_vpSwrCtxs[i]) + m_AudioStream.m_vpSwrContexts[i] = swr_alloc(); + if(!m_AudioStream.m_vpSwrContexts[i]) { log_error("videorecorder", "Could not allocate resampling context"); return false; } /* set options */ - dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "in_channel_count", 2, 0) == 0, "invalid option"); - if(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "in_sample_rate", m_pSound->MixingRate(), 0) != 0) + dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "in_channel_count", 2, 0) == 0, "invalid option"); + if(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "in_sample_rate", m_pSound->MixingRate(), 0) != 0) { log_error("videorecorder", "Could not set audio sample rate to %d", m_pSound->MixingRate()); return false; } - dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrCtxs[i], "in_sample_fmt", AV_SAMPLE_FMT_S16, 0) == 0, "invalid option"); + dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "in_sample_fmt", AV_SAMPLE_FMT_S16, 0) == 0, "invalid option"); #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100) - dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "out_channel_count", pContext->ch_layout.nb_channels, 0) == 0, "invalid option"); + dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_channel_count", pContext->ch_layout.nb_channels, 0) == 0, 
"invalid option"); #else - dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "out_channel_count", pContext->channels, 0) == 0, "invalid option"); + dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_channel_count", pContext->channels, 0) == 0, "invalid option"); #endif - dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrCtxs[i], "out_sample_rate", pContext->sample_rate, 0) == 0, "invalid option"); - dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrCtxs[i], "out_sample_fmt", pContext->sample_fmt, 0) == 0, "invalid option"); + dbg_assert(av_opt_set_int(m_AudioStream.m_vpSwrContexts[i], "out_sample_rate", pContext->sample_rate, 0) == 0, "invalid option"); + dbg_assert(av_opt_set_sample_fmt(m_AudioStream.m_vpSwrContexts[i], "out_sample_fmt", pContext->sample_fmt, 0) == 0, "invalid option"); /* initialize the resampling context */ - const int ResamplingContextInitResult = swr_init(m_AudioStream.m_vpSwrCtxs[i]); + const int ResamplingContextInitResult = swr_init(m_AudioStream.m_vpSwrContexts[i]); if(ResamplingContextInitResult < 0) { char aError[AV_ERROR_MAX_STRING_SIZE]; @@ -841,12 +839,12 @@ bool CVideo::OpenAudio() } } - m_ASeq = 0; + m_AudioFrameIndex = 0; return true; } /* Add an output stream. 
*/ -bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCodec **ppCodec, enum AVCodecID CodecId) const +bool CVideo::AddStream(COutputStream *pStream, AVFormatContext *pFormatContext, const AVCodec **ppCodec, enum AVCodecID CodecId) const { /* find the encoder */ *ppCodec = avcodec_find_encoder(CodecId); @@ -856,20 +854,20 @@ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCode return false; } - pStream->pSt = avformat_new_stream(pOC, nullptr); - if(!pStream->pSt) + pStream->m_pStream = avformat_new_stream(pFormatContext, nullptr); + if(!pStream->m_pStream) { log_error("videorecorder", "Could not allocate stream"); return false; } - pStream->pSt->id = pOC->nb_streams - 1; + pStream->m_pStream->id = pFormatContext->nb_streams - 1; AVCodecContext *pContext = avcodec_alloc_context3(*ppCodec); if(!pContext) { log_error("videorecorder", "Could not allocate encoding context"); return false; } - pStream->pEnc = pContext; + pStream->m_pCodecContext = pContext; #if defined(CONF_ARCH_IA32) || defined(CONF_ARCH_ARM) // use only 1 ffmpeg thread on 32-bit to save memory @@ -904,8 +902,8 @@ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCode pContext->channel_layout = AV_CH_LAYOUT_STEREO; #endif - pStream->pSt->time_base.num = 1; - pStream->pSt->time_base.den = pContext->sample_rate; + pStream->m_pStream->time_base.num = 1; + pStream->m_pStream->time_base.den = pContext->sample_rate; break; case AVMEDIA_TYPE_VIDEO: @@ -919,9 +917,9 @@ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCode * of which frame timestamps are represented. For fixed-fps content, * timebase should be 1/framerate and timestamp increments should be * identical to 1. 
*/ - pStream->pSt->time_base.num = 1; - pStream->pSt->time_base.den = m_FPS; - pContext->time_base = pStream->pSt->time_base; + pStream->m_pStream->time_base.num = 1; + pStream->m_pStream->time_base.den = m_FPS; + pContext->time_base = pStream->m_pStream->time_base; pContext->gop_size = 12; /* emit one intra frame every twelve frames at most */ pContext->pix_fmt = STREAM_PIX_FMT; @@ -951,13 +949,13 @@ bool CVideo::AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCode } /* Some formats want stream headers to be separate. */ - if(pOC->oformat->flags & AVFMT_GLOBALHEADER) + if(pFormatContext->oformat->flags & AVFMT_GLOBALHEADER) pContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; return true; } -void CVideo::WriteFrame(OutputStream *pStream, size_t ThreadIndex) +void CVideo::WriteFrame(COutputStream *pStream, size_t ThreadIndex) { AVPacket *pPacket = av_packet_alloc(); if(pPacket == nullptr) @@ -969,16 +967,16 @@ void CVideo::WriteFrame(OutputStream *pStream, size_t ThreadIndex) pPacket->data = 0; pPacket->size = 0; - avcodec_send_frame(pStream->pEnc, pStream->m_vpFrames[ThreadIndex]); + avcodec_send_frame(pStream->m_pCodecContext, pStream->m_vpFrames[ThreadIndex]); int RecvResult = 0; do { - RecvResult = avcodec_receive_packet(pStream->pEnc, pPacket); + RecvResult = avcodec_receive_packet(pStream->m_pCodecContext, pPacket); if(!RecvResult) { /* rescale output packet timestamp values from codec to stream timebase */ - av_packet_rescale_ts(pPacket, pStream->pEnc->time_base, pStream->pSt->time_base); - pPacket->stream_index = pStream->pSt->index; + av_packet_rescale_ts(pPacket, pStream->m_pCodecContext->time_base, pStream->m_pStream->time_base); + pPacket->stream_index = pStream->m_pStream->index; const int WriteFrameResult = av_interleaved_write_frame(m_pFormatContext, pPacket); if(WriteFrameResult < 0) @@ -1002,7 +1000,7 @@ void CVideo::WriteFrame(OutputStream *pStream, size_t ThreadIndex) av_packet_free(&pPacket); } -void CVideo::FinishFrames(OutputStream 
*pStream) +void CVideo::FinishFrames(COutputStream *pStream) { AVPacket *pPacket = av_packet_alloc(); if(pPacket == nullptr) @@ -1014,16 +1012,16 @@ void CVideo::FinishFrames(OutputStream *pStream) pPacket->data = 0; pPacket->size = 0; - avcodec_send_frame(pStream->pEnc, 0); + avcodec_send_frame(pStream->m_pCodecContext, 0); int RecvResult = 0; do { - RecvResult = avcodec_receive_packet(pStream->pEnc, pPacket); + RecvResult = avcodec_receive_packet(pStream->m_pCodecContext, pPacket); if(!RecvResult) { /* rescale output packet timestamp values from codec to stream timebase */ - av_packet_rescale_ts(pPacket, pStream->pEnc->time_base, pStream->pSt->time_base); - pPacket->stream_index = pStream->pSt->index; + av_packet_rescale_ts(pPacket, pStream->m_pCodecContext->time_base, pStream->m_pStream->time_base); + pPacket->stream_index = pStream->m_pStream->index; const int WriteFrameResult = av_interleaved_write_frame(m_pFormatContext, pPacket); if(WriteFrameResult < 0) @@ -1047,9 +1045,9 @@ void CVideo::FinishFrames(OutputStream *pStream) av_packet_free(&pPacket); } -void CVideo::CloseStream(OutputStream *pStream) +void CVideo::CloseStream(COutputStream *pStream) { - avcodec_free_context(&pStream->pEnc); + avcodec_free_context(&pStream->m_pCodecContext); for(auto *pFrame : pStream->m_vpFrames) av_frame_free(&pFrame); pStream->m_vpFrames.clear(); @@ -1058,13 +1056,13 @@ void CVideo::CloseStream(OutputStream *pStream) av_frame_free(&pFrame); pStream->m_vpTmpFrames.clear(); - for(auto *pSwsContext : pStream->m_vpSwsCtxs) + for(auto *pSwsContext : pStream->m_vpSwsContexts) sws_freeContext(pSwsContext); - pStream->m_vpSwsCtxs.clear(); + pStream->m_vpSwsContexts.clear(); - for(auto *pSwrContext : pStream->m_vpSwrCtxs) + for(auto *pSwrContext : pStream->m_vpSwrContexts) swr_free(&pSwrContext); - pStream->m_vpSwrCtxs.clear(); + pStream->m_vpSwrContexts.clear(); } #endif diff --git a/src/engine/client/video.h b/src/engine/client/video.h index eb00c9f6b..c8801e963 100644 --- 
a/src/engine/client/video.h +++ b/src/engine/client/video.h @@ -15,7 +15,6 @@ extern "C" { #include #include #include -#define ALEN 2048 class IGraphics; class ISound; @@ -24,21 +23,21 @@ class IStorage; extern CLock g_WriteLock; // a wrapper around a single output AVStream -struct OutputStream +class COutputStream { - AVStream *pSt = nullptr; - AVCodecContext *pEnc = nullptr; +public: + AVStream *m_pStream = nullptr; + AVCodecContext *m_pCodecContext = nullptr; /* pts of the next frame that will be generated */ - int64_t NextPts = 0; int64_t m_SamplesCount = 0; int64_t m_SamplesFrameCount = 0; std::vector m_vpFrames; std::vector m_vpTmpFrames; - std::vector m_vpSwsCtxs; - std::vector m_vpSwrCtxs; + std::vector m_vpSwsContexts; + std::vector m_vpSwrContexts; }; class CVideo : public IVideo @@ -65,7 +64,7 @@ public: private: void RunVideoThread(size_t ParentThreadIndex, size_t ThreadIndex) REQUIRES(!g_WriteLock); void FillVideoFrame(size_t ThreadIndex) REQUIRES(!g_WriteLock); - void ReadRGBFromGL(size_t ThreadIndex); + void UpdateVideoBufferFromGraphics(size_t ThreadIndex); void RunAudioThread(size_t ParentThreadIndex, size_t ThreadIndex) REQUIRES(!g_WriteLock); void FillAudioFrame(size_t ThreadIndex); @@ -75,11 +74,11 @@ private: AVFrame *AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height); AVFrame *AllocAudioFrame(enum AVSampleFormat SampleFmt, uint64_t ChannelLayout, int SampleRate, int NbSamples); - void WriteFrame(OutputStream *pStream, size_t ThreadIndex) REQUIRES(g_WriteLock); - void FinishFrames(OutputStream *pStream); - void CloseStream(OutputStream *pStream); + void WriteFrame(COutputStream *pStream, size_t ThreadIndex) REQUIRES(g_WriteLock); + void FinishFrames(COutputStream *pStream); + void CloseStream(COutputStream *pStream); - bool AddStream(OutputStream *pStream, AVFormatContext *pOC, const AVCodec **ppCodec, enum AVCodecID CodecId) const; + bool AddStream(COutputStream *pStream, AVFormatContext *pFormatContext, const AVCodec **ppCodec, 
enum AVCodecID CodecId) const; IGraphics *m_pGraphics; IStorage *m_pStorage; @@ -88,9 +87,8 @@ private: int m_Width; int m_Height; char m_aName[256]; - uint64_t m_VSeq = 0; - uint64_t m_ASeq = 0; - uint64_t m_Vframe; + uint64_t m_VideoFrameIndex = 0; + uint64_t m_AudioFrameIndex = 0; int m_FPS; @@ -102,8 +100,9 @@ private: size_t m_AudioThreads = 2; size_t m_CurAudioThreadIndex = 0; - struct SVideoRecorderThread + class CVideoRecorderThread { + public: std::thread m_Thread; std::mutex m_Mutex; std::condition_variable m_Cond; @@ -117,10 +116,11 @@ private: uint64_t m_VideoFrameToFill = 0; }; - std::vector> m_vVideoThreads; + std::vector> m_vpVideoThreads; - struct SAudioRecorderThread + class CAudioRecorderThread { + public: std::thread m_Thread; std::mutex m_Mutex; std::condition_variable m_Cond; @@ -135,22 +135,28 @@ private: int64_t m_SampleCountStart = 0; }; - std::vector> m_vAudioThreads; + std::vector> m_vpAudioThreads; std::atomic m_ProcessingVideoFrame; std::atomic m_ProcessingAudioFrame; bool m_HasAudio; - struct SVideoSoundBuffer + class CVideoBuffer { - int16_t m_aBuffer[ALEN * 2]; + public: + std::vector m_vBuffer; }; - std::vector m_vBuffer; - std::vector> m_vPixelHelper; + std::vector m_vVideoBuffers; + class CAudioBuffer + { + public: + int16_t m_aBuffer[4096]; + }; + std::vector m_vAudioBuffers; - OutputStream m_VideoStream; - OutputStream m_AudioStream; + COutputStream m_VideoStream; + COutputStream m_AudioStream; const AVCodec *m_pVideoCodec; const AVCodec *m_pAudioCodec; From 3a0e2429d131bbff2c543304638a0b0a60cf2006 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Sat, 4 May 2024 12:35:49 +0200 Subject: [PATCH 09/10] Assert that size of image data matches size of video Crash with assertion when the size of the graphics is different from the video currently being rendered, instead of causing weirder bugs and a corrupted video file. 
--- src/engine/client/video.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/engine/client/video.cpp b/src/engine/client/video.cpp index ffff1687b..697963866 100644 --- a/src/engine/client/video.cpp +++ b/src/engine/client/video.cpp @@ -614,6 +614,8 @@ void CVideo::UpdateVideoBufferFromGraphics(size_t ThreadIndex) uint32_t Height; CImageInfo::EImageFormat Format; m_pGraphics->GetReadPresentedImageDataFuncUnsafe()(Width, Height, Format, m_vVideoBuffers[ThreadIndex].m_vBuffer); + dbg_assert((int)Width == m_Width && (int)Height == m_Height, "Size mismatch between video and graphics"); + dbg_assert(Format == CImageInfo::FORMAT_RGBA, "Unexpected image format"); } AVFrame *CVideo::AllocPicture(enum AVPixelFormat PixFmt, int Width, int Height) From e5927d90244d26bd6d12ed477439361cc96e1681 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Robert=20M=C3=BCller?= Date: Fri, 3 May 2024 22:03:44 +0200 Subject: [PATCH 10/10] Fix crashes when video recording is not started successfully Add additional checks to ensure that the `CVideo::Stop` function and the functions called by it will correctly stop the current video also if the video was not started successfully, i.e. if `CVideo::Start` returned `false` from any of the error branches. In the `CVideo::Stop` function, iterate over the vectors of video and audio threads directly instead of using `m_VideoThreads` and `m_AudioThreads`, which do not reflect the actual count if the initialization failed before the threads were created. In the `CVideo::Stop` function, only call `av_write_trailer` if the video recording was started successfully, i.e. only if `avformat_write_header` was called successfully, as this will otherwise cause the client to crash. Closes #6375. In the `CVideo::Stop` function, only call `avio_closep` if the format context was allocated. In the `CVideo::FinishFrames` function, ensure that the codec has been allocated and opened, otherwise using it is not allowed. 
Add assertions to the `CVideo::Start` and `Stop` functions to ensure that the same video is not started/stopped multiple times. --- src/engine/client/video.cpp | 43 +++++++++++++++++++++++++------------ src/engine/client/video.h | 1 + 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/src/engine/client/video.cpp b/src/engine/client/video.cpp index 697963866..5ec31936e 100644 --- a/src/engine/client/video.cpp +++ b/src/engine/client/video.cpp @@ -96,6 +96,7 @@ CVideo::CVideo(IGraphics *pGraphics, ISound *pSound, IStorage *pStorage, int Wid m_Recording = false; m_Started = false; + m_Stopped = false; m_ProcessingVideoFrame = 0; m_ProcessingAudioFrame = 0; @@ -114,6 +115,8 @@ CVideo::~CVideo() bool CVideo::Start() { + dbg_assert(!m_Started, "Already started"); + // wait for the graphic thread to idle m_pGraphics->WaitForIdle(); @@ -266,6 +269,7 @@ bool CVideo::Start() m_Recording = true; m_Started = true; + m_Stopped = false; ms_Time = time_get(); return true; } @@ -278,29 +282,31 @@ void CVideo::Pause(bool Pause) void CVideo::Stop() { + dbg_assert(!m_Stopped, "Already stopped"); + m_pGraphics->WaitForIdle(); - for(size_t i = 0; i < m_VideoThreads; ++i) + for(auto &pVideoThread : m_vpVideoThreads) { { - std::unique_lock Lock(m_vpVideoThreads[i]->m_Mutex); - m_vpVideoThreads[i]->m_Finished = true; - m_vpVideoThreads[i]->m_Cond.notify_all(); + std::unique_lock Lock(pVideoThread->m_Mutex); + pVideoThread->m_Finished = true; + pVideoThread->m_Cond.notify_all(); } - m_vpVideoThreads[i]->m_Thread.join(); + pVideoThread->m_Thread.join(); } m_vpVideoThreads.clear(); - for(size_t i = 0; i < m_AudioThreads; ++i) + for(auto &pAudioThread : m_vpAudioThreads) { { - std::unique_lock Lock(m_vpAudioThreads[i]->m_Mutex); - m_vpAudioThreads[i]->m_Finished = true; - m_vpAudioThreads[i]->m_Cond.notify_all(); + std::unique_lock Lock(pAudioThread->m_Mutex); + pAudioThread->m_Finished = true; + pAudioThread->m_Cond.notify_all(); } - m_vpAudioThreads[i]->m_Thread.join(); + 
pAudioThread->m_Thread.join(); } m_vpAudioThreads.clear(); @@ -314,24 +320,29 @@ void CVideo::Stop() if(m_HasAudio) FinishFrames(&m_AudioStream); - av_write_trailer(m_pFormatContext); + if(m_pFormatContext && m_Started) + av_write_trailer(m_pFormatContext); CloseStream(&m_VideoStream); if(m_HasAudio) CloseStream(&m_AudioStream); - if(!(m_pFormat->flags & AVFMT_NOFILE)) - avio_closep(&m_pFormatContext->pb); - if(m_pFormatContext) + { + if(!(m_pFormat->flags & AVFMT_NOFILE)) + avio_closep(&m_pFormatContext->pb); + avformat_free_context(m_pFormatContext); + } ISound *volatile pSound = m_pSound; pSound->PauseAudioDevice(); delete ms_pCurrentVideo; pSound->UnpauseAudioDevice(); + + m_Stopped = true; } void CVideo::NextVideoFrameThread() @@ -1004,6 +1015,9 @@ void CVideo::WriteFrame(COutputStream *pStream, size_t ThreadIndex) void CVideo::FinishFrames(COutputStream *pStream) { + if(!pStream->m_pCodecContext || !avcodec_is_open(pStream->m_pCodecContext)) + return; + AVPacket *pPacket = av_packet_alloc(); if(pPacket == nullptr) { @@ -1050,6 +1064,7 @@ void CVideo::FinishFrames(COutputStream *pStream) void CVideo::CloseStream(COutputStream *pStream) { avcodec_free_context(&pStream->m_pCodecContext); + for(auto *pFrame : pStream->m_vpFrames) av_frame_free(&pFrame); pStream->m_vpFrames.clear(); diff --git a/src/engine/client/video.h b/src/engine/client/video.h index c8801e963..63ddc27e6 100644 --- a/src/engine/client/video.h +++ b/src/engine/client/video.h @@ -93,6 +93,7 @@ private: int m_FPS; bool m_Started; + bool m_Stopped; bool m_Recording; size_t m_VideoThreads = 2;