1055: Keep track of lost frames and update time r=heinrich5991 a=Jupeyy

The problem a friend ran into was that on a fresh Windows install, settings like cl_refresh_rate are set to 480.
Since update cycles that render take longer than those that don't, the client would sleep during one update cycle and then take too long in another, where it called the render functions, leading to fewer FPS than cl_refresh_rate allows. This caused mouse lag and occasional frame drops.
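The core of the fix is to carry the render overshoot into the next frame decision instead of discarding it. Here is a minimal sketch of that accounting, not the actual client code; ShouldRender, NowTicks and FreqTicks are illustrative names:

#include <cstdint>

// Sketch: frame limiter that keeps the overshoot of a late frame, so the
// average rate still reaches the configured refresh rate (illustrative names).
static int64_t LastRenderTicks = 0;

bool ShouldRender(int64_t NowTicks, int64_t FreqTicks, int64_t RefreshRate)
{
	int64_t TicksPerFrame = FreqTicks / RefreshRate;
	if(NowTicks - LastRenderTicks < TicksPerFrame)
		return false; // too early for the next frame
	// keep the overshoot so a late frame doesn't also push back the next one,
	// capped at one second: frames older than that are lost anyway
	int64_t Overshoot = (NowTicks - LastRenderTicks) - TicksPerFrame;
	if(Overshoot > FreqTicks)
		Overshoot = FreqTicks;
	LastRenderTicks = NowTicks - Overshoot;
	return true;
}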

Another thing is that select on Windows is not microsecond-accurate, or just often returns too early, which caused the effective refresh rate to be much too high, or the setting to be ignored entirely (probably Windows can only sleep at millisecond granularity, or is, again, extremely inaccurate).
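The patch works around this by measuring how long the wait actually took and folding the error into the next cycle. A minimal sketch of that feedback loop with illustrative names (the real code uses thread_sleep/net_socket_read_wait and LastTime in CClient::Run):

#include <cstdint>

static int64_t LastTimeUs = 0; // virtual wake-up time of the previous cycle

// Returns how long the caller should sleep/wait, in microseconds; may be
// zero or negative when the previous cycle already ran too long.
int64_t NextSleepUs(int64_t NowUs, int64_t RefreshRate)
{
	int64_t SleepUs = (1000000 / RefreshRate) - (NowUs - LastTimeUs);
	// clamp so one very late cycle can't snowball into a burst of updates
	if(SleepUs < -1000000)
		SleepUs = -1000000;
	// pretend we woke up exactly on schedule: if the OS overslept, SleepUs is
	// negative and the next cycle waits less; if it woke early, it waits more
	LastTimeUs = NowUs + SleepUs;
	return SleepUs;
}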

Another thing on Windows is that time_freq might be a "very odd" number ^^
On Unix it's one microsecond (1,000,000 = 1s), but on Windows time_freq depends on the CPU, leading to inaccurate calculation of the sleep time and render time.
This wasn't an insanely huge issue, but it probably skipped a few microseconds here and there.
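For illustration (the 2.4375 MHz frequency is just an assumed example of a CPU-dependent QueryPerformanceCounter value): dividing the frequency down to microseconds first truncates badly, while multiplying first keeps the precision, which is what the new time_get_microseconds does on Windows:

#include <cstdint>
#include <cstdio>

int main()
{
	int64_t Freq = 2437500;   // assumed CPU-dependent tick frequency, 2.4375 MHz
	int64_t Ticks = Freq * 5; // exactly 5 seconds worth of ticks

	// dividing the frequency first truncates 2.4375 to 2 -> ~22% error
	printf("%lld us\n", (long long)(Ticks / (Freq / 1000 / 1000))); // 6093750
	// multiplying first keeps the precision (the Windows path of the fix)
	printf("%lld us\n", (long long)((Ticks * 1000000) / Freq));     // 5000000
	return 0;
}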

This fix should stabilize the frame times. I'd like to hear your opinion on this, though.
bors[bot] 2018-03-13 19:41:38 +00:00
commit cc6449bcd6
6 changed files with 103 additions and 30 deletions

View file

@@ -944,14 +944,9 @@ void set_new_tick()
 }
 
 /* ----- time ----- */
-int64 time_get()
+int64 time_get_impl()
 {
-	static int64 last = 0;
-	if(new_tick == 0)
-		return last;
-	if(new_tick != -1)
-		new_tick = 0;
 
 	{
 #if defined(CONF_PLATFORM_MACOSX)
 		static int got_timebase = 0;
@@ -986,6 +981,18 @@ int64 time_get()
 	}
 }
 
+int64 time_get()
+{
+	static int64 last = 0;
+	if(new_tick == 0)
+		return last;
+	if(new_tick != -1)
+		new_tick = 0;
+
+	last = time_get_impl();
+	return last;
+}
+
 int64 time_freq()
 {
 #if defined(CONF_PLATFORM_MACOSX)
@@ -1001,6 +1008,15 @@ int64 time_freq()
 #endif
 }
 
+int64 time_get_microseconds()
+{
+#if defined(CONF_FAMILY_WINDOWS)
+	return (time_get_impl() * (int64)1000000) / time_freq();
+#else
+	return time_get_impl() / (time_freq() / 1000 / 1000);
+#endif
+}
+
 /* ----- network ----- */
 static void netaddr_to_sockaddr_in(const NETADDR *src, struct sockaddr_in *dest)
 {

View file

@@ -591,6 +591,18 @@ typedef unsigned long long uint64;
 void set_new_tick();
 
+/*
+	Function: time_get_impl
+		Fetches a sample from a high resolution timer.
+
+	Returns:
+		Current value of the timer.
+
+	Remarks:
+		To know how fast the timer is ticking, see <time_freq>.
+*/
+int64 time_get_impl();
+
 /*
 	Function: time_get
 		Fetches a sample from a high resolution timer.
@@ -600,6 +612,7 @@ void set_new_tick();
 	Remarks:
 		To know how fast the timer is ticking, see <time_freq>.
+		Uses <time_get_impl> to fetch the sample.
 */
 int64 time_get();
@@ -621,6 +634,15 @@ int64 time_freq();
 */
 int time_timestamp();
 
+/*
+	Function: time_get_microseconds
+		Fetches a sample from a high resolution timer and converts it to microseconds.
+
+	Returns:
+		Current value of the timer in microseconds.
+*/
+int64 time_get_microseconds();
+
 /* Group: Network General */
 typedef struct
 {

View file

@@ -901,8 +901,8 @@ void CCommandProcessorFragment_OpenGL3_3::Cmd_Texture_Create(const CCommandBuffe
 		switch(StoreOglformat)
 		{
 		case GL_RGB: StoreOglformat = GL_COMPRESSED_RGB; break;
-		// This needs further checks. it seems on some GPUs COMPRESSED_ALPHA isn't in the core profile
-		case GL_RED: StoreOglformat = GL_COMPRESSED_RGBA; break;
+		// COMPRESSED_ALPHA is deprecated, so use a different single-channel format.
+		case GL_RED: StoreOglformat = GL_COMPRESSED_RED; break;
 		case GL_RGBA: StoreOglformat = GL_COMPRESSED_RGBA; break;
 		default: StoreOglformat = GL_COMPRESSED_RGBA;
 		}
@@ -927,7 +927,7 @@ void CCommandProcessorFragment_OpenGL3_3::Cmd_Texture_Create(const CCommandBuffe
 		//Bind the texture 2D.
 		GLint swizzleMask[] = {GL_ONE, GL_ONE, GL_ONE, GL_RED};
 		glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzleMask);
-		StoreOglformat = GL_RGBA;
+		StoreOglformat = GL_R8;
 	}
 
 	if(pCommand->m_Flags&CCommandBuffer::TEXFLAG_NOMIPMAPS)

View file

@@ -2700,7 +2700,8 @@ void CClient::Run()
 	bool LastE = false;
 	bool LastG = false;
 
-	int64 LastTime = time_get();
+	int64 LastTime = time_get_microseconds();
+	int64 LastRenderTime = time_get();
 	while(1)
 	{
@@ -2789,7 +2790,7 @@ void CClient::Run()
 			if((g_Config.m_GfxBackgroundRender || m_pGraphics->WindowOpen())
 				&& (!g_Config.m_GfxAsyncRenderOld || m_pGraphics->IsIdle())
-				&& (!g_Config.m_GfxRefreshRate || Now >= m_LastRenderTime + time_freq() / g_Config.m_GfxRefreshRate))
+				&& (!g_Config.m_GfxRefreshRate || (time_freq() / (int64)g_Config.m_GfxRefreshRate) <= Now - LastRenderTime))
 			{
 				m_RenderFrames++;
@@ -2801,6 +2802,12 @@ void CClient::Run()
 					m_RenderFrameTimeHigh = m_RenderFrameTime;
 				m_FpsGraph.Add(1.0f/m_RenderFrameTime, 1,1,1);
 
+				// keep the overflow time - it's used to make sure the gfx refresh rate is reached
+				int64 AdditionalTime = g_Config.m_GfxRefreshRate ? ((Now - LastRenderTime) - (time_freq() / (int64)g_Config.m_GfxRefreshRate)) : 0;
+				// if more than a second was lost, cap the additional time (drop the frames that are lost already)
+				if(AdditionalTime > time_freq())
+					AdditionalTime = time_freq();
+				LastRenderTime = Now - AdditionalTime;
 				m_LastRenderTime = Now;
 
 #ifdef CONF_DEBUG
@@ -2830,8 +2837,10 @@ void CClient::Run()
 					}
 					m_pGraphics->Swap();
 				}
+
+				Input()->NextFrame();
 			}
 
 			if(Input()->VideoRestartNeeded())
 			{
 				m_pGraphics->Init();
@ -2852,19 +2861,43 @@ void CClient::Run()
#endif
// beNice
int64 Now = time_get();
int64 Now = time_get_microseconds();
int64 SleepTimeInMicroSeconds = 0;
bool Slept = false;
if(
#ifdef CONF_DEBUG
g_Config.m_DbgStress ||
#endif
(g_Config.m_ClRefreshRateInactive && !m_pGraphics->WindowActive()))
{
thread_sleep(max(1000 * (LastTime + time_freq() / g_Config.m_ClRefreshRateInactive - Now) / time_freq(), (int64)0));
SleepTimeInMicroSeconds = ((int64)1000000 / (int64)g_Config.m_ClRefreshRateInactive) - (Now - LastTime);
if(SleepTimeInMicroSeconds / (int64)1000 > (int64)0)
thread_sleep(SleepTimeInMicroSeconds / (int64)1000);
Slept = true;
}
else if(g_Config.m_ClRefreshRate)
{
net_socket_read_wait(m_NetClient[0].m_Socket, max(1000000 * (LastTime + time_freq() / g_Config.m_ClRefreshRate - Now) / time_freq(), (int64)0));
SleepTimeInMicroSeconds = ((int64)1000000 / (int64)g_Config.m_ClRefreshRate) - (Now - LastTime);
if(SleepTimeInMicroSeconds > (int64)0)
net_socket_read_wait(m_NetClient[0].m_Socket, SleepTimeInMicroSeconds);
Slept = true;
}
if(Slept)
{
// if the diff gets too small it shouldn't get even smaller (drop the updates, that could not be handled)
if(SleepTimeInMicroSeconds < (int64)-1000000)
SleepTimeInMicroSeconds = (int64)-1000000;
// don't go higher than the game ticks speed, because the network is waking up the client with the server's snapshots anyway
else if(SleepTimeInMicroSeconds > (int64)1000000 / m_GameTickSpeed)
SleepTimeInMicroSeconds = (int64)1000000 / m_GameTickSpeed;
// the time diff between the time that was used actually used and the time the thread should sleep/wait
// will be calculated in the sleep time of the next update tick by faking the time it should have slept/wait.
// so two cases (and the case it slept exactly the time it should):
// - the thread slept/waited too long, then it adjust the time to sleep/wait less in the next update tick
// - the thread slept/waited too less, then it adjust the time to sleep/wait more in the next update tick
LastTime = Now + SleepTimeInMicroSeconds;
}
else
LastTime = Now;
if(g_Config.m_DbgHitch)

View file

@@ -112,7 +112,7 @@ MACRO_CONFIG_INT(GfxHighDetail, gfx_high_detail, 1, 0, 1, CFGFLAG_SAVE|CFGFLAG_C
 MACRO_CONFIG_INT(GfxTextureQuality, gfx_texture_quality, 1, 0, 1, CFGFLAG_SAVE|CFGFLAG_CLIENT, "")
 #endif
 MACRO_CONFIG_INT(GfxFsaaSamples, gfx_fsaa_samples, 0, 0, 16, CFGFLAG_SAVE|CFGFLAG_CLIENT, "FSAA Samples")
-MACRO_CONFIG_INT(GfxRefreshRate, gfx_refresh_rate, 0, 0, 1000, CFGFLAG_SAVE|CFGFLAG_CLIENT, "Screen refresh rate")
+MACRO_CONFIG_INT(GfxRefreshRate, gfx_refresh_rate, 0, 0, 10000, CFGFLAG_SAVE|CFGFLAG_CLIENT, "Screen refresh rate")
 MACRO_CONFIG_INT(GfxFinish, gfx_finish, 0, 0, 1, CFGFLAG_SAVE|CFGFLAG_CLIENT, "")
 MACRO_CONFIG_INT(GfxBackgroundRender, gfx_backgroundrender, 1, 0, 1, CFGFLAG_SAVE|CFGFLAG_CLIENT, "Render graphics when window is in background")
 MACRO_CONFIG_INT(GfxTextOverlay, gfx_text_overlay, 10, 1, 100, CFGFLAG_SAVE|CFGFLAG_CLIENT, "Stop rendering textoverlay in editor or with entities: high value = less details = more speed")

View file

@@ -1077,7 +1077,9 @@ void CMenus::RenderSettingsGraphics(CUIRect MainView)
 	str_format(aBuf, sizeof(aBuf), "%s: %s", Localize("Refresh Rate"), "");
 	UI()->DoLabelScaled(&Label, aBuf, 14.0f, -1);
 	Button.HMargin(2.0f, &Button);
-	g_Config.m_GfxRefreshRate = static_cast<int>(DoScrollbarH(&g_Config.m_GfxRefreshRate, &Button, g_Config.m_GfxRefreshRate/1000.0f)*1000.0f+0.1f);
+	int NewRefreshRate = static_cast<int>(DoScrollbarH(&g_Config.m_GfxRefreshRate, &Button, (min(g_Config.m_GfxRefreshRate, 1000))/1000.0f)*1000.0f+0.1f);
+	if(g_Config.m_GfxRefreshRate <= 1000 || NewRefreshRate < 1000)
+		g_Config.m_GfxRefreshRate = NewRefreshRate;
 
 	CUIRect Text;
 	MainView.HSplitTop(20.0f, 0, &MainView);