base refactor progress

This commit is contained in:
jacob 2025-07-29 21:24:34 -05:00
parent 580447d3d5
commit acc006e316
24 changed files with 268 additions and 250 deletions

View File

@ -22,7 +22,7 @@ Arena *AllocArena(u64 reserve)
(*(volatile int *)0) = 0;
}
u64 reserved = reserve;
gstat_add(GSTAT_MEMORY_RESERVED, reserve);
AddGstat(GSTAT_MEMORY_RESERVED, reserve);
/* Commit initial block */
base = memory_commit(base, ArenaBlockSize);
@ -40,8 +40,8 @@ Arena *AllocArena(u64 reserve)
__profalloc(base, ArenaBlockSize);
AsanPoison(base + sizeof(Arena), ArenaBlockSize - sizeof(Arena));
gstat_add(GSTAT_MEMORY_COMMITTED, ArenaBlockSize);
gstat_add(GSTAT_NUM_ARENAS, 1);
AddGstat(GSTAT_MEMORY_COMMITTED, ArenaBlockSize);
AddGstat(GSTAT_NUM_ARENAS, 1);
/* Create & return arena header at beginning of block */
Arena *arena = (Arena *)base;
@ -56,9 +56,9 @@ void ReleaseArena(Arena *arena)
AsanUnpoison(arena, arena->committed + ArenaHeaderSize);
__prof;
__proffree(arena);
gstat_add(GSTAT_MEMORY_COMMITTED, -(i64)(arena->committed - ArenaHeaderSize));
gstat_add(GSTAT_MEMORY_RESERVED, -(i64)(arena->reserved));
gstat_add(GSTAT_NUM_ARENAS, -1);
AddGstat(GSTAT_MEMORY_COMMITTED, -(i64)(arena->committed - ArenaHeaderSize));
AddGstat(GSTAT_MEMORY_RESERVED, -(i64)(arena->reserved));
AddGstat(GSTAT_NUM_ARENAS, -1);
memory_release(arena);
}
@ -101,7 +101,7 @@ void *PushBytesNoZero(Arena *arena, u64 size, u64 align)
(*(volatile int *)0) = 0;
}
arena->committed += commit_bytes;
gstat_add(GSTAT_MEMORY_COMMITTED, commit_bytes);
AddGstat(GSTAT_MEMORY_COMMITTED, commit_bytes);
__proffree(arena);
__profalloc(arena, arena->committed + ArenaHeaderSize);
AsanPoison(commit_address, commit_bytes);

View File

@ -38,7 +38,7 @@ Struct(ScratchCtx)
Struct(SharedScratchCtx)
{
ScratchCtx scratch_contexts[MAX_FIBERS];
ScratchCtx scratch_contexts[MaxFibers];
};
extern SharedScratchCtx shared_scratch_ctx;

View File

@ -55,33 +55,33 @@ AlignedStruct(Atomic64Padded, 64)
StaticAssert(sizeof(Atomic64Padded) == 64 && alignof(Atomic64Padded) == 64);
////////////////////////////////
//~ Atomic impl
//~ Atomic operations
#if PlatformIsWindows
ForceInline i8 atomic8_fetch(Atomic8 *x) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, 0, 0); }
ForceInline i8 atomic8_fetch_set(Atomic8 *x, i8 e) { return (i8)_InterlockedExchange8((char *)&x->_v, e); }
ForceInline i8 atomic8_fetch_test_set(Atomic8 *x, i8 c, i8 e) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, e, c); }
ForceInline i8 atomic8_fetch_xor(Atomic8 *x, i8 c) { return (i8)_InterlockedXor8((char *)&x->_v, c); }
ForceInline i8 atomic8_fetch_add(Atomic8 *x, i8 a) { return (i8)_InterlockedExchangeAdd8((char *)&x->_v, a); }
ForceInline i8 Atomic8Fetch(Atomic8 *x) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, 0, 0); }
ForceInline i8 Atomic8FetchSet(Atomic8 *x, i8 e) { return (i8)_InterlockedExchange8((char *)&x->_v, e); }
ForceInline i8 Atomic8FetchTestSet(Atomic8 *x, i8 c, i8 e) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, e, c); }
ForceInline i8 Atomic8FetchXor(Atomic8 *x, i8 c) { return (i8)_InterlockedXor8((char *)&x->_v, c); }
ForceInline i8 Atomic8FetchAdd(Atomic8 *x, i8 a) { return (i8)_InterlockedExchangeAdd8((char *)&x->_v, a); }
ForceInline i16 atomic16_fetch(Atomic16 *x) { return (i16)_InterlockedCompareExchange16(&x->_v, 0, 0); }
ForceInline i16 atomic16_fetch_set(Atomic16 *x, i16 e) { return (i16)_InterlockedExchange16(&x->_v, e); }
ForceInline i16 atomic16_fetch_test_set(Atomic16 *x, i16 c, i16 e) { return (i16)_InterlockedCompareExchange16(&x->_v, e, c); }
ForceInline i16 atomic16_fetch_xor(Atomic16 *x, i16 c) { return (i16)_InterlockedXor16(&x->_v, c); }
ForceInline i16 atomic16_fetch_add(Atomic16 *x, i16 a) { return (i16)_InterlockedExchangeAdd16(&x->_v, a); }
ForceInline i16 Atomic16Fetch(Atomic16 *x) { return (i16)_InterlockedCompareExchange16(&x->_v, 0, 0); }
ForceInline i16 Atomic16FetchSet(Atomic16 *x, i16 e) { return (i16)_InterlockedExchange16(&x->_v, e); }
ForceInline i16 Atomic16FetchTestSet(Atomic16 *x, i16 c, i16 e) { return (i16)_InterlockedCompareExchange16(&x->_v, e, c); }
ForceInline i16 Atomic16FetchTestXor(Atomic16 *x, i16 c) { return (i16)_InterlockedXor16(&x->_v, c); }
ForceInline i16 Atomic16FetchTestAdd(Atomic16 *x, i16 a) { return (i16)_InterlockedExchangeAdd16(&x->_v, a); }
ForceInline i32 atomic32_fetch(Atomic32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
ForceInline i32 atomic32_fetch_set(Atomic32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); }
ForceInline i32 atomic32_fetch_test_set(Atomic32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); }
ForceInline i32 atomic32_fetch_xor(Atomic32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v, c); }
ForceInline i32 atomic32_fetch_add(Atomic32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); }
ForceInline i32 Atomic32Fetch(Atomic32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
ForceInline i32 Atomic32FetchSet(Atomic32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); }
ForceInline i32 Atomic32FetchTestSet(Atomic32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); }
ForceInline i32 Atomic32FetchXor(Atomic32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v, c); }
ForceInline i32 Atomic32FetchAdd(Atomic32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); }
ForceInline i64 atomic64_fetch(Atomic64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); }
ForceInline i64 atomic64_fetch_set(Atomic64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); }
ForceInline i64 atomic64_fetch_test_set(Atomic64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); }
ForceInline i64 atomic64_fetch_xor(Atomic64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); }
ForceInline i64 atomic64_fetch_add(Atomic64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); }
ForceInline i64 Atomic64Fetch(Atomic64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); }
ForceInline i64 Atomic64FetchSet(Atomic64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); }
ForceInline i64 Atomic64FetchTestSet(Atomic64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); }
ForceInline i64 Atomic64FetchXor(Atomic64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); }
ForceInline i64 Atomic64FetchAdd(Atomic64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); }
#else
# error Atomics not implemented

View File

@ -1,6 +1,7 @@
////////////////////////////////
//~ Buddy types
//- Block
Struct(BuddyBlock)
{
b32 is_used;
@ -15,6 +16,7 @@ Struct(BuddyBlock)
u8 *memory;
};
//- Level
Struct(BuddyLevel)
{
struct BuddyCtx *ctx;
@ -24,6 +26,7 @@ Struct(BuddyLevel)
BuddyBlock *first_unused_block;
};
//- Ctx
Struct(BuddyCtx)
{
Arena *meta_arena;

View File

@ -139,6 +139,12 @@ void __asan_unpoison_memory_region(void const volatile *add, size_t);
# define AsanUnpoison(addr, size)
#endif
//- Allow RTC in STL
/* Silence Msvc Warning */
#if RtcIsEnabled && CompilerIsMsvc
# define _ALLOW_RTCc_IN_STL 1
#endif
////////////////////////////////
//~ Common utility macros

View File

@ -1,4 +1,4 @@
#define MAX_FIBERS 4096
#define MaxFibers 4096
StaticAssert(MAX_FIBERS < I16Max); /* Fiber id should fit max fibers */
StaticAssert(MaxFibers < I16Max); /* Fiber id should fit max fibers */
i16 FiberId(void);

View File

@ -1,3 +1,3 @@
#if GSTAT_ENABLED
struct _gstats _g_gstats = ZI;
#if GstatIsEnabled
SharedGstatCtx _shared_gstat_ctx = ZI;
#endif

View File

@ -1,8 +1,9 @@
/* Program-wide statistics */
/* Application-wide statistics */
#if GSTAT_ENABLED
#if GstatIsEnabled
struct _gstats {
Struct(SharedGstatCtx)
{
Atomic64Padded GSTAT_SOCK_BYTES_SENT;
Atomic64Padded GSTAT_SOCK_BYTES_RECEIVED;
Atomic64Padded GSTAT_MEMORY_COMMITTED;
@ -11,17 +12,16 @@ struct _gstats {
Atomic64Padded GSTAT_DEBUG_STEPS;
};
extern struct _gstats _g_gstats;
extern SharedGstatCtx _shared_gstat_ctx;
#define gstat_set(name, value) atomic64_fetch_set(&_g_gstats.name.v, (value))
#define gstat_add(name, value) atomic64_fetch_add(&_g_gstats.name.v, (value))
#define gstat_get(name) atomic64_fetch(&_g_gstats.name.v)
#define SetGstat(name, value) Atomic64FetchSet(&_shared_gstat_ctx.name.v, (value))
#define AddGstat(name, value) Atomic64FetchAdd(&_shared_gstat_ctx.name.v, (value))
#define GetGstat(name) Atomic64Fetch(&_shared_gstat_ctx.name.v)
#else
#define gstat_set(name, value)
#define gstat_add(name, value)
#define gstat_sub(name, value)
#define gstat_get(name) 0
#define SetGstat(name, value)
#define AddGstat(name, value)
#define GetGstat(name) 0
#endif

View File

@ -1,34 +1,25 @@
#if CompilerIsMsvc
/* ========================== *
* Msvc RC file lookup
* ========================== */
#define WIN32_LEAN_AND_MEAN
#define UNICODE
#include <Windows.h>
struct rc_search_params {
/* In */
String name_lower;
/* Out */
b32 found;
String data;
};
/* Find first resource with `type` and return the data in `udata`. */
internal BOOL CALLBACK enum_func(HMODULE module, LPCWSTR type, LPCWSTR wstr_entry_name, LONG_PTR udata)
BOOL CALLBACK IncbinEnumerateResourceNamesFunc(HMODULE module, LPCWSTR type, LPCWSTR wstr_entry_name, LONG_PTR udata)
{
TempArena scratch = BeginScratchNoConflict();
struct rc_search_params *params = (struct rc_search_params *)udata;
IncbinRcSearchParams *params = (IncbinRcSearchParams *)udata;
String entry_name_lower = string_lower(scratch.arena, string_from_wstr_no_limit(scratch.arena, (LPWSTR)wstr_entry_name));
params->found = 0;
params->data = STRING(0, 0);
if (string_eq(entry_name_lower, params->name_lower)) {
if (string_eq(entry_name_lower, params->name_lower))
{
HRSRC hres = FindResourceW(module, wstr_entry_name, type);
if (hres) {
if (hres)
{
HGLOBAL hg = LoadResource(module, hres);
if (hg) {
if (hg)
{
params->found = 1;
params->data.len = SizeofResource(module, hres);
params->data.text = LockResource(hg);
@ -39,20 +30,24 @@ internal BOOL CALLBACK enum_func(HMODULE module, LPCWSTR type, LPCWSTR wstr_entr
return !params->found;
}
String _incbin_get(_IncbinRcResource *inc)
String StringFromIncbinRcResource(IncbinRcResource *inc)
{
enum _incbin_state state = atomic32_fetch(&inc->state);
if (state != INCBIN_STATE_SEARCHED) {
IncbinStatus state = Atomic32Fetch(&inc->state);
if (state != IncbinStatus_Searched)
{
TempArena scratch = BeginScratchNoConflict();
if (state == INCBIN_STATE_UNSEARCHED) {
enum _incbin_state v = atomic32_fetch_test_set(&inc->state, state, INCBIN_STATE_SEARCHING);
if (v == state) {
if (state == IncbinStatus_Unsearched)
{
IncbinStatus v = Atomic32FetchTestSet(&inc->state, state, IncbinStatus_Searching);
if (v == state)
{
/* Search RC file for the resource name */
String name_lower = string_lower(scratch.arena, inc->rc_name);
struct rc_search_params params = { .name_lower = name_lower };
EnumResourceNamesW(0, RT_RCDATA, &enum_func, (LONG_PTR)&params);
if (!params.found) {
IncbinRcSearchParams params = { .name_lower = name_lower };
EnumResourceNamesW(0, RT_RCDATA, &IncbinEnumerateResourceNamesFunc, (LONG_PTR)&params);
if (!params.found)
{
/* FIXME: enable this */
//P_Panic(string_format(scratch.arena,
// LIT("INCBIN include not found in RC file: \"%F\""),
@ -60,17 +55,20 @@ String _incbin_get(_IncbinRcResource *inc)
(*(volatile int *)0) = 0;
}
inc->data = params.data;
state = INCBIN_STATE_SEARCHED;
atomic32_fetch_set(&inc->state, state);
} else {
state = IncbinStatus_Searched;
Atomic32FetchSet(&inc->state, state);
}
else
{
state = v;
}
}
/* Spin while another thread searches */
while (state != INCBIN_STATE_SEARCHED) {
while (state != IncbinStatus_Searched)
{
ix_pause();
state = atomic32_fetch(&inc->state);
state = Atomic32Fetch(&inc->state);
}
EndScratch(scratch);
@ -78,4 +76,4 @@ String _incbin_get(_IncbinRcResource *inc)
return inc->data;
}
#endif
#endif /* CompilerIsMsvc */

View File

@ -1,65 +1,78 @@
#if CompilerIsMsvc
/* ========================== *
* Msvc RC file incbin
*
* NOTE: Msvc doesn't have an Inline assembler that can include binary data.
* So instead these macros will trigger a lookup into the embedded RC file for
* entries matched by name (requires the build system to generate and link RC
* file).
* ========================== */
////////////////////////////////
//~ Msvc incbin types
#define INCBIN_INCLUDE(var, _rc_name) static _IncbinRcResource _incbin_ ## var = { .rc_name = LIT_NOCAST((_rc_name)) }
#define INCBIN_GET(var) _incbin_get(&_incbin_ ## var)
enum _incbin_state {
INCBIN_STATE_UNSEARCHED,
INCBIN_STATE_SEARCHING,
INCBIN_STATE_SEARCHED
Struct(IncbinRcSearchParams)
{
/* In */
String name_lower;
/* Out */
b32 found;
String data;
};
Struct(_IncbinRcResource) {
typedef i32 IncbinStatus; enum
{
IncbinStatus_Unsearched,
IncbinStatus_Searching,
IncbinStatus_Searched
};
Struct(IncbinRcResource)
{
Atomic32 state;
String rc_name;
String data;
};
String _incbin_get(struct _IncbinRcResource *inc);
////////////////////////////////
//~ Msvc incbin operations
#else
b32 IncbinEnumerateResourceNamesFunc(void *module, const wchar_t *type, const wchar_t *wstr_entry_name, i64 udata);
String StringFromIncbinRcResource(IncbinRcResource *inc);
/* ========================== *
* Clang incbin
* ========================== */
/* NOTE: Msvc doesn't have an Inline assembler that can include binary data.
* So instead these macros will trigger a lookup into the embedded RC file for
* entries matched by name (this requires the build system to generate and link
* RC file).
*/
#define INCBINSTR2(x) #x
#define INCBINSTR(x) INCBINSTR2(x)
#define IncbinInclude(var, _rc_name) static IncbinRcResource _incbin_ ## var = { .rc_name = LIT_NOCAST((_rc_name)) }
#define IncbinGet(var) StringFromIncbinRcResource(&_incbin_ ## var)
String StringFromIncbinRcResource(struct IncbinRcResource *inc);
#else /* CompilerIsMsvc */
////////////////////////////////
//~ Clang incbin operations
#if PlatformIsWindows
# define INCBIN_SECTION ".rdata, \"dr\""
# define IncbinSection ".rdata, \"dr\""
#elif PlatformIsMac
# define INCBIN_SECTION "__TEXT,__const"
# define IncbinSection "__TEXT,__const"
#else
# define INCBIN_SECTION ".rodata"
# define IncbinSection ".rodata"
#endif
/* Includes raw binary data into the executable. */
/* https://gist.github.com/mmozeiko/ed9655cf50341553d282 */
#define INCBIN_INCLUDE(var, filename) \
__asm__(".section " INCBIN_SECTION "\n" \
".global _incbin_" INCBINSTR(var) "_start\n" \
/* Includes raw binary data into the executable. */
/* https://gist.github.com/mmozeiko/ed9655cf50341553d282 */
#define IncbinInclude(var, filename) \
__asm__(".section " IncbinSection "\n" \
".global _incbin_" Stringize(var) "_start\n" \
".balign 16\n" \
"_incbin_" INCBINSTR(var) "_start:\n" \
"_incbin_" Stringize(var) "_start:\n" \
".incbin \"" filename "\"\n" \
\
".global _incbin_" INCBINSTR(var) "_end\n" \
".global _incbin_" Stringize(var) "_end\n" \
".balign 1\n" \
"_incbin_" INCBINSTR(var) "_end:\n" \
"_incbin_" Stringize(var) "_end:\n" \
); \
extern __attribute((aligned(16))) const char _incbin_ ## var ## _start[]; \
extern const char _incbin_ ## var ## _end[]
/* Retrieve a string from included data using the variable supplied to INCBIN_INCLUDE */
#define INCBIN_GET(var) STRING_FROM_POINTERS(_incbin_ ## var ## _start, _incbin_ ## var ## _end)
/* Retrieve a string from included data using the variable supplied to IncbinInclude */
#define IncbinGet(var) STRING_FROM_POINTERS(_incbin_ ## var ## _start, _incbin_ ## var ## _end)
#endif
#endif /* CompilerIsClang */

View File

@ -17,9 +17,9 @@ internal void _dbgbreakable(void)
#define DBGSTEP \
dbg_step++; \
if (dbg_step >= gstat_get(GSTAT_DEBUG_STEPS)) { \
if (dbg_step >= GetGstat(GSTAT_DEBUG_STEPS)) { \
goto abort; \
} else if (dbg_step >= gstat_get(GSTAT_DEBUG_STEPS) - 1) { \
} else if (dbg_step >= GetGstat(GSTAT_DEBUG_STEPS) - 1) { \
_dbgbreakable(); \
} (void)0
#else

View File

@ -80,7 +80,7 @@
#define BITBUFF_TEST RtcIsEnabled
/* If enabled, things like network writes & memory allocations will be tracked in a global statistics struct */
#define GSTAT_ENABLED 1
#define GstatIsEnabled 1
#define PROF_THREAD_GROUP_FIBERS -(i64)Gibi(1)
#define PROF_THREAD_GROUP_SCHEDULER -(i64)Mebi(3)

View File

@ -16,8 +16,6 @@ DXC_Result dxc_compile(Arena *arena, String shader_source, i32 num_args, String
# pragma clang diagnostic ignored "-Wlanguage-extension-token"
#endif
#define _ALLOW_RtcIsEnabledc_IN_STL
#pragma warning(push, 0)
# define WIN32_LEAN_AND_MEAN
# define UNICODE

View File

@ -382,7 +382,7 @@ Global struct {
void gp_startup(void)
{
__prof;
if (atomic32_fetch_test_set(&G.initialized, 0, 1) != 0) {
if (Atomic32FetchTestSet(&G.initialized, 0, 1) != 0) {
P_Panic(LIT("GP layer already initialized"));
}
@ -2916,7 +2916,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
i32 step_length = -1;
/* TODO: Remove this */
u64 max_steps = gstat_get(GSTAT_DEBUG_STEPS);
u64 max_steps = GetGstat(GSTAT_DEBUG_STEPS);
u64 step = 0;
while (step_length != 0 && step < max_steps) {
__profn("Flood step");

View File

@ -4,16 +4,16 @@
* changes to an embedded file. */
#if RESOURCES_EMBEDDED
INCBIN_INCLUDE(res_tar, IncbinDir "res.tar");
IncbinInclude(res_tar, IncbinDir "res.tar");
String inc_res_tar(void)
{
return INCBIN_GET(res_tar);
return IncbinGet(res_tar);
}
#endif
INCBIN_INCLUDE(dxc_tar, IncbinDir "dxc.tar");
IncbinInclude(dxc_tar, IncbinDir "dxc.tar");
String inc_dxc_tar(void)
{
return INCBIN_GET(dxc_tar);
return IncbinGet(dxc_tar);
}

View File

@ -84,7 +84,7 @@ MP3_Result mp3_decode(Arena *arena, String encoded, u32 sample_rate, u32 flags)
* Read
* ========================== */
res.samples_count = PushDry(arena, i16);
res.samples = PushDry(arena, i16);
u64 sample_bytes_read = 0;
for (;;) {
IMFSample *sample;

View File

@ -54,7 +54,7 @@ void P_LogStartup(String logfile_path)
ctx->file_valid = 1;
}
}
atomic32_fetch_set(&ctx->initialized, 1);
Atomic32FetchSet(&ctx->initialized, 1);
}
////////////////////////////////
@ -63,7 +63,7 @@ void P_LogStartup(String logfile_path)
void P_RegisterLogCallback(P_LogEventCallbackFunc *func, i32 level)
{
P_SharedLogCtx *ctx = &P_shared_log_ctx;
if (!atomic32_fetch(&ctx->initialized)) { return; }
if (!Atomic32Fetch(&ctx->initialized)) { return; }
P_Lock lock = P_LockE(&ctx->callbacks_mutex);
{
LogEventCallback *callback = PushStruct(ctx->callbacks_arena, LogEventCallback);
@ -89,7 +89,7 @@ void P__LogAppend(String msg)
{
__prof;
P_SharedLogCtx *ctx = &P_shared_log_ctx;
if (!atomic32_fetch(&ctx->initialized)) { return; }
if (!Atomic32Fetch(&ctx->initialized)) { return; }
if (ctx->file_valid)
{
@ -108,7 +108,7 @@ void P__LogAppend(String msg)
void P__LogPanic(String msg)
{
P_SharedLogCtx *ctx = &P_shared_log_ctx;
if (!atomic32_fetch(&ctx->initialized)) { return; }
if (!Atomic32Fetch(&ctx->initialized)) { return; }
if (ctx->file_valid)
{
@ -128,7 +128,7 @@ void P__LogFV(i32 level, String fmt, va_list args)
#endif
{
P_SharedLogCtx *ctx = &P_shared_log_ctx;
if (!atomic32_fetch(&ctx->initialized)) { return; }
if (!Atomic32Fetch(&ctx->initialized)) { return; }
TempArena scratch = BeginScratchNoConflict();
String msg = string_formatv(scratch.arena, fmt, args);
#if P_IncludeLogSourceLocation
@ -149,7 +149,7 @@ void P__LogF(i32 level, String fmt, ...)
#endif
{
P_SharedLogCtx *ctx = &P_shared_log_ctx;
if (!atomic32_fetch(&ctx->initialized)) { return; }
if (!Atomic32Fetch(&ctx->initialized)) { return; }
va_list args;
va_start(args, fmt);
#if P_IncludeLogSourceLocation
@ -171,7 +171,7 @@ void P__log(i32 level, String msg)
{
__prof;
P_SharedLogCtx *ctx = &P_shared_log_ctx;
if (!atomic32_fetch(&ctx->initialized)) { return; }
if (!Atomic32Fetch(&ctx->initialized)) { return; }
TempArena scratch = BeginScratchNoConflict();
P_LogLevelSettings settings = P_log_settings[level];

View File

@ -9,7 +9,7 @@ P_Lock P_LockSpinE(P_Mutex *m, i32 spin)
while (!locked)
{
++spin_cnt;
u32 v = atomic32_fetch_test_set(&m->v, 0, 0x80000000);
u32 v = Atomic32FetchTestSet(&m->v, 0, 0x80000000);
if (v == 0)
{
locked = 1;
@ -17,11 +17,11 @@ P_Lock P_LockSpinE(P_Mutex *m, i32 spin)
else if (v == 0x40000000)
{
/* Lock has pending bit set, try to lock */
u32 swp = atomic32_fetch_test_set(&m->v, v, 0x80000000);
u32 swp = Atomic32FetchTestSet(&m->v, v, 0x80000000);
while (swp != v && swp == 0x40000000)
{
v = swp;
swp = atomic32_fetch_test_set(&m->v, v, 0x80000000);
swp = Atomic32FetchTestSet(&m->v, v, 0x80000000);
}
v = swp;
if (v == 0x40000000)
@ -32,11 +32,11 @@ P_Lock P_LockSpinE(P_Mutex *m, i32 spin)
if (!locked && (v & 0xC0000000) == 0)
{
/* Lock has shared lockers and no pending waiter, set pending bit */
u32 swp = atomic32_fetch_test_set(&m->v, v, v | 0x40000000);
u32 swp = Atomic32FetchTestSet(&m->v, v, v | 0x40000000);
while (swp != v && (swp & 0xC0000000) == 0 && swp != 0)
{
v = swp;
swp = atomic32_fetch_test_set(&m->v, v, v | 0x40000000);
swp = Atomic32FetchTestSet(&m->v, v, v | 0x40000000);
}
v = swp;
}
@ -56,7 +56,7 @@ P_Lock P_LockSpinE(P_Mutex *m, i32 spin)
}
#if RtcIsEnabled
atomic32_fetch_set(&m->exclusive_fiber_id, FiberId());
Atomic32FetchSet(&m->exclusive_fiber_id, FiberId());
#endif
P_Lock lock = ZI;
@ -73,11 +73,11 @@ P_Lock P_LockSpinS(P_Mutex *m, i32 spin)
while (!locked)
{
++spin_cnt;
u32 v = atomic32_fetch(&m->v);
u32 v = Atomic32Fetch(&m->v);
while (!locked && (v & 0xC0000000) == 0)
{
/* Lock has no exclusive or pending exclusive lock, increment shared count */
u32 swp = atomic32_fetch_test_set(&m->v, v, v + 1);
u32 swp = Atomic32FetchTestSet(&m->v, v, v + 1);
if (v == swp)
{
locked = 1;
@ -123,13 +123,13 @@ void P_Unlock(P_Lock *l)
if (l->exclusive)
{
#if RtcIsEnabled
atomic32_fetch_set(&m->exclusive_fiber_id, 0);
Atomic32FetchSet(&m->exclusive_fiber_id, 0);
#endif
atomic32_fetch_set(&m->v, 0);
Atomic32FetchSet(&m->v, 0);
}
else
{
atomic32_fetch_add(&m->v, -1);
Atomic32FetchAdd(&m->v, -1);
}
P_Wake(&m->v, I32Max);
MEMZERO_STRUCT(l);
@ -145,7 +145,7 @@ void P_WaitOnCv(P_Cv *cv, P_Lock *l)
void P_WaitOnCvTime(P_Cv *cv, P_Lock *l, i64 timeout_ns)
{
u64 old_wake_gen = atomic64_fetch(&cv->wake_gen);
u64 old_wake_gen = Atomic64Fetch(&cv->wake_gen);
P_Mutex *mutex = l->mutex;
b32 exclusive = l->exclusive;
{
@ -166,7 +166,7 @@ void P_WaitOnCvTime(P_Cv *cv, P_Lock *l, i64 timeout_ns)
void P_SignalCv(P_Cv *cv, i32 count)
{
atomic64_fetch_add(&cv->wake_gen, 1);
Atomic64FetchAdd(&cv->wake_gen, 1);
P_Wake(&cv->wake_gen, count);
}
@ -175,7 +175,7 @@ void P_SignalCv(P_Cv *cv, i32 count)
void P_CounterAdd(P_Counter *counter, i64 x)
{
i64 old_v = atomic64_fetch_add(&counter->v, x);
i64 old_v = Atomic64FetchAdd(&counter->v, x);
i64 new_v = old_v + x;
if (old_v > 0 && new_v <= 0)
{
@ -185,10 +185,10 @@ void P_CounterAdd(P_Counter *counter, i64 x)
void P_WaitOnCounter(P_Counter *counter)
{
i64 v = atomic64_fetch(&counter->v);
i64 v = Atomic64Fetch(&counter->v);
while (v > 0)
{
P_Wait(&counter->v, &v, sizeof(v), I64Max);
v = atomic64_fetch(&counter->v);
v = Atomic64Fetch(&counter->v);
}
}

View File

@ -19,8 +19,8 @@ P_W32_SharedCtx P_W32_shared_ctx = ZI;
void P_W32_LockTicketMutex(P_W32_TicketMutex *tm)
{
i64 ticket = atomic64_fetch_add(&tm->ticket.v, 1);
while (atomic64_fetch(&tm->serving.v) != ticket)
i64 ticket = Atomic64FetchAdd(&tm->ticket.v, 1);
while (Atomic64Fetch(&tm->serving.v) != ticket)
{
ix_pause();
}
@ -28,7 +28,7 @@ void P_W32_LockTicketMutex(P_W32_TicketMutex *tm)
void P_W32_UnlockTicketMutex(P_W32_TicketMutex *tm)
{
atomic64_fetch_add(&tm->serving.v, 1);
Atomic64FetchAdd(&tm->serving.v, 1);
}
////////////////////////////////
@ -341,7 +341,7 @@ void P_W32_WakeLockedFibers(i32 num_fibers, P_W32_Fiber **fibers)
fiber->next_time_waiter = 0;
}
/* Unlock fiber */
atomic32_fetch_set(&fiber->wake_lock, 0);
Atomic32FetchSet(&fiber->wake_lock, 0);
}
/* Unlock wait bins */
if (wait_time_bin != 0) P_W32_UnlockTicketMutex(&wait_time_bin->lock);
@ -401,7 +401,7 @@ void P_W32_WakeLockedFibers(i32 num_fibers, P_W32_Fiber **fibers)
P_W32_JobPool *pool = &g->job_pools[pool_kind];
P_W32_LockTicketMutex(&pool->workers_wake_lock);
{
atomic64_fetch_add(&pool->num_jobs_in_queue.v, job_count);
Atomic64FetchAdd(&pool->num_jobs_in_queue.v, job_count);
if (job_count >= P_W32_WakeAllThreshold)
{
WakeByAddressAll(&pool->num_jobs_in_queue);
@ -450,7 +450,7 @@ void P_W32_WakeByAddress(void *addr, i32 count)
fibers = PushArrayNoZero(scratch.arena, P_W32_Fiber *, wait_addr_list->num_waiters);
for (P_W32_Fiber *fiber = P_W32_FiberFromId(wait_addr_list->first_waiter); fiber && num_fibers < count; fiber = P_W32_FiberFromId(fiber->next_addr_waiter))
{
if (atomic32_fetch_test_set(&fiber->wake_lock, 0, 1) == 0)
if (Atomic32FetchTestSet(&fiber->wake_lock, 0, 1) == 0)
{
fibers[num_fibers] = fiber;
++num_fibers;
@ -512,7 +512,7 @@ void P_W32_WakeByTime(u64 time)
fibers = PushArrayNoZero(scratch.arena, P_W32_Fiber *, wait_time_list->num_waiters);
for (P_W32_Fiber *fiber = P_W32_FiberFromId(wait_time_list->first_waiter); fiber; fiber = P_W32_FiberFromId(fiber->next_time_waiter))
{
if (atomic32_fetch_test_set(&fiber->wake_lock, 0, 1) == 0)
if (Atomic32FetchTestSet(&fiber->wake_lock, 0, 1) == 0)
{
fibers[num_fibers] = fiber;
++num_fibers;
@ -557,7 +557,7 @@ P_W32_Fiber *P_W32_AllocFiber(P_W32_JobPool *pool)
{
{
fiber_id = g->num_fibers++;
if (fiber_id >= MAX_FIBERS)
if (fiber_id >= MaxFibers)
{
P_Panic(LIT("Max fibers reached"));
}
@ -809,7 +809,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
if (job_id < info->count)
{
/* Pick job */
atomic64_fetch_add(&pool->num_jobs_in_queue.v, -1);
Atomic64FetchAdd(&pool->num_jobs_in_queue.v, -1);
job_priority = priority;
job_func = info->func;
job_sig = info->sig;
@ -824,7 +824,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
else
{
/* This job is to be resumed from a yield */
atomic64_fetch_add(&pool->num_jobs_in_queue.v, -1);
Atomic64FetchAdd(&pool->num_jobs_in_queue.v, -1);
job_fiber_id = info->fiber_id;
job_priority = priority;
job_id = info->num_dispatched;
@ -905,8 +905,8 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
i64 wait_time = 0;
if (wait_timeout_ns > 0 && wait_timeout_ns < I64Max)
{
u64 current_scheduler_cycle = atomic64_fetch(&g->current_scheduler_cycle.v);
i64 current_scheduler_cycle_period_ns = atomic64_fetch(&g->current_scheduler_cycle_period_ns.v);
u64 current_scheduler_cycle = Atomic64Fetch(&g->current_scheduler_cycle.v);
i64 current_scheduler_cycle_period_ns = Atomic64Fetch(&g->current_scheduler_cycle_period_ns.v);
wait_time = current_scheduler_cycle + max_i64((i64)((f64)wait_timeout_ns / (f64)current_scheduler_cycle_period_ns), 1);
}
@ -933,7 +933,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
}
if (wait_time != 0 && !cancel_wait)
{
cancel_wait = wait_time <= atomic64_fetch(&g->current_scheduler_cycle.v);
cancel_wait = wait_time <= Atomic64Fetch(&g->current_scheduler_cycle.v);
}
if (!cancel_wait)
{
@ -1066,15 +1066,15 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
}
/* Wait for job */
i64 num_jobs_in_queue = atomic64_fetch(&pool->num_jobs_in_queue.v);
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
i64 num_jobs_in_queue = Atomic64Fetch(&pool->num_jobs_in_queue.v);
shutdown = Atomic32Fetch(&pool->workers_shutdown.v);
if (num_jobs_in_queue <= 0 && !shutdown)
{
//__profnc("Wait for job", Rgb32F(0.75, 0.75, 0));
P_W32_LockTicketMutex(&pool->workers_wake_lock);
{
num_jobs_in_queue = atomic64_fetch(&pool->num_jobs_in_queue.v);
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
num_jobs_in_queue = Atomic64Fetch(&pool->num_jobs_in_queue.v);
shutdown = Atomic32Fetch(&pool->workers_shutdown.v);
while (num_jobs_in_queue <= 0 && !shutdown)
{
{
@ -1082,8 +1082,8 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
WaitOnAddress(&pool->num_jobs_in_queue, &num_jobs_in_queue, sizeof(num_jobs_in_queue), INFINITE);
P_W32_LockTicketMutex(&pool->workers_wake_lock);
}
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
num_jobs_in_queue = atomic64_fetch(&pool->num_jobs_in_queue.v);
shutdown = Atomic32Fetch(&pool->workers_shutdown.v);
num_jobs_in_queue = Atomic64Fetch(&pool->num_jobs_in_queue.v);
}
}
P_W32_UnlockTicketMutex(&pool->workers_wake_lock);
@ -1128,7 +1128,7 @@ P_W32_ThreadDef(P_W32_JobSchedulerEntryFunc, _)
}
i64 last_cycle_ns = 0;
while (!atomic32_fetch(&g->shutdown))
while (!Atomic32Fetch(&g->shutdown))
{
__profn("Job scheduler cycle");
{
@ -1161,12 +1161,12 @@ P_W32_ThreadDef(P_W32_JobSchedulerEntryFunc, _)
periods_sum_ns += (f64)periods[i];
}
f64 mean_ns = periods_sum_ns / (f64)countof(periods);
atomic64_fetch_set(&g->current_scheduler_cycle_period_ns.v, math_round_to_int64(mean_ns));
Atomic64FetchSet(&g->current_scheduler_cycle_period_ns.v, math_round_to_int64(mean_ns));
}
{
__profn("Job scheduler run");
i64 current_cycle = atomic64_fetch_add(&g->current_scheduler_cycle.v, 1) + 1;
i64 current_cycle = Atomic64FetchAdd(&g->current_scheduler_cycle.v, 1) + 1;
P_W32_WakeByTime((u64)current_cycle);
}
}
@ -1261,7 +1261,7 @@ P_W32_Window *P_W32_AllocWindow(void)
void P_W32_ReleaseWindow(P_W32_Window *window)
{
/* Stop window threads */
atomic32_fetch_set(&window->shutdown, 1);
Atomic32FetchSet(&window->shutdown, 1);
P_W32_SharedCtx *g = &P_W32_shared_ctx;
P_W32_WakeWindow(window);
P_W32_WaitReleaseThread(window->window_thread);
@ -1470,7 +1470,7 @@ P_W32_ThreadDef(P_W32_WindowThreadEntryFunc, arg)
BringWindowToTop(window->hwnd);
P_CounterAdd(&window->ready_fence, -1);
while (!atomic32_fetch(&window->shutdown))
while (!Atomic32Fetch(&window->shutdown))
{
MSG msg = ZI;
{
@ -1478,7 +1478,7 @@ P_W32_ThreadDef(P_W32_WindowThreadEntryFunc, arg)
}
{
__profn("Process window message");
if (!atomic32_fetch(&window->shutdown))
if (!Atomic32Fetch(&window->shutdown))
{
TranslateMessage(&msg);
DispatchMessageW(&msg);
@ -1570,7 +1570,7 @@ LRESULT CALLBACK P_W32_Win32WindowProc(HWND hwnd, UINT msg, WPARAM wparam, LPARA
/* Update always on top */
{
u32 toggles = atomic32_fetch_set(&window->topmost_toggles, 0);
u32 toggles = Atomic32FetchSet(&window->topmost_toggles, 0);
if (toggles % 2 != 0)
{
b32 new_topmost = !window->is_topmost;
@ -2035,7 +2035,7 @@ void P_Run(i32 count, P_JobFunc *func, void *sig, P_Pool pool_kind, P_Priority p
{
P_W32_LockTicketMutex(&pool->workers_wake_lock);
{
atomic64_fetch_add(&pool->num_jobs_in_queue.v, count);
Atomic64FetchAdd(&pool->num_jobs_in_queue.v, count);
if (count >= P_W32_WakeAllThreshold)
{
WakeByAddressAll(&pool->num_jobs_in_queue);
@ -2740,7 +2740,7 @@ void P_DisableWindoweCursorClip(P_Window *p_window)
void P_ToggleWindowTopmost(P_Window *p_window)
{
P_W32_Window *window = (P_W32_Window *)p_window;
atomic32_fetch_add(&window->topmost_toggles, 1);
Atomic32FetchAdd(&window->topmost_toggles, 1);
P_W32_WakeWindow(window);
}
@ -3036,7 +3036,7 @@ P_SockReadResult P_ReadSock(Arena *arena, P_Sock *sock)
res.address = P_W32_PlatformAddressFromWin32Address(ws_addr);
if (size >= 0)
{
gstat_add(GSTAT_SOCK_BYTES_RECEIVED, size);
AddGstat(GSTAT_SOCK_BYTES_RECEIVED, size);
res.data.text = read_buff.text;
res.data.len = size;
res.valid = 1;
@ -3065,7 +3065,7 @@ void P_WriteSock(P_Sock *sock, P_Address address, String data)
i32 size = sendto(ws->sock, (char *)data.text, data.len, 0, &ws_addr.sa, ws_addr.size);
if (size > 0)
{
gstat_add(GSTAT_SOCK_BYTES_SENT, size);
AddGstat(GSTAT_SOCK_BYTES_SENT, size);
}
#if RtcIsEnabled
if (size != (i32)data.len)
@ -3172,7 +3172,7 @@ u32 P_GetThreadId(void)
i64 P_GetCurrentSchedulerPeriodNs(void)
{
P_W32_SharedCtx *g = &P_W32_shared_ctx;
return atomic64_fetch(&g->current_scheduler_cycle_period_ns.v);
return Atomic64Fetch(&g->current_scheduler_cycle_period_ns.v);
}
////////////////////////////////
@ -3228,7 +3228,7 @@ void P_SleepFrame(i64 last_frame_time_ns, i64 target_dt_ns)
void P_OnExit(P_ExitFunc *func)
{
P_W32_SharedCtx *g = &P_W32_shared_ctx;
i32 index = atomic32_fetch_add(&g->num_exit_funcs, 1);
i32 index = Atomic32FetchAdd(&g->num_exit_funcs, 1);
if (index >= P_W32_MaxOnExitFuncs)
{
P_Panic(LIT("Maximum on exit functions registered"));
@ -3245,7 +3245,7 @@ void P_Exit(void)
void P_Panic(String msg)
{
P_W32_SharedCtx *g = &P_W32_shared_ctx;
if (atomic32_fetch_test_set(&g->panicking, 0, 1) == 0)
if (Atomic32FetchTestSet(&g->panicking, 0, 1) == 0)
{
log_panic(msg);
@ -3365,7 +3365,7 @@ P_JobDef(P_W32_AppShutdownJob, _)
__prof;
P_W32_SharedCtx *g = &P_W32_shared_ctx;
(UNUSED)_;
i32 num_funcs = atomic32_fetch(&g->num_exit_funcs);
i32 num_funcs = Atomic32Fetch(&g->num_exit_funcs);
for (i32 i = num_funcs - 1; i >= 0; --i)
{
P_ExitFunc *func = g->exit_funcs[i];
@ -3536,7 +3536,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
}
/* Register raw input */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
RAWINPUTDEVICE rid = (RAWINPUTDEVICE) {
.usUsagePage = 0x01, /* HID_USAGE_PAGE_GENERIC */
@ -3563,7 +3563,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
g->socks_arena = AllocArena(Gibi(64));
/* Start job scheduler */
atomic64_fetch_set(&g->current_scheduler_cycle_period_ns.v, P_W32_DefaultSchedulerPeriodNs);
Atomic64FetchSet(&g->current_scheduler_cycle_period_ns.v, P_W32_DefaultSchedulerPeriodNs);
P_W32_Thread *scheduler_thread = P_W32_AllocThread(P_W32_JobSchedulerEntryFunc, 0, LIT("Scheduler thread"), PROF_THREAD_GROUP_SCHEDULER);
//- Start job workers
@ -3635,13 +3635,13 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
//- App startup
/* Run app start job */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
P_Run(1, P_W32_AppStartupJob, 0, P_Pool_Floating, P_Priority_High, 0);
}
/* Wait for startup end or panic */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
HANDLE handles[] = {
g->startup_end_event,
@ -3651,7 +3651,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
}
/* Wait for exit start or panic */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
HANDLE handles[] = {
g->exit_begin_event,
@ -3663,13 +3663,13 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
//- App shutdown
/* Run exit callbacks job */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
P_Run(1, P_W32_AppShutdownJob, 0, P_Pool_Floating, P_Priority_High, 0);
}
/* Wait for exit end or panic */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
HANDLE handles[] = {
g->exit_end_event,
@ -3679,16 +3679,16 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
}
/* Signal shutdown */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
atomic32_fetch_set(&g->shutdown, 1);
Atomic32FetchSet(&g->shutdown, 1);
for (P_Pool pool_kind = 0; pool_kind < (i32)countof(g->job_pools); ++pool_kind)
{
P_W32_JobPool *pool = &g->job_pools[pool_kind];
P_W32_LockTicketMutex(&pool->workers_wake_lock);
{
atomic32_fetch_set(&pool->workers_shutdown.v, 1);
atomic64_fetch_set(&pool->num_jobs_in_queue.v, -100000);
Atomic32FetchSet(&pool->workers_shutdown.v, 1);
Atomic64FetchSet(&pool->num_jobs_in_queue.v, -100000);
WakeByAddressAll(&pool->num_jobs_in_queue);
}
P_W32_UnlockTicketMutex(&pool->workers_wake_lock);
@ -3696,7 +3696,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
}
/* Wait on worker threads */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
for (P_Pool pool_kind = 0; pool_kind < (i32)countof(g->job_pools); ++pool_kind)
{
@ -3710,13 +3710,13 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
}
/* Wait on scheduler thread */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
P_W32_WaitReleaseThread(scheduler_thread);
}
/* Find any dangling threads that haven't exited gracefully by now */
if (!atomic32_fetch(&g->panicking))
if (!Atomic32Fetch(&g->panicking))
{
P_Lock lock = P_LockS(&g->threads_mutex);
if (g->first_thread)
@ -3740,7 +3740,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Exit */
i32 exit_code = 0;
if (atomic32_fetch(&g->panicking))
if (Atomic32Fetch(&g->panicking))
{
WaitForSingleObject(g->panic_event, INFINITE);
MessageBoxExW(0, g->panic_wstr, L"Fatal error", MB_ICONSTOP | MB_SETFOREGROUND | MB_TOPMOST, 0);

View File

@ -350,7 +350,7 @@ Struct(P_W32_SharedCtx)
P_W32_TicketMutex fibers_lock;
i16 num_fibers;
Arena *fiber_names_arena;
P_W32_Fiber fibers[MAX_FIBERS];
P_W32_Fiber fibers[MaxFibers];
//- Wait lists
Atomic64Padded waiter_wake_gen;

View File

@ -61,7 +61,7 @@ PB_StartupReceipt playback_startup(M_StartupReceipt *mixer_sr)
internal P_ExitFuncDef(playback_shutdown)
{
__prof;
atomic32_fetch_set(&G.shutdown, 1);
Atomic32FetchSet(&G.shutdown, 1);
P_WaitOnCounter(&G.playback_job_counter);
}
@ -223,7 +223,7 @@ internal P_JobDef(playback_job, _)
/* FIXME: If playback fails at any point and mixer stops advancing, we
* need to halt mixer to prevent memory leak when sounds are played. */
/* TODO: Signal counter that running job waits on, rather than scheduling job manually */
while (!atomic32_fetch(&G.shutdown)) {
while (!Atomic32Fetch(&G.shutdown)) {
TempArena scratch = BeginScratchNoConflict();
{
__profn("Wasapi wait");

View File

@ -319,7 +319,7 @@ internal void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
TempArena scratch = BeginScratchNoConflict();
struct cache_entry *e = ref.e;
atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING);
Atomic32FetchSet(&e->state, CACHE_ENTRY_STATE_WORKING);
String path = tag.path;
P_LogInfoF("Loading sprite texture [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path));
@ -361,7 +361,7 @@ internal void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
}
SetArenaReadonly(e->arena);
e->memory_usage = e->arena->committed + memory_size;
atomic64_fetch_add(&G.cache.memory_usage.v, e->memory_usage);
Atomic64FetchAdd(&G.cache.memory_usage.v, e->memory_usage);
if (success) {
P_LogSuccessF("Loaded sprite texture [%F] \"%F\" in %F seconds (cache size: %F bytes).",
@ -371,7 +371,7 @@ internal void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
FMT_UINT(e->memory_usage));
}
atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED);
Atomic32FetchSet(&e->state, CACHE_ENTRY_STATE_LOADED);
#if RESOURCE_RELOADING
struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
@ -379,7 +379,7 @@ internal void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
{
for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
if (old_entry != e && old_entry->hash.v == e->hash.v) {
atomic32_fetch_set(&old_entry->out_of_date, 1);
Atomic32FetchSet(&old_entry->out_of_date, 1);
}
}
e->load_time_ns = P_TimeNs();
@ -642,7 +642,7 @@ internal void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
TempArena scratch = BeginScratchNoConflict();
struct cache_entry *e = ref.e;
atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING);
Atomic32FetchSet(&e->state, CACHE_ENTRY_STATE_WORKING);
String path = tag.path;
P_LogInfoF("Loading sprite sheet [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path));
@ -682,7 +682,7 @@ internal void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
}
SetArenaReadonly(e->arena);
e->memory_usage = e->arena->committed;
atomic64_fetch_add(&G.cache.memory_usage.v, e->memory_usage);
Atomic64FetchAdd(&G.cache.memory_usage.v, e->memory_usage);
if (success) {
P_LogSuccessF("Loaded sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).",
@ -692,7 +692,7 @@ internal void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
FMT_UINT(e->memory_usage));
}
atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED);
Atomic32FetchSet(&e->state, CACHE_ENTRY_STATE_LOADED);
#if RESOURCE_RELOADING
struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
@ -700,7 +700,7 @@ internal void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
{
for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
if (old_entry != e && old_entry->hash.v == e->hash.v) {
atomic32_fetch_set(&old_entry->out_of_date, 1);
Atomic32FetchSet(&old_entry->out_of_date, 1);
}
}
e->load_time_ns = P_TimeNs();
@ -717,14 +717,14 @@ internal void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
internal void refcount_add(struct cache_entry *e, i32 amount)
{
i32 evictor_cycle = atomic32_fetch(&G.evictor_cycle.v);
i32 evictor_cycle = Atomic32Fetch(&G.evictor_cycle.v);
Atomic64 *refcount_atomic = &e->refcount_struct.v;
u64 old_refcount_uncast = atomic64_fetch(refcount_atomic);
u64 old_refcount_uncast = Atomic64Fetch(refcount_atomic);
for (;;) {
struct cache_refcount new_refcount = *(struct cache_refcount *)&old_refcount_uncast;
new_refcount.count += amount;
new_refcount.last_ref_cycle = evictor_cycle;
u64 v = atomic64_fetch_test_set(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
u64 v = Atomic64FetchTestSet(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
if (v == old_refcount_uncast) {
Assert(new_refcount.count >= 0);
break;
@ -845,7 +845,7 @@ internal struct sprite_scope_cache_ref *cache_lookup(S_Scope *scope, struct cach
enum cache_entry_state match_state = CACHE_ENTRY_STATE_NONE;
for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) {
if (entry->hash.v == hash.v) {
enum cache_entry_state entry_state = atomic32_fetch(&entry->state);
enum cache_entry_state entry_state = Atomic32Fetch(&entry->state);
if (!match || entry_state > match_state || (entry_state == CACHE_ENTRY_STATE_LOADED && match_state == CACHE_ENTRY_STATE_LOADED && entry->load_time_ns > match->load_time_ns)) {
match = entry;
match_state = entry_state;
@ -959,7 +959,7 @@ internal void *data_from_tag_internal(S_Scope *scope, S_Tag tag, enum cache_entr
struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind, 0);
struct cache_ref ref = scope_ref->ref;
enum cache_entry_state state = atomic32_fetch(&ref.e->state);
enum cache_entry_state state = Atomic32Fetch(&ref.e->state);
if (state == CACHE_ENTRY_STATE_LOADED) {
switch (kind) {
case CACHE_ENTRY_KIND_TEXTURE: { res = ref.e->texture; } break;
@ -968,7 +968,7 @@ internal void *data_from_tag_internal(S_Scope *scope, S_Tag tag, enum cache_entr
}
} else if (state == CACHE_ENTRY_STATE_NONE) {
/* If entry is new, load texture */
if (atomic32_fetch_test_set(&ref.e->state, CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) {
if (Atomic32FetchTestSet(&ref.e->state, CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) {
/* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */
if (await) {
switch (kind) {
@ -991,7 +991,7 @@ internal void *data_from_tag_internal(S_Scope *scope, S_Tag tag, enum cache_entr
/* Spinlock until result is ready */
if (await && state != CACHE_ENTRY_STATE_LOADED) {
while (atomic32_fetch(&ref.e->state) != CACHE_ENTRY_STATE_LOADED) {
while (Atomic32Fetch(&ref.e->state) != CACHE_ENTRY_STATE_LOADED) {
ix_pause();
}
}
@ -1219,10 +1219,10 @@ internal P_JobDef(sprite_evictor_job, _)
u64 evict_array_count = 0;
struct evict_node *evict_array = PushDry(scratch.arena, struct evict_node);
{
i32 cur_cycle = atomic32_fetch(&G.evictor_cycle.v);
i32 cur_cycle = Atomic32Fetch(&G.evictor_cycle.v);
/* Scan for evictable nodes */
b32 cache_over_budget_threshold = atomic64_fetch(&G.cache.memory_usage.v) > (i64)CACHE_MEMORY_BUDGET_THRESHOLD;
b32 cache_over_budget_threshold = Atomic64Fetch(&G.cache.memory_usage.v) > (i64)CACHE_MEMORY_BUDGET_THRESHOLD;
if (cache_over_budget_threshold || RESOURCE_RELOADING) {
__profn("Evictor scan");
for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) {
@ -1231,12 +1231,12 @@ internal P_JobDef(sprite_evictor_job, _)
{
struct cache_entry *n = bin->first;
while (n) {
u64 refcount_uncast = atomic64_fetch(&n->refcount_struct.v);
u64 refcount_uncast = Atomic64Fetch(&n->refcount_struct.v);
struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast;
if (refcount.count <= 0) {
/* Add node to evict list */
#if RESOURCE_RELOADING
b32 is_out_of_date = atomic32_fetch(&n->out_of_date);
b32 is_out_of_date = Atomic32Fetch(&n->out_of_date);
#else
b32 is_out_of_date = 0;
#endif
@ -1279,10 +1279,10 @@ internal P_JobDef(sprite_evictor_job, _)
struct cache_bin *bin = en->cache_bin;
struct cache_entry *entry = en->cache_entry;
i32 last_ref_cycle = en->last_ref_cycle;
b32 cache_over_budget_target = atomic64_fetch(&G.cache.memory_usage.v) > (i64)CACHE_MEMORY_BUDGET_TARGET;
b32 cache_over_budget_target = Atomic64Fetch(&G.cache.memory_usage.v) > (i64)CACHE_MEMORY_BUDGET_TARGET;
P_Lock bin_lock = P_LockE(&bin->mutex);
{
u64 refcount_uncast = atomic64_fetch(&entry->refcount_struct.v);
u64 refcount_uncast = Atomic64Fetch(&entry->refcount_struct.v);
struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast;
if (refcount.count > 0 || (last_ref_cycle >= 0 && refcount.last_ref_cycle != en->last_ref_cycle)) {
/* Cache node has been referenced since scan, skip node. */
@ -1301,7 +1301,7 @@ internal P_JobDef(sprite_evictor_job, _)
bin->last = prev;
}
atomic64_fetch_add(&G.cache.memory_usage.v, -((i64)entry->memory_usage));
Atomic64FetchAdd(&G.cache.memory_usage.v, -((i64)entry->memory_usage));
/* Add to evicted list */
en->next_evicted = first_evicted;
@ -1341,7 +1341,7 @@ internal P_JobDef(sprite_evictor_job, _)
}
}
}
atomic32_fetch_add(&G.evictor_cycle.v, 1);
Atomic32FetchAdd(&G.evictor_cycle.v, 1);
EndScratch(scratch);
}

View File

@ -198,7 +198,7 @@ struct user_startup_receipt user_startup(F_StartupReceipt *font_sr,
(UNUSED)host_sr;
(UNUSED)sim_sr;
gstat_set(GSTAT_DEBUG_STEPS, U64Max);
SetGstat(GSTAT_DEBUG_STEPS, U64Max);
G.arena = AllocArena(Gibi(64));
G.real_time_ns = P_TimeNs();
@ -243,7 +243,7 @@ struct user_startup_receipt user_startup(F_StartupReceipt *font_sr,
internal P_ExitFuncDef(user_shutdown)
{
__prof;
atomic32_fetch_set(&G.shutdown, 1);
Atomic32FetchSet(&G.shutdown, 1);
P_WaitOnCounter(&G.shutdown_job_counters);
P_ReleaseWindow(G.window);
}
@ -600,13 +600,13 @@ internal void user_update(P_Window *window)
Snapshot *newest_snapshot = sim_snapshot_from_tick(G.user_unblended_client, G.user_unblended_client->last_tick);
G.local_sim_last_known_time_ns = newest_snapshot->sim_time_ns;
G.local_sim_last_known_tick = newest_snapshot->tick;
if (atomic32_fetch(&G.user_paused)) {
if (Atomic32Fetch(&G.user_paused)) {
G.local_sim_predicted_time_ns = G.local_sim_last_known_tick;
} else {
G.local_sim_predicted_time_ns = newest_snapshot->sim_time_ns + (newest_snapshot->sim_dt_ns * tick_progress);
}
if (USER_INTERP_ENABLED && !atomic32_fetch(&G.user_paused)) {
if (USER_INTERP_ENABLED && !Atomic32Fetch(&G.user_paused)) {
/* Determine render time */
G.render_time_target_ns = G.local_sim_predicted_time_ns - (USER_INTERP_RATIO * G.average_local_to_user_snapshot_publish_dt_ns);
if (G.average_local_to_user_snapshot_publish_dt_ns > 0) {
@ -1842,9 +1842,9 @@ internal void user_update(P_Window *window)
}
if (pause_state.num_presses) {
atomic32_fetch_xor(&G.user_paused, 1);
Atomic32FetchXor(&G.user_paused, 1);
}
atomic32_fetch_add(&G.user_paused_steps, step_state.num_presses_and_repeats);
Atomic32FetchAdd(&G.user_paused_steps, step_state.num_presses_and_repeats);
/* Set user sim control */
{
@ -1868,13 +1868,13 @@ internal void user_update(P_Window *window)
/* Gjk steps */
{
if (G.bind_states[USER_BIND_KIND_RESET_DEBUG_STEPS].num_presses_and_repeats > 0) {
gstat_set(GSTAT_DEBUG_STEPS, 0);
SetGstat(GSTAT_DEBUG_STEPS, 0);
}
i32 add_steps = 0;
add_steps += G.bind_states[USER_BIND_KIND_INCR_DEBUG_STEPS].num_presses_and_repeats;
add_steps -= G.bind_states[USER_BIND_KIND_DECR_DEBUG_STEPS].num_presses_and_repeats;
if (add_steps != 0) {
gstat_add(GSTAT_DEBUG_STEPS, add_steps);
AddGstat(GSTAT_DEBUG_STEPS, add_steps);
}
}
#endif
@ -1883,8 +1883,8 @@ internal void user_update(P_Window *window)
{
/* Update network usage stats */
i64 stat_now_ns = P_TimeNs();
G.net_bytes_read.last_second_end = gstat_get(GSTAT_SOCK_BYTES_RECEIVED);
G.net_bytes_sent.last_second_end = gstat_get(GSTAT_SOCK_BYTES_SENT);
G.net_bytes_read.last_second_end = GetGstat(GSTAT_SOCK_BYTES_RECEIVED);
G.net_bytes_sent.last_second_end = GetGstat(GSTAT_SOCK_BYTES_SENT);
if (stat_now_ns - G.last_second_reset_ns > NsFromSeconds(1)) {
G.last_second_reset_ns = stat_now_ns;
G.net_bytes_read.last_second = G.net_bytes_read.last_second_end - G.net_bytes_read.last_second_start;
@ -2001,13 +2001,13 @@ internal void user_update(P_Window *window)
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("Memory committed: %F MiB"), FMT_FLOAT((f64)gstat_get(GSTAT_MEMORY_COMMITTED) / 1024 / 1024)).len;
text.len += string_format(temp.arena, LIT("Memory committed: %F MiB"), FMT_FLOAT((f64)GetGstat(GSTAT_MEMORY_COMMITTED) / 1024 / 1024)).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("Virtual memory reserved: %F TiB"), FMT_FLOAT((f64)gstat_get(GSTAT_MEMORY_RESERVED) / 1024 / 1024 / 1024 / 1024)).len;
text.len += string_format(temp.arena, LIT("Virtual memory reserved: %F TiB"), FMT_FLOAT((f64)GetGstat(GSTAT_MEMORY_RESERVED) / 1024 / 1024 / 1024 / 1024)).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("Arenas allocated: %F"), FMT_UINT(gstat_get(GSTAT_NUM_ARENAS))).len;
text.len += string_format(temp.arena, LIT("Arenas allocated: %F"), FMT_UINT(GetGstat(GSTAT_NUM_ARENAS))).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
@ -2020,7 +2020,7 @@ internal void user_update(P_Window *window)
#if RtcIsEnabled
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("Debug steps: %F"), FMT_UINT(gstat_get(GSTAT_DEBUG_STEPS))).len;
text.len += string_format(temp.arena, LIT("Debug steps: %F"), FMT_UINT(GetGstat(GSTAT_DEBUG_STEPS))).len;
//text.len += string_copy(temp.arena, LIT("\n")).len;
#endif
@ -2085,7 +2085,7 @@ internal P_JobDef(user_update_job, _)
{
(UNUSED)_;
i64 time_ns = P_TimeNs();
while (!atomic32_fetch(&G.shutdown)) {
while (!Atomic32Fetch(&G.shutdown)) {
P_Window *window = G.window;
{
__profn("User sleep");
@ -2256,7 +2256,7 @@ internal P_JobDef(local_sim_job, _)
i64 real_dt_ns = 0;
i64 step_dt_ns = NsFromSeconds(1) / SIM_TICKS_PER_SECOND;
f64 compute_timescale = 1.0;
while (!atomic32_fetch(&G.shutdown)) {
while (!Atomic32Fetch(&G.shutdown)) {
TempArena scratch = BeginScratchNoConflict();
{
__profn("Sim sleep");
@ -2427,10 +2427,10 @@ internal P_JobDef(local_sim_job, _)
}
}
b32 should_step = !atomic32_fetch(&G.user_paused);
if (atomic32_fetch(&G.user_paused_steps) > 0) {
b32 should_step = !Atomic32Fetch(&G.user_paused);
if (Atomic32Fetch(&G.user_paused_steps) > 0) {
should_step = 1;
atomic32_fetch_add(&G.user_paused_steps, -1);
Atomic32FetchAdd(&G.user_paused_steps, -1);
}
if (!should_step) {

View File

@ -47,7 +47,7 @@ void watch_startup(void)
internal P_ExitFuncDef(watch_shutdown)
{
__prof;
atomic32_fetch_set(&G.watch_shutdown, 1);
Atomic32FetchSet(&G.watch_shutdown, 1);
{
P_Lock lock = P_LockE(&G.watch_dispatcher_mutex);
@ -81,10 +81,10 @@ internal P_JobDef(watch_monitor_job, _)
LIT(".git")
};
while (!atomic32_fetch(&G.watch_shutdown)) {
while (!Atomic32Fetch(&G.watch_shutdown)) {
TempArena temp = BeginTempArena(scratch.arena);
P_WatchInfoList info_list = P_ReadWatchWait(temp.arena, G.watch);
if (info_list.first && !atomic32_fetch(&G.watch_shutdown)) {
if (info_list.first && !Atomic32Fetch(&G.watch_shutdown)) {
P_Lock lock = P_LockE(&G.watch_dispatcher_mutex);
{
for (P_WatchInfo *info = info_list.first; info; info = info->next) {
@ -217,10 +217,10 @@ internal P_JobDef(watch_dispatcher_job, _)
/* Wait for event */
P_Lock lock = P_LockS(&G.watch_dispatcher_mutex);
{
shutdown = atomic32_fetch(&G.watch_shutdown);
shutdown = Atomic32Fetch(&G.watch_shutdown);
while (!shutdown && !G.first_watch_event) {
P_WaitOnCv(&G.watch_dispatcher_cv, &lock);
shutdown = atomic32_fetch(&G.watch_shutdown);
shutdown = Atomic32Fetch(&G.watch_shutdown);
}
}
P_Unlock(&lock);